problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens | num_tokens_diff
---|---|---|---|---|---|---|---|---
gh_patches_debug_3300 | rasdani/github-patches | git_diff | fal-ai__dbt-fal-128 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make logging of insert statements for write_to_source optional or just remove it
`cesarsantos#3303` in Discord asked for this probably to avoid logging sensible information or filling the logger with all the data there is.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/faldbt/lib.py`
Content:
```
1 # NOTE: INSPIRED IN https://github.com/dbt-labs/dbt-core/blob/43edc887f97e359b02b6317a9f91898d3d66652b/core/dbt/lib.py
2 import six
3 from datetime import datetime
4 from dataclasses import dataclass
5 from uuid import uuid4
6 from typing import List, Tuple, Union
7
8 import dbt.version
9 import dbt.semver
10 import dbt.flags as flags
11 import dbt.adapters.factory as adapters_factory
12 from dbt.config.runtime import RuntimeConfig
13 from dbt.contracts.connection import AdapterResponse
14 from dbt.contracts.graph.manifest import Manifest
15 from dbt.parser.manifest import process_node
16 from dbt.logger import GLOBAL_LOGGER as logger
17
18 from . import parse
19
20 import pandas as pd
21 from pandas.io import sql as pdsql
22
23 import sqlalchemy
24 from sqlalchemy.sql.ddl import CreateTable
25 from sqlalchemy.sql import Insert
26 from sqlalchemy.sql.schema import MetaData
27
28
29 DBT_V1 = dbt.semver.VersionSpecifier.from_version_string("1.0.0")
30 DBT_VCURRENT = dbt.version.get_installed_version()
31
32 if DBT_VCURRENT.compare(DBT_V1) >= 0:
33 from dbt.parser.sql import SqlBlockParser
34 from dbt.contracts.graph.parsed import ParsedModelNode, ParsedSourceDefinition
35 from dbt.contracts.sql import ResultTable, RemoteRunResult
36 else:
37 from faldbt.cp.parser.sql import SqlBlockParser
38 from faldbt.cp.contracts.graph.parsed import ParsedModelNode, ParsedSourceDefinition
39 from faldbt.cp.contracts.sql import ResultTable, RemoteRunResult
40
41
42 @dataclass
43 class FlagsArgs:
44 profiles_dir: str
45 use_colors: bool
46
47
48 def initialize_dbt_flags(profiles_dir: str):
49 """
50 Initializes the flags module from dbt, since it's accessed from around their code.
51 """
52 args = FlagsArgs(profiles_dir, None)
53 user_config = parse.get_dbt_user_config(profiles_dir)
54 try:
55 flags.set_from_args(args, user_config)
56 except TypeError:
57 flags.set_from_args(args)
58
59 # Set invocation id
60 if DBT_VCURRENT.compare(DBT_V1) >= 0:
61 import dbt.events.functions as events_functions
62
63 events_functions.set_invocation_id()
64
65 # Re-enable logging for 1.0.0 through old API of logger
66 # TODO: migrate for 1.0.0 code to new event system
67 if DBT_VCURRENT.compare(DBT_V1) >= 0:
68 flags.ENABLE_LEGACY_LOGGER = "1"
69 if logger.disabled:
70 logger.enable()
71
72
73 def register_adapters(config: RuntimeConfig):
74 # Clear previously registered adapters. This fixes cacheing behavior on the dbt-server
75 adapters_factory.reset_adapters()
76 # Load the relevant adapter
77 adapters_factory.register_adapter(config)
78
79
80 def _get_operation_node(manifest: Manifest, project_path, profiles_dir, sql):
81
82 config = parse.get_dbt_config(project_path, profiles_dir)
83 block_parser = SqlBlockParser(
84 project=config,
85 manifest=manifest,
86 root_project=config,
87 )
88
89 # NOTE: nodes get registered to the manifest automatically,
90 # HACK: we need to include uniqueness (UUID4) to avoid clashes
91 name = "SQL:" + str(hash(sql)) + ":" + str(uuid4())
92 sql_node = block_parser.parse_remote(sql, name)
93 process_node(config, manifest, sql_node)
94 return sql_node
95
96
97 # NOTE: Once we get an adapter, we must call `connection_for` or `connection_named` to use it
98 def _get_adapter(project_path: str, profiles_dir: str):
99 config = parse.get_dbt_config(project_path, profiles_dir)
100
101 adapters_factory.cleanup_connections()
102 return adapters_factory.get_adapter(config)
103
104
105 def _execute_sql(
106 manifest: Manifest, project_path: str, profiles_dir: str, sql: str
107 ) -> Tuple[AdapterResponse, RemoteRunResult]:
108 node = _get_operation_node(manifest, project_path, profiles_dir, sql)
109 adapter = _get_adapter(project_path, profiles_dir)
110
111 logger.info("Running query\n{}", sql)
112
113 result = None
114 with adapter.connection_for(node):
115 adapter.connections.begin()
116 response, execute_result = adapter.execute(sql, fetch=True)
117
118 table = ResultTable(
119 column_names=list(execute_result.column_names),
120 rows=[list(row) for row in execute_result],
121 )
122
123 result = RemoteRunResult(
124 raw_sql=sql,
125 compiled_sql=sql,
126 node=node,
127 table=table,
128 timing=[],
129 logs=[],
130 generated_at=datetime.utcnow(),
131 )
132 adapter.connections.commit()
133
134 return response, result
135
136
137 def _get_target_relation(
138 target: Union[ParsedModelNode, ParsedSourceDefinition],
139 project_path: str,
140 profiles_dir: str,
141 ):
142 adapter = _get_adapter(project_path, profiles_dir)
143
144 relation = None
145 with adapter.connection_named(str(uuid4())):
146 # This ROLLBACKs so it has to be a new connection
147 relation = adapter.get_relation(
148 target.database, target.schema, target.identifier
149 )
150 return relation
151
152
153 def execute_sql(
154 manifest: Manifest, project_path: str, profiles_dir: str, sql: str
155 ) -> RemoteRunResult:
156 _, result = _execute_sql(manifest, project_path, profiles_dir, sql)
157 return result
158
159
160 def fetch_target(
161 manifest: Manifest,
162 project_path: str,
163 profiles_dir: str,
164 target: Union[ParsedModelNode, ParsedSourceDefinition],
165 ) -> RemoteRunResult:
166 relation = _get_target_relation(target, project_path, profiles_dir)
167
168 if relation is None:
169 raise Exception(f"Could not get relation for '{target.unique_id}'")
170
171 query = f"SELECT * FROM {relation}"
172 _, result = _execute_sql(manifest, project_path, profiles_dir, query)
173 return result
174
175
176 def write_target(
177 data: pd.DataFrame,
178 manifest: Manifest,
179 project_path: str,
180 profiles_dir: str,
181 target: Union[ParsedModelNode, ParsedSourceDefinition],
182 ) -> RemoteRunResult:
183 adapter = _get_adapter(project_path, profiles_dir)
184
185 relation = _get_target_relation(target, project_path, profiles_dir)
186
187 engine = _alchemy_engine(adapter, target)
188 pddb = pdsql.SQLDatabase(engine, schema=target.schema)
189 pdtable = pdsql.SQLTable(target.name, pddb, data, index=False)
190 alchemy_table: sqlalchemy.Table = pdtable.table.to_metadata(pdtable.pd_sql.meta)
191
192 column_names: List[str] = list(data.columns)
193 rows = data.to_records(index=False)
194 row_dicts = list(map(lambda row: dict(zip(column_names, row)), rows))
195
196 if relation is None:
197 create_stmt = CreateTable(alchemy_table).compile(
198 bind=engine, compile_kwargs={"literal_binds": True}
199 )
200
201 _execute_sql(
202 manifest, project_path, profiles_dir, six.text_type(create_stmt).strip()
203 )
204
205 insert_stmt = (
206 Insert(alchemy_table)
207 .values(row_dicts)
208 .compile(bind=engine, compile_kwargs={"literal_binds": True})
209 )
210
211 _, result = _execute_sql(
212 manifest, project_path, profiles_dir, six.text_type(insert_stmt).strip()
213 )
214 return result
215
216
217 def _alchemy_engine(
218 adapter: adapters_factory.Adapter,
219 target: Union[ParsedModelNode, ParsedSourceDefinition],
220 ):
221 url_string = f"{adapter.type()}://"
222 if adapter.type() == "bigquery":
223 url_string = f"bigquery://{target.database}"
224 if adapter.type() == "postgres":
225 url_string = "postgresql://"
226 # TODO: add special cases as needed
227
228 def null_dump(sql, *multiparams, **params):
229 pass
230
231 return sqlalchemy.create_mock_engine(url_string, executor=null_dump)
232
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/faldbt/lib.py b/src/faldbt/lib.py
--- a/src/faldbt/lib.py
+++ b/src/faldbt/lib.py
@@ -108,7 +108,7 @@
node = _get_operation_node(manifest, project_path, profiles_dir, sql)
adapter = _get_adapter(project_path, profiles_dir)
- logger.info("Running query\n{}", sql)
+ logger.debug("Running query\n{}", sql)
result = None
with adapter.connection_for(node):
|
{"golden_diff": "diff --git a/src/faldbt/lib.py b/src/faldbt/lib.py\n--- a/src/faldbt/lib.py\n+++ b/src/faldbt/lib.py\n@@ -108,7 +108,7 @@\n node = _get_operation_node(manifest, project_path, profiles_dir, sql)\n adapter = _get_adapter(project_path, profiles_dir)\n \n- logger.info(\"Running query\\n{}\", sql)\n+ logger.debug(\"Running query\\n{}\", sql)\n \n result = None\n with adapter.connection_for(node):\n", "issue": "Make logging of insert statements for write_to_source optional or just remove it\n`cesarsantos#3303` in Discord asked for this probably to avoid logging sensible information or filling the logger with all the data there is.\n", "before_files": [{"content": "# NOTE: INSPIRED IN https://github.com/dbt-labs/dbt-core/blob/43edc887f97e359b02b6317a9f91898d3d66652b/core/dbt/lib.py\nimport six\nfrom datetime import datetime\nfrom dataclasses import dataclass\nfrom uuid import uuid4\nfrom typing import List, Tuple, Union\n\nimport dbt.version\nimport dbt.semver\nimport dbt.flags as flags\nimport dbt.adapters.factory as adapters_factory\nfrom dbt.config.runtime import RuntimeConfig\nfrom dbt.contracts.connection import AdapterResponse\nfrom dbt.contracts.graph.manifest import Manifest\nfrom dbt.parser.manifest import process_node\nfrom dbt.logger import GLOBAL_LOGGER as logger\n\nfrom . import parse\n\nimport pandas as pd\nfrom pandas.io import sql as pdsql\n\nimport sqlalchemy\nfrom sqlalchemy.sql.ddl import CreateTable\nfrom sqlalchemy.sql import Insert\nfrom sqlalchemy.sql.schema import MetaData\n\n\nDBT_V1 = dbt.semver.VersionSpecifier.from_version_string(\"1.0.0\")\nDBT_VCURRENT = dbt.version.get_installed_version()\n\nif DBT_VCURRENT.compare(DBT_V1) >= 0:\n from dbt.parser.sql import SqlBlockParser\n from dbt.contracts.graph.parsed import ParsedModelNode, ParsedSourceDefinition\n from dbt.contracts.sql import ResultTable, RemoteRunResult\nelse:\n from faldbt.cp.parser.sql import SqlBlockParser\n from faldbt.cp.contracts.graph.parsed import ParsedModelNode, ParsedSourceDefinition\n from faldbt.cp.contracts.sql import ResultTable, RemoteRunResult\n\n\n@dataclass\nclass FlagsArgs:\n profiles_dir: str\n use_colors: bool\n\n\ndef initialize_dbt_flags(profiles_dir: str):\n \"\"\"\n Initializes the flags module from dbt, since it's accessed from around their code.\n \"\"\"\n args = FlagsArgs(profiles_dir, None)\n user_config = parse.get_dbt_user_config(profiles_dir)\n try:\n flags.set_from_args(args, user_config)\n except TypeError:\n flags.set_from_args(args)\n\n # Set invocation id\n if DBT_VCURRENT.compare(DBT_V1) >= 0:\n import dbt.events.functions as events_functions\n\n events_functions.set_invocation_id()\n\n # Re-enable logging for 1.0.0 through old API of logger\n # TODO: migrate for 1.0.0 code to new event system\n if DBT_VCURRENT.compare(DBT_V1) >= 0:\n flags.ENABLE_LEGACY_LOGGER = \"1\"\n if logger.disabled:\n logger.enable()\n\n\ndef register_adapters(config: RuntimeConfig):\n # Clear previously registered adapters. 
This fixes cacheing behavior on the dbt-server\n adapters_factory.reset_adapters()\n # Load the relevant adapter\n adapters_factory.register_adapter(config)\n\n\ndef _get_operation_node(manifest: Manifest, project_path, profiles_dir, sql):\n\n config = parse.get_dbt_config(project_path, profiles_dir)\n block_parser = SqlBlockParser(\n project=config,\n manifest=manifest,\n root_project=config,\n )\n\n # NOTE: nodes get registered to the manifest automatically,\n # HACK: we need to include uniqueness (UUID4) to avoid clashes\n name = \"SQL:\" + str(hash(sql)) + \":\" + str(uuid4())\n sql_node = block_parser.parse_remote(sql, name)\n process_node(config, manifest, sql_node)\n return sql_node\n\n\n# NOTE: Once we get an adapter, we must call `connection_for` or `connection_named` to use it\ndef _get_adapter(project_path: str, profiles_dir: str):\n config = parse.get_dbt_config(project_path, profiles_dir)\n\n adapters_factory.cleanup_connections()\n return adapters_factory.get_adapter(config)\n\n\ndef _execute_sql(\n manifest: Manifest, project_path: str, profiles_dir: str, sql: str\n) -> Tuple[AdapterResponse, RemoteRunResult]:\n node = _get_operation_node(manifest, project_path, profiles_dir, sql)\n adapter = _get_adapter(project_path, profiles_dir)\n\n logger.info(\"Running query\\n{}\", sql)\n\n result = None\n with adapter.connection_for(node):\n adapter.connections.begin()\n response, execute_result = adapter.execute(sql, fetch=True)\n\n table = ResultTable(\n column_names=list(execute_result.column_names),\n rows=[list(row) for row in execute_result],\n )\n\n result = RemoteRunResult(\n raw_sql=sql,\n compiled_sql=sql,\n node=node,\n table=table,\n timing=[],\n logs=[],\n generated_at=datetime.utcnow(),\n )\n adapter.connections.commit()\n\n return response, result\n\n\ndef _get_target_relation(\n target: Union[ParsedModelNode, ParsedSourceDefinition],\n project_path: str,\n profiles_dir: str,\n):\n adapter = _get_adapter(project_path, profiles_dir)\n\n relation = None\n with adapter.connection_named(str(uuid4())):\n # This ROLLBACKs so it has to be a new connection\n relation = adapter.get_relation(\n target.database, target.schema, target.identifier\n )\n return relation\n\n\ndef execute_sql(\n manifest: Manifest, project_path: str, profiles_dir: str, sql: str\n) -> RemoteRunResult:\n _, result = _execute_sql(manifest, project_path, profiles_dir, sql)\n return result\n\n\ndef fetch_target(\n manifest: Manifest,\n project_path: str,\n profiles_dir: str,\n target: Union[ParsedModelNode, ParsedSourceDefinition],\n) -> RemoteRunResult:\n relation = _get_target_relation(target, project_path, profiles_dir)\n\n if relation is None:\n raise Exception(f\"Could not get relation for '{target.unique_id}'\")\n\n query = f\"SELECT * FROM {relation}\"\n _, result = _execute_sql(manifest, project_path, profiles_dir, query)\n return result\n\n\ndef write_target(\n data: pd.DataFrame,\n manifest: Manifest,\n project_path: str,\n profiles_dir: str,\n target: Union[ParsedModelNode, ParsedSourceDefinition],\n) -> RemoteRunResult:\n adapter = _get_adapter(project_path, profiles_dir)\n\n relation = _get_target_relation(target, project_path, profiles_dir)\n\n engine = _alchemy_engine(adapter, target)\n pddb = pdsql.SQLDatabase(engine, schema=target.schema)\n pdtable = pdsql.SQLTable(target.name, pddb, data, index=False)\n alchemy_table: sqlalchemy.Table = pdtable.table.to_metadata(pdtable.pd_sql.meta)\n\n column_names: List[str] = list(data.columns)\n rows = data.to_records(index=False)\n row_dicts = 
list(map(lambda row: dict(zip(column_names, row)), rows))\n\n if relation is None:\n create_stmt = CreateTable(alchemy_table).compile(\n bind=engine, compile_kwargs={\"literal_binds\": True}\n )\n\n _execute_sql(\n manifest, project_path, profiles_dir, six.text_type(create_stmt).strip()\n )\n\n insert_stmt = (\n Insert(alchemy_table)\n .values(row_dicts)\n .compile(bind=engine, compile_kwargs={\"literal_binds\": True})\n )\n\n _, result = _execute_sql(\n manifest, project_path, profiles_dir, six.text_type(insert_stmt).strip()\n )\n return result\n\n\ndef _alchemy_engine(\n adapter: adapters_factory.Adapter,\n target: Union[ParsedModelNode, ParsedSourceDefinition],\n):\n url_string = f\"{adapter.type()}://\"\n if adapter.type() == \"bigquery\":\n url_string = f\"bigquery://{target.database}\"\n if adapter.type() == \"postgres\":\n url_string = \"postgresql://\"\n # TODO: add special cases as needed\n\n def null_dump(sql, *multiparams, **params):\n pass\n\n return sqlalchemy.create_mock_engine(url_string, executor=null_dump)\n", "path": "src/faldbt/lib.py"}], "after_files": [{"content": "# NOTE: INSPIRED IN https://github.com/dbt-labs/dbt-core/blob/43edc887f97e359b02b6317a9f91898d3d66652b/core/dbt/lib.py\nimport six\nfrom datetime import datetime\nfrom dataclasses import dataclass\nfrom uuid import uuid4\nfrom typing import List, Tuple, Union\n\nimport dbt.version\nimport dbt.semver\nimport dbt.flags as flags\nimport dbt.adapters.factory as adapters_factory\nfrom dbt.config.runtime import RuntimeConfig\nfrom dbt.contracts.connection import AdapterResponse\nfrom dbt.contracts.graph.manifest import Manifest\nfrom dbt.parser.manifest import process_node\nfrom dbt.logger import GLOBAL_LOGGER as logger\n\nfrom . import parse\n\nimport pandas as pd\nfrom pandas.io import sql as pdsql\n\nimport sqlalchemy\nfrom sqlalchemy.sql.ddl import CreateTable\nfrom sqlalchemy.sql import Insert\nfrom sqlalchemy.sql.schema import MetaData\n\n\nDBT_V1 = dbt.semver.VersionSpecifier.from_version_string(\"1.0.0\")\nDBT_VCURRENT = dbt.version.get_installed_version()\n\nif DBT_VCURRENT.compare(DBT_V1) >= 0:\n from dbt.parser.sql import SqlBlockParser\n from dbt.contracts.graph.parsed import ParsedModelNode, ParsedSourceDefinition\n from dbt.contracts.sql import ResultTable, RemoteRunResult\nelse:\n from faldbt.cp.parser.sql import SqlBlockParser\n from faldbt.cp.contracts.graph.parsed import ParsedModelNode, ParsedSourceDefinition\n from faldbt.cp.contracts.sql import ResultTable, RemoteRunResult\n\n\n@dataclass\nclass FlagsArgs:\n profiles_dir: str\n use_colors: bool\n\n\ndef initialize_dbt_flags(profiles_dir: str):\n \"\"\"\n Initializes the flags module from dbt, since it's accessed from around their code.\n \"\"\"\n args = FlagsArgs(profiles_dir, None)\n user_config = parse.get_dbt_user_config(profiles_dir)\n try:\n flags.set_from_args(args, user_config)\n except TypeError:\n flags.set_from_args(args)\n\n # Set invocation id\n if DBT_VCURRENT.compare(DBT_V1) >= 0:\n import dbt.events.functions as events_functions\n\n events_functions.set_invocation_id()\n\n # Re-enable logging for 1.0.0 through old API of logger\n # TODO: migrate for 1.0.0 code to new event system\n if DBT_VCURRENT.compare(DBT_V1) >= 0:\n flags.ENABLE_LEGACY_LOGGER = \"1\"\n if logger.disabled:\n logger.enable()\n\n\ndef register_adapters(config: RuntimeConfig):\n # Clear previously registered adapters. 
This fixes cacheing behavior on the dbt-server\n adapters_factory.reset_adapters()\n # Load the relevant adapter\n adapters_factory.register_adapter(config)\n\n\ndef _get_operation_node(manifest: Manifest, project_path, profiles_dir, sql):\n\n config = parse.get_dbt_config(project_path, profiles_dir)\n block_parser = SqlBlockParser(\n project=config,\n manifest=manifest,\n root_project=config,\n )\n\n # NOTE: nodes get registered to the manifest automatically,\n # HACK: we need to include uniqueness (UUID4) to avoid clashes\n name = \"SQL:\" + str(hash(sql)) + \":\" + str(uuid4())\n sql_node = block_parser.parse_remote(sql, name)\n process_node(config, manifest, sql_node)\n return sql_node\n\n\n# NOTE: Once we get an adapter, we must call `connection_for` or `connection_named` to use it\ndef _get_adapter(project_path: str, profiles_dir: str):\n config = parse.get_dbt_config(project_path, profiles_dir)\n\n adapters_factory.cleanup_connections()\n return adapters_factory.get_adapter(config)\n\n\ndef _execute_sql(\n manifest: Manifest, project_path: str, profiles_dir: str, sql: str\n) -> Tuple[AdapterResponse, RemoteRunResult]:\n node = _get_operation_node(manifest, project_path, profiles_dir, sql)\n adapter = _get_adapter(project_path, profiles_dir)\n\n logger.debug(\"Running query\\n{}\", sql)\n\n result = None\n with adapter.connection_for(node):\n adapter.connections.begin()\n response, execute_result = adapter.execute(sql, fetch=True)\n\n table = ResultTable(\n column_names=list(execute_result.column_names),\n rows=[list(row) for row in execute_result],\n )\n\n result = RemoteRunResult(\n raw_sql=sql,\n compiled_sql=sql,\n node=node,\n table=table,\n timing=[],\n logs=[],\n generated_at=datetime.utcnow(),\n )\n adapter.connections.commit()\n\n return response, result\n\n\ndef _get_target_relation(\n target: Union[ParsedModelNode, ParsedSourceDefinition],\n project_path: str,\n profiles_dir: str,\n):\n adapter = _get_adapter(project_path, profiles_dir)\n\n relation = None\n with adapter.connection_named(str(uuid4())):\n # This ROLLBACKs so it has to be a new connection\n relation = adapter.get_relation(\n target.database, target.schema, target.identifier\n )\n return relation\n\n\ndef execute_sql(\n manifest: Manifest, project_path: str, profiles_dir: str, sql: str\n) -> RemoteRunResult:\n _, result = _execute_sql(manifest, project_path, profiles_dir, sql)\n return result\n\n\ndef fetch_target(\n manifest: Manifest,\n project_path: str,\n profiles_dir: str,\n target: Union[ParsedModelNode, ParsedSourceDefinition],\n) -> RemoteRunResult:\n relation = _get_target_relation(target, project_path, profiles_dir)\n\n if relation is None:\n raise Exception(f\"Could not get relation for '{target.unique_id}'\")\n\n query = f\"SELECT * FROM {relation}\"\n _, result = _execute_sql(manifest, project_path, profiles_dir, query)\n return result\n\n\ndef write_target(\n data: pd.DataFrame,\n manifest: Manifest,\n project_path: str,\n profiles_dir: str,\n target: Union[ParsedModelNode, ParsedSourceDefinition],\n) -> RemoteRunResult:\n adapter = _get_adapter(project_path, profiles_dir)\n\n relation = _get_target_relation(target, project_path, profiles_dir)\n\n engine = _alchemy_engine(adapter, target)\n pddb = pdsql.SQLDatabase(engine, schema=target.schema)\n pdtable = pdsql.SQLTable(target.name, pddb, data, index=False)\n alchemy_table: sqlalchemy.Table = pdtable.table.to_metadata(pdtable.pd_sql.meta)\n\n column_names: List[str] = list(data.columns)\n rows = data.to_records(index=False)\n row_dicts = 
list(map(lambda row: dict(zip(column_names, row)), rows))\n\n if relation is None:\n create_stmt = CreateTable(alchemy_table).compile(\n bind=engine, compile_kwargs={\"literal_binds\": True}\n )\n\n _execute_sql(\n manifest, project_path, profiles_dir, six.text_type(create_stmt).strip()\n )\n\n insert_stmt = (\n Insert(alchemy_table)\n .values(row_dicts)\n .compile(bind=engine, compile_kwargs={\"literal_binds\": True})\n )\n\n _, result = _execute_sql(\n manifest, project_path, profiles_dir, six.text_type(insert_stmt).strip()\n )\n return result\n\n\ndef _alchemy_engine(\n adapter: adapters_factory.Adapter,\n target: Union[ParsedModelNode, ParsedSourceDefinition],\n):\n url_string = f\"{adapter.type()}://\"\n if adapter.type() == \"bigquery\":\n url_string = f\"bigquery://{target.database}\"\n if adapter.type() == \"postgres\":\n url_string = \"postgresql://\"\n # TODO: add special cases as needed\n\n def null_dump(sql, *multiparams, **params):\n pass\n\n return sqlalchemy.create_mock_engine(url_string, executor=null_dump)\n", "path": "src/faldbt/lib.py"}]}
| 2,639 | 120 |
gh_patches_debug_2342 | rasdani/github-patches | git_diff | mozilla__bugbug-411 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use codespell in precommit hook
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `run.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import argparse
7 import csv
8 import os
9 from datetime import datetime, timedelta
10
11 import numpy as np
12
13 from bugbug import repository # noqa
14 from bugbug import bugzilla, db
15 from bugbug.models import get_model_class
16
17 if __name__ == "__main__":
18 parser = argparse.ArgumentParser()
19 parser.add_argument(
20 "--lemmatization",
21 help="Perform lemmatization (using spaCy)",
22 action="store_true",
23 )
24 parser.add_argument("--train", help="Perform training", action="store_true")
25 parser.add_argument(
26 "--goal",
27 help="Goal of the classifier",
28 choices=[
29 # bug classifiers
30 "defect",
31 "regression",
32 "tracking",
33 "qaneeded",
34 "uplift",
35 "component",
36 "devdocneeded",
37 "defectenhancementtask",
38 "assignee",
39 "bugtype",
40 "stepstoreproduce",
41 # commit classifiers
42 "backout",
43 ],
44 default="defect",
45 )
46 parser.add_argument(
47 "--classifier",
48 help="Type of the classifier",
49 choices=["default", "nn"],
50 default="default",
51 )
52 parser.add_argument("--classify", help="Perform evaluation", action="store_true")
53 parser.add_argument(
54 "--generate-sheet",
55 help="Perform evaluation on bugs from last week and generate a csv file",
56 action="store_true",
57 )
58 parser.add_argument("--token", help="Bugzilla token", action="store")
59 parser.add_argument(
60 "--historical", help="Analyze historical bugs", action="store_true"
61 )
62 args = parser.parse_args()
63
64 model_file_name = "{}{}model".format(
65 args.goal, "" if args.classifier == "default" else args.classifier
66 )
67
68 model_class_name = args.goal
69
70 if args.goal == "component":
71 if args.classifier == "default":
72 model_class_name = "component"
73 elif args.classifier == "nn":
74 model_class_name = "component_nn"
75 else:
76 raise ValueError(f"Unkown value {args.classifier}")
77
78 model_class = get_model_class(model_class_name)
79
80 if args.train:
81 db.download()
82
83 if args.historical:
84 model = model_class(args.lemmatization, args.historical)
85 else:
86 model = model_class(args.lemmatization)
87 model.train()
88 else:
89 model = model_class.load(model_file_name)
90
91 if args.classify:
92 for bug in bugzilla.get_bugs():
93 print(
94 f'https://bugzilla.mozilla.org/show_bug.cgi?id={ bug["id"] } - { bug["summary"]} '
95 )
96
97 if model.calculate_importance:
98 probas, importances = model.classify(
99 bug, probabilities=True, importances=True
100 )
101
102 feature_names = model.get_feature_names()
103 for i, (importance, index, is_positive) in enumerate(importances):
104 print(
105 f'{i + 1}. \'{feature_names[int(index)]}\' ({"+" if (is_positive) else "-"}{importance})'
106 )
107 else:
108 probas = model.classify(bug, probabilities=True, importances=False)
109
110 if np.argmax(probas) == 1:
111 print(f"Positive! {probas}")
112 else:
113 print(f"Negative! {probas}")
114 input()
115
116 if args.generate_sheet:
117 assert (
118 args.token is not None
119 ), "A Bugzilla token should be set in order to download bugs"
120 today = datetime.utcnow()
121 a_week_ago = today - timedelta(7)
122 bugzilla.set_token(args.token)
123 bugs = bugzilla.download_bugs_between(a_week_ago, today)
124
125 print(f"Classifying {len(bugs)} bugs...")
126
127 rows = [["Bug", f"{args.goal}(model)", args.goal, "Title"]]
128
129 for bug in bugs:
130 p = model.classify(bug, probabilities=True)
131 rows.append(
132 [
133 f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug["id"]}',
134 "y" if p[0][1] >= 0.7 else "n",
135 "",
136 bug["summary"],
137 ]
138 )
139
140 os.makedirs("sheets", exist_ok=True)
141 with open(
142 os.path.join(
143 "sheets",
144 f'{args.goal}-{datetime.utcnow().strftime("%Y-%m-%d")}-labels.csv',
145 ),
146 "w",
147 ) as f:
148 writer = csv.writer(f)
149 writer.writerows(rows)
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/run.py b/run.py
--- a/run.py
+++ b/run.py
@@ -73,7 +73,7 @@
elif args.classifier == "nn":
model_class_name = "component_nn"
else:
- raise ValueError(f"Unkown value {args.classifier}")
+ raise ValueError(f"Unknown value {args.classifier}")
model_class = get_model_class(model_class_name)
|
{"golden_diff": "diff --git a/run.py b/run.py\n--- a/run.py\n+++ b/run.py\n@@ -73,7 +73,7 @@\n elif args.classifier == \"nn\":\n model_class_name = \"component_nn\"\n else:\n- raise ValueError(f\"Unkown value {args.classifier}\")\n+ raise ValueError(f\"Unknown value {args.classifier}\")\n \n model_class = get_model_class(model_class_name)\n", "issue": "Use codespell in precommit hook\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport argparse\nimport csv\nimport os\nfrom datetime import datetime, timedelta\n\nimport numpy as np\n\nfrom bugbug import repository # noqa\nfrom bugbug import bugzilla, db\nfrom bugbug.models import get_model_class\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--lemmatization\",\n help=\"Perform lemmatization (using spaCy)\",\n action=\"store_true\",\n )\n parser.add_argument(\"--train\", help=\"Perform training\", action=\"store_true\")\n parser.add_argument(\n \"--goal\",\n help=\"Goal of the classifier\",\n choices=[\n # bug classifiers\n \"defect\",\n \"regression\",\n \"tracking\",\n \"qaneeded\",\n \"uplift\",\n \"component\",\n \"devdocneeded\",\n \"defectenhancementtask\",\n \"assignee\",\n \"bugtype\",\n \"stepstoreproduce\",\n # commit classifiers\n \"backout\",\n ],\n default=\"defect\",\n )\n parser.add_argument(\n \"--classifier\",\n help=\"Type of the classifier\",\n choices=[\"default\", \"nn\"],\n default=\"default\",\n )\n parser.add_argument(\"--classify\", help=\"Perform evaluation\", action=\"store_true\")\n parser.add_argument(\n \"--generate-sheet\",\n help=\"Perform evaluation on bugs from last week and generate a csv file\",\n action=\"store_true\",\n )\n parser.add_argument(\"--token\", help=\"Bugzilla token\", action=\"store\")\n parser.add_argument(\n \"--historical\", help=\"Analyze historical bugs\", action=\"store_true\"\n )\n args = parser.parse_args()\n\n model_file_name = \"{}{}model\".format(\n args.goal, \"\" if args.classifier == \"default\" else args.classifier\n )\n\n model_class_name = args.goal\n\n if args.goal == \"component\":\n if args.classifier == \"default\":\n model_class_name = \"component\"\n elif args.classifier == \"nn\":\n model_class_name = \"component_nn\"\n else:\n raise ValueError(f\"Unkown value {args.classifier}\")\n\n model_class = get_model_class(model_class_name)\n\n if args.train:\n db.download()\n\n if args.historical:\n model = model_class(args.lemmatization, args.historical)\n else:\n model = model_class(args.lemmatization)\n model.train()\n else:\n model = model_class.load(model_file_name)\n\n if args.classify:\n for bug in bugzilla.get_bugs():\n print(\n f'https://bugzilla.mozilla.org/show_bug.cgi?id={ bug[\"id\"] } - { bug[\"summary\"]} '\n )\n\n if model.calculate_importance:\n probas, importances = model.classify(\n bug, probabilities=True, importances=True\n )\n\n feature_names = model.get_feature_names()\n for i, (importance, index, is_positive) in enumerate(importances):\n print(\n f'{i + 1}. \\'{feature_names[int(index)]}\\' ({\"+\" if (is_positive) else \"-\"}{importance})'\n )\n else:\n probas = model.classify(bug, probabilities=True, importances=False)\n\n if np.argmax(probas) == 1:\n print(f\"Positive! {probas}\")\n else:\n print(f\"Negative! 
{probas}\")\n input()\n\n if args.generate_sheet:\n assert (\n args.token is not None\n ), \"A Bugzilla token should be set in order to download bugs\"\n today = datetime.utcnow()\n a_week_ago = today - timedelta(7)\n bugzilla.set_token(args.token)\n bugs = bugzilla.download_bugs_between(a_week_ago, today)\n\n print(f\"Classifying {len(bugs)} bugs...\")\n\n rows = [[\"Bug\", f\"{args.goal}(model)\", args.goal, \"Title\"]]\n\n for bug in bugs:\n p = model.classify(bug, probabilities=True)\n rows.append(\n [\n f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug[\"id\"]}',\n \"y\" if p[0][1] >= 0.7 else \"n\",\n \"\",\n bug[\"summary\"],\n ]\n )\n\n os.makedirs(\"sheets\", exist_ok=True)\n with open(\n os.path.join(\n \"sheets\",\n f'{args.goal}-{datetime.utcnow().strftime(\"%Y-%m-%d\")}-labels.csv',\n ),\n \"w\",\n ) as f:\n writer = csv.writer(f)\n writer.writerows(rows)\n", "path": "run.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport argparse\nimport csv\nimport os\nfrom datetime import datetime, timedelta\n\nimport numpy as np\n\nfrom bugbug import repository # noqa\nfrom bugbug import bugzilla, db\nfrom bugbug.models import get_model_class\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--lemmatization\",\n help=\"Perform lemmatization (using spaCy)\",\n action=\"store_true\",\n )\n parser.add_argument(\"--train\", help=\"Perform training\", action=\"store_true\")\n parser.add_argument(\n \"--goal\",\n help=\"Goal of the classifier\",\n choices=[\n # bug classifiers\n \"defect\",\n \"regression\",\n \"tracking\",\n \"qaneeded\",\n \"uplift\",\n \"component\",\n \"devdocneeded\",\n \"defectenhancementtask\",\n \"assignee\",\n \"bugtype\",\n \"stepstoreproduce\",\n # commit classifiers\n \"backout\",\n ],\n default=\"defect\",\n )\n parser.add_argument(\n \"--classifier\",\n help=\"Type of the classifier\",\n choices=[\"default\", \"nn\"],\n default=\"default\",\n )\n parser.add_argument(\"--classify\", help=\"Perform evaluation\", action=\"store_true\")\n parser.add_argument(\n \"--generate-sheet\",\n help=\"Perform evaluation on bugs from last week and generate a csv file\",\n action=\"store_true\",\n )\n parser.add_argument(\"--token\", help=\"Bugzilla token\", action=\"store\")\n parser.add_argument(\n \"--historical\", help=\"Analyze historical bugs\", action=\"store_true\"\n )\n args = parser.parse_args()\n\n model_file_name = \"{}{}model\".format(\n args.goal, \"\" if args.classifier == \"default\" else args.classifier\n )\n\n model_class_name = args.goal\n\n if args.goal == \"component\":\n if args.classifier == \"default\":\n model_class_name = \"component\"\n elif args.classifier == \"nn\":\n model_class_name = \"component_nn\"\n else:\n raise ValueError(f\"Unknown value {args.classifier}\")\n\n model_class = get_model_class(model_class_name)\n\n if args.train:\n db.download()\n\n if args.historical:\n model = model_class(args.lemmatization, args.historical)\n else:\n model = model_class(args.lemmatization)\n model.train()\n else:\n model = model_class.load(model_file_name)\n\n if args.classify:\n for bug in bugzilla.get_bugs():\n print(\n f'https://bugzilla.mozilla.org/show_bug.cgi?id={ bug[\"id\"] } - { bug[\"summary\"]} '\n )\n\n if model.calculate_importance:\n probas, importances = model.classify(\n bug, 
probabilities=True, importances=True\n )\n\n feature_names = model.get_feature_names()\n for i, (importance, index, is_positive) in enumerate(importances):\n print(\n f'{i + 1}. \\'{feature_names[int(index)]}\\' ({\"+\" if (is_positive) else \"-\"}{importance})'\n )\n else:\n probas = model.classify(bug, probabilities=True, importances=False)\n\n if np.argmax(probas) == 1:\n print(f\"Positive! {probas}\")\n else:\n print(f\"Negative! {probas}\")\n input()\n\n if args.generate_sheet:\n assert (\n args.token is not None\n ), \"A Bugzilla token should be set in order to download bugs\"\n today = datetime.utcnow()\n a_week_ago = today - timedelta(7)\n bugzilla.set_token(args.token)\n bugs = bugzilla.download_bugs_between(a_week_ago, today)\n\n print(f\"Classifying {len(bugs)} bugs...\")\n\n rows = [[\"Bug\", f\"{args.goal}(model)\", args.goal, \"Title\"]]\n\n for bug in bugs:\n p = model.classify(bug, probabilities=True)\n rows.append(\n [\n f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug[\"id\"]}',\n \"y\" if p[0][1] >= 0.7 else \"n\",\n \"\",\n bug[\"summary\"],\n ]\n )\n\n os.makedirs(\"sheets\", exist_ok=True)\n with open(\n os.path.join(\n \"sheets\",\n f'{args.goal}-{datetime.utcnow().strftime(\"%Y-%m-%d\")}-labels.csv',\n ),\n \"w\",\n ) as f:\n writer = csv.writer(f)\n writer.writerows(rows)\n", "path": "run.py"}]}
| 1,649 | 93 |
gh_patches_debug_22273 | rasdani/github-patches | git_diff | kserve__kserve-545 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Log format is not properly setup for KFServer
/kind bug
**What steps did you take and what happened:**
Log format is not properly setup
```
INFO:root:Copying contents of /mnt/models to local
INFO:root:Registering model:cifar10
INFO:root:Listening on port 8080
INFO:tornado.general:Starting 40 processes
INFO:tornado.access:200 POST /v1/models/cifar10:predict (127.0.0.1) 11488.05ms
INFO:tornado.access:200 POST /v1/models/cifar10:predict (127.0.0.1) 22800.67ms
INFO:tornado.access:200 POST /v1/models/cifar10:predict (127.0.0.1) 24200.31ms
INFO:tornado.access:200 POST /v1/models/cifar10:predict (127.0.0.1) 8301.00ms
INFO:tornado.access:200 POST /v1/models/cifar10:predict (127.0.0.1) 38398.63ms
INFO:tornado.access:200 POST /v1/models/cifar10:predict (127.0.0.1) 38799.67ms
INFO:tornado.access:200 POST /v1/models/cifar10:predict (127.0.0.1) 7599.63ms
INFO:tornado.access:200 POST /v1/models/cifar10:predict (127.0.0.1) 39800.00ms
INFO:tornado.access:200 POST /v1/models/cifar10:predict (127.0.0.1) 32200.33ms
```
**What did you expect to happen:**
The log format should include timestamp.
**Anything else you would like to add:**
[Miscellaneous information that will assist in solving the issue.]
**Environment:**
- Istio Version:
- Knative Version:
- KFServing Version: 0.2.0
- Kubeflow version:
- Minikube version:
- Kubernetes version: (use `kubectl version`):
- OS (e.g. from `/etc/os-release`):
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/kfserving/kfserving/kfserver.py`
Content:
```
1 # Copyright 2019 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import tornado.ioloop
16 import tornado.web
17 import tornado.httpserver
18 import argparse
19 import logging
20 import json
21 from typing import List, Dict
22 from kfserving.handlers.http import PredictHandler, ExplainHandler
23 from kfserving import KFModel
24 from kfserving.constants import constants
25
26 DEFAULT_HTTP_PORT = 8080
27 DEFAULT_GRPC_PORT = 8081
28
29 parser = argparse.ArgumentParser(add_help=False)
30 parser.add_argument('--http_port', default=DEFAULT_HTTP_PORT, type=int,
31 help='The HTTP Port listened to by the model server.')
32 parser.add_argument('--grpc_port', default=DEFAULT_GRPC_PORT, type=int,
33 help='The GRPC Port listened to by the model server.')
34 parser.add_argument('--workers', default=0, type=int,
35 help='The number of works to fork')
36 args, _ = parser.parse_known_args()
37
38 logging.basicConfig(level=constants.KFSERVING_LOGLEVEL)
39
40
41 class KFServer():
42 def __init__(self, http_port: int = args.http_port,
43 grpc_port: int = args.grpc_port,
44 workers: int = args.workers):
45 self.registered_models = {}
46 self.http_port = http_port
47 self.grpc_port = grpc_port
48 self.workers = workers
49 self._http_server = None
50
51 def create_application(self):
52 return tornado.web.Application([
53 # Server Liveness API returns 200 if server is alive.
54 (r"/", LivenessHandler),
55 (r"/v1/models",
56 ListHandler, dict(models=self.registered_models)),
57 # Model Health API returns 200 if model is ready to serve.
58 (r"/v1/models/([a-zA-Z0-9_-]+)",
59 HealthHandler, dict(models=self.registered_models)),
60 (r"/v1/models/([a-zA-Z0-9_-]+):predict",
61 PredictHandler, dict(models=self.registered_models)),
62 (r"/v1/models/([a-zA-Z0-9_-]+):explain",
63 ExplainHandler, dict(models=self.registered_models)),
64 ])
65
66 def start(self, models: List[KFModel]):
67 for model in models:
68 self.register_model(model)
69
70 self._http_server = tornado.httpserver.HTTPServer(
71 self.create_application())
72
73 logging.info("Listening on port %s", self.http_port)
74 self._http_server.bind(self.http_port)
75 logging.info("Will fork %d workers", self.workers)
76 self._http_server.start(self.workers)
77 tornado.ioloop.IOLoop.current().start()
78
79 def register_model(self, model: KFModel):
80 if not model.name:
81 raise Exception(
82 "Failed to register model, model.name must be provided.")
83 self.registered_models[model.name] = model
84 logging.info("Registering model: %s", model.name)
85
86
87 class LivenessHandler(tornado.web.RequestHandler): # pylint:disable=too-few-public-methods
88 def get(self):
89 self.write("Alive")
90
91
92 class HealthHandler(tornado.web.RequestHandler):
93 def initialize(self, models: Dict[str, KFModel]):
94 self.models = models # pylint:disable=attribute-defined-outside-init
95
96 def get(self, name: str):
97 if name not in self.models:
98 raise tornado.web.HTTPError(
99 status_code=404,
100 reason="Model with name %s does not exist." % name
101 )
102
103 model = self.models[name]
104 self.write(json.dumps({
105 "name": model.name,
106 "ready": model.ready
107 }))
108
109
110 class ListHandler(tornado.web.RequestHandler):
111 def initialize(self, models: Dict[str, KFModel]):
112 self.models = models # pylint:disable=attribute-defined-outside-init
113
114 def get(self):
115 self.write(json.dumps(list(self.models.values())))
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/kfserving/kfserving/kfserver.py b/python/kfserving/kfserving/kfserver.py
--- a/python/kfserving/kfserving/kfserver.py
+++ b/python/kfserving/kfserving/kfserver.py
@@ -12,16 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import tornado.ioloop
-import tornado.web
-import tornado.httpserver
import argparse
import logging
import json
from typing import List, Dict
+import tornado.ioloop
+import tornado.web
+import tornado.httpserver
+import tornado.log
from kfserving.handlers.http import PredictHandler, ExplainHandler
from kfserving import KFModel
-from kfserving.constants import constants
DEFAULT_HTTP_PORT = 8080
DEFAULT_GRPC_PORT = 8081
@@ -35,8 +35,7 @@
help='The number of works to fork')
args, _ = parser.parse_known_args()
-logging.basicConfig(level=constants.KFSERVING_LOGLEVEL)
-
+tornado.log.enable_pretty_logging()
class KFServer():
def __init__(self, http_port: int = args.http_port,
|
{"golden_diff": "diff --git a/python/kfserving/kfserving/kfserver.py b/python/kfserving/kfserving/kfserver.py\n--- a/python/kfserving/kfserving/kfserver.py\n+++ b/python/kfserving/kfserving/kfserver.py\n@@ -12,16 +12,16 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-import tornado.ioloop\n-import tornado.web\n-import tornado.httpserver\n import argparse\n import logging\n import json\n from typing import List, Dict\n+import tornado.ioloop\n+import tornado.web\n+import tornado.httpserver\n+import tornado.log\n from kfserving.handlers.http import PredictHandler, ExplainHandler\n from kfserving import KFModel\n-from kfserving.constants import constants\n \n DEFAULT_HTTP_PORT = 8080\n DEFAULT_GRPC_PORT = 8081\n@@ -35,8 +35,7 @@\n help='The number of works to fork')\n args, _ = parser.parse_known_args()\n \n-logging.basicConfig(level=constants.KFSERVING_LOGLEVEL)\n-\n+tornado.log.enable_pretty_logging()\n \n class KFServer():\n def __init__(self, http_port: int = args.http_port,\n", "issue": "Log format is not properly setup for KFServer\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\nLog format is not properly setup\r\n\r\n```\r\nINFO:root:Copying contents of /mnt/models to local\r\nINFO:root:Registering model:cifar10\r\nINFO:root:Listening on port 8080\r\nINFO:tornado.general:Starting 40 processes\r\nINFO:tornado.access:200 POST /v1/models/cifar10:predict (127.0.0.1) 11488.05ms\r\nINFO:tornado.access:200 POST /v1/models/cifar10:predict (127.0.0.1) 22800.67ms\r\nINFO:tornado.access:200 POST /v1/models/cifar10:predict (127.0.0.1) 24200.31ms\r\nINFO:tornado.access:200 POST /v1/models/cifar10:predict (127.0.0.1) 8301.00ms\r\nINFO:tornado.access:200 POST /v1/models/cifar10:predict (127.0.0.1) 38398.63ms\r\nINFO:tornado.access:200 POST /v1/models/cifar10:predict (127.0.0.1) 38799.67ms\r\nINFO:tornado.access:200 POST /v1/models/cifar10:predict (127.0.0.1) 7599.63ms\r\nINFO:tornado.access:200 POST /v1/models/cifar10:predict (127.0.0.1) 39800.00ms\r\nINFO:tornado.access:200 POST /v1/models/cifar10:predict (127.0.0.1) 32200.33ms\r\n```\r\n\r\n**What did you expect to happen:**\r\nThe log format should include timestamp.\r\n\r\n**Anything else you would like to add:**\r\n[Miscellaneous information that will assist in solving the issue.]\r\n\r\n\r\n**Environment:**\r\n\r\n- Istio Version:\r\n- Knative Version:\r\n- KFServing Version: 0.2.0\r\n- Kubeflow version:\r\n- Minikube version:\r\n- Kubernetes version: (use `kubectl version`):\r\n- OS (e.g. 
from `/etc/os-release`):\r\n\n", "before_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport tornado.ioloop\nimport tornado.web\nimport tornado.httpserver\nimport argparse\nimport logging\nimport json\nfrom typing import List, Dict\nfrom kfserving.handlers.http import PredictHandler, ExplainHandler\nfrom kfserving import KFModel\nfrom kfserving.constants import constants\n\nDEFAULT_HTTP_PORT = 8080\nDEFAULT_GRPC_PORT = 8081\n\nparser = argparse.ArgumentParser(add_help=False)\nparser.add_argument('--http_port', default=DEFAULT_HTTP_PORT, type=int,\n help='The HTTP Port listened to by the model server.')\nparser.add_argument('--grpc_port', default=DEFAULT_GRPC_PORT, type=int,\n help='The GRPC Port listened to by the model server.')\nparser.add_argument('--workers', default=0, type=int,\n help='The number of works to fork')\nargs, _ = parser.parse_known_args()\n\nlogging.basicConfig(level=constants.KFSERVING_LOGLEVEL)\n\n\nclass KFServer():\n def __init__(self, http_port: int = args.http_port,\n grpc_port: int = args.grpc_port,\n workers: int = args.workers):\n self.registered_models = {}\n self.http_port = http_port\n self.grpc_port = grpc_port\n self.workers = workers\n self._http_server = None\n\n def create_application(self):\n return tornado.web.Application([\n # Server Liveness API returns 200 if server is alive.\n (r\"/\", LivenessHandler),\n (r\"/v1/models\",\n ListHandler, dict(models=self.registered_models)),\n # Model Health API returns 200 if model is ready to serve.\n (r\"/v1/models/([a-zA-Z0-9_-]+)\",\n HealthHandler, dict(models=self.registered_models)),\n (r\"/v1/models/([a-zA-Z0-9_-]+):predict\",\n PredictHandler, dict(models=self.registered_models)),\n (r\"/v1/models/([a-zA-Z0-9_-]+):explain\",\n ExplainHandler, dict(models=self.registered_models)),\n ])\n\n def start(self, models: List[KFModel]):\n for model in models:\n self.register_model(model)\n\n self._http_server = tornado.httpserver.HTTPServer(\n self.create_application())\n\n logging.info(\"Listening on port %s\", self.http_port)\n self._http_server.bind(self.http_port)\n logging.info(\"Will fork %d workers\", self.workers)\n self._http_server.start(self.workers)\n tornado.ioloop.IOLoop.current().start()\n\n def register_model(self, model: KFModel):\n if not model.name:\n raise Exception(\n \"Failed to register model, model.name must be provided.\")\n self.registered_models[model.name] = model\n logging.info(\"Registering model: %s\", model.name)\n\n\nclass LivenessHandler(tornado.web.RequestHandler): # pylint:disable=too-few-public-methods\n def get(self):\n self.write(\"Alive\")\n\n\nclass HealthHandler(tornado.web.RequestHandler):\n def initialize(self, models: Dict[str, KFModel]):\n self.models = models # pylint:disable=attribute-defined-outside-init\n\n def get(self, name: str):\n if name not in self.models:\n raise tornado.web.HTTPError(\n status_code=404,\n reason=\"Model with name %s does not exist.\" % name\n )\n\n model = self.models[name]\n 
self.write(json.dumps({\n \"name\": model.name,\n \"ready\": model.ready\n }))\n\n\nclass ListHandler(tornado.web.RequestHandler):\n def initialize(self, models: Dict[str, KFModel]):\n self.models = models # pylint:disable=attribute-defined-outside-init\n\n def get(self):\n self.write(json.dumps(list(self.models.values())))\n", "path": "python/kfserving/kfserving/kfserver.py"}], "after_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport logging\nimport json\nfrom typing import List, Dict\nimport tornado.ioloop\nimport tornado.web\nimport tornado.httpserver\nimport tornado.log\nfrom kfserving.handlers.http import PredictHandler, ExplainHandler\nfrom kfserving import KFModel\n\nDEFAULT_HTTP_PORT = 8080\nDEFAULT_GRPC_PORT = 8081\n\nparser = argparse.ArgumentParser(add_help=False)\nparser.add_argument('--http_port', default=DEFAULT_HTTP_PORT, type=int,\n help='The HTTP Port listened to by the model server.')\nparser.add_argument('--grpc_port', default=DEFAULT_GRPC_PORT, type=int,\n help='The GRPC Port listened to by the model server.')\nparser.add_argument('--workers', default=0, type=int,\n help='The number of works to fork')\nargs, _ = parser.parse_known_args()\n\ntornado.log.enable_pretty_logging()\n\nclass KFServer():\n def __init__(self, http_port: int = args.http_port,\n grpc_port: int = args.grpc_port,\n workers: int = args.workers):\n self.registered_models = {}\n self.http_port = http_port\n self.grpc_port = grpc_port\n self.workers = workers\n self._http_server = None\n\n def create_application(self):\n return tornado.web.Application([\n # Server Liveness API returns 200 if server is alive.\n (r\"/\", LivenessHandler),\n (r\"/v1/models\",\n ListHandler, dict(models=self.registered_models)),\n # Model Health API returns 200 if model is ready to serve.\n (r\"/v1/models/([a-zA-Z0-9_-]+)\",\n HealthHandler, dict(models=self.registered_models)),\n (r\"/v1/models/([a-zA-Z0-9_-]+):predict\",\n PredictHandler, dict(models=self.registered_models)),\n (r\"/v1/models/([a-zA-Z0-9_-]+):explain\",\n ExplainHandler, dict(models=self.registered_models)),\n ])\n\n def start(self, models: List[KFModel]):\n for model in models:\n self.register_model(model)\n\n self._http_server = tornado.httpserver.HTTPServer(\n self.create_application())\n\n logging.info(\"Listening on port %s\", self.http_port)\n self._http_server.bind(self.http_port)\n logging.info(\"Will fork %d workers\", self.workers)\n self._http_server.start(self.workers)\n tornado.ioloop.IOLoop.current().start()\n\n def register_model(self, model: KFModel):\n if not model.name:\n raise Exception(\n \"Failed to register model, model.name must be provided.\")\n self.registered_models[model.name] = model\n logging.info(\"Registering model: %s\", model.name)\n\n\nclass LivenessHandler(tornado.web.RequestHandler): # pylint:disable=too-few-public-methods\n def get(self):\n self.write(\"Alive\")\n\n\nclass HealthHandler(tornado.web.RequestHandler):\n def initialize(self, 
models: Dict[str, KFModel]):\n self.models = models # pylint:disable=attribute-defined-outside-init\n\n def get(self, name: str):\n if name not in self.models:\n raise tornado.web.HTTPError(\n status_code=404,\n reason=\"Model with name %s does not exist.\" % name\n )\n\n model = self.models[name]\n self.write(json.dumps({\n \"name\": model.name,\n \"ready\": model.ready\n }))\n\n\nclass ListHandler(tornado.web.RequestHandler):\n def initialize(self, models: Dict[str, KFModel]):\n self.models = models # pylint:disable=attribute-defined-outside-init\n\n def get(self):\n self.write(json.dumps(list(self.models.values())))\n", "path": "python/kfserving/kfserving/kfserver.py"}]}
| 2,017 | 270 |
gh_patches_debug_35906 | rasdani/github-patches | git_diff | streamlink__streamlink-5754 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.bigo: Unable to parse JSON
### Checklist
- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest release
### Description
Hello,
the bigo.py is not working at the moment.
It is giving a parse JSON error.
Debug log is following...
### Debug log
```text
error: Unable to parse JSON: Expecting value: line 1 column 1 (char 0) ('<!DOCTYPE html>\n<html lang="en" s ...)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/bigo.py`
Content:
```
1 """
2 $description Global live streaming platform for live video game broadcasts and individual live streams.
3 $url live.bigo.tv
4 $url bigoweb.co
5 $type live
6 """
7
8 import re
9
10 from streamlink.plugin import Plugin, pluginmatcher
11 from streamlink.plugin.api import useragents, validate
12 from streamlink.stream.hls import HLSStream
13
14
15 @pluginmatcher(re.compile(
16 r"https?://(?:www\.)?bigo\.tv/([^/]+)$",
17 ))
18 class Bigo(Plugin):
19 _api_url = "https://www.bigo.tv/OInterface/getVideoParam?bigoId={0}"
20
21 _video_info_schema = validate.Schema({
22 "code": 0,
23 "msg": "success",
24 "data": {
25 "videoSrc": validate.any(None, "", validate.url()),
26 },
27 })
28
29 def _get_streams(self):
30 res = self.session.http.get(
31 self._api_url.format(self.match.group(1)),
32 allow_redirects=True,
33 headers={"User-Agent": useragents.IPHONE_6},
34 )
35 data = self.session.http.json(res, schema=self._video_info_schema)
36 videourl = data["data"]["videoSrc"]
37 if videourl:
38 yield "live", HLSStream(self.session, videourl)
39
40
41 __plugin__ = Bigo
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/streamlink/plugins/bigo.py b/src/streamlink/plugins/bigo.py
--- a/src/streamlink/plugins/bigo.py
+++ b/src/streamlink/plugins/bigo.py
@@ -1,41 +1,68 @@
"""
-$description Global live streaming platform for live video game broadcasts and individual live streams.
-$url live.bigo.tv
-$url bigoweb.co
+$description Global live-streaming platform for live video game broadcasts and individual live streams.
+$url bigo.tv
$type live
+$metadata id
+$metadata author
+$metadata category
+$metadata title
"""
+import logging
import re
from streamlink.plugin import Plugin, pluginmatcher
-from streamlink.plugin.api import useragents, validate
+from streamlink.plugin.api import validate
from streamlink.stream.hls import HLSStream
+log = logging.getLogger(__name__)
+
+
@pluginmatcher(re.compile(
- r"https?://(?:www\.)?bigo\.tv/([^/]+)$",
+ r"https?://(?:www\.)?bigo\.tv/(?P<site_id>[^/]+)$",
))
class Bigo(Plugin):
- _api_url = "https://www.bigo.tv/OInterface/getVideoParam?bigoId={0}"
-
- _video_info_schema = validate.Schema({
- "code": 0,
- "msg": "success",
- "data": {
- "videoSrc": validate.any(None, "", validate.url()),
- },
- })
+ _URL_API = "https://ta.bigo.tv/official_website/studio/getInternalStudioInfo"
def _get_streams(self):
- res = self.session.http.get(
- self._api_url.format(self.match.group(1)),
- allow_redirects=True,
- headers={"User-Agent": useragents.IPHONE_6},
+ self.id, self.author, self.category, self.title, hls_url = self.session.http.post(
+ self._URL_API,
+ params={
+ "siteId": self.match["site_id"],
+ "verify": "",
+ },
+ schema=validate.Schema(
+ validate.parse_json(),
+ {
+ "code": 0,
+ "msg": "success",
+ "data": {
+ "roomId": validate.any(None, str),
+ "clientBigoId": validate.any(None, str),
+ "gameTitle": str,
+ "roomTopic": str,
+ "hls_src": validate.any(None, "", validate.url()),
+ },
+ },
+ validate.union_get(
+ ("data", "roomId"),
+ ("data", "clientBigoId"),
+ ("data", "gameTitle"),
+ ("data", "roomTopic"),
+ ("data", "hls_src"),
+ ),
+ ),
)
- data = self.session.http.json(res, schema=self._video_info_schema)
- videourl = data["data"]["videoSrc"]
- if videourl:
- yield "live", HLSStream(self.session, videourl)
+
+ if not self.id:
+ return
+
+ if not hls_url:
+ log.info("Channel is offline")
+ return
+
+ yield "live", HLSStream(self.session, hls_url)
__plugin__ = Bigo
|
{"golden_diff": "diff --git a/src/streamlink/plugins/bigo.py b/src/streamlink/plugins/bigo.py\n--- a/src/streamlink/plugins/bigo.py\n+++ b/src/streamlink/plugins/bigo.py\n@@ -1,41 +1,68 @@\n \"\"\"\n-$description Global live streaming platform for live video game broadcasts and individual live streams.\n-$url live.bigo.tv\n-$url bigoweb.co\n+$description Global live-streaming platform for live video game broadcasts and individual live streams.\n+$url bigo.tv\n $type live\n+$metadata id\n+$metadata author\n+$metadata category\n+$metadata title\n \"\"\"\n \n+import logging\n import re\n \n from streamlink.plugin import Plugin, pluginmatcher\n-from streamlink.plugin.api import useragents, validate\n+from streamlink.plugin.api import validate\n from streamlink.stream.hls import HLSStream\n \n \n+log = logging.getLogger(__name__)\n+\n+\n @pluginmatcher(re.compile(\n- r\"https?://(?:www\\.)?bigo\\.tv/([^/]+)$\",\n+ r\"https?://(?:www\\.)?bigo\\.tv/(?P<site_id>[^/]+)$\",\n ))\n class Bigo(Plugin):\n- _api_url = \"https://www.bigo.tv/OInterface/getVideoParam?bigoId={0}\"\n-\n- _video_info_schema = validate.Schema({\n- \"code\": 0,\n- \"msg\": \"success\",\n- \"data\": {\n- \"videoSrc\": validate.any(None, \"\", validate.url()),\n- },\n- })\n+ _URL_API = \"https://ta.bigo.tv/official_website/studio/getInternalStudioInfo\"\n \n def _get_streams(self):\n- res = self.session.http.get(\n- self._api_url.format(self.match.group(1)),\n- allow_redirects=True,\n- headers={\"User-Agent\": useragents.IPHONE_6},\n+ self.id, self.author, self.category, self.title, hls_url = self.session.http.post(\n+ self._URL_API,\n+ params={\n+ \"siteId\": self.match[\"site_id\"],\n+ \"verify\": \"\",\n+ },\n+ schema=validate.Schema(\n+ validate.parse_json(),\n+ {\n+ \"code\": 0,\n+ \"msg\": \"success\",\n+ \"data\": {\n+ \"roomId\": validate.any(None, str),\n+ \"clientBigoId\": validate.any(None, str),\n+ \"gameTitle\": str,\n+ \"roomTopic\": str,\n+ \"hls_src\": validate.any(None, \"\", validate.url()),\n+ },\n+ },\n+ validate.union_get(\n+ (\"data\", \"roomId\"),\n+ (\"data\", \"clientBigoId\"),\n+ (\"data\", \"gameTitle\"),\n+ (\"data\", \"roomTopic\"),\n+ (\"data\", \"hls_src\"),\n+ ),\n+ ),\n )\n- data = self.session.http.json(res, schema=self._video_info_schema)\n- videourl = data[\"data\"][\"videoSrc\"]\n- if videourl:\n- yield \"live\", HLSStream(self.session, videourl)\n+\n+ if not self.id:\n+ return\n+\n+ if not hls_url:\n+ log.info(\"Channel is offline\")\n+ return\n+\n+ yield \"live\", HLSStream(self.session, hls_url)\n \n \n __plugin__ = Bigo\n", "issue": "plugins.bigo: Unable to parse JSON\n### Checklist\n\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest release\n\n### Description\n\nHello,\r\n\r\nthe bigo.py is not working at the moment.\r\n\r\nIt is giving a parse JSON error.\r\n\r\nDebug log is following...\n\n### Debug log\n\n```text\nerror: Unable to parse JSON: Expecting value: line 1 column 1 (char 0) ('<!DOCTYPE html>\\n<html 
lang=\"en\" s ...)\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Global live streaming platform for live video game broadcasts and individual live streams.\n$url live.bigo.tv\n$url bigoweb.co\n$type live\n\"\"\"\n\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import useragents, validate\nfrom streamlink.stream.hls import HLSStream\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?bigo\\.tv/([^/]+)$\",\n))\nclass Bigo(Plugin):\n _api_url = \"https://www.bigo.tv/OInterface/getVideoParam?bigoId={0}\"\n\n _video_info_schema = validate.Schema({\n \"code\": 0,\n \"msg\": \"success\",\n \"data\": {\n \"videoSrc\": validate.any(None, \"\", validate.url()),\n },\n })\n\n def _get_streams(self):\n res = self.session.http.get(\n self._api_url.format(self.match.group(1)),\n allow_redirects=True,\n headers={\"User-Agent\": useragents.IPHONE_6},\n )\n data = self.session.http.json(res, schema=self._video_info_schema)\n videourl = data[\"data\"][\"videoSrc\"]\n if videourl:\n yield \"live\", HLSStream(self.session, videourl)\n\n\n__plugin__ = Bigo\n", "path": "src/streamlink/plugins/bigo.py"}], "after_files": [{"content": "\"\"\"\n$description Global live-streaming platform for live video game broadcasts and individual live streams.\n$url bigo.tv\n$type live\n$metadata id\n$metadata author\n$metadata category\n$metadata title\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?bigo\\.tv/(?P<site_id>[^/]+)$\",\n))\nclass Bigo(Plugin):\n _URL_API = \"https://ta.bigo.tv/official_website/studio/getInternalStudioInfo\"\n\n def _get_streams(self):\n self.id, self.author, self.category, self.title, hls_url = self.session.http.post(\n self._URL_API,\n params={\n \"siteId\": self.match[\"site_id\"],\n \"verify\": \"\",\n },\n schema=validate.Schema(\n validate.parse_json(),\n {\n \"code\": 0,\n \"msg\": \"success\",\n \"data\": {\n \"roomId\": validate.any(None, str),\n \"clientBigoId\": validate.any(None, str),\n \"gameTitle\": str,\n \"roomTopic\": str,\n \"hls_src\": validate.any(None, \"\", validate.url()),\n },\n },\n validate.union_get(\n (\"data\", \"roomId\"),\n (\"data\", \"clientBigoId\"),\n (\"data\", \"gameTitle\"),\n (\"data\", \"roomTopic\"),\n (\"data\", \"hls_src\"),\n ),\n ),\n )\n\n if not self.id:\n return\n\n if not hls_url:\n log.info(\"Channel is offline\")\n return\n\n yield \"live\", HLSStream(self.session, hls_url)\n\n\n__plugin__ = Bigo\n", "path": "src/streamlink/plugins/bigo.py"}]}
| 866 | 724 |
gh_patches_debug_10737
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-1137
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Crash on starlette middleware in mount endpoints (StaticFiles)
**Describe the bug**: Elastic middleware crash on access to static files
**Environment (please complete the following information)**
- OS: [e.g. Linux]
- Python version: 3.9.4
- Framework and version [e.g. Django 2.1]: FastAPI
- APM Server version: 7.12.1
- Agent version: 7.12.1
```python
from __future__ import annotations
import uvicorn
from elasticapm.contrib.starlette import ElasticAPM, make_apm_client
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
app = FastAPI(routes=[])
apm_config = {
"SERVICE_NAME": "testing",
"ENABLED": True,
"SERVER_URL": "http://localhost:8200",
"CAPTURE_HEADERS": True,
"CAPTURE_BODY": "all",
}
apm = make_apm_client(apm_config)
app.add_middleware(ElasticAPM, client=apm)
app.mount("/static",StaticFiles(directory="/"),name="static")
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", debug=False)
```
The issue happens when the Elastic middleware runs the function _get_route_name.
The middleware tries to loop over the routes, where Starlette returns None in some cases:
```python
File "/home/syniex/.local/lib/python3.9/site-packages/elasticapm/contrib/starlette/__init__.py", line 233, in _get_route_name
for route in routes:
└ None
TypeError: 'NoneType' object is not iterable
```
The Starlette code that causes the issue:
```python
@property
def routes(self) -> typing.List[BaseRoute]:
return getattr(self.app, "routes", None)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/contrib/starlette/__init__.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
4 # Copyright (c) 2019, Elasticsearch BV
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
31
32 from __future__ import absolute_import
33
34 from typing import Dict, Optional
35
36 import starlette
37 from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
38 from starlette.requests import Request
39 from starlette.responses import Response
40 from starlette.routing import Match, Mount
41 from starlette.types import ASGIApp
42
43 import elasticapm
44 import elasticapm.instrumentation.control
45 from elasticapm.base import Client
46 from elasticapm.conf import constants
47 from elasticapm.contrib.asyncio.traces import set_context
48 from elasticapm.contrib.starlette.utils import get_body, get_data_from_request, get_data_from_response
49 from elasticapm.utils.disttracing import TraceParent
50 from elasticapm.utils.logging import get_logger
51
52 logger = get_logger("elasticapm.errors.client")
53
54
55 def make_apm_client(config: Optional[Dict] = None, client_cls=Client, **defaults) -> Client:
56 """Builds ElasticAPM client.
57
58 Args:
59 config (dict): Dictionary of Client configuration. All keys must be uppercase. See `elasticapm.conf.Config`.
60 client_cls (Client): Must be Client or its child.
61 **defaults: Additional parameters for Client. See `elasticapm.base.Client`
62
63 Returns:
64 Client
65 """
66 if "framework_name" not in defaults:
67 defaults["framework_name"] = "starlette"
68 defaults["framework_version"] = starlette.__version__
69
70 return client_cls(config, **defaults)
71
72
73 class ElasticAPM(BaseHTTPMiddleware):
74 """
75 Starlette / FastAPI middleware for Elastic APM capturing.
76
77 >>> elasticapm = make_apm_client({
78 >>> 'SERVICE_NAME': 'myapp',
79 >>> 'DEBUG': True,
80 >>> 'SERVER_URL': 'http://localhost:8200',
81 >>> 'CAPTURE_HEADERS': True,
82 >>> 'CAPTURE_BODY': 'all'
83 >>> })
84
85 >>> app.add_middleware(ElasticAPM, client=elasticapm)
86
87 Pass an arbitrary APP_NAME and SECRET_TOKEN::
88
89 >>> elasticapm = ElasticAPM(app, service_name='myapp', secret_token='asdasdasd')
90
91 Pass an explicit client::
92
93 >>> elasticapm = ElasticAPM(app, client=client)
94
95 Automatically configure logging::
96
97 >>> elasticapm = ElasticAPM(app, logging=True)
98
99 Capture an exception::
100
101 >>> try:
102 >>> 1 / 0
103 >>> except ZeroDivisionError:
104 >>> elasticapm.capture_exception()
105
106 Capture a message::
107
108 >>> elasticapm.capture_message('hello, world!')
109 """
110
111 def __init__(self, app: ASGIApp, client: Client):
112 """
113
114 Args:
115 app (ASGIApp): Starlette app
116 client (Client): ElasticAPM Client
117 """
118 self.client = client
119
120 if self.client.config.instrument and self.client.config.enabled:
121 elasticapm.instrumentation.control.instrument()
122
123 super().__init__(app)
124
125 async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response:
126 """Processes the whole request APM capturing.
127
128 Args:
129 request (Request)
130 call_next (RequestResponseEndpoint): Next request process in Starlette.
131
132 Returns:
133 Response
134 """
135 await self._request_started(request)
136
137 try:
138 response = await call_next(request)
139 elasticapm.set_transaction_outcome(constants.OUTCOME.SUCCESS, override=False)
140 except Exception:
141 await self.capture_exception(
142 context={"request": await get_data_from_request(request, self.client.config, constants.ERROR)}
143 )
144 elasticapm.set_transaction_result("HTTP 5xx", override=False)
145 elasticapm.set_transaction_outcome(constants.OUTCOME.FAILURE, override=False)
146 elasticapm.set_context({"status_code": 500}, "response")
147
148 raise
149 else:
150 await self._request_finished(response)
151 finally:
152 self.client.end_transaction()
153
154 return response
155
156 async def capture_exception(self, *args, **kwargs):
157 """Captures your exception.
158
159 Args:
160 *args:
161 **kwargs:
162 """
163 self.client.capture_exception(*args, **kwargs)
164
165 async def capture_message(self, *args, **kwargs):
166 """Captures your message.
167
168 Args:
169 *args: Whatever
170 **kwargs: Whatever
171 """
172 self.client.capture_message(*args, **kwargs)
173
174 async def _request_started(self, request: Request):
175 """Captures the begin of the request processing to APM.
176
177 Args:
178 request (Request)
179 """
180 # When we consume the body, we replace the streaming mechanism with
181 # a mocked version -- this workaround came from
182 # https://github.com/encode/starlette/issues/495#issuecomment-513138055
183 # and we call the workaround here to make sure that regardless of
184 # `capture_body` settings, we will have access to the body if we need it.
185 if self.client.config.capture_body != "off":
186 await get_body(request)
187
188 if not self.client.should_ignore_url(request.url.path):
189 trace_parent = TraceParent.from_headers(dict(request.headers))
190 self.client.begin_transaction("request", trace_parent=trace_parent)
191
192 await set_context(
193 lambda: get_data_from_request(request, self.client.config, constants.TRANSACTION), "request"
194 )
195 transaction_name = self.get_route_name(request) or request.url.path
196 elasticapm.set_transaction_name("{} {}".format(request.method, transaction_name), override=False)
197
198 async def _request_finished(self, response: Response):
199 """Captures the end of the request processing to APM.
200
201 Args:
202 response (Response)
203 """
204 await set_context(
205 lambda: get_data_from_response(response, self.client.config, constants.TRANSACTION), "response"
206 )
207
208 result = "HTTP {}xx".format(response.status_code // 100)
209 elasticapm.set_transaction_result(result, override=False)
210
211 def get_route_name(self, request: Request) -> str:
212 app = request.app
213 scope = request.scope
214 routes = app.routes
215 route_name = self._get_route_name(scope, routes)
216
217 # Starlette magically redirects requests if the path matches a route name with a trailing slash
218 # appended or removed. To not spam the transaction names list, we do the same here and put these
219 # redirects all in the same "redirect trailing slashes" transaction name
220 if not route_name and app.router.redirect_slashes and scope["path"] != "/":
221 redirect_scope = dict(scope)
222 if scope["path"].endswith("/"):
223 redirect_scope["path"] = scope["path"][:-1]
224 trim = True
225 else:
226 redirect_scope["path"] = scope["path"] + "/"
227 trim = False
228
229 route_name = self._get_route_name(redirect_scope, routes)
230 if route_name is not None:
231 route_name = route_name + "/" if trim else route_name[:-1]
232 return route_name
233
234 def _get_route_name(self, scope, routes, route_name=None):
235 for route in routes:
236 match, child_scope = route.matches(scope)
237 if match == Match.FULL:
238 route_name = route.path
239 child_scope = {**scope, **child_scope}
240 if isinstance(route, Mount):
241 child_route_name = self._get_route_name(child_scope, route.routes, route_name)
242 if child_route_name is None:
243 route_name = None
244 else:
245 route_name += child_route_name
246 return route_name
247 elif match == Match.PARTIAL and route_name is None:
248 route_name = route.path
249
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/elasticapm/contrib/starlette/__init__.py b/elasticapm/contrib/starlette/__init__.py
--- a/elasticapm/contrib/starlette/__init__.py
+++ b/elasticapm/contrib/starlette/__init__.py
@@ -237,7 +237,7 @@
if match == Match.FULL:
route_name = route.path
child_scope = {**scope, **child_scope}
- if isinstance(route, Mount):
+ if isinstance(route, Mount) and route.routes:
child_route_name = self._get_route_name(child_scope, route.routes, route_name)
if child_route_name is None:
route_name = None
|
{"golden_diff": "diff --git a/elasticapm/contrib/starlette/__init__.py b/elasticapm/contrib/starlette/__init__.py\n--- a/elasticapm/contrib/starlette/__init__.py\n+++ b/elasticapm/contrib/starlette/__init__.py\n@@ -237,7 +237,7 @@\n if match == Match.FULL:\n route_name = route.path\n child_scope = {**scope, **child_scope}\n- if isinstance(route, Mount):\n+ if isinstance(route, Mount) and route.routes:\n child_route_name = self._get_route_name(child_scope, route.routes, route_name)\n if child_route_name is None:\n route_name = None\n", "issue": "Crash on starlette middleware in mount endpoints (StaticFiles)\n**Describe the bug**: Elastic middleware crash on access to static files\r\n\r\n**Environment (please complete the following information)**\r\n- OS: [e.g. Linux]\r\n- Python version: 3.9.4\r\n- Framework and version [e.g. Django 2.1]: FastAPI\r\n- APM Server version: 7.12.1\r\n- Agent version: 7.12.1\r\n\r\n```python\r\nfrom __future__ import annotations\r\n\r\nimport uvicorn\r\nfrom elasticapm.contrib.starlette import ElasticAPM, make_apm_client\r\nfrom fastapi import FastAPI\r\nfrom fastapi.staticfiles import StaticFiles\r\n\r\napp = FastAPI(routes=[])\r\napm_config = {\r\n \"SERVICE_NAME\": \"testing\",\r\n \"ENABLED\": True,\r\n \"SERVER_URL\": \"http://localhost:8200\",\r\n \"CAPTURE_HEADERS\": True,\r\n \"CAPTURE_BODY\": \"all\",\r\n}\r\napm = make_apm_client(apm_config)\r\napp.add_middleware(ElasticAPM, client=apm)\r\napp.mount(\"/static\",StaticFiles(directory=\"/\"),name=\"static\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n uvicorn.run(app, host=\"0.0.0.0\", debug=False)\r\n```\r\n\r\nThe issue is happens when elastic running the function _get_route_name.\r\nthe middleware will try to loop over the routes where starlette will return None in some cases:\r\n```python\r\n File \"/home/syniex/.local/lib/python3.9/site-packages/elasticapm/contrib/starlette/__init__.py\", line 233, in _get_route_name\r\n for route in routes:\r\n \u2514 None\r\n\r\nTypeError: 'NoneType' object is not iterable\r\n\r\n```\r\nStarlette code that makes the issue:\r\n```python\r\n@property\r\n def routes(self) -> typing.List[BaseRoute]:\r\n return getattr(self.app, \"routes\", None)\r\n```\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nfrom __future__ import absolute_import\n\nfrom typing import Dict, Optional\n\nimport starlette\nfrom starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint\nfrom starlette.requests import Request\nfrom starlette.responses import Response\nfrom starlette.routing import Match, Mount\nfrom starlette.types import ASGIApp\n\nimport elasticapm\nimport elasticapm.instrumentation.control\nfrom elasticapm.base import Client\nfrom elasticapm.conf import constants\nfrom elasticapm.contrib.asyncio.traces import set_context\nfrom elasticapm.contrib.starlette.utils import get_body, get_data_from_request, get_data_from_response\nfrom elasticapm.utils.disttracing import TraceParent\nfrom elasticapm.utils.logging import get_logger\n\nlogger = get_logger(\"elasticapm.errors.client\")\n\n\ndef make_apm_client(config: Optional[Dict] = None, client_cls=Client, **defaults) -> Client:\n \"\"\"Builds ElasticAPM client.\n\n Args:\n config (dict): Dictionary of Client configuration. All keys must be uppercase. See `elasticapm.conf.Config`.\n client_cls (Client): Must be Client or its child.\n **defaults: Additional parameters for Client. See `elasticapm.base.Client`\n\n Returns:\n Client\n \"\"\"\n if \"framework_name\" not in defaults:\n defaults[\"framework_name\"] = \"starlette\"\n defaults[\"framework_version\"] = starlette.__version__\n\n return client_cls(config, **defaults)\n\n\nclass ElasticAPM(BaseHTTPMiddleware):\n \"\"\"\n Starlette / FastAPI middleware for Elastic APM capturing.\n\n >>> elasticapm = make_apm_client({\n >>> 'SERVICE_NAME': 'myapp',\n >>> 'DEBUG': True,\n >>> 'SERVER_URL': 'http://localhost:8200',\n >>> 'CAPTURE_HEADERS': True,\n >>> 'CAPTURE_BODY': 'all'\n >>> })\n\n >>> app.add_middleware(ElasticAPM, client=elasticapm)\n\n Pass an arbitrary APP_NAME and SECRET_TOKEN::\n\n >>> elasticapm = ElasticAPM(app, service_name='myapp', secret_token='asdasdasd')\n\n Pass an explicit client::\n\n >>> elasticapm = ElasticAPM(app, client=client)\n\n Automatically configure logging::\n\n >>> elasticapm = ElasticAPM(app, logging=True)\n\n Capture an exception::\n\n >>> try:\n >>> 1 / 0\n >>> except ZeroDivisionError:\n >>> elasticapm.capture_exception()\n\n Capture a message::\n\n >>> elasticapm.capture_message('hello, world!')\n \"\"\"\n\n def __init__(self, app: ASGIApp, client: Client):\n \"\"\"\n\n Args:\n app (ASGIApp): Starlette app\n client (Client): ElasticAPM Client\n \"\"\"\n self.client = client\n\n if self.client.config.instrument and self.client.config.enabled:\n elasticapm.instrumentation.control.instrument()\n\n super().__init__(app)\n\n async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response:\n \"\"\"Processes the whole request APM capturing.\n\n Args:\n request (Request)\n call_next (RequestResponseEndpoint): Next request process in Starlette.\n\n Returns:\n Response\n \"\"\"\n await self._request_started(request)\n\n try:\n response = await call_next(request)\n elasticapm.set_transaction_outcome(constants.OUTCOME.SUCCESS, override=False)\n except Exception:\n await 
self.capture_exception(\n context={\"request\": await get_data_from_request(request, self.client.config, constants.ERROR)}\n )\n elasticapm.set_transaction_result(\"HTTP 5xx\", override=False)\n elasticapm.set_transaction_outcome(constants.OUTCOME.FAILURE, override=False)\n elasticapm.set_context({\"status_code\": 500}, \"response\")\n\n raise\n else:\n await self._request_finished(response)\n finally:\n self.client.end_transaction()\n\n return response\n\n async def capture_exception(self, *args, **kwargs):\n \"\"\"Captures your exception.\n\n Args:\n *args:\n **kwargs:\n \"\"\"\n self.client.capture_exception(*args, **kwargs)\n\n async def capture_message(self, *args, **kwargs):\n \"\"\"Captures your message.\n\n Args:\n *args: Whatever\n **kwargs: Whatever\n \"\"\"\n self.client.capture_message(*args, **kwargs)\n\n async def _request_started(self, request: Request):\n \"\"\"Captures the begin of the request processing to APM.\n\n Args:\n request (Request)\n \"\"\"\n # When we consume the body, we replace the streaming mechanism with\n # a mocked version -- this workaround came from\n # https://github.com/encode/starlette/issues/495#issuecomment-513138055\n # and we call the workaround here to make sure that regardless of\n # `capture_body` settings, we will have access to the body if we need it.\n if self.client.config.capture_body != \"off\":\n await get_body(request)\n\n if not self.client.should_ignore_url(request.url.path):\n trace_parent = TraceParent.from_headers(dict(request.headers))\n self.client.begin_transaction(\"request\", trace_parent=trace_parent)\n\n await set_context(\n lambda: get_data_from_request(request, self.client.config, constants.TRANSACTION), \"request\"\n )\n transaction_name = self.get_route_name(request) or request.url.path\n elasticapm.set_transaction_name(\"{} {}\".format(request.method, transaction_name), override=False)\n\n async def _request_finished(self, response: Response):\n \"\"\"Captures the end of the request processing to APM.\n\n Args:\n response (Response)\n \"\"\"\n await set_context(\n lambda: get_data_from_response(response, self.client.config, constants.TRANSACTION), \"response\"\n )\n\n result = \"HTTP {}xx\".format(response.status_code // 100)\n elasticapm.set_transaction_result(result, override=False)\n\n def get_route_name(self, request: Request) -> str:\n app = request.app\n scope = request.scope\n routes = app.routes\n route_name = self._get_route_name(scope, routes)\n\n # Starlette magically redirects requests if the path matches a route name with a trailing slash\n # appended or removed. 
To not spam the transaction names list, we do the same here and put these\n # redirects all in the same \"redirect trailing slashes\" transaction name\n if not route_name and app.router.redirect_slashes and scope[\"path\"] != \"/\":\n redirect_scope = dict(scope)\n if scope[\"path\"].endswith(\"/\"):\n redirect_scope[\"path\"] = scope[\"path\"][:-1]\n trim = True\n else:\n redirect_scope[\"path\"] = scope[\"path\"] + \"/\"\n trim = False\n\n route_name = self._get_route_name(redirect_scope, routes)\n if route_name is not None:\n route_name = route_name + \"/\" if trim else route_name[:-1]\n return route_name\n\n def _get_route_name(self, scope, routes, route_name=None):\n for route in routes:\n match, child_scope = route.matches(scope)\n if match == Match.FULL:\n route_name = route.path\n child_scope = {**scope, **child_scope}\n if isinstance(route, Mount):\n child_route_name = self._get_route_name(child_scope, route.routes, route_name)\n if child_route_name is None:\n route_name = None\n else:\n route_name += child_route_name\n return route_name\n elif match == Match.PARTIAL and route_name is None:\n route_name = route.path\n", "path": "elasticapm/contrib/starlette/__init__.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nfrom __future__ import absolute_import\n\nfrom typing import Dict, Optional\n\nimport starlette\nfrom starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint\nfrom starlette.requests import Request\nfrom starlette.responses import Response\nfrom starlette.routing import Match, Mount\nfrom starlette.types import ASGIApp\n\nimport elasticapm\nimport elasticapm.instrumentation.control\nfrom elasticapm.base import Client\nfrom elasticapm.conf import constants\nfrom elasticapm.contrib.asyncio.traces import set_context\nfrom elasticapm.contrib.starlette.utils import get_body, get_data_from_request, get_data_from_response\nfrom elasticapm.utils.disttracing import TraceParent\nfrom elasticapm.utils.logging import get_logger\n\nlogger = get_logger(\"elasticapm.errors.client\")\n\n\ndef make_apm_client(config: Optional[Dict] = None, client_cls=Client, **defaults) -> Client:\n \"\"\"Builds ElasticAPM client.\n\n Args:\n config (dict): Dictionary of Client configuration. All keys must be uppercase. See `elasticapm.conf.Config`.\n client_cls (Client): Must be Client or its child.\n **defaults: Additional parameters for Client. See `elasticapm.base.Client`\n\n Returns:\n Client\n \"\"\"\n if \"framework_name\" not in defaults:\n defaults[\"framework_name\"] = \"starlette\"\n defaults[\"framework_version\"] = starlette.__version__\n\n return client_cls(config, **defaults)\n\n\nclass ElasticAPM(BaseHTTPMiddleware):\n \"\"\"\n Starlette / FastAPI middleware for Elastic APM capturing.\n\n >>> elasticapm = make_apm_client({\n >>> 'SERVICE_NAME': 'myapp',\n >>> 'DEBUG': True,\n >>> 'SERVER_URL': 'http://localhost:8200',\n >>> 'CAPTURE_HEADERS': True,\n >>> 'CAPTURE_BODY': 'all'\n >>> })\n\n >>> app.add_middleware(ElasticAPM, client=elasticapm)\n\n Pass an arbitrary APP_NAME and SECRET_TOKEN::\n\n >>> elasticapm = ElasticAPM(app, service_name='myapp', secret_token='asdasdasd')\n\n Pass an explicit client::\n\n >>> elasticapm = ElasticAPM(app, client=client)\n\n Automatically configure logging::\n\n >>> elasticapm = ElasticAPM(app, logging=True)\n\n Capture an exception::\n\n >>> try:\n >>> 1 / 0\n >>> except ZeroDivisionError:\n >>> elasticapm.capture_exception()\n\n Capture a message::\n\n >>> elasticapm.capture_message('hello, world!')\n \"\"\"\n\n def __init__(self, app: ASGIApp, client: Client):\n \"\"\"\n\n Args:\n app (ASGIApp): Starlette app\n client (Client): ElasticAPM Client\n \"\"\"\n self.client = client\n\n if self.client.config.instrument and self.client.config.enabled:\n elasticapm.instrumentation.control.instrument()\n\n super().__init__(app)\n\n async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response:\n \"\"\"Processes the whole request APM capturing.\n\n Args:\n request (Request)\n call_next (RequestResponseEndpoint): Next request process in Starlette.\n\n Returns:\n Response\n \"\"\"\n await self._request_started(request)\n\n try:\n response = await call_next(request)\n elasticapm.set_transaction_outcome(constants.OUTCOME.SUCCESS, override=False)\n except Exception:\n await 
self.capture_exception(\n context={\"request\": await get_data_from_request(request, self.client.config, constants.ERROR)}\n )\n elasticapm.set_transaction_result(\"HTTP 5xx\", override=False)\n elasticapm.set_transaction_outcome(constants.OUTCOME.FAILURE, override=False)\n elasticapm.set_context({\"status_code\": 500}, \"response\")\n\n raise\n else:\n await self._request_finished(response)\n finally:\n self.client.end_transaction()\n\n return response\n\n async def capture_exception(self, *args, **kwargs):\n \"\"\"Captures your exception.\n\n Args:\n *args:\n **kwargs:\n \"\"\"\n self.client.capture_exception(*args, **kwargs)\n\n async def capture_message(self, *args, **kwargs):\n \"\"\"Captures your message.\n\n Args:\n *args: Whatever\n **kwargs: Whatever\n \"\"\"\n self.client.capture_message(*args, **kwargs)\n\n async def _request_started(self, request: Request):\n \"\"\"Captures the begin of the request processing to APM.\n\n Args:\n request (Request)\n \"\"\"\n # When we consume the body, we replace the streaming mechanism with\n # a mocked version -- this workaround came from\n # https://github.com/encode/starlette/issues/495#issuecomment-513138055\n # and we call the workaround here to make sure that regardless of\n # `capture_body` settings, we will have access to the body if we need it.\n if self.client.config.capture_body != \"off\":\n await get_body(request)\n\n if not self.client.should_ignore_url(request.url.path):\n trace_parent = TraceParent.from_headers(dict(request.headers))\n self.client.begin_transaction(\"request\", trace_parent=trace_parent)\n\n await set_context(\n lambda: get_data_from_request(request, self.client.config, constants.TRANSACTION), \"request\"\n )\n transaction_name = self.get_route_name(request) or request.url.path\n elasticapm.set_transaction_name(\"{} {}\".format(request.method, transaction_name), override=False)\n\n async def _request_finished(self, response: Response):\n \"\"\"Captures the end of the request processing to APM.\n\n Args:\n response (Response)\n \"\"\"\n await set_context(\n lambda: get_data_from_response(response, self.client.config, constants.TRANSACTION), \"response\"\n )\n\n result = \"HTTP {}xx\".format(response.status_code // 100)\n elasticapm.set_transaction_result(result, override=False)\n\n def get_route_name(self, request: Request) -> str:\n app = request.app\n scope = request.scope\n routes = app.routes\n route_name = self._get_route_name(scope, routes)\n\n # Starlette magically redirects requests if the path matches a route name with a trailing slash\n # appended or removed. 
To not spam the transaction names list, we do the same here and put these\n # redirects all in the same \"redirect trailing slashes\" transaction name\n if not route_name and app.router.redirect_slashes and scope[\"path\"] != \"/\":\n redirect_scope = dict(scope)\n if scope[\"path\"].endswith(\"/\"):\n redirect_scope[\"path\"] = scope[\"path\"][:-1]\n trim = True\n else:\n redirect_scope[\"path\"] = scope[\"path\"] + \"/\"\n trim = False\n\n route_name = self._get_route_name(redirect_scope, routes)\n if route_name is not None:\n route_name = route_name + \"/\" if trim else route_name[:-1]\n return route_name\n\n def _get_route_name(self, scope, routes, route_name=None):\n for route in routes:\n match, child_scope = route.matches(scope)\n if match == Match.FULL:\n route_name = route.path\n child_scope = {**scope, **child_scope}\n if isinstance(route, Mount) and route.routes:\n child_route_name = self._get_route_name(child_scope, route.routes, route_name)\n if child_route_name is None:\n route_name = None\n else:\n route_name += child_route_name\n return route_name\n elif match == Match.PARTIAL and route_name is None:\n route_name = route.path\n", "path": "elasticapm/contrib/starlette/__init__.py"}]}
| 3,321 | 154 |
gh_patches_debug_1741
|
rasdani/github-patches
|
git_diff
|
xonsh__xonsh-1265
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Amalgamate build on Anaconda.org fails
It builds fine on my machine so I am not really sure what is going on here:
https://anaconda.org/xonsh/xonsh/builds/403/0
``` bash
+ python setup.py install --single-version-externally-managed --record=record.txt
Traceback (most recent call last):
File "setup.py", line 210, in <module>
main()
File "setup.py", line 206, in main
setup(**skw)
File "/opt/miniconda/envs/_build/lib/python3.4/distutils/core.py", line 148, in setup
dist.run_commands()
File "/opt/miniconda/envs/_build/lib/python3.4/distutils/dist.py", line 955, in run_commands
self.run_command(cmd)
File "/opt/miniconda/envs/_build/lib/python3.4/distutils/dist.py", line 974, in run_command
cmd_obj.run()
File "setup.py", line 99, in run
build_tables()
File "setup.py", line 59, in build_tables
amalgamate.main(['amalgamate', '--debug=XONSH_DEBUG', 'xonsh'])
File "/opt/miniconda/conda-bld/work/amalgamate.py", line 369, in main
graph = make_graph(pkg, exclude=exclude)
File "/opt/miniconda/conda-bld/work/amalgamate.py", line 94, in make_graph
graph[base] = make_node(base, pkg, allowed)
File "/opt/miniconda/conda-bld/work/amalgamate.py", line 54, in make_node
raw = SOURCES[pkg, name]
File "/opt/miniconda/conda-bld/work/amalgamate.py", line 39, in __getitem__
raw = f.read()
File "/opt/miniconda/envs/_build/lib/python3.4/encodings/ascii.py", line 26, in decode
return codecs.ascii_decode(input, self.errors)[0]
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc7 in position 2102: ordinal not in range(128)
Command failed: /bin/bash -x -e /opt/miniconda/conda-bld/work/conda_build.sh
discarding /home/dev/env/bin from PATH
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xonsh/platform.py`
Content:
```
1 """Module for platform-specific constants and implementations, as well as
2 compatibility layers to make use of the 'best' implementation available
3 on a platform.
4 """
5 import os
6 import sys
7 import pathlib
8 import platform
9 import functools
10 import subprocess
11 import importlib.util
12
13 from xonsh.lazyasd import LazyObject, LazyBool
14
15 def _distro():
16 try:
17 import distro as d
18 except ImportError:
19 d = None
20 except:
21 raise
22 return d
23
24
25 distro = LazyObject(_distro, globals(), 'distro')
26 del _distro
27
28 # do not import any xonsh-modules here to avoid circular dependencies
29
30
31 #
32 # OS
33 #
34 ON_DARWIN = LazyBool(lambda: platform.system() == 'Darwin',
35 globals(), 'ON_DARWIN')
36 """ ``True`` if executed on a Darwin platform, else ``False``. """
37 ON_LINUX = LazyBool(lambda: platform.system() == 'Linux',
38 globals(), 'ON_LINUX')
39 """ ``True`` if executed on a Linux platform, else ``False``. """
40 ON_WINDOWS = LazyBool(lambda: platform.system() == 'Windows',
41 globals(), 'ON_WINDOWS')
42 """ ``True`` if executed on a native Windows platform, else ``False``. """
43 ON_CYGWIN = LazyBool(lambda: sys.platform == 'cygwin', globals(), 'ON_CYGWIN')
44 """ ``True`` if executed on a Cygwin Windows platform, else ``False``. """
45 ON_POSIX = LazyBool(lambda: (os.name == 'posix'), globals(), 'ON_POSIX')
46 """ ``True`` if executed on a POSIX-compliant platform, else ``False``. """
47
48
49 #
50 # Python & packages
51 #
52
53 PYTHON_VERSION_INFO = sys.version_info[:3]
54 """ Version of Python interpreter as three-value tuple. """
55 ON_ANACONDA = LazyBool(
56 lambda: any(s in sys.version for s in {'Anaconda', 'Continuum'}),
57 globals(), 'ON_ANACONDA')
58 """ ``True`` if executed in an Anaconda instance, else ``False``. """
59
60 def _has_pygments():
61 spec = importlib.util.find_spec('pygments')
62 return (spec is not None)
63
64
65 HAS_PYGMENTS = LazyBool(_has_pygments, globals(), 'HAS_PYGMENTS')
66 """ ``True`` if `pygments` is available, else ``False``. """
67 del _has_pygments
68
69
70 @functools.lru_cache(1)
71 def pygments_version():
72 """pygments.__version__ version if available, else Ǹone."""
73 if HAS_PYGMENTS:
74 import pygments
75 v = pygments.__version__
76 else:
77 v = None
78 return v
79
80
81 @functools.lru_cache(1)
82 def has_prompt_toolkit():
83 """ Tests if the `prompt_toolkit` is available. """
84 spec = importlib.util.find_spec('pygments')
85 return (spec is not None)
86
87
88 @functools.lru_cache(1)
89 def ptk_version():
90 """ Returns `prompt_toolkit.__version__` if available, else ``None``. """
91 if has_prompt_toolkit():
92 import prompt_toolkit
93 return getattr(prompt_toolkit, '__version__', '<0.57')
94 else:
95 return None
96
97
98 @functools.lru_cache(1)
99 def ptk_version_info():
100 """ Returns `prompt_toolkit`'s version as tuple of integers. """
101 if has_prompt_toolkit():
102 return tuple(int(x) for x in ptk_version().strip('<>+-=.').split('.'))
103 else:
104 return None
105
106
107 @functools.lru_cache(1)
108 def best_shell_type():
109 if ON_WINDOWS or has_prompt_toolkit():
110 return 'prompt_toolkit'
111 else:
112 return 'readline'
113
114
115 @functools.lru_cache(1)
116 def is_readline_available():
117 """Checks if readline is available to import."""
118 spec = importlib.util.find_spec('readline')
119 return (spec is not None)
120
121
122 #
123 # Encoding
124 #
125
126 DEFAULT_ENCODING = sys.getdefaultencoding()
127 """ Default string encoding. """
128
129
130 if PYTHON_VERSION_INFO < (3, 5, 0):
131 class DirEntry:
132 def __init__(self, directory, name):
133 self.__path__ = pathlib.Path(directory) / name
134 self.name = name
135 self.path = str(self.__path__)
136 self.is_symlink = self.__path__.is_symlink
137
138 def inode(self):
139 return os.stat(self.path, follow_symlinks=False).st_ino
140
141 def is_dir(self, *, follow_symlinks=True):
142 if follow_symlinks:
143 return self.__path__.is_dir()
144 else:
145 return not self.__path__.is_symlink() \
146 and self.__path__.is_dir()
147
148 def is_file(self, *, follow_symlinks=True):
149 if follow_symlinks:
150 return self.__path__.is_file()
151 else:
152 return not self.__path__.is_symlink() \
153 and self.__path__.is_file()
154
155 def stat(self, *, follow_symlinks=True):
156 return os.stat(self.path, follow_symlinks=follow_symlinks)
157
158 def scandir(path):
159 """ Compatibility layer for `os.scandir` from Python 3.5+. """
160 return (DirEntry(path, x) for x in os.listdir(path))
161 else:
162 scandir = os.scandir
163
164
165 #
166 # Linux distro
167 #
168
169 @functools.lru_cache(1)
170 def linux_distro():
171 """The id of the Linux distribution running on, possibly 'unknown'.
172 None on non-Linux platforms.
173 """
174 if ON_LINUX:
175 if distro:
176 ld = distro.id()
177 elif PYTHON_VERSION_INFO < (3, 7, 0):
178 ld = platform.linux_distribution()[0] or 'unknown'
179 elif '-ARCH-' in platform.platform():
180 ld = 'arch' # that's the only one we need to know for now
181 else:
182 ld = 'unknown'
183 else:
184 ld = None
185 return ld
186
187
188 #
189 # Windows
190 #
191
192 @functools.lru_cache(1)
193 def git_for_windows_path():
194 """Returns the path to git for windows, if available and None otherwise."""
195 import winreg
196 try:
197 key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
198 'SOFTWARE\\GitForWindows')
199 gfwp, _ = winreg.QueryValueEx(key, "InstallPath")
200 except FileNotFoundError:
201 gfwp = None
202 return gfwp
203
204
205 @functools.lru_cache(1)
206 def windows_bash_command():
207 """Determines teh command for Bash on windows."""
208 import winreg
209 # Check that bash is on path otherwise try the default directory
210 # used by Git for windows
211 wbc = 'bash'
212 try:
213 subprocess.check_call([wbc, '--version'],
214 stdout=subprocess.PIPE,
215 stderr=subprocess.PIPE)
216 except (FileNotFoundError, subprocess.CalledProcessError):
217 gfwp = git_for_windows_path()
218 if gfwp:
219 bashcmd = os.path.join(gfwp, 'bin\\bash.exe')
220 if os.path.isfile(bashcmd):
221 wbc = bashcmd
222 return wbc
223
224 #
225 # Environment variables defaults
226 #
227
228 def _bcd():
229 """A possibly empty tuple with default paths to Bash completions known for
230 the current platform.
231 """
232 if ON_LINUX or ON_CYGWIN:
233 if linux_distro() == 'arch':
234 bcd = (
235 '/usr/share/bash-completion/bash_completion',
236 '/usr/share/bash-completion/completions')
237 else:
238 bcd = ('/usr/share/bash-completion',
239 '/usr/share/bash-completion/completions')
240 elif ON_DARWIN:
241 bcd = ('/usr/local/etc/bash_completion',
242 '/opt/local/etc/profile.d/bash_completion.sh')
243 elif ON_WINDOWS and git_for_windows_path():
244 bcd = (os.path.join(git_for_windows_path(),
245 'usr\\share\\bash-completion'),
246 os.path.join(git_for_windows_path(),
247 'usr\\share\\bash-completion\\completions'),
248 os.path.join(git_for_windows_path(),
249 'mingw64\\share\\git\\completion\\git-completion.bash'))
250 else:
251 bcd = ()
252 return bcd
253
254
255 BASH_COMPLETIONS_DEFAULT = LazyObject(_bcd, globals(),
256 'BASH_COMPLETIONS_DEFAULT')
257 del _bcd
258
259
260 def _pd():
261 if ON_LINUX or ON_CYGWIN:
262 if linux_distro() == 'arch':
263 pd = ('/usr/local/sbin',
264 '/usr/local/bin', '/usr/bin', '/usr/bin/site_perl',
265 '/usr/bin/vendor_perl', '/usr/bin/core_perl')
266 else:
267 pd = (os.path.expanduser('~/bin'), '/usr/local/sbin',
268 '/usr/local/bin', '/usr/sbin', '/usr/bin', '/sbin', '/bin',
269 '/usr/games', '/usr/local/games')
270 elif ON_DARWIN:
271 pd = ('/usr/local/bin', '/usr/bin', '/bin', '/usr/sbin', '/sbin')
272 elif ON_WINDOWS:
273 import winreg
274 key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
275 r'SYSTEM\CurrentControlSet\Control\Session Manager\Environment')
276 pd = tuple(winreg.QueryValueEx(key, 'Path')[0].split(os.pathsep))
277 else:
278 pd = ()
279 return pd
280
281 PATH_DEFAULT = LazyObject(_pd, globals(), 'PATH_DEFAULT')
282 del _pd
283
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/xonsh/platform.py b/xonsh/platform.py
--- a/xonsh/platform.py
+++ b/xonsh/platform.py
@@ -69,7 +69,7 @@
@functools.lru_cache(1)
def pygments_version():
- """pygments.__version__ version if available, else Ǹone."""
+ """pygments.__version__ version if available, else None."""
if HAS_PYGMENTS:
import pygments
v = pygments.__version__
|
{"golden_diff": "diff --git a/xonsh/platform.py b/xonsh/platform.py\n--- a/xonsh/platform.py\n+++ b/xonsh/platform.py\n@@ -69,7 +69,7 @@\n \n @functools.lru_cache(1)\n def pygments_version():\n- \"\"\"pygments.__version__ version if available, else \u01f8one.\"\"\"\n+ \"\"\"pygments.__version__ version if available, else None.\"\"\"\n if HAS_PYGMENTS:\n import pygments\n v = pygments.__version__\n", "issue": "Amalgamate build on Anaconda.org fails\nIt builds fine on my machine so I am not really sure what is going on here: \n\nhttps://anaconda.org/xonsh/xonsh/builds/403/0\n\n``` bash\n+ python setup.py install --single-version-externally-managed --record=record.txt\nTraceback (most recent call last):\n File \"setup.py\", line 210, in <module>\n main()\n File \"setup.py\", line 206, in main\n setup(**skw)\n File \"/opt/miniconda/envs/_build/lib/python3.4/distutils/core.py\", line 148, in setup\n dist.run_commands()\n File \"/opt/miniconda/envs/_build/lib/python3.4/distutils/dist.py\", line 955, in run_commands\n self.run_command(cmd)\n File \"/opt/miniconda/envs/_build/lib/python3.4/distutils/dist.py\", line 974, in run_command\n cmd_obj.run()\n File \"setup.py\", line 99, in run\n build_tables()\n File \"setup.py\", line 59, in build_tables\n amalgamate.main(['amalgamate', '--debug=XONSH_DEBUG', 'xonsh'])\n File \"/opt/miniconda/conda-bld/work/amalgamate.py\", line 369, in main\n graph = make_graph(pkg, exclude=exclude)\n File \"/opt/miniconda/conda-bld/work/amalgamate.py\", line 94, in make_graph\n graph[base] = make_node(base, pkg, allowed)\n File \"/opt/miniconda/conda-bld/work/amalgamate.py\", line 54, in make_node\n raw = SOURCES[pkg, name]\n File \"/opt/miniconda/conda-bld/work/amalgamate.py\", line 39, in __getitem__\n raw = f.read()\n File \"/opt/miniconda/envs/_build/lib/python3.4/encodings/ascii.py\", line 26, in decode\n return codecs.ascii_decode(input, self.errors)[0]\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xc7 in position 2102: ordinal not in range(128)\nCommand failed: /bin/bash -x -e /opt/miniconda/conda-bld/work/conda_build.sh\ndiscarding /home/dev/env/bin from PATH\n\n```\n\n", "before_files": [{"content": "\"\"\"Module for platform-specific constants and implementations, as well as\ncompatibility layers to make use of the 'best' implementation available\non a platform.\n\"\"\"\nimport os\nimport sys\nimport pathlib\nimport platform\nimport functools\nimport subprocess\nimport importlib.util\n\nfrom xonsh.lazyasd import LazyObject, LazyBool\n\ndef _distro():\n try:\n import distro as d\n except ImportError:\n d = None\n except:\n raise\n return d\n\n\ndistro = LazyObject(_distro, globals(), 'distro')\ndel _distro\n\n# do not import any xonsh-modules here to avoid circular dependencies\n\n\n#\n# OS\n#\nON_DARWIN = LazyBool(lambda: platform.system() == 'Darwin',\n globals(), 'ON_DARWIN')\n\"\"\" ``True`` if executed on a Darwin platform, else ``False``. \"\"\"\nON_LINUX = LazyBool(lambda: platform.system() == 'Linux',\n globals(), 'ON_LINUX')\n\"\"\" ``True`` if executed on a Linux platform, else ``False``. \"\"\"\nON_WINDOWS = LazyBool(lambda: platform.system() == 'Windows',\n globals(), 'ON_WINDOWS')\n\"\"\" ``True`` if executed on a native Windows platform, else ``False``. \"\"\"\nON_CYGWIN = LazyBool(lambda: sys.platform == 'cygwin', globals(), 'ON_CYGWIN')\n\"\"\" ``True`` if executed on a Cygwin Windows platform, else ``False``. 
\"\"\"\nON_POSIX = LazyBool(lambda: (os.name == 'posix'), globals(), 'ON_POSIX')\n\"\"\" ``True`` if executed on a POSIX-compliant platform, else ``False``. \"\"\"\n\n\n#\n# Python & packages\n#\n\nPYTHON_VERSION_INFO = sys.version_info[:3]\n\"\"\" Version of Python interpreter as three-value tuple. \"\"\"\nON_ANACONDA = LazyBool(\n lambda: any(s in sys.version for s in {'Anaconda', 'Continuum'}),\n globals(), 'ON_ANACONDA')\n\"\"\" ``True`` if executed in an Anaconda instance, else ``False``. \"\"\"\n\ndef _has_pygments():\n spec = importlib.util.find_spec('pygments')\n return (spec is not None)\n\n\nHAS_PYGMENTS = LazyBool(_has_pygments, globals(), 'HAS_PYGMENTS')\n\"\"\" ``True`` if `pygments` is available, else ``False``. \"\"\"\ndel _has_pygments\n\n\[email protected]_cache(1)\ndef pygments_version():\n \"\"\"pygments.__version__ version if available, else \u01f8one.\"\"\"\n if HAS_PYGMENTS:\n import pygments\n v = pygments.__version__\n else:\n v = None\n return v\n\n\[email protected]_cache(1)\ndef has_prompt_toolkit():\n \"\"\" Tests if the `prompt_toolkit` is available. \"\"\"\n spec = importlib.util.find_spec('pygments')\n return (spec is not None)\n\n\[email protected]_cache(1)\ndef ptk_version():\n \"\"\" Returns `prompt_toolkit.__version__` if available, else ``None``. \"\"\"\n if has_prompt_toolkit():\n import prompt_toolkit\n return getattr(prompt_toolkit, '__version__', '<0.57')\n else:\n return None\n\n\[email protected]_cache(1)\ndef ptk_version_info():\n \"\"\" Returns `prompt_toolkit`'s version as tuple of integers. \"\"\"\n if has_prompt_toolkit():\n return tuple(int(x) for x in ptk_version().strip('<>+-=.').split('.'))\n else:\n return None\n\n\[email protected]_cache(1)\ndef best_shell_type():\n if ON_WINDOWS or has_prompt_toolkit():\n return 'prompt_toolkit'\n else:\n return 'readline'\n\n\[email protected]_cache(1)\ndef is_readline_available():\n \"\"\"Checks if readline is available to import.\"\"\"\n spec = importlib.util.find_spec('readline')\n return (spec is not None)\n\n\n#\n# Encoding\n#\n\nDEFAULT_ENCODING = sys.getdefaultencoding()\n\"\"\" Default string encoding. \"\"\"\n\n\nif PYTHON_VERSION_INFO < (3, 5, 0):\n class DirEntry:\n def __init__(self, directory, name):\n self.__path__ = pathlib.Path(directory) / name\n self.name = name\n self.path = str(self.__path__)\n self.is_symlink = self.__path__.is_symlink\n\n def inode(self):\n return os.stat(self.path, follow_symlinks=False).st_ino\n\n def is_dir(self, *, follow_symlinks=True):\n if follow_symlinks:\n return self.__path__.is_dir()\n else:\n return not self.__path__.is_symlink() \\\n and self.__path__.is_dir()\n\n def is_file(self, *, follow_symlinks=True):\n if follow_symlinks:\n return self.__path__.is_file()\n else:\n return not self.__path__.is_symlink() \\\n and self.__path__.is_file()\n\n def stat(self, *, follow_symlinks=True):\n return os.stat(self.path, follow_symlinks=follow_symlinks)\n\n def scandir(path):\n \"\"\" Compatibility layer for `os.scandir` from Python 3.5+. 
\"\"\"\n return (DirEntry(path, x) for x in os.listdir(path))\nelse:\n scandir = os.scandir\n\n\n#\n# Linux distro\n#\n\[email protected]_cache(1)\ndef linux_distro():\n \"\"\"The id of the Linux distribution running on, possibly 'unknown'.\n None on non-Linux platforms.\n \"\"\"\n if ON_LINUX:\n if distro:\n ld = distro.id()\n elif PYTHON_VERSION_INFO < (3, 7, 0):\n ld = platform.linux_distribution()[0] or 'unknown'\n elif '-ARCH-' in platform.platform():\n ld = 'arch' # that's the only one we need to know for now\n else:\n ld = 'unknown'\n else:\n ld = None\n return ld\n\n\n#\n# Windows\n#\n\[email protected]_cache(1)\ndef git_for_windows_path():\n \"\"\"Returns the path to git for windows, if available and None otherwise.\"\"\"\n import winreg\n try:\n key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,\n 'SOFTWARE\\\\GitForWindows')\n gfwp, _ = winreg.QueryValueEx(key, \"InstallPath\")\n except FileNotFoundError:\n gfwp = None\n return gfwp\n\n\[email protected]_cache(1)\ndef windows_bash_command():\n \"\"\"Determines teh command for Bash on windows.\"\"\"\n import winreg\n # Check that bash is on path otherwise try the default directory\n # used by Git for windows\n wbc = 'bash'\n try:\n subprocess.check_call([wbc, '--version'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n except (FileNotFoundError, subprocess.CalledProcessError):\n gfwp = git_for_windows_path()\n if gfwp:\n bashcmd = os.path.join(gfwp, 'bin\\\\bash.exe')\n if os.path.isfile(bashcmd):\n wbc = bashcmd\n return wbc\n\n#\n# Environment variables defaults\n#\n\ndef _bcd():\n \"\"\"A possibly empty tuple with default paths to Bash completions known for\n the current platform.\n \"\"\"\n if ON_LINUX or ON_CYGWIN:\n if linux_distro() == 'arch':\n bcd = (\n '/usr/share/bash-completion/bash_completion',\n '/usr/share/bash-completion/completions')\n else:\n bcd = ('/usr/share/bash-completion',\n '/usr/share/bash-completion/completions')\n elif ON_DARWIN:\n bcd = ('/usr/local/etc/bash_completion',\n '/opt/local/etc/profile.d/bash_completion.sh')\n elif ON_WINDOWS and git_for_windows_path():\n bcd = (os.path.join(git_for_windows_path(),\n 'usr\\\\share\\\\bash-completion'),\n os.path.join(git_for_windows_path(),\n 'usr\\\\share\\\\bash-completion\\\\completions'),\n os.path.join(git_for_windows_path(),\n 'mingw64\\\\share\\\\git\\\\completion\\\\git-completion.bash'))\n else:\n bcd = ()\n return bcd\n\n\nBASH_COMPLETIONS_DEFAULT = LazyObject(_bcd, globals(),\n 'BASH_COMPLETIONS_DEFAULT')\ndel _bcd\n\n\ndef _pd():\n if ON_LINUX or ON_CYGWIN:\n if linux_distro() == 'arch':\n pd = ('/usr/local/sbin',\n '/usr/local/bin', '/usr/bin', '/usr/bin/site_perl',\n '/usr/bin/vendor_perl', '/usr/bin/core_perl')\n else:\n pd = (os.path.expanduser('~/bin'), '/usr/local/sbin',\n '/usr/local/bin', '/usr/sbin', '/usr/bin', '/sbin', '/bin',\n '/usr/games', '/usr/local/games')\n elif ON_DARWIN:\n pd = ('/usr/local/bin', '/usr/bin', '/bin', '/usr/sbin', '/sbin')\n elif ON_WINDOWS:\n import winreg\n key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,\n r'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment')\n pd = tuple(winreg.QueryValueEx(key, 'Path')[0].split(os.pathsep))\n else:\n pd = ()\n return pd\n\nPATH_DEFAULT = LazyObject(_pd, globals(), 'PATH_DEFAULT')\ndel _pd\n", "path": "xonsh/platform.py"}], "after_files": [{"content": "\"\"\"Module for platform-specific constants and implementations, as well as\ncompatibility layers to make use of the 'best' implementation available\non a platform.\n\"\"\"\nimport os\nimport sys\nimport 
pathlib\nimport platform\nimport functools\nimport subprocess\nimport importlib.util\n\nfrom xonsh.lazyasd import LazyObject, LazyBool\n\ndef _distro():\n try:\n import distro as d\n except ImportError:\n d = None\n except:\n raise\n return d\n\n\ndistro = LazyObject(_distro, globals(), 'distro')\ndel _distro\n\n# do not import any xonsh-modules here to avoid circular dependencies\n\n\n#\n# OS\n#\nON_DARWIN = LazyBool(lambda: platform.system() == 'Darwin',\n globals(), 'ON_DARWIN')\n\"\"\" ``True`` if executed on a Darwin platform, else ``False``. \"\"\"\nON_LINUX = LazyBool(lambda: platform.system() == 'Linux',\n globals(), 'ON_LINUX')\n\"\"\" ``True`` if executed on a Linux platform, else ``False``. \"\"\"\nON_WINDOWS = LazyBool(lambda: platform.system() == 'Windows',\n globals(), 'ON_WINDOWS')\n\"\"\" ``True`` if executed on a native Windows platform, else ``False``. \"\"\"\nON_CYGWIN = LazyBool(lambda: sys.platform == 'cygwin', globals(), 'ON_CYGWIN')\n\"\"\" ``True`` if executed on a Cygwin Windows platform, else ``False``. \"\"\"\nON_POSIX = LazyBool(lambda: (os.name == 'posix'), globals(), 'ON_POSIX')\n\"\"\" ``True`` if executed on a POSIX-compliant platform, else ``False``. \"\"\"\n\n\n#\n# Python & packages\n#\n\nPYTHON_VERSION_INFO = sys.version_info[:3]\n\"\"\" Version of Python interpreter as three-value tuple. \"\"\"\nON_ANACONDA = LazyBool(\n lambda: any(s in sys.version for s in {'Anaconda', 'Continuum'}),\n globals(), 'ON_ANACONDA')\n\"\"\" ``True`` if executed in an Anaconda instance, else ``False``. \"\"\"\n\ndef _has_pygments():\n spec = importlib.util.find_spec('pygments')\n return (spec is not None)\n\n\nHAS_PYGMENTS = LazyBool(_has_pygments, globals(), 'HAS_PYGMENTS')\n\"\"\" ``True`` if `pygments` is available, else ``False``. \"\"\"\ndel _has_pygments\n\n\[email protected]_cache(1)\ndef pygments_version():\n \"\"\"pygments.__version__ version if available, else None.\"\"\"\n if HAS_PYGMENTS:\n import pygments\n v = pygments.__version__\n else:\n v = None\n return v\n\n\[email protected]_cache(1)\ndef has_prompt_toolkit():\n \"\"\" Tests if the `prompt_toolkit` is available. \"\"\"\n spec = importlib.util.find_spec('pygments')\n return (spec is not None)\n\n\[email protected]_cache(1)\ndef ptk_version():\n \"\"\" Returns `prompt_toolkit.__version__` if available, else ``None``. \"\"\"\n if has_prompt_toolkit():\n import prompt_toolkit\n return getattr(prompt_toolkit, '__version__', '<0.57')\n else:\n return None\n\n\[email protected]_cache(1)\ndef ptk_version_info():\n \"\"\" Returns `prompt_toolkit`'s version as tuple of integers. \"\"\"\n if has_prompt_toolkit():\n return tuple(int(x) for x in ptk_version().strip('<>+-=.').split('.'))\n else:\n return None\n\n\[email protected]_cache(1)\ndef best_shell_type():\n if ON_WINDOWS or has_prompt_toolkit():\n return 'prompt_toolkit'\n else:\n return 'readline'\n\n\[email protected]_cache(1)\ndef is_readline_available():\n \"\"\"Checks if readline is available to import.\"\"\"\n spec = importlib.util.find_spec('readline')\n return (spec is not None)\n\n\n#\n# Encoding\n#\n\nDEFAULT_ENCODING = sys.getdefaultencoding()\n\"\"\" Default string encoding. 
\"\"\"\n\n\nif PYTHON_VERSION_INFO < (3, 5, 0):\n class DirEntry:\n def __init__(self, directory, name):\n self.__path__ = pathlib.Path(directory) / name\n self.name = name\n self.path = str(self.__path__)\n self.is_symlink = self.__path__.is_symlink\n\n def inode(self):\n return os.stat(self.path, follow_symlinks=False).st_ino\n\n def is_dir(self, *, follow_symlinks=True):\n if follow_symlinks:\n return self.__path__.is_dir()\n else:\n return not self.__path__.is_symlink() \\\n and self.__path__.is_dir()\n\n def is_file(self, *, follow_symlinks=True):\n if follow_symlinks:\n return self.__path__.is_file()\n else:\n return not self.__path__.is_symlink() \\\n and self.__path__.is_file()\n\n def stat(self, *, follow_symlinks=True):\n return os.stat(self.path, follow_symlinks=follow_symlinks)\n\n def scandir(path):\n \"\"\" Compatibility layer for `os.scandir` from Python 3.5+. \"\"\"\n return (DirEntry(path, x) for x in os.listdir(path))\nelse:\n scandir = os.scandir\n\n\n#\n# Linux distro\n#\n\[email protected]_cache(1)\ndef linux_distro():\n \"\"\"The id of the Linux distribution running on, possibly 'unknown'.\n None on non-Linux platforms.\n \"\"\"\n if ON_LINUX:\n if distro:\n ld = distro.id()\n elif PYTHON_VERSION_INFO < (3, 7, 0):\n ld = platform.linux_distribution()[0] or 'unknown'\n elif '-ARCH-' in platform.platform():\n ld = 'arch' # that's the only one we need to know for now\n else:\n ld = 'unknown'\n else:\n ld = None\n return ld\n\n\n#\n# Windows\n#\n\[email protected]_cache(1)\ndef git_for_windows_path():\n \"\"\"Returns the path to git for windows, if available and None otherwise.\"\"\"\n import winreg\n try:\n key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,\n 'SOFTWARE\\\\GitForWindows')\n gfwp, _ = winreg.QueryValueEx(key, \"InstallPath\")\n except FileNotFoundError:\n gfwp = None\n return gfwp\n\n\[email protected]_cache(1)\ndef windows_bash_command():\n \"\"\"Determines teh command for Bash on windows.\"\"\"\n import winreg\n # Check that bash is on path otherwise try the default directory\n # used by Git for windows\n wbc = 'bash'\n try:\n subprocess.check_call([wbc, '--version'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n except (FileNotFoundError, subprocess.CalledProcessError):\n gfwp = git_for_windows_path()\n if gfwp:\n bashcmd = os.path.join(gfwp, 'bin\\\\bash.exe')\n if os.path.isfile(bashcmd):\n wbc = bashcmd\n return wbc\n\n#\n# Environment variables defaults\n#\n\ndef _bcd():\n \"\"\"A possibly empty tuple with default paths to Bash completions known for\n the current platform.\n \"\"\"\n if ON_LINUX or ON_CYGWIN:\n if linux_distro() == 'arch':\n bcd = (\n '/usr/share/bash-completion/bash_completion',\n '/usr/share/bash-completion/completions')\n else:\n bcd = ('/usr/share/bash-completion',\n '/usr/share/bash-completion/completions')\n elif ON_DARWIN:\n bcd = ('/usr/local/etc/bash_completion',\n '/opt/local/etc/profile.d/bash_completion.sh')\n elif ON_WINDOWS and git_for_windows_path():\n bcd = (os.path.join(git_for_windows_path(),\n 'usr\\\\share\\\\bash-completion'),\n os.path.join(git_for_windows_path(),\n 'usr\\\\share\\\\bash-completion\\\\completions'),\n os.path.join(git_for_windows_path(),\n 'mingw64\\\\share\\\\git\\\\completion\\\\git-completion.bash'))\n else:\n bcd = ()\n return bcd\n\n\nBASH_COMPLETIONS_DEFAULT = LazyObject(_bcd, globals(),\n 'BASH_COMPLETIONS_DEFAULT')\ndel _bcd\n\n\ndef _pd():\n if ON_LINUX or ON_CYGWIN:\n if linux_distro() == 'arch':\n pd = ('/usr/local/sbin',\n '/usr/local/bin', '/usr/bin', '/usr/bin/site_perl',\n 
'/usr/bin/vendor_perl', '/usr/bin/core_perl')\n else:\n pd = (os.path.expanduser('~/bin'), '/usr/local/sbin',\n '/usr/local/bin', '/usr/sbin', '/usr/bin', '/sbin', '/bin',\n '/usr/games', '/usr/local/games')\n elif ON_DARWIN:\n pd = ('/usr/local/bin', '/usr/bin', '/bin', '/usr/sbin', '/sbin')\n elif ON_WINDOWS:\n import winreg\n key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,\n r'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment')\n pd = tuple(winreg.QueryValueEx(key, 'Path')[0].split(os.pathsep))\n else:\n pd = ()\n return pd\n\nPATH_DEFAULT = LazyObject(_pd, globals(), 'PATH_DEFAULT')\ndel _pd\n", "path": "xonsh/platform.py"}]}
| 3,618 | 112 |
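The traceback in the record above boils down to UTF-8 source bytes being decoded with the interpreter's default ASCII codec (byte 0xc7 is the first byte of the stray Ǹ that the committed diff removes). A minimal sketch of that failure mode, and of the explicit-encoding alternative, is below; the sample string and the in-memory stream are illustrative assumptions, not code from amalgamate.py.

```python
# Reproduce the decode failure described in the issue above, then read the
# same bytes with an explicit UTF-8 encoding. The text content is made up;
# only the 0xc7 byte pattern mirrors the reported traceback.
import io

source_bytes = 'docstring with a stray non-ASCII char: \u01f8one\n'.encode('utf-8')

try:
    # Forcing the ASCII codec fails exactly like the reported build error.
    io.TextIOWrapper(io.BytesIO(source_bytes), encoding='ascii').read()
except UnicodeDecodeError as exc:
    print('ascii decode fails:', exc)

# Declaring the encoding (or removing the non-ASCII character, as the patch
# above does) avoids the error.
text = io.TextIOWrapper(io.BytesIO(source_bytes), encoding='utf-8').read()
print(text.strip())
```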
gh_patches_debug_18315 | rasdani/github-patches | git_diff | microsoft__Qcodes-719
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IPInstrument._send() could not send all its data
I was browsing the instruments code and found out that in the `IPInstrument` class the `socket.send(...)` function is implemented wrongly:
```python
def _send(self, cmd):
data = cmd + self._terminator
self._socket.send(data.encode())
```
This is what the documentation says about `socket.send(...)`:
> socket.send(bytes[, flags])
Send data to the socket. The socket must be connected to a remote socket. The optional flags argument has the same meaning as for recv() above. Returns the number of bytes sent. Applications are responsible for checking that all data has been sent; if only some of the data was transmitted, the application needs to attempt delivery of the remaining data.
https://docs.python.org/3.6/library/socket.html
At this moment, if `send(...)` fails, only a part of the message is transmitted. Which will create strange bugs.
A better solution is to use `socket.sendall(...)` or as the example shows:
```python
def mysend(self, msg):
totalsent = 0
while totalsent < MSGLEN:
sent = self.sock.send(msg[totalsent:])
if sent == 0:
raise RuntimeError("socket connection broken")
totalsent = totalsent + sent
```
https://docs.python.org/3.6/howto/sockets.html#socket-howto
IPInstrument._send() could not send all its data
I was browsing the instruments code and found out that in the `IPInstrument` class the `socket.send(...)` function is implemented wrongly:
```python
def _send(self, cmd):
data = cmd + self._terminator
self._socket.send(data.encode())
```
This is what the documentation says about `socket.send(...)`:
> socket.send(bytes[, flags])
Send data to the socket. The socket must be connected to a remote socket. The optional flags argument has the same meaning as for recv() above. Returns the number of bytes sent. Applications are responsible for checking that all data has been sent; if only some of the data was transmitted, the application needs to attempt delivery of the remaining data.
https://docs.python.org/3.6/library/socket.html
At this moment, if `send(...)` fails, only a part of the message is transmitted. Which will create strange bugs.
A better solution is to use `socket.sendall(...)` or as the example shows:
```python
def mysend(self, msg):
totalsent = 0
while totalsent < MSGLEN:
sent = self.sock.send(msg[totalsent:])
if sent == 0:
raise RuntimeError("socket connection broken")
totalsent = totalsent + sent
```
https://docs.python.org/3.6/howto/sockets.html#socket-howto
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qcodes/instrument/ip.py`
Content:
```
1 """Ethernet instrument driver class based on sockets."""
2 import socket
3
4 from .base import Instrument
5
6
7 class IPInstrument(Instrument):
8
9 r"""
10 Bare socket ethernet instrument implementation.
11
12 Args:
13 name (str): What this instrument is called locally.
14
15 address (Optional[str]): The IP address or name. If not given on
16 construction, must be provided before any communication.
17
18 port (Optional[int]): The IP port. If not given on construction, must
19 be provided before any communication.
20
21 timeout (number): Seconds to allow for responses. Default 5.
22
23 terminator (str): Character(s) to terminate each send. Default '\n'.
24
25 persistent (bool): Whether to leave the socket open between calls.
26 Default True.
27
28 write_confirmation (bool): Whether the instrument acknowledges writes
29 with some response we should read. Default True.
30
31 metadata (Optional[Dict]): additional static metadata to add to this
32 instrument's JSON snapshot.
33
34 See help for ``qcodes.Instrument`` for additional information on writing
35 instrument subclasses.
36 """
37
38 def __init__(self, name, address=None, port=None, timeout=5,
39 terminator='\n', persistent=True, write_confirmation=True, testing=False,
40 **kwargs):
41 super().__init__(name, testing=testing, **kwargs)
42
43 self._address = address
44 self._port = port
45 self._timeout = timeout
46 self._terminator = terminator
47 self._confirmation = write_confirmation
48
49 self._ensure_connection = EnsureConnection(self)
50 self._buffer_size = 1400
51
52 self._socket = None
53
54 self.set_persistent(persistent)
55
56 def set_address(self, address=None, port=None):
57 """
58 Change the IP address and/or port of this instrument.
59
60 Args:
61 address (Optional[str]): The IP address or name.
62 port (Optional[number]): The IP port.
63 """
64 if address is not None:
65 self._address = address
66 elif not hasattr(self, '_address'):
67 raise TypeError('This instrument doesn\'t have an address yet, '
68 'you must provide one.')
69 if port is not None:
70 self._port = port
71 elif not hasattr(self, '_port'):
72 raise TypeError('This instrument doesn\'t have a port yet, '
73 'you must provide one.')
74
75 self._disconnect()
76 self.set_persistent(self._persistent)
77
78 def set_persistent(self, persistent):
79 """
80 Change whether this instrument keeps its socket open between calls.
81
82 Args:
83 persistent (bool): Set True to keep the socket open all the time.
84 """
85 self._persistent = persistent
86 if persistent:
87 self._connect()
88 else:
89 self._disconnect()
90
91 def flush_connection(self):
92 if not self._testing:
93 self._recv()
94
95 def _connect(self):
96 if self._testing:
97 return
98
99 if self._socket is not None:
100 self._disconnect()
101
102 try:
103 self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
104 self._socket.connect((self._address, self._port))
105 self.set_timeout(self._timeout)
106 except ConnectionRefusedError:
107 self._socket.close()
108 self._socket = None
109
110 def _disconnect(self):
111 if getattr(self, '_socket', None) is None:
112 return
113
114 self._socket.shutdown(socket.SHUT_RDWR)
115 self._socket.close()
116 self._socket = None
117
118 def set_timeout(self, timeout=None):
119 """
120 Change the read timeout for the socket.
121
122 Args:
123 timeout (number): Seconds to allow for responses.
124 """
125 self._timeout = timeout
126
127 if self._socket is not None:
128 self._socket.settimeout(float(self._timeout))
129
130 def set_terminator(self, terminator):
131 r"""
132 Change the write terminator to use.
133
134 Args:
135 terminator (str): Character(s) to terminate each send.
136 Default '\n'.
137 """
138 self._terminator = terminator
139
140 def _send(self, cmd):
141 data = cmd + self._terminator
142 self._socket.send(data.encode())
143
144 def _recv(self):
145 return self._socket.recv(self._buffer_size).decode()
146
147 def close(self):
148 """Disconnect and irreversibly tear down the instrument."""
149 self._disconnect()
150 super().close()
151
152 def write_raw(self, cmd):
153 """
154 Low-level interface to send a command that gets no response.
155
156 Args:
157 cmd (str): The command to send to the instrument.
158 """
159
160 with self._ensure_connection:
161 self._send(cmd)
162 if self._confirmation:
163 self._recv()
164
165 def ask_raw(self, cmd):
166 """
167 Low-level interface to send a command an read a response.
168
169 Args:
170 cmd (str): The command to send to the instrument.
171
172 Returns:
173 str: The instrument's response.
174 """
175 with self._ensure_connection:
176 self._send(cmd)
177 return self._recv()
178
179 def __del__(self):
180 self.close()
181
182 def snapshot_base(self, update=False):
183 """
184 State of the instrument as a JSON-compatible dict.
185
186 Args:
187 update (bool): If True, update the state by querying the
188 instrument. If False, just use the latest values in memory.
189
190 Returns:
191 dict: base snapshot
192 """
193 snap = super().snapshot_base(update=update)
194
195 snap['port'] = self._port
196 snap['confirmation'] = self._confirmation
197 snap['address'] = self._address
198 snap['terminator'] = self._terminator
199 snap['timeout'] = self._timeout
200 snap['persistent'] = self._persistent
201
202 return snap
203
204
205 class EnsureConnection:
206
207 """
208 Context manager to ensure an instrument is connected when needed.
209
210 Uses ``instrument._persistent`` to determine whether or not to close
211 the connection immediately on completion.
212
213 Args:
214 instrument (IPInstrument): the instance to connect.
215 """
216
217 def __init__(self, instrument):
218 self.instrument = instrument
219
220 def __enter__(self):
221 """Make sure we connect when entering the context."""
222 if not self.instrument._persistent or self.instrument._socket is None:
223 self.instrument._connect()
224
225 def __exit__(self, type, value, tb):
226 """Possibly disconnect on exiting the context."""
227 if not self.instrument._persistent:
228 self.instrument._disconnect()
229
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qcodes/instrument/ip.py b/qcodes/instrument/ip.py
--- a/qcodes/instrument/ip.py
+++ b/qcodes/instrument/ip.py
@@ -1,8 +1,10 @@
"""Ethernet instrument driver class based on sockets."""
import socket
+import logging
from .base import Instrument
+log = logging.getLogger(__name__)
class IPInstrument(Instrument):
@@ -139,10 +141,14 @@
def _send(self, cmd):
data = cmd + self._terminator
- self._socket.send(data.encode())
+ self._socket.sendall(data.encode())
def _recv(self):
- return self._socket.recv(self._buffer_size).decode()
+ result = self._socket.recv(self._buffer_size)
+ if result == b'':
+ log.warning("Got empty response from Socket recv() "
+ "Connection broken.")
+ return result.decode()
def close(self):
"""Disconnect and irreversibly tear down the instrument."""
|
{"golden_diff": "diff --git a/qcodes/instrument/ip.py b/qcodes/instrument/ip.py\n--- a/qcodes/instrument/ip.py\n+++ b/qcodes/instrument/ip.py\n@@ -1,8 +1,10 @@\n \"\"\"Ethernet instrument driver class based on sockets.\"\"\"\n import socket\n+import logging\n \n from .base import Instrument\n \n+log = logging.getLogger(__name__)\n \n class IPInstrument(Instrument):\n \n@@ -139,10 +141,14 @@\n \n def _send(self, cmd):\n data = cmd + self._terminator\n- self._socket.send(data.encode())\n+ self._socket.sendall(data.encode())\n \n def _recv(self):\n- return self._socket.recv(self._buffer_size).decode()\n+ result = self._socket.recv(self._buffer_size)\n+ if result == b'':\n+ log.warning(\"Got empty response from Socket recv() \"\n+ \"Connection broken.\")\n+ return result.decode()\n \n def close(self):\n \"\"\"Disconnect and irreversibly tear down the instrument.\"\"\"\n", "issue": "IPInstrument._send() could not send all its data\nI was browsing the instruments code and found out that in the `IPInstrument` class the `socket.send(...)` function is implemented wrongly:\r\n\r\n```python\r\n def _send(self, cmd):\r\n data = cmd + self._terminator\r\n self._socket.send(data.encode())\r\n```\r\n\r\nThis is what the documentation says about `socket.send(...)`:\r\n\r\n> socket.send(bytes[, flags])\r\nSend data to the socket. The socket must be connected to a remote socket. The optional flags argument has the same meaning as for recv() above. Returns the number of bytes sent. Applications are responsible for checking that all data has been sent; if only some of the data was transmitted, the application needs to attempt delivery of the remaining data. \r\nhttps://docs.python.org/3.6/library/socket.html\r\n\r\nAt this moment, if `send(...)` fails, only a part of the message is transmitted. Which will create strange bugs.\r\n\r\nA better solution is to use `socket.sendall(...)` or as the example shows:\r\n```python\r\n def mysend(self, msg):\r\n totalsent = 0\r\n while totalsent < MSGLEN:\r\n sent = self.sock.send(msg[totalsent:])\r\n if sent == 0:\r\n raise RuntimeError(\"socket connection broken\")\r\n totalsent = totalsent + sent\r\n```\r\nhttps://docs.python.org/3.6/howto/sockets.html#socket-howto\nIPInstrument._send() could not send all its data\nI was browsing the instruments code and found out that in the `IPInstrument` class the `socket.send(...)` function is implemented wrongly:\r\n\r\n```python\r\n def _send(self, cmd):\r\n data = cmd + self._terminator\r\n self._socket.send(data.encode())\r\n```\r\n\r\nThis is what the documentation says about `socket.send(...)`:\r\n\r\n> socket.send(bytes[, flags])\r\nSend data to the socket. The socket must be connected to a remote socket. The optional flags argument has the same meaning as for recv() above. Returns the number of bytes sent. Applications are responsible for checking that all data has been sent; if only some of the data was transmitted, the application needs to attempt delivery of the remaining data. \r\nhttps://docs.python.org/3.6/library/socket.html\r\n\r\nAt this moment, if `send(...)` fails, only a part of the message is transmitted. 
Which will create strange bugs.\r\n\r\nA better solution is to use `socket.sendall(...)` or as the example shows:\r\n```python\r\n def mysend(self, msg):\r\n totalsent = 0\r\n while totalsent < MSGLEN:\r\n sent = self.sock.send(msg[totalsent:])\r\n if sent == 0:\r\n raise RuntimeError(\"socket connection broken\")\r\n totalsent = totalsent + sent\r\n```\r\nhttps://docs.python.org/3.6/howto/sockets.html#socket-howto\n", "before_files": [{"content": "\"\"\"Ethernet instrument driver class based on sockets.\"\"\"\nimport socket\n\nfrom .base import Instrument\n\n\nclass IPInstrument(Instrument):\n\n r\"\"\"\n Bare socket ethernet instrument implementation.\n\n Args:\n name (str): What this instrument is called locally.\n\n address (Optional[str]): The IP address or name. If not given on\n construction, must be provided before any communication.\n\n port (Optional[int]): The IP port. If not given on construction, must\n be provided before any communication.\n\n timeout (number): Seconds to allow for responses. Default 5.\n\n terminator (str): Character(s) to terminate each send. Default '\\n'.\n\n persistent (bool): Whether to leave the socket open between calls.\n Default True.\n\n write_confirmation (bool): Whether the instrument acknowledges writes\n with some response we should read. Default True.\n\n metadata (Optional[Dict]): additional static metadata to add to this\n instrument's JSON snapshot.\n\n See help for ``qcodes.Instrument`` for additional information on writing\n instrument subclasses.\n \"\"\"\n\n def __init__(self, name, address=None, port=None, timeout=5,\n terminator='\\n', persistent=True, write_confirmation=True, testing=False,\n **kwargs):\n super().__init__(name, testing=testing, **kwargs)\n\n self._address = address\n self._port = port\n self._timeout = timeout\n self._terminator = terminator\n self._confirmation = write_confirmation\n\n self._ensure_connection = EnsureConnection(self)\n self._buffer_size = 1400\n\n self._socket = None\n\n self.set_persistent(persistent)\n\n def set_address(self, address=None, port=None):\n \"\"\"\n Change the IP address and/or port of this instrument.\n\n Args:\n address (Optional[str]): The IP address or name.\n port (Optional[number]): The IP port.\n \"\"\"\n if address is not None:\n self._address = address\n elif not hasattr(self, '_address'):\n raise TypeError('This instrument doesn\\'t have an address yet, '\n 'you must provide one.')\n if port is not None:\n self._port = port\n elif not hasattr(self, '_port'):\n raise TypeError('This instrument doesn\\'t have a port yet, '\n 'you must provide one.')\n\n self._disconnect()\n self.set_persistent(self._persistent)\n\n def set_persistent(self, persistent):\n \"\"\"\n Change whether this instrument keeps its socket open between calls.\n\n Args:\n persistent (bool): Set True to keep the socket open all the time.\n \"\"\"\n self._persistent = persistent\n if persistent:\n self._connect()\n else:\n self._disconnect()\n\n def flush_connection(self):\n if not self._testing:\n self._recv()\n\n def _connect(self):\n if self._testing:\n return\n\n if self._socket is not None:\n self._disconnect()\n\n try:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect((self._address, self._port))\n self.set_timeout(self._timeout)\n except ConnectionRefusedError:\n self._socket.close()\n self._socket = None\n\n def _disconnect(self):\n if getattr(self, '_socket', None) is None:\n return\n\n self._socket.shutdown(socket.SHUT_RDWR)\n self._socket.close()\n 
self._socket = None\n\n def set_timeout(self, timeout=None):\n \"\"\"\n Change the read timeout for the socket.\n\n Args:\n timeout (number): Seconds to allow for responses.\n \"\"\"\n self._timeout = timeout\n\n if self._socket is not None:\n self._socket.settimeout(float(self._timeout))\n\n def set_terminator(self, terminator):\n r\"\"\"\n Change the write terminator to use.\n\n Args:\n terminator (str): Character(s) to terminate each send.\n Default '\\n'.\n \"\"\"\n self._terminator = terminator\n\n def _send(self, cmd):\n data = cmd + self._terminator\n self._socket.send(data.encode())\n\n def _recv(self):\n return self._socket.recv(self._buffer_size).decode()\n\n def close(self):\n \"\"\"Disconnect and irreversibly tear down the instrument.\"\"\"\n self._disconnect()\n super().close()\n\n def write_raw(self, cmd):\n \"\"\"\n Low-level interface to send a command that gets no response.\n\n Args:\n cmd (str): The command to send to the instrument.\n \"\"\"\n\n with self._ensure_connection:\n self._send(cmd)\n if self._confirmation:\n self._recv()\n\n def ask_raw(self, cmd):\n \"\"\"\n Low-level interface to send a command an read a response.\n\n Args:\n cmd (str): The command to send to the instrument.\n\n Returns:\n str: The instrument's response.\n \"\"\"\n with self._ensure_connection:\n self._send(cmd)\n return self._recv()\n\n def __del__(self):\n self.close()\n\n def snapshot_base(self, update=False):\n \"\"\"\n State of the instrument as a JSON-compatible dict.\n\n Args:\n update (bool): If True, update the state by querying the\n instrument. If False, just use the latest values in memory.\n\n Returns:\n dict: base snapshot\n \"\"\"\n snap = super().snapshot_base(update=update)\n\n snap['port'] = self._port\n snap['confirmation'] = self._confirmation\n snap['address'] = self._address\n snap['terminator'] = self._terminator\n snap['timeout'] = self._timeout\n snap['persistent'] = self._persistent\n\n return snap\n\n\nclass EnsureConnection:\n\n \"\"\"\n Context manager to ensure an instrument is connected when needed.\n\n Uses ``instrument._persistent`` to determine whether or not to close\n the connection immediately on completion.\n\n Args:\n instrument (IPInstrument): the instance to connect.\n \"\"\"\n\n def __init__(self, instrument):\n self.instrument = instrument\n\n def __enter__(self):\n \"\"\"Make sure we connect when entering the context.\"\"\"\n if not self.instrument._persistent or self.instrument._socket is None:\n self.instrument._connect()\n\n def __exit__(self, type, value, tb):\n \"\"\"Possibly disconnect on exiting the context.\"\"\"\n if not self.instrument._persistent:\n self.instrument._disconnect()\n", "path": "qcodes/instrument/ip.py"}], "after_files": [{"content": "\"\"\"Ethernet instrument driver class based on sockets.\"\"\"\nimport socket\nimport logging\n\nfrom .base import Instrument\n\nlog = logging.getLogger(__name__)\n\nclass IPInstrument(Instrument):\n\n r\"\"\"\n Bare socket ethernet instrument implementation.\n\n Args:\n name (str): What this instrument is called locally.\n\n address (Optional[str]): The IP address or name. If not given on\n construction, must be provided before any communication.\n\n port (Optional[int]): The IP port. If not given on construction, must\n be provided before any communication.\n\n timeout (number): Seconds to allow for responses. Default 5.\n\n terminator (str): Character(s) to terminate each send. 
Default '\\n'.\n\n persistent (bool): Whether to leave the socket open between calls.\n Default True.\n\n write_confirmation (bool): Whether the instrument acknowledges writes\n with some response we should read. Default True.\n\n metadata (Optional[Dict]): additional static metadata to add to this\n instrument's JSON snapshot.\n\n See help for ``qcodes.Instrument`` for additional information on writing\n instrument subclasses.\n \"\"\"\n\n def __init__(self, name, address=None, port=None, timeout=5,\n terminator='\\n', persistent=True, write_confirmation=True, testing=False,\n **kwargs):\n super().__init__(name, testing=testing, **kwargs)\n\n self._address = address\n self._port = port\n self._timeout = timeout\n self._terminator = terminator\n self._confirmation = write_confirmation\n\n self._ensure_connection = EnsureConnection(self)\n self._buffer_size = 1400\n\n self._socket = None\n\n self.set_persistent(persistent)\n\n def set_address(self, address=None, port=None):\n \"\"\"\n Change the IP address and/or port of this instrument.\n\n Args:\n address (Optional[str]): The IP address or name.\n port (Optional[number]): The IP port.\n \"\"\"\n if address is not None:\n self._address = address\n elif not hasattr(self, '_address'):\n raise TypeError('This instrument doesn\\'t have an address yet, '\n 'you must provide one.')\n if port is not None:\n self._port = port\n elif not hasattr(self, '_port'):\n raise TypeError('This instrument doesn\\'t have a port yet, '\n 'you must provide one.')\n\n self._disconnect()\n self.set_persistent(self._persistent)\n\n def set_persistent(self, persistent):\n \"\"\"\n Change whether this instrument keeps its socket open between calls.\n\n Args:\n persistent (bool): Set True to keep the socket open all the time.\n \"\"\"\n self._persistent = persistent\n if persistent:\n self._connect()\n else:\n self._disconnect()\n\n def flush_connection(self):\n if not self._testing:\n self._recv()\n\n def _connect(self):\n if self._testing:\n return\n\n if self._socket is not None:\n self._disconnect()\n\n try:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect((self._address, self._port))\n self.set_timeout(self._timeout)\n except ConnectionRefusedError:\n self._socket.close()\n self._socket = None\n\n def _disconnect(self):\n if getattr(self, '_socket', None) is None:\n return\n\n self._socket.shutdown(socket.SHUT_RDWR)\n self._socket.close()\n self._socket = None\n\n def set_timeout(self, timeout=None):\n \"\"\"\n Change the read timeout for the socket.\n\n Args:\n timeout (number): Seconds to allow for responses.\n \"\"\"\n self._timeout = timeout\n\n if self._socket is not None:\n self._socket.settimeout(float(self._timeout))\n\n def set_terminator(self, terminator):\n r\"\"\"\n Change the write terminator to use.\n\n Args:\n terminator (str): Character(s) to terminate each send.\n Default '\\n'.\n \"\"\"\n self._terminator = terminator\n\n def _send(self, cmd):\n data = cmd + self._terminator\n self._socket.sendall(data.encode())\n\n def _recv(self):\n result = self._socket.recv(self._buffer_size)\n if result == b'':\n log.warning(\"Got empty response from Socket recv() \"\n \"Connection broken.\")\n return result.decode()\n\n def close(self):\n \"\"\"Disconnect and irreversibly tear down the instrument.\"\"\"\n self._disconnect()\n super().close()\n\n def write_raw(self, cmd):\n \"\"\"\n Low-level interface to send a command that gets no response.\n\n Args:\n cmd (str): The command to send to the instrument.\n \"\"\"\n\n 
with self._ensure_connection:\n self._send(cmd)\n if self._confirmation:\n self._recv()\n\n def ask_raw(self, cmd):\n \"\"\"\n Low-level interface to send a command an read a response.\n\n Args:\n cmd (str): The command to send to the instrument.\n\n Returns:\n str: The instrument's response.\n \"\"\"\n with self._ensure_connection:\n self._send(cmd)\n return self._recv()\n\n def __del__(self):\n self.close()\n\n def snapshot_base(self, update=False):\n \"\"\"\n State of the instrument as a JSON-compatible dict.\n\n Args:\n update (bool): If True, update the state by querying the\n instrument. If False, just use the latest values in memory.\n\n Returns:\n dict: base snapshot\n \"\"\"\n snap = super().snapshot_base(update=update)\n\n snap['port'] = self._port\n snap['confirmation'] = self._confirmation\n snap['address'] = self._address\n snap['terminator'] = self._terminator\n snap['timeout'] = self._timeout\n snap['persistent'] = self._persistent\n\n return snap\n\n\nclass EnsureConnection:\n\n \"\"\"\n Context manager to ensure an instrument is connected when needed.\n\n Uses ``instrument._persistent`` to determine whether or not to close\n the connection immediately on completion.\n\n Args:\n instrument (IPInstrument): the instance to connect.\n \"\"\"\n\n def __init__(self, instrument):\n self.instrument = instrument\n\n def __enter__(self):\n \"\"\"Make sure we connect when entering the context.\"\"\"\n if not self.instrument._persistent or self.instrument._socket is None:\n self.instrument._connect()\n\n def __exit__(self, type, value, tb):\n \"\"\"Possibly disconnect on exiting the context.\"\"\"\n if not self.instrument._persistent:\n self.instrument._disconnect()\n", "path": "qcodes/instrument/ip.py"}]}
| 2,843 | 224 |
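A standalone sketch of the behaviour the patch above settles on: `sendall()` for writes, plus a check for an empty `recv()` result signalling a broken connection. The helper names are illustrative assumptions; only the `sendall`/`recv` semantics and the 1400-byte buffer size come from the record.

```python
# Minimal sketch of the partial-send fix: sendall() retries until every byte
# is written, and an empty recv() result is reported as a broken connection.
import logging
import socket

log = logging.getLogger(__name__)


def send_terminated(sock: socket.socket, cmd: str, terminator: str = '\n') -> None:
    data = (cmd + terminator).encode()
    # socket.send() may transmit only part of `data`; sendall() keeps writing
    # until everything is sent or raises if the connection fails.
    sock.sendall(data)


def recv_checked(sock: socket.socket, buffer_size: int = 1400) -> str:
    result = sock.recv(buffer_size)
    if result == b'':
        # An empty bytes object means the peer closed the connection.
        log.warning('Got empty response from socket recv(); connection broken.')
    return result.decode()
```

Using these helpers against a live instrument would follow the same connect/ask pattern as the driver in the record; they are shown here only to isolate the send/recv semantics.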
gh_patches_debug_17767 | rasdani/github-patches | git_diff | aws__aws-sdk-pandas-2796
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect error message or implementation for datetime formatting?
### Describe the bug
when starting a SQL query with timezone aware date times, you get an error message like:
`Supports only timezone aware datatype, got 2024-04-29 13:30:34.573187+00:00`
The SDK code involved is
```python
39 if value.tzinfo is not None:
40 print(type(value))
---> 41 raise TypeError(f"Supports only timezone aware datatype, got {value}. {type(value)}")
43 return f"TIMESTAMP '{value.isoformat(sep=' ', timespec='milliseconds')}'"
```
Where it checks for the existence of timezone info. And enter the if clause, if there is one.
However the error message say it support _only_ timezone aware dates.
### How to Reproduce
``` python
import awswrangler as wr
from datetime import timezone, datetime
wr.athena.start_query_execution(
database='ddd',
sql=f"""DELETE FROM ddd.t
WHERE event_date >= :start
""",
params={
"start": datetime.now(timezone.utc),
})
``````
### Expected behavior
To either have an error message like:
`Supports only timezone naive datatype, got 2024-04-29 13:30:34.573187+00:00`
or:
to correctly format timetamps with timezone info.
### Your project
Private
### Screenshots
_No response_
### OS
Mac
### Python version
3.11
### AWS SDK for pandas version
seen on main branch and ('3.4.2')
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awswrangler/_sql_formatter.py`
Content:
```
1 """Formatting logic for SQL parameters."""
2
3 from __future__ import annotations
4
5 import datetime
6 import decimal
7 import re
8 from abc import ABC, abstractmethod
9 from typing import Any, Callable, Sequence
10
11 from typing_extensions import Literal
12
13 from awswrangler import exceptions
14
15 _EngineTypeLiteral = Literal["presto", "hive", "partiql"]
16
17
18 class _Engine(ABC):
19 def __init__(self, engine_name: _EngineTypeLiteral) -> None:
20 self.engine_name = engine_name
21
22 def format_null(self, value: None = None) -> str:
23 return "NULL"
24
25 @abstractmethod
26 def format_string(self, value: str) -> str:
27 pass
28
29 def format_bool(self, value: bool) -> str:
30 return str(value).upper()
31
32 def format_integer(self, value: int) -> str:
33 return str(value)
34
35 def format_float(self, value: float) -> str:
36 return f"{value:f}"
37
38 def format_decimal(self, value: decimal.Decimal) -> str:
39 return f"DECIMAL '{value:f}'"
40
41 def format_timestamp(self, value: datetime.datetime) -> str:
42 if value.tzinfo is not None:
43 raise TypeError(f"Supports only timezone aware datatype, got {value}.")
44
45 return f"TIMESTAMP '{value.isoformat(sep=' ', timespec='milliseconds')}'"
46
47 def format_date(self, value: datetime.date) -> str:
48 return f"DATE '{value.isoformat()}'"
49
50 def format_array(self, value: Sequence[Any]) -> str:
51 return f"ARRAY [{', '.join(map(self.format, value))}]"
52
53 def format_dict(self, value: dict[Any, Any]) -> str:
54 if not value:
55 return "MAP()"
56
57 map_keys = list(value.keys())
58 key_type = type(map_keys[0])
59 for key in map_keys:
60 if key is None:
61 raise TypeError("Map key cannot be null.")
62 if not isinstance(key, key_type):
63 raise TypeError("All Map key elements must be the same type.")
64
65 map_values = list(value.values())
66 return (
67 f"MAP(ARRAY [{', '.join(map(self.format, map_keys))}], ARRAY [{', '.join(map(self.format, map_values))}])"
68 )
69
70 def format(self, data: Any) -> str:
71 formats_dict: dict[type[Any], Callable[[Any], str]] = {
72 bool: self.format_bool,
73 str: self.format_string,
74 int: self.format_integer,
75 datetime.datetime: self.format_timestamp,
76 datetime.date: self.format_date,
77 decimal.Decimal: self.format_decimal,
78 float: self.format_float,
79 list: self.format_array,
80 tuple: self.format_array,
81 set: self.format_array,
82 dict: self.format_dict,
83 }
84
85 if data is None:
86 return self.format_null()
87
88 for python_type, format_func in formats_dict.items():
89 if isinstance(data, python_type):
90 return format_func(data)
91
92 raise TypeError(f"Unsupported type {type(data)} in parameter.")
93
94
95 class _PrestoEngine(_Engine):
96 def __init__(self) -> None:
97 super().__init__("presto")
98
99 def format_string(self, value: str) -> str:
100 return f"""'{value.replace("'", "''")}'"""
101
102
103 class _HiveEngine(_Engine):
104 def __init__(self) -> None:
105 super().__init__("hive")
106
107 def format_string(self, value: str) -> str:
108 return "'{}'".format(
109 value.replace("\\", "\\\\")
110 .replace("'", "\\'")
111 .replace("\r", "\\r")
112 .replace("\n", "\\n")
113 .replace("\t", "\\t")
114 )
115
116
117 class _PartiQLEngine(_Engine):
118 def __init__(self) -> None:
119 super().__init__("partiql")
120
121 def format_null(self, value: None = None) -> str:
122 return "null"
123
124 def format_string(self, value: str) -> str:
125 return f"""'{value.replace("'", "''")}'"""
126
127 def format_bool(self, value: bool) -> str:
128 return "1" if value else "0"
129
130 def format_decimal(self, value: decimal.Decimal) -> str:
131 return f"'{value}'"
132
133 def format_timestamp(self, value: datetime.datetime) -> str:
134 if value.tzinfo is not None:
135 raise TypeError(f"Supports only timezone aware datatype, got {value}.")
136
137 return f"'{value.isoformat()}'"
138
139 def format_date(self, value: datetime.date) -> str:
140 return f"'{value.isoformat()}'"
141
142 def format_array(self, value: Sequence[Any]) -> str:
143 raise NotImplementedError(f"format_array not implemented for engine={self.engine_name}.")
144
145 def format_dict(self, value: dict[Any, Any]) -> str:
146 raise NotImplementedError(f"format_dict not implemented for engine={self.engine_name}.")
147
148
149 def _format_parameters(params: dict[str, Any], engine: _Engine) -> dict[str, Any]:
150 processed_params = {}
151
152 for k, v in params.items():
153 processed_params[k] = engine.format(data=v)
154
155 return processed_params
156
157
158 _PATTERN = re.compile(r":([A-Za-z0-9_]+)(?![A-Za-z0-9_])")
159
160
161 def _create_engine(engine_type: _EngineTypeLiteral) -> _Engine:
162 if engine_type == "hive":
163 return _HiveEngine()
164
165 if engine_type == "presto":
166 return _PrestoEngine()
167
168 if engine_type == "partiql":
169 return _PartiQLEngine()
170
171 raise exceptions.InvalidArgumentValue(f"Unknown engine type: {engine_type}")
172
173
174 def _process_sql_params(sql: str, params: dict[str, Any] | None, engine_type: _EngineTypeLiteral = "presto") -> str:
175 if params is None:
176 params = {}
177
178 engine = _create_engine(engine_type)
179 processed_params = _format_parameters(params, engine=engine)
180
181 def replace(match: re.Match) -> str: # type: ignore[type-arg]
182 key = match.group(1)
183
184 if key not in processed_params:
185 # do not replace anything if the parameter is not provided
186 return str(match.group(0))
187
188 return str(processed_params[key])
189
190 sql = _PATTERN.sub(replace, sql)
191
192 return sql
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/awswrangler/_sql_formatter.py b/awswrangler/_sql_formatter.py
--- a/awswrangler/_sql_formatter.py
+++ b/awswrangler/_sql_formatter.py
@@ -40,7 +40,7 @@
def format_timestamp(self, value: datetime.datetime) -> str:
if value.tzinfo is not None:
- raise TypeError(f"Supports only timezone aware datatype, got {value}.")
+ raise TypeError(f"Supports only timezone naive datatype, got {value}.")
return f"TIMESTAMP '{value.isoformat(sep=' ', timespec='milliseconds')}'"
@@ -132,7 +132,7 @@
def format_timestamp(self, value: datetime.datetime) -> str:
if value.tzinfo is not None:
- raise TypeError(f"Supports only timezone aware datatype, got {value}.")
+ raise TypeError(f"Supports only timezone naive datatype, got {value}.")
return f"'{value.isoformat()}'"
|
{"golden_diff": "diff --git a/awswrangler/_sql_formatter.py b/awswrangler/_sql_formatter.py\n--- a/awswrangler/_sql_formatter.py\n+++ b/awswrangler/_sql_formatter.py\n@@ -40,7 +40,7 @@\n \n def format_timestamp(self, value: datetime.datetime) -> str:\n if value.tzinfo is not None:\n- raise TypeError(f\"Supports only timezone aware datatype, got {value}.\")\n+ raise TypeError(f\"Supports only timezone naive datatype, got {value}.\")\n \n return f\"TIMESTAMP '{value.isoformat(sep=' ', timespec='milliseconds')}'\"\n \n@@ -132,7 +132,7 @@\n \n def format_timestamp(self, value: datetime.datetime) -> str:\n if value.tzinfo is not None:\n- raise TypeError(f\"Supports only timezone aware datatype, got {value}.\")\n+ raise TypeError(f\"Supports only timezone naive datatype, got {value}.\")\n \n return f\"'{value.isoformat()}'\"\n", "issue": "Incorrect error message or implementation for datetime formatting?\n### Describe the bug\n\nwhen starting a SQL query with timezone aware date times, you get an error message like:\r\n\r\n`Supports only timezone aware datatype, got 2024-04-29 13:30:34.573187+00:00`\r\n\r\nThe SDK code involved is\r\n\r\n```python\r\n 39 if value.tzinfo is not None:\r\n 40 print(type(value))\r\n---> 41 raise TypeError(f\"Supports only timezone aware datatype, got {value}. {type(value)}\")\r\n 43 return f\"TIMESTAMP '{value.isoformat(sep=' ', timespec='milliseconds')}'\"\r\n```\r\nWhere it checks for the existence of timezone info. And enter the if clause, if there is one.\r\n\r\nHowever the error message say it support _only_ timezone aware dates.\n\n### How to Reproduce\n\n``` python\r\nimport awswrangler as wr\r\nfrom datetime import timezone, datetime\r\n\r\nwr.athena.start_query_execution(\r\n database='ddd',\r\n sql=f\"\"\"DELETE FROM ddd.t\r\n WHERE event_date >= :start \r\n \"\"\",\r\n params={\r\n \"start\": datetime.now(timezone.utc),\r\n })\r\n``````\r\n\n\n### Expected behavior\n\nTo either have an error message like: \r\n\r\n`Supports only timezone naive datatype, got 2024-04-29 13:30:34.573187+00:00`\r\n\r\nor: \r\n\r\nto correctly format timetamps with timezone info.\n\n### Your project\n\nPrivate\n\n### Screenshots\n\n_No response_\n\n### OS\n\nMac\n\n### Python version\n\n3.11\n\n### AWS SDK for pandas version\n\nseen on main branch and ('3.4.2')\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "\"\"\"Formatting logic for SQL parameters.\"\"\"\n\nfrom __future__ import annotations\n\nimport datetime\nimport decimal\nimport re\nfrom abc import ABC, abstractmethod\nfrom typing import Any, Callable, Sequence\n\nfrom typing_extensions import Literal\n\nfrom awswrangler import exceptions\n\n_EngineTypeLiteral = Literal[\"presto\", \"hive\", \"partiql\"]\n\n\nclass _Engine(ABC):\n def __init__(self, engine_name: _EngineTypeLiteral) -> None:\n self.engine_name = engine_name\n\n def format_null(self, value: None = None) -> str:\n return \"NULL\"\n\n @abstractmethod\n def format_string(self, value: str) -> str:\n pass\n\n def format_bool(self, value: bool) -> str:\n return str(value).upper()\n\n def format_integer(self, value: int) -> str:\n return str(value)\n\n def format_float(self, value: float) -> str:\n return f\"{value:f}\"\n\n def format_decimal(self, value: decimal.Decimal) -> str:\n return f\"DECIMAL '{value:f}'\"\n\n def format_timestamp(self, value: datetime.datetime) -> str:\n if value.tzinfo is not None:\n raise TypeError(f\"Supports only timezone aware datatype, got {value}.\")\n\n return f\"TIMESTAMP '{value.isoformat(sep=' ', 
timespec='milliseconds')}'\"\n\n def format_date(self, value: datetime.date) -> str:\n return f\"DATE '{value.isoformat()}'\"\n\n def format_array(self, value: Sequence[Any]) -> str:\n return f\"ARRAY [{', '.join(map(self.format, value))}]\"\n\n def format_dict(self, value: dict[Any, Any]) -> str:\n if not value:\n return \"MAP()\"\n\n map_keys = list(value.keys())\n key_type = type(map_keys[0])\n for key in map_keys:\n if key is None:\n raise TypeError(\"Map key cannot be null.\")\n if not isinstance(key, key_type):\n raise TypeError(\"All Map key elements must be the same type.\")\n\n map_values = list(value.values())\n return (\n f\"MAP(ARRAY [{', '.join(map(self.format, map_keys))}], ARRAY [{', '.join(map(self.format, map_values))}])\"\n )\n\n def format(self, data: Any) -> str:\n formats_dict: dict[type[Any], Callable[[Any], str]] = {\n bool: self.format_bool,\n str: self.format_string,\n int: self.format_integer,\n datetime.datetime: self.format_timestamp,\n datetime.date: self.format_date,\n decimal.Decimal: self.format_decimal,\n float: self.format_float,\n list: self.format_array,\n tuple: self.format_array,\n set: self.format_array,\n dict: self.format_dict,\n }\n\n if data is None:\n return self.format_null()\n\n for python_type, format_func in formats_dict.items():\n if isinstance(data, python_type):\n return format_func(data)\n\n raise TypeError(f\"Unsupported type {type(data)} in parameter.\")\n\n\nclass _PrestoEngine(_Engine):\n def __init__(self) -> None:\n super().__init__(\"presto\")\n\n def format_string(self, value: str) -> str:\n return f\"\"\"'{value.replace(\"'\", \"''\")}'\"\"\"\n\n\nclass _HiveEngine(_Engine):\n def __init__(self) -> None:\n super().__init__(\"hive\")\n\n def format_string(self, value: str) -> str:\n return \"'{}'\".format(\n value.replace(\"\\\\\", \"\\\\\\\\\")\n .replace(\"'\", \"\\\\'\")\n .replace(\"\\r\", \"\\\\r\")\n .replace(\"\\n\", \"\\\\n\")\n .replace(\"\\t\", \"\\\\t\")\n )\n\n\nclass _PartiQLEngine(_Engine):\n def __init__(self) -> None:\n super().__init__(\"partiql\")\n\n def format_null(self, value: None = None) -> str:\n return \"null\"\n\n def format_string(self, value: str) -> str:\n return f\"\"\"'{value.replace(\"'\", \"''\")}'\"\"\"\n\n def format_bool(self, value: bool) -> str:\n return \"1\" if value else \"0\"\n\n def format_decimal(self, value: decimal.Decimal) -> str:\n return f\"'{value}'\"\n\n def format_timestamp(self, value: datetime.datetime) -> str:\n if value.tzinfo is not None:\n raise TypeError(f\"Supports only timezone aware datatype, got {value}.\")\n\n return f\"'{value.isoformat()}'\"\n\n def format_date(self, value: datetime.date) -> str:\n return f\"'{value.isoformat()}'\"\n\n def format_array(self, value: Sequence[Any]) -> str:\n raise NotImplementedError(f\"format_array not implemented for engine={self.engine_name}.\")\n\n def format_dict(self, value: dict[Any, Any]) -> str:\n raise NotImplementedError(f\"format_dict not implemented for engine={self.engine_name}.\")\n\n\ndef _format_parameters(params: dict[str, Any], engine: _Engine) -> dict[str, Any]:\n processed_params = {}\n\n for k, v in params.items():\n processed_params[k] = engine.format(data=v)\n\n return processed_params\n\n\n_PATTERN = re.compile(r\":([A-Za-z0-9_]+)(?![A-Za-z0-9_])\")\n\n\ndef _create_engine(engine_type: _EngineTypeLiteral) -> _Engine:\n if engine_type == \"hive\":\n return _HiveEngine()\n\n if engine_type == \"presto\":\n return _PrestoEngine()\n\n if engine_type == \"partiql\":\n return _PartiQLEngine()\n\n raise 
exceptions.InvalidArgumentValue(f\"Unknown engine type: {engine_type}\")\n\n\ndef _process_sql_params(sql: str, params: dict[str, Any] | None, engine_type: _EngineTypeLiteral = \"presto\") -> str:\n if params is None:\n params = {}\n\n engine = _create_engine(engine_type)\n processed_params = _format_parameters(params, engine=engine)\n\n def replace(match: re.Match) -> str: # type: ignore[type-arg]\n key = match.group(1)\n\n if key not in processed_params:\n # do not replace anything if the parameter is not provided\n return str(match.group(0))\n\n return str(processed_params[key])\n\n sql = _PATTERN.sub(replace, sql)\n\n return sql\n", "path": "awswrangler/_sql_formatter.py"}], "after_files": [{"content": "\"\"\"Formatting logic for SQL parameters.\"\"\"\n\nfrom __future__ import annotations\n\nimport datetime\nimport decimal\nimport re\nfrom abc import ABC, abstractmethod\nfrom typing import Any, Callable, Sequence\n\nfrom typing_extensions import Literal\n\nfrom awswrangler import exceptions\n\n_EngineTypeLiteral = Literal[\"presto\", \"hive\", \"partiql\"]\n\n\nclass _Engine(ABC):\n def __init__(self, engine_name: _EngineTypeLiteral) -> None:\n self.engine_name = engine_name\n\n def format_null(self, value: None = None) -> str:\n return \"NULL\"\n\n @abstractmethod\n def format_string(self, value: str) -> str:\n pass\n\n def format_bool(self, value: bool) -> str:\n return str(value).upper()\n\n def format_integer(self, value: int) -> str:\n return str(value)\n\n def format_float(self, value: float) -> str:\n return f\"{value:f}\"\n\n def format_decimal(self, value: decimal.Decimal) -> str:\n return f\"DECIMAL '{value:f}'\"\n\n def format_timestamp(self, value: datetime.datetime) -> str:\n if value.tzinfo is not None:\n raise TypeError(f\"Supports only timezone naive datatype, got {value}.\")\n\n return f\"TIMESTAMP '{value.isoformat(sep=' ', timespec='milliseconds')}'\"\n\n def format_date(self, value: datetime.date) -> str:\n return f\"DATE '{value.isoformat()}'\"\n\n def format_array(self, value: Sequence[Any]) -> str:\n return f\"ARRAY [{', '.join(map(self.format, value))}]\"\n\n def format_dict(self, value: dict[Any, Any]) -> str:\n if not value:\n return \"MAP()\"\n\n map_keys = list(value.keys())\n key_type = type(map_keys[0])\n for key in map_keys:\n if key is None:\n raise TypeError(\"Map key cannot be null.\")\n if not isinstance(key, key_type):\n raise TypeError(\"All Map key elements must be the same type.\")\n\n map_values = list(value.values())\n return (\n f\"MAP(ARRAY [{', '.join(map(self.format, map_keys))}], ARRAY [{', '.join(map(self.format, map_values))}])\"\n )\n\n def format(self, data: Any) -> str:\n formats_dict: dict[type[Any], Callable[[Any], str]] = {\n bool: self.format_bool,\n str: self.format_string,\n int: self.format_integer,\n datetime.datetime: self.format_timestamp,\n datetime.date: self.format_date,\n decimal.Decimal: self.format_decimal,\n float: self.format_float,\n list: self.format_array,\n tuple: self.format_array,\n set: self.format_array,\n dict: self.format_dict,\n }\n\n if data is None:\n return self.format_null()\n\n for python_type, format_func in formats_dict.items():\n if isinstance(data, python_type):\n return format_func(data)\n\n raise TypeError(f\"Unsupported type {type(data)} in parameter.\")\n\n\nclass _PrestoEngine(_Engine):\n def __init__(self) -> None:\n super().__init__(\"presto\")\n\n def format_string(self, value: str) -> str:\n return f\"\"\"'{value.replace(\"'\", \"''\")}'\"\"\"\n\n\nclass _HiveEngine(_Engine):\n def 
__init__(self) -> None:\n super().__init__(\"hive\")\n\n def format_string(self, value: str) -> str:\n return \"'{}'\".format(\n value.replace(\"\\\\\", \"\\\\\\\\\")\n .replace(\"'\", \"\\\\'\")\n .replace(\"\\r\", \"\\\\r\")\n .replace(\"\\n\", \"\\\\n\")\n .replace(\"\\t\", \"\\\\t\")\n )\n\n\nclass _PartiQLEngine(_Engine):\n def __init__(self) -> None:\n super().__init__(\"partiql\")\n\n def format_null(self, value: None = None) -> str:\n return \"null\"\n\n def format_string(self, value: str) -> str:\n return f\"\"\"'{value.replace(\"'\", \"''\")}'\"\"\"\n\n def format_bool(self, value: bool) -> str:\n return \"1\" if value else \"0\"\n\n def format_decimal(self, value: decimal.Decimal) -> str:\n return f\"'{value}'\"\n\n def format_timestamp(self, value: datetime.datetime) -> str:\n if value.tzinfo is not None:\n raise TypeError(f\"Supports only timezone naive datatype, got {value}.\")\n\n return f\"'{value.isoformat()}'\"\n\n def format_date(self, value: datetime.date) -> str:\n return f\"'{value.isoformat()}'\"\n\n def format_array(self, value: Sequence[Any]) -> str:\n raise NotImplementedError(f\"format_array not implemented for engine={self.engine_name}.\")\n\n def format_dict(self, value: dict[Any, Any]) -> str:\n raise NotImplementedError(f\"format_dict not implemented for engine={self.engine_name}.\")\n\n\ndef _format_parameters(params: dict[str, Any], engine: _Engine) -> dict[str, Any]:\n processed_params = {}\n\n for k, v in params.items():\n processed_params[k] = engine.format(data=v)\n\n return processed_params\n\n\n_PATTERN = re.compile(r\":([A-Za-z0-9_]+)(?![A-Za-z0-9_])\")\n\n\ndef _create_engine(engine_type: _EngineTypeLiteral) -> _Engine:\n if engine_type == \"hive\":\n return _HiveEngine()\n\n if engine_type == \"presto\":\n return _PrestoEngine()\n\n if engine_type == \"partiql\":\n return _PartiQLEngine()\n\n raise exceptions.InvalidArgumentValue(f\"Unknown engine type: {engine_type}\")\n\n\ndef _process_sql_params(sql: str, params: dict[str, Any] | None, engine_type: _EngineTypeLiteral = \"presto\") -> str:\n if params is None:\n params = {}\n\n engine = _create_engine(engine_type)\n processed_params = _format_parameters(params, engine=engine)\n\n def replace(match: re.Match) -> str: # type: ignore[type-arg]\n key = match.group(1)\n\n if key not in processed_params:\n # do not replace anything if the parameter is not provided\n return str(match.group(0))\n\n return str(processed_params[key])\n\n sql = _PATTERN.sub(replace, sql)\n\n return sql\n", "path": "awswrangler/_sql_formatter.py"}]}
| 2,547 | 230 |
gh_patches_debug_12103
|
rasdani/github-patches
|
git_diff
|
pyinstaller__pyinstaller-4326
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
soundfile hook has osx/windows specific behaviour
hook-soundfile.py was added in 3.5 but it blows up on linux as follows
9727 INFO: Loading module hook "hook-soundfile.py"...
Unable to find "/home/matt/.virtualenvs/beqdesigner-entpycF3/lib/python3.7/site-packages/_soundfile_data" when adding binary and data files.
on OSX, it also fails but with a different error
ValueError: Unknown Mach-O header: 0x20202020 in <_io.BufferedReader
name='/Users/travis/build/3ll3d00d/beqdesigner/.venv/lib/python3.7/site-packages/_soundfile_data/COPYING'>
It completes successfully on Windows
The problem is that pysoundfile packages libsndfile on Windows and OSX (as per https://pysoundfile.readthedocs.io/en/0.9.0/#installation) but relies on a system package on Linux so the mentioned directory (`_soundfile_data`) will not exist on Linux. On OSX only a certain file is required (`_soundfile_data/libsndfile.dylib`)
Minimal test case can be found at https://github.com/3ll3d00d/pyinstaller-pysoundfile-bug
--- END ISSUE ---
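Editorial aside (not part of the original report): the platform split described above suggests a hook that only bundles what each wheel actually ships. A minimal sketch, assuming PyInstaller's `is_win`/`is_darwin` compat flags and the existing `get_package_paths` helper, could look like this:

```python
# Sketch of a platform-aware soundfile hook; a simplified illustration, not the project's exact fix.
import os

from PyInstaller.compat import is_darwin, is_win
from PyInstaller.utils.hooks import get_package_paths

pkg_base, _pkg_dir = get_package_paths("soundfile")

path = None
if is_win:
    # the Windows wheel bundles the whole _soundfile_data directory
    path = os.path.join(pkg_base, "_soundfile_data")
elif is_darwin:
    # the macOS wheel only needs the bundled libsndfile dylib
    path = os.path.join(pkg_base, "_soundfile_data", "libsndfile.dylib")
# on GNU/Linux the system libsndfile package is used, so nothing is added

binaries = []
if path is not None and os.path.exists(path):
    binaries = [(path, "_soundfile_data")]
```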
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/hook-soundfile.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2016-2019, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License with exception
5 # for distributing bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #-----------------------------------------------------------------------------
9
10 """
11 pysoundfile:
12 https://github.com/bastibe/SoundFile
13 """
14
15 import os
16 from PyInstaller.utils.hooks import get_package_paths
17
18 # get path of soundfile
19 sfp = get_package_paths('soundfile')
20
21 # add the binaries
22 bins = os.path.join(sfp[0], "_soundfile_data")
23 binaries = [(bins, "_soundfile_data")]
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/PyInstaller/hooks/hook-soundfile.py b/PyInstaller/hooks/hook-soundfile.py
--- a/PyInstaller/hooks/hook-soundfile.py
+++ b/PyInstaller/hooks/hook-soundfile.py
@@ -13,11 +13,20 @@
"""
import os
+
+from PyInstaller.compat import is_win, is_darwin
from PyInstaller.utils.hooks import get_package_paths
# get path of soundfile
sfp = get_package_paths('soundfile')
-# add the binaries
-bins = os.path.join(sfp[0], "_soundfile_data")
-binaries = [(bins, "_soundfile_data")]
+# add binaries packaged by soundfile on OSX and Windows
+# an external dependency (libsndfile) is used on GNU/Linux
+path = None
+if is_win:
+ path = os.path.join(sfp[0], '_soundfile_data')
+elif is_darwin:
+ path = os.path.join(sfp[0], '_soundfile_data', 'libsndfile.dylib')
+
+if path is not None and os.path.exists(path):
+ binaries = [(path, "_soundfile_data")]
|
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-soundfile.py b/PyInstaller/hooks/hook-soundfile.py\n--- a/PyInstaller/hooks/hook-soundfile.py\n+++ b/PyInstaller/hooks/hook-soundfile.py\n@@ -13,11 +13,20 @@\n \"\"\"\n \n import os\n+\n+from PyInstaller.compat import is_win, is_darwin\n from PyInstaller.utils.hooks import get_package_paths\n \n # get path of soundfile\n sfp = get_package_paths('soundfile')\n \n-# add the binaries\n-bins = os.path.join(sfp[0], \"_soundfile_data\")\n-binaries = [(bins, \"_soundfile_data\")]\n+# add binaries packaged by soundfile on OSX and Windows\n+# an external dependency (libsndfile) is used on GNU/Linux\n+path = None\n+if is_win:\n+ path = os.path.join(sfp[0], '_soundfile_data')\n+elif is_darwin:\n+ path = os.path.join(sfp[0], '_soundfile_data', 'libsndfile.dylib')\n+\n+if path is not None and os.path.exists(path):\n+ binaries = [(path, \"_soundfile_data\")]\n", "issue": "soundfile hook has osx/windows specific behaviour\nhook-soundfile.py was added in 3.5 but it blows up on linux as follows\r\n\r\n 9727 INFO: Loading module hook \"hook-soundfile.py\"...\r\n Unable to find \"/home/matt/.virtualenvs/beqdesigner-entpycF3/lib/python3.7/site-packages/_soundfile_data\" when adding binary and data files.\r\n\r\non OSX, it also fails but with a different error\r\n\r\n ValueError: Unknown Mach-O header: 0x20202020 in <_io.BufferedReader \r\n name='/Users/travis/build/3ll3d00d/beqdesigner/.venv/lib/python3.7/site-packages/_soundfile_data/COPYING'>\r\n\r\nIt completes successfully on Windows\r\n\r\nThe problem is that pysoundfile packages libsndfile on Windows and OSX (as per https://pysoundfile.readthedocs.io/en/0.9.0/#installation) but relies on a system package on Linux so the mentioned directory (`_soundfile_data`) will not exist on Linux. 
On OSX only a certain file is required (`_soundfile_data/libsndfile.dylib`)\r\n\r\nMinimal test case can be found at https://github.com/3ll3d00d/pyinstaller-pysoundfile-bug\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2016-2019, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n\"\"\"\npysoundfile:\nhttps://github.com/bastibe/SoundFile\n\"\"\"\n\nimport os\nfrom PyInstaller.utils.hooks import get_package_paths\n\n# get path of soundfile\nsfp = get_package_paths('soundfile')\n\n# add the binaries\nbins = os.path.join(sfp[0], \"_soundfile_data\")\nbinaries = [(bins, \"_soundfile_data\")]\n", "path": "PyInstaller/hooks/hook-soundfile.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2016-2019, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n\"\"\"\npysoundfile:\nhttps://github.com/bastibe/SoundFile\n\"\"\"\n\nimport os\n\nfrom PyInstaller.compat import is_win, is_darwin\nfrom PyInstaller.utils.hooks import get_package_paths\n\n# get path of soundfile\nsfp = get_package_paths('soundfile')\n\n# add binaries packaged by soundfile on OSX and Windows\n# an external dependency (libsndfile) is used on GNU/Linux\npath = None\nif is_win:\n path = os.path.join(sfp[0], '_soundfile_data')\nelif is_darwin:\n path = os.path.join(sfp[0], '_soundfile_data', 'libsndfile.dylib')\n\nif path is not None and os.path.exists(path):\n binaries = [(path, \"_soundfile_data\")]\n", "path": "PyInstaller/hooks/hook-soundfile.py"}]}
| 730 | 257 |
gh_patches_debug_14260
|
rasdani/github-patches
|
git_diff
|
kornia__kornia-2132
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Round trip `torch.save` / `torch.load` not working for `Hyperplane`
### Describe the bug
Crashing when trying to save and reload a `Hyperplane` using `torch`:
```
Traceback (most recent call last):
File "save_hyperplane.py", line 16, in <module>
plane = torch.load("./saved_plane.pt")
File "/home/kyle/venv/lib/python3.8/site-packages/torch/serialization.py", line 789, in load
return _load(opened_zipfile, map_location, pickle_module, **pickle_load_args)
File "/home/kyle/venv/lib/python3.8/site-packages/torch/serialization.py", line 1131, in _load
result = unpickler.load()
File "/home/kyle/venv/lib/python3.8/site-packages/kornia/core/tensor_wrapper.py", line 48, in __getattr__
self.used_attrs.add(name)
File "/home/kyle/venv/lib/python3.8/site-packages/kornia/core/tensor_wrapper.py", line 48, in __getattr__
self.used_attrs.add(name)
File "/home/kyle/venv/lib/python3.8/site-packages/kornia/core/tensor_wrapper.py", line 48, in __getattr__
self.used_attrs.add(name)
[Previous line repeated 993 more times]
RecursionError: maximum recursion depth exceeded
```
### Reproduction steps
```bash
$ python3 -m venv venv
$ source venv/bin/activate
(venv) $ pip install kornia
(venv) $ pip install numpy
(venv) $ python save_hyperplane.py
```
Content of save_hyperplane.py
```
from kornia.geometry.plane import Hyperplane
from kornia.geometry.vector import Vec3, Scalar
import torch
plane = Hyperplane(
Vec3(torch.tensor([0,0,1])), Scalar(torch.tensor(0.5))
)
torch.save(plane, "./saved_plane.pt")
plane = torch.load("./saved_plane.pt")
```
### Expected behavior
Hyperplane is able to be saved and reloaded with `torch.save` & `torch.load`
### Environment
```shell
Collecting environment information...
PyTorch version: 1.13.1+cu117
Is debug build: False
CUDA used to build PyTorch: 11.7
ROCM used to build PyTorch: N/A
OS: Ubuntu 20.04.5 LTS (x86_64)
GCC version: (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0
Clang version: Could not collect
CMake version: version 3.16.3
Libc version: glibc-2.31
Python version: 3.8.10 (default, Nov 14 2022, 12:59:47) [GCC 9.4.0] (64-bit runtime)
Python platform: Linux-5.15.0-56-generic-x86_64-with-glibc2.29
Is CUDA available: True
CUDA runtime version: Could not collect
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration: GPU 0: NVIDIA GeForce RTX 3080 Laptop GPU
Nvidia driver version: 470.141.03
cuDNN version: Could not collect
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
Versions of relevant libraries:
[pip3] numpy==1.24.1
[pip3] torch==1.13.1
[conda] Could not collect
```
### Additional context
_No response_
--- END ISSUE ---
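Editorial aside, not part of the report: the recursion in the traceback is the classic `__getattr__`-plus-pickle interaction — the unpickler probes attributes before `__dict__` is restored, so the `self.used_attrs` lookup re-enters `__getattr__` indefinitely. A minimal, self-contained sketch of the usual remedy (explicit `__getstate__`/`__setstate__`), using a simplified stand-in class rather than kornia's actual `TensorWrapper`:

```python
# Simplified stand-in for the wrapper class, showing why pickling needs explicit state hooks.
import pickle


class Wrapper:
    def __init__(self, data):
        self.__dict__["_data"] = data
        self.__dict__["used_attrs"] = set()

    # Without these two methods, the unpickler probes attributes (e.g. __setstate__)
    # on an instance whose __dict__ is still empty, and __getattr__ recurses on used_attrs.
    def __getstate__(self):
        return self.__dict__

    def __setstate__(self, state):
        self.__dict__.update(state)

    def __getattr__(self, name):
        if name in self.__dict__:
            return self.__dict__[name]
        self.used_attrs.add(name)  # only safe once __dict__ has been populated
        return getattr(self._data, name)


restored = pickle.loads(pickle.dumps(Wrapper([0, 0, 1])))
print(restored._data)  # round-trips without hitting the RecursionError
```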
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kornia/core/tensor_wrapper.py`
Content:
```
1 # pytorch tensor wrapper class
2 # insipired by:
3 # https://github.com/pytorch/pytorch/blob/591dfffa38848de54b7f5f4e49260847024c9281/test/test_overrides.py#L748
4 import collections
5 from typing import Any
6
7 import torch
8 from torch import Tensor
9
10 # wrap inputs if necessary
11
12 # TODO: promote to KORNIA_WRAP
13
14
15 def wrap(v, cls):
16 if type(v) in {tuple, list}:
17 return type(v)(wrap(vi, cls) for vi in v)
18
19 return cls(v) if isinstance(v, Tensor) else v
20
21
22 # TODO: promote to KORNIA_UNWRAP
23
24
25 def unwrap(v):
26 if type(v) in {tuple, list}:
27 return type(v)(unwrap(vi) for vi in v)
28
29 return v._data if not isinstance(v, Tensor) else v
30
31
32 class TensorWrapper:
33 def __init__(self, data: Tensor) -> None:
34 self.__dict__["_data"] = data
35 self.__dict__["used_attrs"] = set()
36 self.__dict__["used_calls"] = set()
37
38 @property
39 def data(self) -> Tensor:
40 return self._data
41
42 def __repr__(self) -> str:
43 return f"{self.data}"
44
45 def __getattr__(self, name):
46 if name in self.__dict__:
47 return self.__dict__[name]
48 self.used_attrs.add(name)
49
50 val = getattr(self._data, name)
51
52 # NOTE: not clear is really needed
53 # If it's a method
54 # if callable(val):
55 # c = getattr(type(self._data), name)
56 # # Don't append self to args if classmethod/staticmethod
57 # if c is val:
58 # return lambda *a, **kw: wrap(self.__torch_function__(c, (type(self),), args=a, kwargs=kw), type(self))
59 # # Otherwise append self to args
60 # return lambda *a, **kw: wrap(
61 # #self.__torch_function__(c, (type(self),), args=(self,) + a, kwargs=kw), type(self)
62 # )
63
64 return wrap(val, type(self))
65
66 def __setattr__(self, name, value):
67 if name in self.__dict__:
68 self.__dict__[name] = value
69
70 self.used_attrs.add(name)
71 setattr(self._data, name, value)
72
73 def __setitem__(self, key, value):
74 self._data[key] = value
75
76 def __getitem__(self, key):
77 return wrap(self._data[key], type(self))
78
79 @classmethod
80 def __torch_function__(cls, func, types, args=(), kwargs=None):
81 if kwargs is None:
82 kwargs = {}
83 # Find an instance of this class in the arguments
84 args_of_this_cls = []
85 for a in args:
86 if isinstance(a, cls):
87 args_of_this_cls.append(a)
88 elif isinstance(a, collections.abc.Sequence):
89 args_of_this_cls.extend(el for el in a if isinstance(el, cls))
90 # assert len(args_of_this_cls) > 0
91 for a in args_of_this_cls:
92 a.used_calls.add(func)
93 args = unwrap(tuple(args))
94 kwargs = {k: unwrap(v) for k, v in kwargs.items()}
95
96 return wrap(func(*args, **kwargs), cls)
97
98 # TODO: `def __add__(self, other) -> Self:` when mypy release >0.991
99 def __add__(self, other):
100 return self.__unary_op__(torch.add, other)
101
102 def __mul__(self, other):
103 return self.__unary_op__(torch.mul, other)
104
105 def __sub__(self, other):
106 return self.__unary_op__(torch.sub, other)
107
108 def __truediv__(self, other):
109 return self.__unary_op__(torch.true_divide, other)
110
111 def __floordiv__(self, other):
112 return self.__unary_op__(torch.floor_divide, other)
113
114 def __ge__(self, other):
115 return self.__unary_op__(torch.ge, other)
116
117 def __gt__(self, other):
118 return self.__unary_op__(torch.gt, other)
119
120 def __lt__(self, other):
121 return self.__unary_op__(torch.lt, other)
122
123 def __le__(self, other):
124 return self.__unary_op__(torch.le, other)
125
126 def __eq__(self, other):
127 return self.__unary_op__(torch.eq, other)
128
129 def __ne__(self, other):
130 return self.__unary_op__(torch.ne, other)
131
132 def __bool__(self):
133 return self.__unary_op__(Tensor.__bool__)
134
135 def __int__(self):
136 return self.__unary_op__(Tensor.__int__)
137
138 def __neg__(self):
139 return self.__unary_op__(Tensor.negative)
140
141 def __unary_op__(self, func: Any, other=None):
142 args = (self, other) if other is not None else (self,)
143 return self.__torch_function__(func, (type(self),), args)
144
145 def __len__(self):
146 return len(self._data)
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kornia/core/tensor_wrapper.py b/kornia/core/tensor_wrapper.py
--- a/kornia/core/tensor_wrapper.py
+++ b/kornia/core/tensor_wrapper.py
@@ -35,15 +35,22 @@
self.__dict__["used_attrs"] = set()
self.__dict__["used_calls"] = set()
- @property
- def data(self) -> Tensor:
- return self._data
+ def unwrap(self):
+ return unwrap(self)
+
+ def __getstate__(self):
+ return self.__dict__
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
def __repr__(self) -> str:
- return f"{self.data}"
+ return f"{self._data}"
def __getattr__(self, name):
- if name in self.__dict__:
+ if name == "data":
+ return self._data
+ elif name in self.__dict__:
return self.__dict__[name]
self.used_attrs.add(name)
|
{"golden_diff": "diff --git a/kornia/core/tensor_wrapper.py b/kornia/core/tensor_wrapper.py\n--- a/kornia/core/tensor_wrapper.py\n+++ b/kornia/core/tensor_wrapper.py\n@@ -35,15 +35,22 @@\n self.__dict__[\"used_attrs\"] = set()\n self.__dict__[\"used_calls\"] = set()\n \n- @property\n- def data(self) -> Tensor:\n- return self._data\n+ def unwrap(self):\n+ return unwrap(self)\n+\n+ def __getstate__(self):\n+ return self.__dict__\n+\n+ def __setstate__(self, state):\n+ self.__dict__.update(state)\n \n def __repr__(self) -> str:\n- return f\"{self.data}\"\n+ return f\"{self._data}\"\n \n def __getattr__(self, name):\n- if name in self.__dict__:\n+ if name == \"data\":\n+ return self._data\n+ elif name in self.__dict__:\n return self.__dict__[name]\n self.used_attrs.add(name)\n", "issue": "Round trip `torch.save` / `torch.load` not working for `Hyperplane`\n### Describe the bug\r\n\r\nCrashing when trying to save and reload a `Hyperplane` using `torch`:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"save_hyperplane.py\", line 16, in <module>\r\n plane = torch.load(\"./saved_plane.pt\")\r\n File \"/home/kyle/venv/lib/python3.8/site-packages/torch/serialization.py\", line 789, in load\r\n return _load(opened_zipfile, map_location, pickle_module, **pickle_load_args)\r\n File \"/home/kyle/venv/lib/python3.8/site-packages/torch/serialization.py\", line 1131, in _load\r\n result = unpickler.load()\r\n File \"/home/kyle/venv/lib/python3.8/site-packages/kornia/core/tensor_wrapper.py\", line 48, in __getattr__\r\n self.used_attrs.add(name)\r\n File \"/home/kyle/venv/lib/python3.8/site-packages/kornia/core/tensor_wrapper.py\", line 48, in __getattr__\r\n self.used_attrs.add(name)\r\n File \"/home/kyle/venv/lib/python3.8/site-packages/kornia/core/tensor_wrapper.py\", line 48, in __getattr__\r\n self.used_attrs.add(name)\r\n [Previous line repeated 993 more times]\r\nRecursionError: maximum recursion depth exceeded\r\n```\r\n\r\n### Reproduction steps\r\n\r\n```bash\r\n$ python3 -m venv venv\r\n$ source venv/bin/activate\r\n(venv) $ pip install kornia\r\n(venv) $ pip install numpy\r\n(venv) $ python save_hyperplane.py\r\n```\r\n\r\nContent of save_hyperplane.py\r\n\r\n```\r\nfrom kornia.geometry.plane import Hyperplane\r\nfrom kornia.geometry.vector import Vec3, Scalar\r\nimport torch\r\n\r\nplane = Hyperplane(\r\n Vec3(torch.tensor([0,0,1])), Scalar(torch.tensor(0.5))\r\n)\r\n\r\ntorch.save(plane, \"./saved_plane.pt\")\r\nplane = torch.load(\"./saved_plane.pt\")\r\n```\r\n\r\n### Expected behavior\r\n\r\nHyperplane is able to be saved and reloaded with `torch.save` & `torch.load` \r\n\r\n### Environment\r\n\r\n```shell\r\nCollecting environment information...\r\nPyTorch version: 1.13.1+cu117\r\nIs debug build: False\r\nCUDA used to build PyTorch: 11.7\r\nROCM used to build PyTorch: N/A\r\n\r\nOS: Ubuntu 20.04.5 LTS (x86_64)\r\nGCC version: (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0\r\nClang version: Could not collect\r\nCMake version: version 3.16.3\r\nLibc version: glibc-2.31\r\n\r\nPython version: 3.8.10 (default, Nov 14 2022, 12:59:47) [GCC 9.4.0] (64-bit runtime)\r\nPython platform: Linux-5.15.0-56-generic-x86_64-with-glibc2.29\r\nIs CUDA available: True\r\nCUDA runtime version: Could not collect\r\nCUDA_MODULE_LOADING set to: LAZY\r\nGPU models and configuration: GPU 0: NVIDIA GeForce RTX 3080 Laptop GPU\r\nNvidia driver version: 470.141.03\r\ncuDNN version: Could not collect\r\nHIP runtime version: N/A\r\nMIOpen runtime version: N/A\r\nIs XNNPACK available: True\r\n\r\nVersions of relevant 
libraries:\r\n[pip3] numpy==1.24.1\r\n[pip3] torch==1.13.1\r\n[conda] Could not collect\r\n```\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "# pytorch tensor wrapper class\n# insipired by:\n# https://github.com/pytorch/pytorch/blob/591dfffa38848de54b7f5f4e49260847024c9281/test/test_overrides.py#L748\nimport collections\nfrom typing import Any\n\nimport torch\nfrom torch import Tensor\n\n# wrap inputs if necessary\n\n# TODO: promote to KORNIA_WRAP\n\n\ndef wrap(v, cls):\n if type(v) in {tuple, list}:\n return type(v)(wrap(vi, cls) for vi in v)\n\n return cls(v) if isinstance(v, Tensor) else v\n\n\n# TODO: promote to KORNIA_UNWRAP\n\n\ndef unwrap(v):\n if type(v) in {tuple, list}:\n return type(v)(unwrap(vi) for vi in v)\n\n return v._data if not isinstance(v, Tensor) else v\n\n\nclass TensorWrapper:\n def __init__(self, data: Tensor) -> None:\n self.__dict__[\"_data\"] = data\n self.__dict__[\"used_attrs\"] = set()\n self.__dict__[\"used_calls\"] = set()\n\n @property\n def data(self) -> Tensor:\n return self._data\n\n def __repr__(self) -> str:\n return f\"{self.data}\"\n\n def __getattr__(self, name):\n if name in self.__dict__:\n return self.__dict__[name]\n self.used_attrs.add(name)\n\n val = getattr(self._data, name)\n\n # NOTE: not clear is really needed\n # If it's a method\n # if callable(val):\n # c = getattr(type(self._data), name)\n # # Don't append self to args if classmethod/staticmethod\n # if c is val:\n # return lambda *a, **kw: wrap(self.__torch_function__(c, (type(self),), args=a, kwargs=kw), type(self))\n # # Otherwise append self to args\n # return lambda *a, **kw: wrap(\n # #self.__torch_function__(c, (type(self),), args=(self,) + a, kwargs=kw), type(self)\n # )\n\n return wrap(val, type(self))\n\n def __setattr__(self, name, value):\n if name in self.__dict__:\n self.__dict__[name] = value\n\n self.used_attrs.add(name)\n setattr(self._data, name, value)\n\n def __setitem__(self, key, value):\n self._data[key] = value\n\n def __getitem__(self, key):\n return wrap(self._data[key], type(self))\n\n @classmethod\n def __torch_function__(cls, func, types, args=(), kwargs=None):\n if kwargs is None:\n kwargs = {}\n # Find an instance of this class in the arguments\n args_of_this_cls = []\n for a in args:\n if isinstance(a, cls):\n args_of_this_cls.append(a)\n elif isinstance(a, collections.abc.Sequence):\n args_of_this_cls.extend(el for el in a if isinstance(el, cls))\n # assert len(args_of_this_cls) > 0\n for a in args_of_this_cls:\n a.used_calls.add(func)\n args = unwrap(tuple(args))\n kwargs = {k: unwrap(v) for k, v in kwargs.items()}\n\n return wrap(func(*args, **kwargs), cls)\n\n # TODO: `def __add__(self, other) -> Self:` when mypy release >0.991\n def __add__(self, other):\n return self.__unary_op__(torch.add, other)\n\n def __mul__(self, other):\n return self.__unary_op__(torch.mul, other)\n\n def __sub__(self, other):\n return self.__unary_op__(torch.sub, other)\n\n def __truediv__(self, other):\n return self.__unary_op__(torch.true_divide, other)\n\n def __floordiv__(self, other):\n return self.__unary_op__(torch.floor_divide, other)\n\n def __ge__(self, other):\n return self.__unary_op__(torch.ge, other)\n\n def __gt__(self, other):\n return self.__unary_op__(torch.gt, other)\n\n def __lt__(self, other):\n return self.__unary_op__(torch.lt, other)\n\n def __le__(self, other):\n return self.__unary_op__(torch.le, other)\n\n def __eq__(self, other):\n return self.__unary_op__(torch.eq, other)\n\n def __ne__(self, other):\n 
return self.__unary_op__(torch.ne, other)\n\n def __bool__(self):\n return self.__unary_op__(Tensor.__bool__)\n\n def __int__(self):\n return self.__unary_op__(Tensor.__int__)\n\n def __neg__(self):\n return self.__unary_op__(Tensor.negative)\n\n def __unary_op__(self, func: Any, other=None):\n args = (self, other) if other is not None else (self,)\n return self.__torch_function__(func, (type(self),), args)\n\n def __len__(self):\n return len(self._data)\n", "path": "kornia/core/tensor_wrapper.py"}], "after_files": [{"content": "# pytorch tensor wrapper class\n# insipired by:\n# https://github.com/pytorch/pytorch/blob/591dfffa38848de54b7f5f4e49260847024c9281/test/test_overrides.py#L748\nimport collections\nfrom typing import Any\n\nimport torch\nfrom torch import Tensor\n\n# wrap inputs if necessary\n\n# TODO: promote to KORNIA_WRAP\n\n\ndef wrap(v, cls):\n if type(v) in {tuple, list}:\n return type(v)(wrap(vi, cls) for vi in v)\n\n return cls(v) if isinstance(v, Tensor) else v\n\n\n# TODO: promote to KORNIA_UNWRAP\n\n\ndef unwrap(v):\n if type(v) in {tuple, list}:\n return type(v)(unwrap(vi) for vi in v)\n\n return v._data if not isinstance(v, Tensor) else v\n\n\nclass TensorWrapper:\n def __init__(self, data: Tensor) -> None:\n self.__dict__[\"_data\"] = data\n self.__dict__[\"used_attrs\"] = set()\n self.__dict__[\"used_calls\"] = set()\n\n def unwrap(self):\n return unwrap(self)\n\n def __getstate__(self):\n return self.__dict__\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n\n def __repr__(self) -> str:\n return f\"{self._data}\"\n\n def __getattr__(self, name):\n if name == \"data\":\n return self._data\n elif name in self.__dict__:\n return self.__dict__[name]\n self.used_attrs.add(name)\n\n val = getattr(self._data, name)\n\n # NOTE: not clear is really needed\n # If it's a method\n # if callable(val):\n # c = getattr(type(self._data), name)\n # # Don't append self to args if classmethod/staticmethod\n # if c is val:\n # return lambda *a, **kw: wrap(self.__torch_function__(c, (type(self),), args=a, kwargs=kw), type(self))\n # # Otherwise append self to args\n # return lambda *a, **kw: wrap(\n # #self.__torch_function__(c, (type(self),), args=(self,) + a, kwargs=kw), type(self)\n # )\n\n return wrap(val, type(self))\n\n def __setattr__(self, name, value):\n if name in self.__dict__:\n self.__dict__[name] = value\n\n self.used_attrs.add(name)\n setattr(self._data, name, value)\n\n def __setitem__(self, key, value):\n self._data[key] = value\n\n def __getitem__(self, key):\n return wrap(self._data[key], type(self))\n\n @classmethod\n def __torch_function__(cls, func, types, args=(), kwargs=None):\n if kwargs is None:\n kwargs = {}\n # Find an instance of this class in the arguments\n args_of_this_cls = []\n for a in args:\n if isinstance(a, cls):\n args_of_this_cls.append(a)\n elif isinstance(a, collections.abc.Sequence):\n args_of_this_cls.extend(el for el in a if isinstance(el, cls))\n # assert len(args_of_this_cls) > 0\n for a in args_of_this_cls:\n a.used_calls.add(func)\n args = unwrap(tuple(args))\n kwargs = {k: unwrap(v) for k, v in kwargs.items()}\n\n return wrap(func(*args, **kwargs), cls)\n\n # TODO: `def __add__(self, other) -> Self:` when mypy release >0.991\n def __add__(self, other):\n return self.__unary_op__(torch.add, other)\n\n def __mul__(self, other):\n return self.__unary_op__(torch.mul, other)\n\n def __sub__(self, other):\n return self.__unary_op__(torch.sub, other)\n\n def __truediv__(self, other):\n return 
self.__unary_op__(torch.true_divide, other)\n\n def __floordiv__(self, other):\n return self.__unary_op__(torch.floor_divide, other)\n\n def __ge__(self, other):\n return self.__unary_op__(torch.ge, other)\n\n def __gt__(self, other):\n return self.__unary_op__(torch.gt, other)\n\n def __lt__(self, other):\n return self.__unary_op__(torch.lt, other)\n\n def __le__(self, other):\n return self.__unary_op__(torch.le, other)\n\n def __eq__(self, other):\n return self.__unary_op__(torch.eq, other)\n\n def __ne__(self, other):\n return self.__unary_op__(torch.ne, other)\n\n def __bool__(self):\n return self.__unary_op__(Tensor.__bool__)\n\n def __int__(self):\n return self.__unary_op__(Tensor.__int__)\n\n def __neg__(self):\n return self.__unary_op__(Tensor.negative)\n\n def __unary_op__(self, func: Any, other=None):\n args = (self, other) if other is not None else (self,)\n return self.__torch_function__(func, (type(self),), args)\n\n def __len__(self):\n return len(self._data)\n", "path": "kornia/core/tensor_wrapper.py"}]}
| 2,627 | 240 |
gh_patches_debug_12208
|
rasdani/github-patches
|
git_diff
|
mampfes__hacs_waste_collection_schedule-118
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ICS Scraper error
My Home-Assistant has the following error:
`2021-12-02 06:53:42 ERROR (SyncWorker_19) [waste_collection_schedule.scraper] fetch failed for source <waste_collection_schedule.source.ics.Source object at 0x7f3785601ca0>: 'NoneType' object does not support item assignment`
I have events for this year, but not for the next. I know, that the ICS scraper is trying to get events for the next year, when it's december. I don't know if this years events are cached or not. But I assume that the issue is because of the next year scraping. With this in mind I searched the source code.
My Python is quite bad, but I assume, that the problem is in line [162](https://github.com/mampfes/hacs_waste_collection_schedule/blob/master/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py#L162). I think this line should look like the block [151-155](https://github.com/mampfes/hacs_waste_collection_schedule/blob/master/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py#L151-L155).
--- END ISSUE ---
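Editorial aside: the reporter's reading matches the code — in the December branch, `self._params` is indexed even when no `year_field`/`params` were configured, so it can be `None` and item assignment fails. A small illustrative sketch of the guarded lookahead (hypothetical helper name and example URL; the guard mirrors the one used a few lines earlier in `fetch`):

```python
# Illustrative guard for the December lookahead; not the project's actual code.
import datetime
from typing import Optional


def next_year_request(url_template: str, params: Optional[dict], year_field: Optional[str]):
    next_year = str(datetime.datetime.now().year + 1)
    url = url_template.replace("{%Y}", next_year)
    # Only touch params when a year field was configured; otherwise params may be None
    # and item assignment raises the reported "'NoneType' object does not support
    # item assignment" error.
    if year_field is not None and params is not None:
        params[year_field] = next_year
    return url, params


# A source configured with only a URL wildcard (no params/year_field) stays safe:
print(next_year_request("https://example.org/calendar_{%Y}.ics", None, None))
```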
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py`
Content:
```
1 import datetime
2 import logging
3 from pathlib import Path
4
5 import requests
6 from waste_collection_schedule import Collection # type: ignore[attr-defined]
7 from waste_collection_schedule.service.ICS import ICS
8
9 TITLE = "ICS"
10 DESCRIPTION = "Source for ICS based schedules."
11 URL = None
12 TEST_CASES = {
13 "Dortmund, Dudenstr. 5": {
14 "url": "https://www.edg.de/ical/kalender.ics?Strasse=Dudenstr.&Hausnummer=5&Erinnerung=-1&Abfallart=1,2,3,4"
15 },
16 "Leipzig, Sandgrubenweg 27": {
17 "url": "https://stadtreinigung-leipzig.de/wir-kommen-zu-ihnen/abfallkalender/ical.ics?position_nos=38296&name=Sandgrubenweg%2027"
18 },
19 "Ludwigsburg": {
20 "url": "https://www.avl-ludwigsburg.de/fileadmin/Files/Abfallkalender/ICS/Privat/Privat_{%Y}_Ossweil.ics"
21 },
22 "Esslingen, Bahnhof": {
23 "url": "https://api.abfall.io/?kh=DaA02103019b46345f1998698563DaAd&t=ics&s=1a862df26f6943997cef90233877a4fe"
24 },
25 "Test File": {
26 # Path is used here to allow to call the Source from any location.
27 # This is not required in a yaml configuration!
28 "file": str(Path(__file__).resolve().parents[1].joinpath("test/test.ics"))
29 },
30 "Test File (recurring)": {
31 # Path is used here to allow to call the Source from any location.
32 # This is not required in a yaml configuration!
33 "file": str(Path(__file__).resolve().parents[1].joinpath("test/recurring.ics"))
34 },
35 "München, Bahnstr. 11": {
36 "url": "https://www.awm-muenchen.de/entsorgen/abfuhrkalender?tx_awmabfuhrkalender_abfuhrkalender%5Bhausnummer%5D=11&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BB%5D=1%2F2%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BP%5D=1%2F2%3BG&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BR%5D=001%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bsection%5D=ics&tx_awmabfuhrkalender_abfuhrkalender%5Bsinglestandplatz%5D=false&tx_awmabfuhrkalender_abfuhrkalender%5Bstandplatzwahl%5D=true&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bbio%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bpapier%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Brestmuell%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstrasse%5D=bahnstr.&tx_awmabfuhrkalender_abfuhrkalender%5Byear%5D={%Y}"
37 },
38 "Buxtehude, Am Berg": {
39 "url": "https://abfall.landkreis-stade.de/api_v2/collection_dates/1/ort/10/strasse/90/hausnummern/1/abfallarten/R02-R04-B02-D04-D12-P04-R12-R14-W0-R22-R24-R31/kalender.ics"
40 },
41 # "Hausmüllinfo: ASR Chemnitz": {
42 # "url": "https://asc.hausmuell.info/ics/ics.php",
43 # "method": "POST",
44 # "params": {
45 # "hidden_id_egebiet": 439087,
46 # "input_ort": "Chemnitz",
47 # "input_str": "Straße der Nationen",
48 # "input_hnr": 2,
49 # "hidden_send_btn": "ics",
50 # # "hiddenYear": 2021,
51 # "hidden_id_ort": 10,
52 # "hidden_id_ortsteil": 0,
53 # "hidden_id_str": 17814,
54 # "hidden_id_hnr": 5538100,
55 # "hidden_kalenderart": "privat",
56 # "showBinsBio": "on",
57 # "showBinsRest": "on",
58 # "showBinsRest_rc": "on",
59 # "showBinsPapier": "on",
60 # "showBinsOrganic": "on",
61 # "showBinsXmas": "on",
62 # "showBinsDsd": "on",
63 # "showBinsProb": "on",
64 # },
65 # "year_field": "hiddenYear",
66 # },
67 "Abfall Zollernalbkreis, Ebingen": {
68 "url": "https://www.abfallkalender-zak.de",
69 "params": {
70 "city": "2,3,4",
71 "street": "3",
72 "types[]": [
73 "restmuell",
74 "gelbersack",
75 "papiertonne",
76 "biomuell",
77 "gruenabfall",
78 "schadstoffsammlung",
79 "altpapiersammlung",
80 "schrottsammlung",
81 "weihnachtsbaeume",
82 "elektrosammlung",
83 ],
84 "go_ics": "Download",
85 },
86 "year_field": "year",
87 },
88 "Detmold": {
89 "url": "https://abfuhrkalender.detmold.de/icsmaker.php",
90 "method": "GET",
91 "params": {"strid": 338},
92 "year_field": "year",
93 },
94 "EAW Rheingau Taunus": {
95 "url": "https://www.eaw-rheingau-taunus.de/abfallkalender/calendar.ics?streetid=1429",
96 "split_at": ",",
97 },
98 "Recollect, Ottawa": {
99 "url": "https://recollect.a.ssl.fastly.net/api/places/BCCDF30E-578B-11E4-AD38-5839C200407A/services/208/events.en.ics",
100 "split_at": "\\, [and ]*",
101 },
102 "Frankfurt am Main, Achenbachstrasse 3": {
103 "url": "https://www.fes-frankfurt.de/abfallkalender/QWNoZW5iYWNoc3RyLnwzfDYwNTk2.ics"
104 },
105 "Erlensee, Am Haspel": {
106 "url": "https://sperrmuell.erlensee.de/?type=reminder",
107 "method": "POST",
108 "params": {
109 "street": 8,
110 "eventType[]": [27, 23, 19, 20, 21, 24, 22, 25, 26],
111 "timeframe": 23,
112 "download": "ical",
113 },
114 },
115 }
116
117
118 HEADERS = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}
119 _LOGGER = logging.getLogger(__name__)
120
121
122 class Source:
123 def __init__(
124 self,
125 url=None,
126 file=None,
127 offset=None,
128 params=None,
129 year_field=None,
130 method="GET",
131 split_at=None,
132 ):
133 self._url = url
134 self._file = file
135 if bool(self._url is not None) == bool(self._file is not None):
136 raise RuntimeError("Specify either url or file")
137 self._ics = ICS(offset=offset, split_at=split_at)
138 self._params = params
139 self._year_field = year_field # replace this field in params with current year
140 self._method = method # The method to send the params
141
142 def fetch(self):
143 if self._url is not None:
144 if "{%Y}" in self._url or self._year_field is not None:
145 # url contains wildcard or params contains year field
146 now = datetime.datetime.now()
147
148 # replace year in url
149 url = self._url.replace("{%Y}", str(now.year))
150
151 # replace year in params
152 if self._year_field is not None:
153 if self._params is None:
154 raise RuntimeError("year_field specified without params")
155 self._params[self._year_field] = str(now.year)
156
157 entries = self.fetch_url(url, self._params)
158
159 if now.month == 12:
160 # also get data for next year if we are already in december
161 url = self._url.replace("{%Y}", str(now.year + 1))
162 self._params[self._year_field] = str(now.year + 1)
163
164 try:
165 entries.extend(self.fetch_url(url), self._params)
166 except Exception:
167 # ignore if fetch for next year fails
168 pass
169 return entries
170 else:
171 return self.fetch_url(self._url, self._params)
172 elif self._file is not None:
173 return self.fetch_file(self._file)
174
175 def fetch_url(self, url, params=None):
176 # get ics file
177 if self._method == "GET":
178 r = requests.get(url, params=params, headers=HEADERS)
179 elif self._method == "POST":
180 r = requests.post(url, data=params, headers=HEADERS)
181 else:
182 _LOGGER.error(
183 "Error: unknown method to fetch URL, use GET or POST; got %s"
184 % self._method
185 )
186 return "error"
187 r.encoding = "utf-8" # requests doesn't guess the encoding correctly
188
189 # check the return code
190 if not r.ok:
191 _LOGGER.error(
192 "Error: the response is not ok; need code 200, but got code %s"
193 % r.status_code
194 )
195 return "error"
196
197 return self._convert(r.text)
198
199 def fetch_file(self, file):
200 f = open(file)
201 return self._convert(f.read())
202
203 def _convert(self, data):
204 dates = self._ics.convert(data)
205
206 entries = []
207 for d in dates:
208 entries.append(Collection(d[0], d[1]))
209 return entries
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py
@@ -159,7 +159,8 @@
if now.month == 12:
# also get data for next year if we are already in december
url = self._url.replace("{%Y}", str(now.year + 1))
- self._params[self._year_field] = str(now.year + 1)
+ if self._year_field is not None:
+ self._params[self._year_field] = str(now.year + 1)
try:
entries.extend(self.fetch_url(url), self._params)
|
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py\n@@ -159,7 +159,8 @@\n if now.month == 12:\n # also get data for next year if we are already in december\n url = self._url.replace(\"{%Y}\", str(now.year + 1))\n- self._params[self._year_field] = str(now.year + 1)\n+ if self._year_field is not None:\n+ self._params[self._year_field] = str(now.year + 1)\n \n try:\n entries.extend(self.fetch_url(url), self._params)\n", "issue": "ICS Scraper error\nMy Home-Assistant has the following error:\r\n\r\n`2021-12-02 06:53:42 ERROR (SyncWorker_19) [waste_collection_schedule.scraper] fetch failed for source <waste_collection_schedule.source.ics.Source object at 0x7f3785601ca0>: 'NoneType' object does not support item assignment`\r\n\r\nI have events for this year, but not for the next. I know, that the ICS scraper is trying to get events for the next year, when it's december. I don't know if this years events are cached or not. But I assume that the issue is because of the next year scraping. With this in mind I searched the source code.\r\nMy Python is quite bad, but I assume, that the problem is in line [162](https://github.com/mampfes/hacs_waste_collection_schedule/blob/master/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py#L162). I think this line should look like the block [151-155](https://github.com/mampfes/hacs_waste_collection_schedule/blob/master/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py#L151-L155).\n", "before_files": [{"content": "import datetime\nimport logging\nfrom pathlib import Path\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"ICS\"\nDESCRIPTION = \"Source for ICS based schedules.\"\nURL = None\nTEST_CASES = {\n \"Dortmund, Dudenstr. 5\": {\n \"url\": \"https://www.edg.de/ical/kalender.ics?Strasse=Dudenstr.&Hausnummer=5&Erinnerung=-1&Abfallart=1,2,3,4\"\n },\n \"Leipzig, Sandgrubenweg 27\": {\n \"url\": \"https://stadtreinigung-leipzig.de/wir-kommen-zu-ihnen/abfallkalender/ical.ics?position_nos=38296&name=Sandgrubenweg%2027\"\n },\n \"Ludwigsburg\": {\n \"url\": \"https://www.avl-ludwigsburg.de/fileadmin/Files/Abfallkalender/ICS/Privat/Privat_{%Y}_Ossweil.ics\"\n },\n \"Esslingen, Bahnhof\": {\n \"url\": \"https://api.abfall.io/?kh=DaA02103019b46345f1998698563DaAd&t=ics&s=1a862df26f6943997cef90233877a4fe\"\n },\n \"Test File\": {\n # Path is used here to allow to call the Source from any location.\n # This is not required in a yaml configuration!\n \"file\": str(Path(__file__).resolve().parents[1].joinpath(\"test/test.ics\"))\n },\n \"Test File (recurring)\": {\n # Path is used here to allow to call the Source from any location.\n # This is not required in a yaml configuration!\n \"file\": str(Path(__file__).resolve().parents[1].joinpath(\"test/recurring.ics\"))\n },\n \"M\u00fcnchen, Bahnstr. 
11\": {\n \"url\": \"https://www.awm-muenchen.de/entsorgen/abfuhrkalender?tx_awmabfuhrkalender_abfuhrkalender%5Bhausnummer%5D=11&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BB%5D=1%2F2%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BP%5D=1%2F2%3BG&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BR%5D=001%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bsection%5D=ics&tx_awmabfuhrkalender_abfuhrkalender%5Bsinglestandplatz%5D=false&tx_awmabfuhrkalender_abfuhrkalender%5Bstandplatzwahl%5D=true&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bbio%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bpapier%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Brestmuell%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstrasse%5D=bahnstr.&tx_awmabfuhrkalender_abfuhrkalender%5Byear%5D={%Y}\"\n },\n \"Buxtehude, Am Berg\": {\n \"url\": \"https://abfall.landkreis-stade.de/api_v2/collection_dates/1/ort/10/strasse/90/hausnummern/1/abfallarten/R02-R04-B02-D04-D12-P04-R12-R14-W0-R22-R24-R31/kalender.ics\"\n },\n # \"Hausm\u00fcllinfo: ASR Chemnitz\": {\n # \"url\": \"https://asc.hausmuell.info/ics/ics.php\",\n # \"method\": \"POST\",\n # \"params\": {\n # \"hidden_id_egebiet\": 439087,\n # \"input_ort\": \"Chemnitz\",\n # \"input_str\": \"Stra\u00dfe der Nationen\",\n # \"input_hnr\": 2,\n # \"hidden_send_btn\": \"ics\",\n # # \"hiddenYear\": 2021,\n # \"hidden_id_ort\": 10,\n # \"hidden_id_ortsteil\": 0,\n # \"hidden_id_str\": 17814,\n # \"hidden_id_hnr\": 5538100,\n # \"hidden_kalenderart\": \"privat\",\n # \"showBinsBio\": \"on\",\n # \"showBinsRest\": \"on\",\n # \"showBinsRest_rc\": \"on\",\n # \"showBinsPapier\": \"on\",\n # \"showBinsOrganic\": \"on\",\n # \"showBinsXmas\": \"on\",\n # \"showBinsDsd\": \"on\",\n # \"showBinsProb\": \"on\",\n # },\n # \"year_field\": \"hiddenYear\",\n # },\n \"Abfall Zollernalbkreis, Ebingen\": {\n \"url\": \"https://www.abfallkalender-zak.de\",\n \"params\": {\n \"city\": \"2,3,4\",\n \"street\": \"3\",\n \"types[]\": [\n \"restmuell\",\n \"gelbersack\",\n \"papiertonne\",\n \"biomuell\",\n \"gruenabfall\",\n \"schadstoffsammlung\",\n \"altpapiersammlung\",\n \"schrottsammlung\",\n \"weihnachtsbaeume\",\n \"elektrosammlung\",\n ],\n \"go_ics\": \"Download\",\n },\n \"year_field\": \"year\",\n },\n \"Detmold\": {\n \"url\": \"https://abfuhrkalender.detmold.de/icsmaker.php\",\n \"method\": \"GET\",\n \"params\": {\"strid\": 338},\n \"year_field\": \"year\",\n },\n \"EAW Rheingau Taunus\": {\n \"url\": \"https://www.eaw-rheingau-taunus.de/abfallkalender/calendar.ics?streetid=1429\",\n \"split_at\": \",\",\n },\n \"Recollect, Ottawa\": {\n \"url\": \"https://recollect.a.ssl.fastly.net/api/places/BCCDF30E-578B-11E4-AD38-5839C200407A/services/208/events.en.ics\",\n \"split_at\": \"\\\\, [and ]*\",\n },\n \"Frankfurt am Main, Achenbachstrasse 3\": {\n \"url\": \"https://www.fes-frankfurt.de/abfallkalender/QWNoZW5iYWNoc3RyLnwzfDYwNTk2.ics\"\n },\n \"Erlensee, Am Haspel\": {\n \"url\": \"https://sperrmuell.erlensee.de/?type=reminder\",\n \"method\": \"POST\",\n \"params\": {\n \"street\": 8,\n \"eventType[]\": [27, 23, 19, 20, 21, 24, 22, 25, 26],\n \"timeframe\": 23,\n \"download\": \"ical\",\n },\n },\n}\n\n\nHEADERS = {\"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64)\"}\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(\n self,\n url=None,\n file=None,\n offset=None,\n params=None,\n year_field=None,\n method=\"GET\",\n split_at=None,\n ):\n self._url = url\n self._file = 
file\n if bool(self._url is not None) == bool(self._file is not None):\n raise RuntimeError(\"Specify either url or file\")\n self._ics = ICS(offset=offset, split_at=split_at)\n self._params = params\n self._year_field = year_field # replace this field in params with current year\n self._method = method # The method to send the params\n\n def fetch(self):\n if self._url is not None:\n if \"{%Y}\" in self._url or self._year_field is not None:\n # url contains wildcard or params contains year field\n now = datetime.datetime.now()\n\n # replace year in url\n url = self._url.replace(\"{%Y}\", str(now.year))\n\n # replace year in params\n if self._year_field is not None:\n if self._params is None:\n raise RuntimeError(\"year_field specified without params\")\n self._params[self._year_field] = str(now.year)\n\n entries = self.fetch_url(url, self._params)\n\n if now.month == 12:\n # also get data for next year if we are already in december\n url = self._url.replace(\"{%Y}\", str(now.year + 1))\n self._params[self._year_field] = str(now.year + 1)\n\n try:\n entries.extend(self.fetch_url(url), self._params)\n except Exception:\n # ignore if fetch for next year fails\n pass\n return entries\n else:\n return self.fetch_url(self._url, self._params)\n elif self._file is not None:\n return self.fetch_file(self._file)\n\n def fetch_url(self, url, params=None):\n # get ics file\n if self._method == \"GET\":\n r = requests.get(url, params=params, headers=HEADERS)\n elif self._method == \"POST\":\n r = requests.post(url, data=params, headers=HEADERS)\n else:\n _LOGGER.error(\n \"Error: unknown method to fetch URL, use GET or POST; got %s\"\n % self._method\n )\n return \"error\"\n r.encoding = \"utf-8\" # requests doesn't guess the encoding correctly\n\n # check the return code\n if not r.ok:\n _LOGGER.error(\n \"Error: the response is not ok; need code 200, but got code %s\"\n % r.status_code\n )\n return \"error\"\n\n return self._convert(r.text)\n\n def fetch_file(self, file):\n f = open(file)\n return self._convert(f.read())\n\n def _convert(self, data):\n dates = self._ics.convert(data)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py"}], "after_files": [{"content": "import datetime\nimport logging\nfrom pathlib import Path\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"ICS\"\nDESCRIPTION = \"Source for ICS based schedules.\"\nURL = None\nTEST_CASES = {\n \"Dortmund, Dudenstr. 
5\": {\n \"url\": \"https://www.edg.de/ical/kalender.ics?Strasse=Dudenstr.&Hausnummer=5&Erinnerung=-1&Abfallart=1,2,3,4\"\n },\n \"Leipzig, Sandgrubenweg 27\": {\n \"url\": \"https://stadtreinigung-leipzig.de/wir-kommen-zu-ihnen/abfallkalender/ical.ics?position_nos=38296&name=Sandgrubenweg%2027\"\n },\n \"Ludwigsburg\": {\n \"url\": \"https://www.avl-ludwigsburg.de/fileadmin/Files/Abfallkalender/ICS/Privat/Privat_{%Y}_Ossweil.ics\"\n },\n \"Esslingen, Bahnhof\": {\n \"url\": \"https://api.abfall.io/?kh=DaA02103019b46345f1998698563DaAd&t=ics&s=1a862df26f6943997cef90233877a4fe\"\n },\n \"Test File\": {\n # Path is used here to allow to call the Source from any location.\n # This is not required in a yaml configuration!\n \"file\": str(Path(__file__).resolve().parents[1].joinpath(\"test/test.ics\"))\n },\n \"Test File (recurring)\": {\n # Path is used here to allow to call the Source from any location.\n # This is not required in a yaml configuration!\n \"file\": str(Path(__file__).resolve().parents[1].joinpath(\"test/recurring.ics\"))\n },\n \"M\u00fcnchen, Bahnstr. 11\": {\n \"url\": \"https://www.awm-muenchen.de/entsorgen/abfuhrkalender?tx_awmabfuhrkalender_abfuhrkalender%5Bhausnummer%5D=11&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BB%5D=1%2F2%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BP%5D=1%2F2%3BG&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BR%5D=001%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bsection%5D=ics&tx_awmabfuhrkalender_abfuhrkalender%5Bsinglestandplatz%5D=false&tx_awmabfuhrkalender_abfuhrkalender%5Bstandplatzwahl%5D=true&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bbio%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bpapier%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Brestmuell%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstrasse%5D=bahnstr.&tx_awmabfuhrkalender_abfuhrkalender%5Byear%5D={%Y}\"\n },\n \"Buxtehude, Am Berg\": {\n \"url\": \"https://abfall.landkreis-stade.de/api_v2/collection_dates/1/ort/10/strasse/90/hausnummern/1/abfallarten/R02-R04-B02-D04-D12-P04-R12-R14-W0-R22-R24-R31/kalender.ics\"\n },\n # \"Hausm\u00fcllinfo: ASR Chemnitz\": {\n # \"url\": \"https://asc.hausmuell.info/ics/ics.php\",\n # \"method\": \"POST\",\n # \"params\": {\n # \"hidden_id_egebiet\": 439087,\n # \"input_ort\": \"Chemnitz\",\n # \"input_str\": \"Stra\u00dfe der Nationen\",\n # \"input_hnr\": 2,\n # \"hidden_send_btn\": \"ics\",\n # # \"hiddenYear\": 2021,\n # \"hidden_id_ort\": 10,\n # \"hidden_id_ortsteil\": 0,\n # \"hidden_id_str\": 17814,\n # \"hidden_id_hnr\": 5538100,\n # \"hidden_kalenderart\": \"privat\",\n # \"showBinsBio\": \"on\",\n # \"showBinsRest\": \"on\",\n # \"showBinsRest_rc\": \"on\",\n # \"showBinsPapier\": \"on\",\n # \"showBinsOrganic\": \"on\",\n # \"showBinsXmas\": \"on\",\n # \"showBinsDsd\": \"on\",\n # \"showBinsProb\": \"on\",\n # },\n # \"year_field\": \"hiddenYear\",\n # },\n \"Abfall Zollernalbkreis, Ebingen\": {\n \"url\": \"https://www.abfallkalender-zak.de\",\n \"params\": {\n \"city\": \"2,3,4\",\n \"street\": \"3\",\n \"types[]\": [\n \"restmuell\",\n \"gelbersack\",\n \"papiertonne\",\n \"biomuell\",\n \"gruenabfall\",\n \"schadstoffsammlung\",\n \"altpapiersammlung\",\n \"schrottsammlung\",\n \"weihnachtsbaeume\",\n \"elektrosammlung\",\n ],\n \"go_ics\": \"Download\",\n },\n \"year_field\": \"year\",\n },\n \"Detmold\": {\n \"url\": \"https://abfuhrkalender.detmold.de/icsmaker.php\",\n \"method\": \"GET\",\n \"params\": {\"strid\": 338},\n \"year_field\": 
\"year\",\n },\n \"EAW Rheingau Taunus\": {\n \"url\": \"https://www.eaw-rheingau-taunus.de/abfallkalender/calendar.ics?streetid=1429\",\n \"split_at\": \",\",\n },\n \"Recollect, Ottawa\": {\n \"url\": \"https://recollect.a.ssl.fastly.net/api/places/BCCDF30E-578B-11E4-AD38-5839C200407A/services/208/events.en.ics\",\n \"split_at\": \"\\\\, [and ]*\",\n },\n \"Frankfurt am Main, Achenbachstrasse 3\": {\n \"url\": \"https://www.fes-frankfurt.de/abfallkalender/QWNoZW5iYWNoc3RyLnwzfDYwNTk2.ics\"\n },\n \"Erlensee, Am Haspel\": {\n \"url\": \"https://sperrmuell.erlensee.de/?type=reminder\",\n \"method\": \"POST\",\n \"params\": {\n \"street\": 8,\n \"eventType[]\": [27, 23, 19, 20, 21, 24, 22, 25, 26],\n \"timeframe\": 23,\n \"download\": \"ical\",\n },\n },\n}\n\n\nHEADERS = {\"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64)\"}\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(\n self,\n url=None,\n file=None,\n offset=None,\n params=None,\n year_field=None,\n method=\"GET\",\n split_at=None,\n ):\n self._url = url\n self._file = file\n if bool(self._url is not None) == bool(self._file is not None):\n raise RuntimeError(\"Specify either url or file\")\n self._ics = ICS(offset=offset, split_at=split_at)\n self._params = params\n self._year_field = year_field # replace this field in params with current year\n self._method = method # The method to send the params\n\n def fetch(self):\n if self._url is not None:\n if \"{%Y}\" in self._url or self._year_field is not None:\n # url contains wildcard or params contains year field\n now = datetime.datetime.now()\n\n # replace year in url\n url = self._url.replace(\"{%Y}\", str(now.year))\n\n # replace year in params\n if self._year_field is not None:\n if self._params is None:\n raise RuntimeError(\"year_field specified without params\")\n self._params[self._year_field] = str(now.year)\n\n entries = self.fetch_url(url, self._params)\n\n if now.month == 12:\n # also get data for next year if we are already in december\n url = self._url.replace(\"{%Y}\", str(now.year + 1))\n if self._year_field is not None:\n self._params[self._year_field] = str(now.year + 1)\n\n try:\n entries.extend(self.fetch_url(url), self._params)\n except Exception:\n # ignore if fetch for next year fails\n pass\n return entries\n else:\n return self.fetch_url(self._url, self._params)\n elif self._file is not None:\n return self.fetch_file(self._file)\n\n def fetch_url(self, url, params=None):\n # get ics file\n if self._method == \"GET\":\n r = requests.get(url, params=params, headers=HEADERS)\n elif self._method == \"POST\":\n r = requests.post(url, data=params, headers=HEADERS)\n else:\n _LOGGER.error(\n \"Error: unknown method to fetch URL, use GET or POST; got %s\"\n % self._method\n )\n return \"error\"\n r.encoding = \"utf-8\" # requests doesn't guess the encoding correctly\n\n # check the return code\n if not r.ok:\n _LOGGER.error(\n \"Error: the response is not ok; need code 200, but got code %s\"\n % r.status_code\n )\n return \"error\"\n\n return self._convert(r.text)\n\n def fetch_file(self, file):\n f = open(file)\n return self._convert(f.read())\n\n def _convert(self, data):\n dates = self._ics.convert(data)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py"}]}
| 3,630 | 187 |
gh_patches_debug_35619
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-616
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Uncaught 404s in record viewsets and pagination classes
## Description
The record viewset, column viewset and pagination classes regularly call `Table.objects.get(id=table_pk)`, which throws a `mathesar.models.Table.DoesNotExist: Table matching query does not exist.` when an invalid table id is passed.
To recreate, run `client.get(f'/api/v0/tables/3000/records/')`.
## Expected behavior
We should ensure that the table exists before querying, or catch the `DoesNotExist` error after querying. We should also include tests for table 404s.
This is probably best done after #488 is merged, as it includes a function to do exactly this.
--- END ISSUE ---
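Editorial aside: since the repository already ships a `get_table_or_404` helper (see `mathesar/api/utils.py` below), the fix is largely a matter of routing lookups through it; the other piece the issue asks for is a regression test. A hypothetical test along these lines (pytest-django style `client` fixture assumed, not necessarily the project's test conventions):

```python
# Hypothetical regression test for the missing-table 404 behaviour described above.
import pytest


@pytest.mark.django_db
def test_records_list_returns_404_for_missing_table(client):
    response = client.get('/api/v0/tables/3000/records/')
    assert response.status_code == 404
```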
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/api/pagination.py`
Content:
```
1 from collections import OrderedDict
2
3 from rest_framework.pagination import LimitOffsetPagination
4 from rest_framework.response import Response
5
6
7 class DefaultLimitOffsetPagination(LimitOffsetPagination):
8 default_limit = 50
9 max_limit = 500
10
11 def get_paginated_response(self, data):
12 return Response(OrderedDict([
13 ('count', self.count),
14 ('results', data)
15 ]))
16
17
18 class ColumnLimitOffsetPagination(DefaultLimitOffsetPagination):
19
20 def paginate_queryset(self, queryset, request, table_id):
21 self.limit = self.get_limit(request)
22 if self.limit is None:
23 self.limit = self.default_limit
24 self.offset = self.get_offset(request)
25 table = queryset.get(id=table_id)
26 self.count = len(table.sa_columns)
27 self.request = request
28 return list(table.sa_columns)[self.offset:self.offset + self.limit]
29
30
31 class TableLimitOffsetPagination(DefaultLimitOffsetPagination):
32
33 def paginate_queryset(self, queryset, request, table_id,
34 filters=[], order_by=[]):
35 self.limit = self.get_limit(request)
36 if self.limit is None:
37 self.limit = self.default_limit
38 self.offset = self.get_offset(request)
39 # TODO: Cache count value somewhere, since calculating it is expensive.
40 table = queryset.get(id=table_id)
41 self.count = table.sa_num_records(filters=filters)
42 self.request = request
43
44 return table.get_records(
45 self.limit, self.offset, filters=filters, order_by=order_by,
46 )
47
48
49 class TableLimitOffsetGroupPagination(TableLimitOffsetPagination):
50 def get_paginated_response(self, data):
51 return Response(OrderedDict([
52 ('count', self.count),
53 ('group_count', self.group_count),
54 ('results', data)
55 ]))
56
57 def paginate_queryset(self, queryset, request, table_id,
58 filters=[], order_by=[], group_count_by=[]):
59 records = super().paginate_queryset(
60 queryset, request, table_id, filters=filters, order_by=order_by
61 )
62
63 table = queryset.get(id=table_id)
64 if group_count_by:
65 group_count = table.get_group_counts(
66 group_count_by, self.limit, self.offset,
67 filters=filters, order_by=order_by
68 )
69 # Convert the tuple keys into strings so it can be converted to JSON
70 group_count = [{"values": list(cols), "count": count}
71 for cols, count in group_count.items()]
72 self.group_count = {
73 'group_count_by': group_count_by,
74 'results': group_count,
75 }
76 else:
77 self.group_count = {
78 'group_count_by': None,
79 'results': None,
80 }
81
82 return records
83
```
Path: `mathesar/api/utils.py`
Content:
```
1 from rest_framework.exceptions import NotFound
2
3 from mathesar.models import Table
4
5
6 def get_table_or_404(pk):
7 try:
8 table = Table.objects.get(id=pk)
9 except Table.DoesNotExist:
10 raise NotFound
11 return table
12
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mathesar/api/pagination.py b/mathesar/api/pagination.py
--- a/mathesar/api/pagination.py
+++ b/mathesar/api/pagination.py
@@ -3,6 +3,8 @@
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
+from mathesar.api.utils import get_table_or_404
+
class DefaultLimitOffsetPagination(LimitOffsetPagination):
default_limit = 50
@@ -22,7 +24,7 @@
if self.limit is None:
self.limit = self.default_limit
self.offset = self.get_offset(request)
- table = queryset.get(id=table_id)
+ table = get_table_or_404(pk=table_id)
self.count = len(table.sa_columns)
self.request = request
return list(table.sa_columns)[self.offset:self.offset + self.limit]
@@ -37,7 +39,7 @@
self.limit = self.default_limit
self.offset = self.get_offset(request)
# TODO: Cache count value somewhere, since calculating it is expensive.
- table = queryset.get(id=table_id)
+ table = get_table_or_404(pk=table_id)
self.count = table.sa_num_records(filters=filters)
self.request = request
@@ -60,7 +62,7 @@
queryset, request, table_id, filters=filters, order_by=order_by
)
- table = queryset.get(id=table_id)
+ table = get_table_or_404(pk=table_id)
if group_count_by:
group_count = table.get_group_counts(
group_count_by, self.limit, self.offset,
diff --git a/mathesar/api/utils.py b/mathesar/api/utils.py
--- a/mathesar/api/utils.py
+++ b/mathesar/api/utils.py
@@ -4,6 +4,13 @@
def get_table_or_404(pk):
+ """
+ Get table if it exists, otherwise throws a DRF NotFound error.
+ Args:
+ pk: id of table
+ Returns:
+ table: return the table based on a specific id
+ """
try:
table = Table.objects.get(id=pk)
except Table.DoesNotExist:
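The issue above also asks for tests covering table 404s. A minimal regression-test sketch for the patched behaviour could look like the following; the `client` fixture name is an assumption, and only the URL comes from the reproduction step in the issue.
```python
# Sketch only: assumes a Django/DRF test client fixture named `client`.
def test_records_endpoint_returns_404_for_missing_table(client):
    response = client.get('/api/v0/tables/3000/records/')
    # With get_table_or_404 in place, Table.DoesNotExist is translated into
    # DRF's NotFound, so the API answers 404 instead of raising.
    assert response.status_code == 404
```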
|
{"golden_diff": "diff --git a/mathesar/api/pagination.py b/mathesar/api/pagination.py\n--- a/mathesar/api/pagination.py\n+++ b/mathesar/api/pagination.py\n@@ -3,6 +3,8 @@\n from rest_framework.pagination import LimitOffsetPagination\n from rest_framework.response import Response\n \n+from mathesar.api.utils import get_table_or_404\n+\n \n class DefaultLimitOffsetPagination(LimitOffsetPagination):\n default_limit = 50\n@@ -22,7 +24,7 @@\n if self.limit is None:\n self.limit = self.default_limit\n self.offset = self.get_offset(request)\n- table = queryset.get(id=table_id)\n+ table = get_table_or_404(pk=table_id)\n self.count = len(table.sa_columns)\n self.request = request\n return list(table.sa_columns)[self.offset:self.offset + self.limit]\n@@ -37,7 +39,7 @@\n self.limit = self.default_limit\n self.offset = self.get_offset(request)\n # TODO: Cache count value somewhere, since calculating it is expensive.\n- table = queryset.get(id=table_id)\n+ table = get_table_or_404(pk=table_id)\n self.count = table.sa_num_records(filters=filters)\n self.request = request\n \n@@ -60,7 +62,7 @@\n queryset, request, table_id, filters=filters, order_by=order_by\n )\n \n- table = queryset.get(id=table_id)\n+ table = get_table_or_404(pk=table_id)\n if group_count_by:\n group_count = table.get_group_counts(\n group_count_by, self.limit, self.offset,\ndiff --git a/mathesar/api/utils.py b/mathesar/api/utils.py\n--- a/mathesar/api/utils.py\n+++ b/mathesar/api/utils.py\n@@ -4,6 +4,13 @@\n \n \n def get_table_or_404(pk):\n+ \"\"\"\n+ Get table if it exists, otherwise throws a DRF NotFound error.\n+ Args:\n+ pk: id of table\n+ Returns:\n+ table: return the table based on a specific id\n+ \"\"\"\n try:\n table = Table.objects.get(id=pk)\n except Table.DoesNotExist:\n", "issue": "Uncaught 404s in record viewsets and pagination classes\n## Description\r\nThe record viewset, column viewset and paignation classes regularly call `Table.objects.get(id=table_pk)`, which throws a `mathesar.models.Table.DoesNotExist: Table matching query does not exist.` when an invalid table id is passed.\r\n\r\nTo recreate, run `client.get(f'/api/v0/tables/3000/records/')`.\r\n\r\n\r\n## Expected behavior\r\nWe should ensure that the table exists before querying, or catch the `DoesNotExist` error after querying. We should also include tests for table 404s. 
\r\n\r\nThis is probably best done after #488 is merged, as it includes a function to do exactly this.\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom rest_framework.pagination import LimitOffsetPagination\nfrom rest_framework.response import Response\n\n\nclass DefaultLimitOffsetPagination(LimitOffsetPagination):\n default_limit = 50\n max_limit = 500\n\n def get_paginated_response(self, data):\n return Response(OrderedDict([\n ('count', self.count),\n ('results', data)\n ]))\n\n\nclass ColumnLimitOffsetPagination(DefaultLimitOffsetPagination):\n\n def paginate_queryset(self, queryset, request, table_id):\n self.limit = self.get_limit(request)\n if self.limit is None:\n self.limit = self.default_limit\n self.offset = self.get_offset(request)\n table = queryset.get(id=table_id)\n self.count = len(table.sa_columns)\n self.request = request\n return list(table.sa_columns)[self.offset:self.offset + self.limit]\n\n\nclass TableLimitOffsetPagination(DefaultLimitOffsetPagination):\n\n def paginate_queryset(self, queryset, request, table_id,\n filters=[], order_by=[]):\n self.limit = self.get_limit(request)\n if self.limit is None:\n self.limit = self.default_limit\n self.offset = self.get_offset(request)\n # TODO: Cache count value somewhere, since calculating it is expensive.\n table = queryset.get(id=table_id)\n self.count = table.sa_num_records(filters=filters)\n self.request = request\n\n return table.get_records(\n self.limit, self.offset, filters=filters, order_by=order_by,\n )\n\n\nclass TableLimitOffsetGroupPagination(TableLimitOffsetPagination):\n def get_paginated_response(self, data):\n return Response(OrderedDict([\n ('count', self.count),\n ('group_count', self.group_count),\n ('results', data)\n ]))\n\n def paginate_queryset(self, queryset, request, table_id,\n filters=[], order_by=[], group_count_by=[]):\n records = super().paginate_queryset(\n queryset, request, table_id, filters=filters, order_by=order_by\n )\n\n table = queryset.get(id=table_id)\n if group_count_by:\n group_count = table.get_group_counts(\n group_count_by, self.limit, self.offset,\n filters=filters, order_by=order_by\n )\n # Convert the tuple keys into strings so it can be converted to JSON\n group_count = [{\"values\": list(cols), \"count\": count}\n for cols, count in group_count.items()]\n self.group_count = {\n 'group_count_by': group_count_by,\n 'results': group_count,\n }\n else:\n self.group_count = {\n 'group_count_by': None,\n 'results': None,\n }\n\n return records\n", "path": "mathesar/api/pagination.py"}, {"content": "from rest_framework.exceptions import NotFound\n\nfrom mathesar.models import Table\n\n\ndef get_table_or_404(pk):\n try:\n table = Table.objects.get(id=pk)\n except Table.DoesNotExist:\n raise NotFound\n return table\n", "path": "mathesar/api/utils.py"}], "after_files": [{"content": "from collections import OrderedDict\n\nfrom rest_framework.pagination import LimitOffsetPagination\nfrom rest_framework.response import Response\n\nfrom mathesar.api.utils import get_table_or_404\n\n\nclass DefaultLimitOffsetPagination(LimitOffsetPagination):\n default_limit = 50\n max_limit = 500\n\n def get_paginated_response(self, data):\n return Response(OrderedDict([\n ('count', self.count),\n ('results', data)\n ]))\n\n\nclass ColumnLimitOffsetPagination(DefaultLimitOffsetPagination):\n\n def paginate_queryset(self, queryset, request, table_id):\n self.limit = self.get_limit(request)\n if self.limit is None:\n self.limit = self.default_limit\n self.offset = 
self.get_offset(request)\n table = get_table_or_404(pk=table_id)\n self.count = len(table.sa_columns)\n self.request = request\n return list(table.sa_columns)[self.offset:self.offset + self.limit]\n\n\nclass TableLimitOffsetPagination(DefaultLimitOffsetPagination):\n\n def paginate_queryset(self, queryset, request, table_id,\n filters=[], order_by=[]):\n self.limit = self.get_limit(request)\n if self.limit is None:\n self.limit = self.default_limit\n self.offset = self.get_offset(request)\n # TODO: Cache count value somewhere, since calculating it is expensive.\n table = get_table_or_404(pk=table_id)\n self.count = table.sa_num_records(filters=filters)\n self.request = request\n\n return table.get_records(\n self.limit, self.offset, filters=filters, order_by=order_by,\n )\n\n\nclass TableLimitOffsetGroupPagination(TableLimitOffsetPagination):\n def get_paginated_response(self, data):\n return Response(OrderedDict([\n ('count', self.count),\n ('group_count', self.group_count),\n ('results', data)\n ]))\n\n def paginate_queryset(self, queryset, request, table_id,\n filters=[], order_by=[], group_count_by=[]):\n records = super().paginate_queryset(\n queryset, request, table_id, filters=filters, order_by=order_by\n )\n\n table = get_table_or_404(pk=table_id)\n if group_count_by:\n group_count = table.get_group_counts(\n group_count_by, self.limit, self.offset,\n filters=filters, order_by=order_by\n )\n # Convert the tuple keys into strings so it can be converted to JSON\n group_count = [{\"values\": list(cols), \"count\": count}\n for cols, count in group_count.items()]\n self.group_count = {\n 'group_count_by': group_count_by,\n 'results': group_count,\n }\n else:\n self.group_count = {\n 'group_count_by': None,\n 'results': None,\n }\n\n return records\n", "path": "mathesar/api/pagination.py"}, {"content": "from rest_framework.exceptions import NotFound\n\nfrom mathesar.models import Table\n\n\ndef get_table_or_404(pk):\n \"\"\"\n Get table if it exists, otherwise throws a DRF NotFound error.\n Args:\n pk: id of table\n Returns:\n table: return the table based on a specific id\n \"\"\"\n try:\n table = Table.objects.get(id=pk)\n except Table.DoesNotExist:\n raise NotFound\n return table\n", "path": "mathesar/api/utils.py"}]}
| 1,228 | 487 |
gh_patches_debug_31929
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-1422
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Check if datetime fields exist in Event API payload before using them
Currently, when string datetimes are converted to Python datetime objects in the Event API, the keys are not checked for existence. So if an "optional" field is missing from the payload, it results in a `KeyError`.
Fixing this issue should also fix https://github.com/loklak/loklak_server/issues/642
--- END ISSUE ---
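The failure mode described in the issue comes down to plain dict indexing versus `dict.get`. A standalone sketch (plain Python, not taken from the repository) makes the difference concrete:
```python
# Indexing a dict raises KeyError for a missing optional field,
# while dict.get returns None instead.
payload = {'start_time': '2016-06-01T10:00:00'}  # 'closing_datetime' omitted

print(payload.get('closing_datetime'))  # -> None

try:
    payload['closing_datetime']
except KeyError as err:
    print('missing optional field raises', repr(err))
```
This is the same switch the fix below makes inside `fix_payload` by reading each datetime field with `data.get(i)`.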
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `open_event/api/events.py`
Content:
```
1 from flask.ext.restplus import Resource, Namespace, reqparse
2 from flask import g
3
4 from open_event.models.event import Event as EventModel
5 from open_event.models.social_link import SocialLink as SocialLinkModel
6 from open_event.models.users_events_roles import UsersEventsRoles
7 from open_event.models.role import Role
8 from open_event.models.user import ORGANIZER
9 from open_event.helpers.data import save_to_db, update_version, record_activity
10
11 from .helpers.helpers import get_paginated_list, requires_auth, parse_args
12 from .helpers.utils import PAGINATED_MODEL, PaginatedResourceBase, \
13 PAGE_PARAMS, POST_RESPONSES, PUT_RESPONSES, BaseDAO, ServiceDAO
14 from .helpers import custom_fields as fields
15 from helpers.special_fields import EventTypeField, EventTopicField, \
16 EventPrivacyField, EventSubTopicField
17
18 api = Namespace('events', description='Events')
19
20 EVENT_CREATOR = api.model('EventCreator', {
21 'id': fields.Integer(),
22 'email': fields.Email()
23 })
24
25 SOCIAL_LINK = api.model('SocialLink', {
26 'id': fields.Integer(),
27 'name': fields.String(),
28 'link': fields.String()
29 })
30
31 SOCIAL_LINK_POST = api.clone('SocialLinkPost', SOCIAL_LINK)
32 del SOCIAL_LINK_POST['id']
33
34 EVENT = api.model('Event', {
35 'id': fields.Integer(required=True),
36 'name': fields.String(required=True),
37 'email': fields.Email(),
38 'logo': fields.Upload(),
39 'start_time': fields.DateTime(required=True),
40 'end_time': fields.DateTime(required=True),
41 'timezone': fields.String(),
42 'latitude': fields.Float(),
43 'longitude': fields.Float(),
44 'event_url': fields.Uri(),
45 'background_url': fields.Upload(),
46 'description': fields.String(),
47 'location_name': fields.String(),
48 'organizer_name': fields.String(),
49 'organizer_description': fields.String(),
50 'state': fields.String(),
51 'closing_datetime': fields.DateTime(),
52 'type': EventTypeField(),
53 'topic': EventTopicField(),
54 'sub_topic': EventSubTopicField(),
55 'privacy': EventPrivacyField(),
56 'ticket_url': fields.Uri(),
57 'creator': fields.Nested(EVENT_CREATOR, allow_null=True),
58 'schedule_published_on': fields.DateTime(),
59 'code_of_conduct': fields.String(),
60 'social_links': fields.List(fields.Nested(SOCIAL_LINK), attribute='social_link')
61 })
62
63 EVENT_PAGINATED = api.clone('EventPaginated', PAGINATED_MODEL, {
64 'results': fields.List(fields.Nested(EVENT))
65 })
66
67 EVENT_POST = api.clone('EventPost', EVENT)
68 del EVENT_POST['id']
69 del EVENT_POST['creator']
70 del EVENT_POST['social_links']
71
72
73
74 # ###################
75 # Data Access Objects
76 # ###################
77
78
79 class SocialLinkDAO(ServiceDAO):
80 """
81 Social Link DAO
82 """
83 pass
84
85
86 class EventDAO(BaseDAO):
87 """
88 Event DAO
89 """
90
91 def fix_payload(self, data):
92 """
93 Fixes the payload data.
94 Here converts string time from datetime obj
95 """
96 data['start_time'] = EVENT_POST['start_time'].from_str(data['start_time'])
97 data['end_time'] = EVENT_POST['end_time'].from_str(data['end_time'])
98 data['closing_datetime'] = EVENT_POST['closing_datetime'].from_str(
99 data['closing_datetime'])
100 data['schedule_published_on'] = EVENT_POST['schedule_published_on'].from_str(
101 data['schedule_published_on'])
102 return data
103
104 def create(self, data, url):
105 data = self.validate(data)
106 payload = self.fix_payload(data)
107 new_event = self.model(**payload)
108 new_event.creator = g.user
109 save_to_db(new_event, "Event saved")
110 # set organizer
111 role = Role.query.filter_by(name=ORGANIZER).first()
112 uer = UsersEventsRoles(g.user, new_event, role)
113 save_to_db(uer, 'UER saved')
114 update_version(
115 event_id=new_event.id,
116 is_created=True,
117 column_to_increment="event_ver"
118 )
119 # Return created resource with a 201 status code and its Location
120 # (url) in the header.
121 resource_location = url + '/' + str(new_event.id)
122 return self.get(new_event.id), 201, {'Location': resource_location}
123
124 def update(self, event_id, data):
125 data = self.validate(data)
126 payload = self.fix_payload(data)
127 return BaseDAO.update(self, event_id, payload, validate=False)
128
129
130 LinkDAO = SocialLinkDAO(SocialLinkModel, SOCIAL_LINK_POST)
131 DAO = EventDAO(EventModel, EVENT_POST)
132
133 # DEFINE PARAMS
134
135 EVENT_PARAMS = {
136 'location': {
137 'type': str
138 },
139 'contains': {
140 'description': 'Contains the string in name and description',
141 'type': str
142 },
143 'state': {
144 'type': str
145 },
146 'privacy': {
147 'type': str
148 },
149 'type': {
150 'type': str
151 },
152 'topic': {
153 'type': str
154 },
155 'sub_topic': {
156 'type': str
157 },
158 'start_time_gt': {},
159 'start_time_lt': {},
160 'end_time_gt': {},
161 'end_time_lt': {},
162 'time_period': {
163 'type': str
164 }
165 }
166
167
168 # DEFINE RESOURCES
169
170
171 class EventResource():
172 """
173 Event Resource Base class
174 """
175 event_parser = reqparse.RequestParser()
176 event_parser.add_argument('location', type=str, dest='__event_location')
177 event_parser.add_argument('contains', type=str, dest='__event_contains')
178 event_parser.add_argument('state', type=str)
179 event_parser.add_argument('privacy', type=str)
180 event_parser.add_argument('type', type=str)
181 event_parser.add_argument('topic', type=str)
182 event_parser.add_argument('sub_topic', type=str)
183 event_parser.add_argument('start_time_gt', dest='__event_start_time_gt')
184 event_parser.add_argument('start_time_lt', dest='__event_start_time_lt')
185 event_parser.add_argument('end_time_gt', dest='__event_end_time_gt')
186 event_parser.add_argument('end_time_lt', dest='__event_end_time_lt')
187 event_parser.add_argument('time_period', type=str, dest='__event_time_period')
188
189
190 @api.route('/<int:event_id>')
191 @api.param('event_id')
192 @api.response(404, 'Event not found')
193 class Event(Resource):
194 @api.doc('get_event')
195 @api.marshal_with(EVENT)
196 def get(self, event_id):
197 """Fetch an event given its id"""
198 return DAO.get(event_id)
199
200 @requires_auth
201 @api.doc('delete_event')
202 @api.marshal_with(EVENT)
203 def delete(self, event_id):
204 """Delete an event given its id"""
205 event = DAO.delete(event_id)
206 record_activity('delete_event', event_id=event_id)
207 return event
208
209 @requires_auth
210 @api.doc('update_event', responses=PUT_RESPONSES)
211 @api.marshal_with(EVENT)
212 @api.expect(EVENT_POST)
213 def put(self, event_id):
214 """Update an event given its id"""
215 event = DAO.update(event_id, self.api.payload)
216 record_activity('update_event', event_id=event_id)
217 return event
218
219
220 @api.route('')
221 class EventList(Resource, EventResource):
222 @api.doc('list_events', params=EVENT_PARAMS)
223 @api.marshal_list_with(EVENT)
224 def get(self):
225 """List all events"""
226 return DAO.list(**parse_args(self.event_parser))
227
228 @requires_auth
229 @api.doc('create_event', responses=POST_RESPONSES)
230 @api.marshal_with(EVENT)
231 @api.expect(EVENT_POST)
232 def post(self):
233 """Create an event"""
234 item = DAO.create(self.api.payload, self.api.url_for(self))
235 record_activity('create_event', event_id=item[0].id)
236 return item
237
238
239 @api.route('/page')
240 class EventListPaginated(Resource, PaginatedResourceBase, EventResource):
241 @api.doc('list_events_paginated', params=PAGE_PARAMS)
242 @api.doc(params=EVENT_PARAMS)
243 @api.marshal_with(EVENT_PAGINATED)
244 def get(self):
245 """List events in a paginated manner"""
246 args = self.parser.parse_args()
247 return get_paginated_list(
248 EventModel, args=args,
249 **parse_args(self.event_parser)
250 )
251
252
253 @api.route('/<int:event_id>/links')
254 @api.param('event_id')
255 class SocialLinkList(Resource):
256 @api.doc('list_social_links')
257 @api.marshal_list_with(SOCIAL_LINK)
258 def get(self, event_id):
259 """List all social links"""
260 return LinkDAO.list(event_id)
261
262 @requires_auth
263 @api.doc('create_social_link', responses=POST_RESPONSES)
264 @api.marshal_with(SOCIAL_LINK_POST)
265 @api.expect(SOCIAL_LINK_POST)
266 def post(self, event_id):
267 """Create a social link"""
268 return LinkDAO.create(
269 event_id,
270 self.api.payload,
271 self.api.url_for(self, event_id=event_id)
272 )
273
274
275 @api.route('/<int:event_id>/links/<int:link_id>')
276 class SocialLink(Resource):
277 @requires_auth
278 @api.doc('delete_social_link')
279 @api.marshal_with(SOCIAL_LINK)
280 def delete(self, event_id, link_id):
281 """Delete a social link given its id"""
282 return LinkDAO.delete(event_id, link_id)
283
284 @requires_auth
285 @api.doc('update_social_link', responses=PUT_RESPONSES)
286 @api.marshal_with(SOCIAL_LINK_POST)
287 @api.expect(SOCIAL_LINK_POST)
288 def put(self, event_id, link_id):
289 """Update a social link given its id"""
290 return LinkDAO.update(event_id, link_id, self.api.payload)
291
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/open_event/api/events.py b/open_event/api/events.py
--- a/open_event/api/events.py
+++ b/open_event/api/events.py
@@ -70,7 +70,6 @@
del EVENT_POST['social_links']
-
# ###################
# Data Access Objects
# ###################
@@ -93,12 +92,12 @@
Fixes the payload data.
Here converts string time from datetime obj
"""
- data['start_time'] = EVENT_POST['start_time'].from_str(data['start_time'])
- data['end_time'] = EVENT_POST['end_time'].from_str(data['end_time'])
- data['closing_datetime'] = EVENT_POST['closing_datetime'].from_str(
- data['closing_datetime'])
- data['schedule_published_on'] = EVENT_POST['schedule_published_on'].from_str(
- data['schedule_published_on'])
+ datetime_fields = [
+ 'start_time', 'end_time', 'closing_datetime',
+ 'schedule_published_on'
+ ]
+ for i in datetime_fields:
+ data[i] = EVENT_POST[i].from_str(data.get(i))
return data
def create(self, data, url):
@@ -133,35 +132,20 @@
# DEFINE PARAMS
EVENT_PARAMS = {
- 'location': {
- 'type': str
- },
+ 'location': {},
'contains': {
- 'description': 'Contains the string in name and description',
- 'type': str
- },
- 'state': {
- 'type': str
- },
- 'privacy': {
- 'type': str
- },
- 'type': {
- 'type': str
- },
- 'topic': {
- 'type': str
- },
- 'sub_topic': {
- 'type': str
+ 'description': 'Contains the string in name and description'
},
+ 'state': {},
+ 'privacy': {},
+ 'type': {},
+ 'topic': {},
+ 'sub_topic': {},
'start_time_gt': {},
'start_time_lt': {},
'end_time_gt': {},
'end_time_lt': {},
- 'time_period': {
- 'type': str
- }
+ 'time_period': {}
}
|
{"golden_diff": "diff --git a/open_event/api/events.py b/open_event/api/events.py\n--- a/open_event/api/events.py\n+++ b/open_event/api/events.py\n@@ -70,7 +70,6 @@\n del EVENT_POST['social_links']\n \n \n-\n # ###################\n # Data Access Objects\n # ###################\n@@ -93,12 +92,12 @@\n Fixes the payload data.\n Here converts string time from datetime obj\n \"\"\"\n- data['start_time'] = EVENT_POST['start_time'].from_str(data['start_time'])\n- data['end_time'] = EVENT_POST['end_time'].from_str(data['end_time'])\n- data['closing_datetime'] = EVENT_POST['closing_datetime'].from_str(\n- data['closing_datetime'])\n- data['schedule_published_on'] = EVENT_POST['schedule_published_on'].from_str(\n- data['schedule_published_on'])\n+ datetime_fields = [\n+ 'start_time', 'end_time', 'closing_datetime',\n+ 'schedule_published_on'\n+ ]\n+ for i in datetime_fields:\n+ data[i] = EVENT_POST[i].from_str(data.get(i))\n return data\n \n def create(self, data, url):\n@@ -133,35 +132,20 @@\n # DEFINE PARAMS\n \n EVENT_PARAMS = {\n- 'location': {\n- 'type': str\n- },\n+ 'location': {},\n 'contains': {\n- 'description': 'Contains the string in name and description',\n- 'type': str\n- },\n- 'state': {\n- 'type': str\n- },\n- 'privacy': {\n- 'type': str\n- },\n- 'type': {\n- 'type': str\n- },\n- 'topic': {\n- 'type': str\n- },\n- 'sub_topic': {\n- 'type': str\n+ 'description': 'Contains the string in name and description'\n },\n+ 'state': {},\n+ 'privacy': {},\n+ 'type': {},\n+ 'topic': {},\n+ 'sub_topic': {},\n 'start_time_gt': {},\n 'start_time_lt': {},\n 'end_time_gt': {},\n 'end_time_lt': {},\n- 'time_period': {\n- 'type': str\n- }\n+ 'time_period': {}\n }\n", "issue": "Check if datetime fields exists in Event API payload before using them\nCurrently when string datetime's are converted to python datetime in Event API, then the keys are not being checked for existence. So if an \"optional\" field is missing from payload, it results in `KeyError`. 
\n\nFixing this issue should also fix https://github.com/loklak/loklak_server/issues/642\n\n", "before_files": [{"content": "from flask.ext.restplus import Resource, Namespace, reqparse\nfrom flask import g\n\nfrom open_event.models.event import Event as EventModel\nfrom open_event.models.social_link import SocialLink as SocialLinkModel\nfrom open_event.models.users_events_roles import UsersEventsRoles\nfrom open_event.models.role import Role\nfrom open_event.models.user import ORGANIZER\nfrom open_event.helpers.data import save_to_db, update_version, record_activity\n\nfrom .helpers.helpers import get_paginated_list, requires_auth, parse_args\nfrom .helpers.utils import PAGINATED_MODEL, PaginatedResourceBase, \\\n PAGE_PARAMS, POST_RESPONSES, PUT_RESPONSES, BaseDAO, ServiceDAO\nfrom .helpers import custom_fields as fields\nfrom helpers.special_fields import EventTypeField, EventTopicField, \\\n EventPrivacyField, EventSubTopicField\n\napi = Namespace('events', description='Events')\n\nEVENT_CREATOR = api.model('EventCreator', {\n 'id': fields.Integer(),\n 'email': fields.Email()\n})\n\nSOCIAL_LINK = api.model('SocialLink', {\n 'id': fields.Integer(),\n 'name': fields.String(),\n 'link': fields.String()\n})\n\nSOCIAL_LINK_POST = api.clone('SocialLinkPost', SOCIAL_LINK)\ndel SOCIAL_LINK_POST['id']\n\nEVENT = api.model('Event', {\n 'id': fields.Integer(required=True),\n 'name': fields.String(required=True),\n 'email': fields.Email(),\n 'logo': fields.Upload(),\n 'start_time': fields.DateTime(required=True),\n 'end_time': fields.DateTime(required=True),\n 'timezone': fields.String(),\n 'latitude': fields.Float(),\n 'longitude': fields.Float(),\n 'event_url': fields.Uri(),\n 'background_url': fields.Upload(),\n 'description': fields.String(),\n 'location_name': fields.String(),\n 'organizer_name': fields.String(),\n 'organizer_description': fields.String(),\n 'state': fields.String(),\n 'closing_datetime': fields.DateTime(),\n 'type': EventTypeField(),\n 'topic': EventTopicField(),\n 'sub_topic': EventSubTopicField(),\n 'privacy': EventPrivacyField(),\n 'ticket_url': fields.Uri(),\n 'creator': fields.Nested(EVENT_CREATOR, allow_null=True),\n 'schedule_published_on': fields.DateTime(),\n 'code_of_conduct': fields.String(),\n 'social_links': fields.List(fields.Nested(SOCIAL_LINK), attribute='social_link')\n})\n\nEVENT_PAGINATED = api.clone('EventPaginated', PAGINATED_MODEL, {\n 'results': fields.List(fields.Nested(EVENT))\n})\n\nEVENT_POST = api.clone('EventPost', EVENT)\ndel EVENT_POST['id']\ndel EVENT_POST['creator']\ndel EVENT_POST['social_links']\n\n\n\n# ###################\n# Data Access Objects\n# ###################\n\n\nclass SocialLinkDAO(ServiceDAO):\n \"\"\"\n Social Link DAO\n \"\"\"\n pass\n\n\nclass EventDAO(BaseDAO):\n \"\"\"\n Event DAO\n \"\"\"\n\n def fix_payload(self, data):\n \"\"\"\n Fixes the payload data.\n Here converts string time from datetime obj\n \"\"\"\n data['start_time'] = EVENT_POST['start_time'].from_str(data['start_time'])\n data['end_time'] = EVENT_POST['end_time'].from_str(data['end_time'])\n data['closing_datetime'] = EVENT_POST['closing_datetime'].from_str(\n data['closing_datetime'])\n data['schedule_published_on'] = EVENT_POST['schedule_published_on'].from_str(\n data['schedule_published_on'])\n return data\n\n def create(self, data, url):\n data = self.validate(data)\n payload = self.fix_payload(data)\n new_event = self.model(**payload)\n new_event.creator = g.user\n save_to_db(new_event, \"Event saved\")\n # set organizer\n role = 
Role.query.filter_by(name=ORGANIZER).first()\n uer = UsersEventsRoles(g.user, new_event, role)\n save_to_db(uer, 'UER saved')\n update_version(\n event_id=new_event.id,\n is_created=True,\n column_to_increment=\"event_ver\"\n )\n # Return created resource with a 201 status code and its Location\n # (url) in the header.\n resource_location = url + '/' + str(new_event.id)\n return self.get(new_event.id), 201, {'Location': resource_location}\n\n def update(self, event_id, data):\n data = self.validate(data)\n payload = self.fix_payload(data)\n return BaseDAO.update(self, event_id, payload, validate=False)\n\n\nLinkDAO = SocialLinkDAO(SocialLinkModel, SOCIAL_LINK_POST)\nDAO = EventDAO(EventModel, EVENT_POST)\n\n# DEFINE PARAMS\n\nEVENT_PARAMS = {\n 'location': {\n 'type': str\n },\n 'contains': {\n 'description': 'Contains the string in name and description',\n 'type': str\n },\n 'state': {\n 'type': str\n },\n 'privacy': {\n 'type': str\n },\n 'type': {\n 'type': str\n },\n 'topic': {\n 'type': str\n },\n 'sub_topic': {\n 'type': str\n },\n 'start_time_gt': {},\n 'start_time_lt': {},\n 'end_time_gt': {},\n 'end_time_lt': {},\n 'time_period': {\n 'type': str\n }\n}\n\n\n# DEFINE RESOURCES\n\n\nclass EventResource():\n \"\"\"\n Event Resource Base class\n \"\"\"\n event_parser = reqparse.RequestParser()\n event_parser.add_argument('location', type=str, dest='__event_location')\n event_parser.add_argument('contains', type=str, dest='__event_contains')\n event_parser.add_argument('state', type=str)\n event_parser.add_argument('privacy', type=str)\n event_parser.add_argument('type', type=str)\n event_parser.add_argument('topic', type=str)\n event_parser.add_argument('sub_topic', type=str)\n event_parser.add_argument('start_time_gt', dest='__event_start_time_gt')\n event_parser.add_argument('start_time_lt', dest='__event_start_time_lt')\n event_parser.add_argument('end_time_gt', dest='__event_end_time_gt')\n event_parser.add_argument('end_time_lt', dest='__event_end_time_lt')\n event_parser.add_argument('time_period', type=str, dest='__event_time_period')\n\n\[email protected]('/<int:event_id>')\[email protected]('event_id')\[email protected](404, 'Event not found')\nclass Event(Resource):\n @api.doc('get_event')\n @api.marshal_with(EVENT)\n def get(self, event_id):\n \"\"\"Fetch an event given its id\"\"\"\n return DAO.get(event_id)\n\n @requires_auth\n @api.doc('delete_event')\n @api.marshal_with(EVENT)\n def delete(self, event_id):\n \"\"\"Delete an event given its id\"\"\"\n event = DAO.delete(event_id)\n record_activity('delete_event', event_id=event_id)\n return event\n\n @requires_auth\n @api.doc('update_event', responses=PUT_RESPONSES)\n @api.marshal_with(EVENT)\n @api.expect(EVENT_POST)\n def put(self, event_id):\n \"\"\"Update an event given its id\"\"\"\n event = DAO.update(event_id, self.api.payload)\n record_activity('update_event', event_id=event_id)\n return event\n\n\[email protected]('')\nclass EventList(Resource, EventResource):\n @api.doc('list_events', params=EVENT_PARAMS)\n @api.marshal_list_with(EVENT)\n def get(self):\n \"\"\"List all events\"\"\"\n return DAO.list(**parse_args(self.event_parser))\n\n @requires_auth\n @api.doc('create_event', responses=POST_RESPONSES)\n @api.marshal_with(EVENT)\n @api.expect(EVENT_POST)\n def post(self):\n \"\"\"Create an event\"\"\"\n item = DAO.create(self.api.payload, self.api.url_for(self))\n record_activity('create_event', event_id=item[0].id)\n return item\n\n\[email protected]('/page')\nclass EventListPaginated(Resource, 
PaginatedResourceBase, EventResource):\n @api.doc('list_events_paginated', params=PAGE_PARAMS)\n @api.doc(params=EVENT_PARAMS)\n @api.marshal_with(EVENT_PAGINATED)\n def get(self):\n \"\"\"List events in a paginated manner\"\"\"\n args = self.parser.parse_args()\n return get_paginated_list(\n EventModel, args=args,\n **parse_args(self.event_parser)\n )\n\n\[email protected]('/<int:event_id>/links')\[email protected]('event_id')\nclass SocialLinkList(Resource):\n @api.doc('list_social_links')\n @api.marshal_list_with(SOCIAL_LINK)\n def get(self, event_id):\n \"\"\"List all social links\"\"\"\n return LinkDAO.list(event_id)\n\n @requires_auth\n @api.doc('create_social_link', responses=POST_RESPONSES)\n @api.marshal_with(SOCIAL_LINK_POST)\n @api.expect(SOCIAL_LINK_POST)\n def post(self, event_id):\n \"\"\"Create a social link\"\"\"\n return LinkDAO.create(\n event_id,\n self.api.payload,\n self.api.url_for(self, event_id=event_id)\n )\n\n\[email protected]('/<int:event_id>/links/<int:link_id>')\nclass SocialLink(Resource):\n @requires_auth\n @api.doc('delete_social_link')\n @api.marshal_with(SOCIAL_LINK)\n def delete(self, event_id, link_id):\n \"\"\"Delete a social link given its id\"\"\"\n return LinkDAO.delete(event_id, link_id)\n\n @requires_auth\n @api.doc('update_social_link', responses=PUT_RESPONSES)\n @api.marshal_with(SOCIAL_LINK_POST)\n @api.expect(SOCIAL_LINK_POST)\n def put(self, event_id, link_id):\n \"\"\"Update a social link given its id\"\"\"\n return LinkDAO.update(event_id, link_id, self.api.payload)\n", "path": "open_event/api/events.py"}], "after_files": [{"content": "from flask.ext.restplus import Resource, Namespace, reqparse\nfrom flask import g\n\nfrom open_event.models.event import Event as EventModel\nfrom open_event.models.social_link import SocialLink as SocialLinkModel\nfrom open_event.models.users_events_roles import UsersEventsRoles\nfrom open_event.models.role import Role\nfrom open_event.models.user import ORGANIZER\nfrom open_event.helpers.data import save_to_db, update_version, record_activity\n\nfrom .helpers.helpers import get_paginated_list, requires_auth, parse_args\nfrom .helpers.utils import PAGINATED_MODEL, PaginatedResourceBase, \\\n PAGE_PARAMS, POST_RESPONSES, PUT_RESPONSES, BaseDAO, ServiceDAO\nfrom .helpers import custom_fields as fields\nfrom helpers.special_fields import EventTypeField, EventTopicField, \\\n EventPrivacyField, EventSubTopicField\n\napi = Namespace('events', description='Events')\n\nEVENT_CREATOR = api.model('EventCreator', {\n 'id': fields.Integer(),\n 'email': fields.Email()\n})\n\nSOCIAL_LINK = api.model('SocialLink', {\n 'id': fields.Integer(),\n 'name': fields.String(),\n 'link': fields.String()\n})\n\nSOCIAL_LINK_POST = api.clone('SocialLinkPost', SOCIAL_LINK)\ndel SOCIAL_LINK_POST['id']\n\nEVENT = api.model('Event', {\n 'id': fields.Integer(required=True),\n 'name': fields.String(required=True),\n 'email': fields.Email(),\n 'logo': fields.Upload(),\n 'start_time': fields.DateTime(required=True),\n 'end_time': fields.DateTime(required=True),\n 'timezone': fields.String(),\n 'latitude': fields.Float(),\n 'longitude': fields.Float(),\n 'event_url': fields.Uri(),\n 'background_url': fields.Upload(),\n 'description': fields.String(),\n 'location_name': fields.String(),\n 'organizer_name': fields.String(),\n 'organizer_description': fields.String(),\n 'state': fields.String(),\n 'closing_datetime': fields.DateTime(),\n 'type': EventTypeField(),\n 'topic': EventTopicField(),\n 'sub_topic': EventSubTopicField(),\n 'privacy': 
EventPrivacyField(),\n 'ticket_url': fields.Uri(),\n 'creator': fields.Nested(EVENT_CREATOR, allow_null=True),\n 'schedule_published_on': fields.DateTime(),\n 'code_of_conduct': fields.String(),\n 'social_links': fields.List(fields.Nested(SOCIAL_LINK), attribute='social_link')\n})\n\nEVENT_PAGINATED = api.clone('EventPaginated', PAGINATED_MODEL, {\n 'results': fields.List(fields.Nested(EVENT))\n})\n\nEVENT_POST = api.clone('EventPost', EVENT)\ndel EVENT_POST['id']\ndel EVENT_POST['creator']\ndel EVENT_POST['social_links']\n\n\n# ###################\n# Data Access Objects\n# ###################\n\n\nclass SocialLinkDAO(ServiceDAO):\n \"\"\"\n Social Link DAO\n \"\"\"\n pass\n\n\nclass EventDAO(BaseDAO):\n \"\"\"\n Event DAO\n \"\"\"\n\n def fix_payload(self, data):\n \"\"\"\n Fixes the payload data.\n Here converts string time from datetime obj\n \"\"\"\n datetime_fields = [\n 'start_time', 'end_time', 'closing_datetime',\n 'schedule_published_on'\n ]\n for i in datetime_fields:\n data[i] = EVENT_POST[i].from_str(data.get(i))\n return data\n\n def create(self, data, url):\n data = self.validate(data)\n payload = self.fix_payload(data)\n new_event = self.model(**payload)\n new_event.creator = g.user\n save_to_db(new_event, \"Event saved\")\n # set organizer\n role = Role.query.filter_by(name=ORGANIZER).first()\n uer = UsersEventsRoles(g.user, new_event, role)\n save_to_db(uer, 'UER saved')\n update_version(\n event_id=new_event.id,\n is_created=True,\n column_to_increment=\"event_ver\"\n )\n # Return created resource with a 201 status code and its Location\n # (url) in the header.\n resource_location = url + '/' + str(new_event.id)\n return self.get(new_event.id), 201, {'Location': resource_location}\n\n def update(self, event_id, data):\n data = self.validate(data)\n payload = self.fix_payload(data)\n return BaseDAO.update(self, event_id, payload, validate=False)\n\n\nLinkDAO = SocialLinkDAO(SocialLinkModel, SOCIAL_LINK_POST)\nDAO = EventDAO(EventModel, EVENT_POST)\n\n# DEFINE PARAMS\n\nEVENT_PARAMS = {\n 'location': {},\n 'contains': {\n 'description': 'Contains the string in name and description'\n },\n 'state': {},\n 'privacy': {},\n 'type': {},\n 'topic': {},\n 'sub_topic': {},\n 'start_time_gt': {},\n 'start_time_lt': {},\n 'end_time_gt': {},\n 'end_time_lt': {},\n 'time_period': {}\n}\n\n\n# DEFINE RESOURCES\n\n\nclass EventResource():\n \"\"\"\n Event Resource Base class\n \"\"\"\n event_parser = reqparse.RequestParser()\n event_parser.add_argument('location', type=str, dest='__event_location')\n event_parser.add_argument('contains', type=str, dest='__event_contains')\n event_parser.add_argument('state', type=str)\n event_parser.add_argument('privacy', type=str)\n event_parser.add_argument('type', type=str)\n event_parser.add_argument('topic', type=str)\n event_parser.add_argument('sub_topic', type=str)\n event_parser.add_argument('start_time_gt', dest='__event_start_time_gt')\n event_parser.add_argument('start_time_lt', dest='__event_start_time_lt')\n event_parser.add_argument('end_time_gt', dest='__event_end_time_gt')\n event_parser.add_argument('end_time_lt', dest='__event_end_time_lt')\n event_parser.add_argument('time_period', type=str, dest='__event_time_period')\n\n\[email protected]('/<int:event_id>')\[email protected]('event_id')\[email protected](404, 'Event not found')\nclass Event(Resource):\n @api.doc('get_event')\n @api.marshal_with(EVENT)\n def get(self, event_id):\n \"\"\"Fetch an event given its id\"\"\"\n return DAO.get(event_id)\n\n @requires_auth\n 
@api.doc('delete_event')\n @api.marshal_with(EVENT)\n def delete(self, event_id):\n \"\"\"Delete an event given its id\"\"\"\n event = DAO.delete(event_id)\n record_activity('delete_event', event_id=event_id)\n return event\n\n @requires_auth\n @api.doc('update_event', responses=PUT_RESPONSES)\n @api.marshal_with(EVENT)\n @api.expect(EVENT_POST)\n def put(self, event_id):\n \"\"\"Update an event given its id\"\"\"\n event = DAO.update(event_id, self.api.payload)\n record_activity('update_event', event_id=event_id)\n return event\n\n\[email protected]('')\nclass EventList(Resource, EventResource):\n @api.doc('list_events', params=EVENT_PARAMS)\n @api.marshal_list_with(EVENT)\n def get(self):\n \"\"\"List all events\"\"\"\n return DAO.list(**parse_args(self.event_parser))\n\n @requires_auth\n @api.doc('create_event', responses=POST_RESPONSES)\n @api.marshal_with(EVENT)\n @api.expect(EVENT_POST)\n def post(self):\n \"\"\"Create an event\"\"\"\n item = DAO.create(self.api.payload, self.api.url_for(self))\n record_activity('create_event', event_id=item[0].id)\n return item\n\n\[email protected]('/page')\nclass EventListPaginated(Resource, PaginatedResourceBase, EventResource):\n @api.doc('list_events_paginated', params=PAGE_PARAMS)\n @api.doc(params=EVENT_PARAMS)\n @api.marshal_with(EVENT_PAGINATED)\n def get(self):\n \"\"\"List events in a paginated manner\"\"\"\n args = self.parser.parse_args()\n return get_paginated_list(\n EventModel, args=args,\n **parse_args(self.event_parser)\n )\n\n\[email protected]('/<int:event_id>/links')\[email protected]('event_id')\nclass SocialLinkList(Resource):\n @api.doc('list_social_links')\n @api.marshal_list_with(SOCIAL_LINK)\n def get(self, event_id):\n \"\"\"List all social links\"\"\"\n return LinkDAO.list(event_id)\n\n @requires_auth\n @api.doc('create_social_link', responses=POST_RESPONSES)\n @api.marshal_with(SOCIAL_LINK_POST)\n @api.expect(SOCIAL_LINK_POST)\n def post(self, event_id):\n \"\"\"Create a social link\"\"\"\n return LinkDAO.create(\n event_id,\n self.api.payload,\n self.api.url_for(self, event_id=event_id)\n )\n\n\[email protected]('/<int:event_id>/links/<int:link_id>')\nclass SocialLink(Resource):\n @requires_auth\n @api.doc('delete_social_link')\n @api.marshal_with(SOCIAL_LINK)\n def delete(self, event_id, link_id):\n \"\"\"Delete a social link given its id\"\"\"\n return LinkDAO.delete(event_id, link_id)\n\n @requires_auth\n @api.doc('update_social_link', responses=PUT_RESPONSES)\n @api.marshal_with(SOCIAL_LINK_POST)\n @api.expect(SOCIAL_LINK_POST)\n def put(self, event_id, link_id):\n \"\"\"Update a social link given its id\"\"\"\n return LinkDAO.update(event_id, link_id, self.api.payload)\n", "path": "open_event/api/events.py"}]}
| 3,252 | 509 |
gh_patches_debug_3160
|
rasdani/github-patches
|
git_diff
|
ipython__ipython-7560
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Displaying a widget using displayhook produces misaligned Out[N] prompt

This doesn't look right. @jdfreder, can you investigate?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/kernel/zmq/displayhook.py`
Content:
```
1 """Replacements for sys.displayhook that publish over ZMQ."""
2
3 # Copyright (c) IPython Development Team.
4 # Distributed under the terms of the Modified BSD License.
5
6 import sys
7
8 from IPython.core.displayhook import DisplayHook
9 from IPython.kernel.inprocess.socket import SocketABC
10 from IPython.utils.jsonutil import encode_images
11 from IPython.utils.py3compat import builtin_mod
12 from IPython.utils.traitlets import Instance, Dict
13 from .session import extract_header, Session
14
15 class ZMQDisplayHook(object):
16 """A simple displayhook that publishes the object's repr over a ZeroMQ
17 socket."""
18 topic=b'execute_result'
19
20 def __init__(self, session, pub_socket):
21 self.session = session
22 self.pub_socket = pub_socket
23 self.parent_header = {}
24
25 def __call__(self, obj):
26 if obj is None:
27 return
28
29 builtin_mod._ = obj
30 sys.stdout.flush()
31 sys.stderr.flush()
32 msg = self.session.send(self.pub_socket, u'execute_result', {u'data':repr(obj)},
33 parent=self.parent_header, ident=self.topic)
34
35 def set_parent(self, parent):
36 self.parent_header = extract_header(parent)
37
38
39 class ZMQShellDisplayHook(DisplayHook):
40 """A displayhook subclass that publishes data using ZeroMQ. This is intended
41 to work with an InteractiveShell instance. It sends a dict of different
42 representations of the object."""
43 topic=None
44
45 session = Instance(Session)
46 pub_socket = Instance(SocketABC)
47 parent_header = Dict({})
48
49 def set_parent(self, parent):
50 """Set the parent for outbound messages."""
51 self.parent_header = extract_header(parent)
52
53 def start_displayhook(self):
54 self.msg = self.session.msg(u'execute_result', {
55 'data': {},
56 'metadata': {},
57 }, parent=self.parent_header)
58
59 def write_output_prompt(self):
60 """Write the output prompt."""
61 self.msg['content']['execution_count'] = self.prompt_count
62
63 def write_format_data(self, format_dict, md_dict=None):
64 self.msg['content']['data'] = encode_images(format_dict)
65 self.msg['content']['metadata'] = md_dict
66
67 def finish_displayhook(self):
68 """Finish up all displayhook activities."""
69 sys.stdout.flush()
70 sys.stderr.flush()
71 self.session.send(self.pub_socket, self.msg, ident=self.topic)
72 self.msg = None
73
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/IPython/kernel/zmq/displayhook.py b/IPython/kernel/zmq/displayhook.py
--- a/IPython/kernel/zmq/displayhook.py
+++ b/IPython/kernel/zmq/displayhook.py
@@ -68,6 +68,7 @@
"""Finish up all displayhook activities."""
sys.stdout.flush()
sys.stderr.flush()
- self.session.send(self.pub_socket, self.msg, ident=self.topic)
+ if self.msg['content']['data']:
+ self.session.send(self.pub_socket, self.msg, ident=self.topic)
self.msg = None
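The one-line guard in this patch works because an empty `data` dict is falsy. A small sketch of the same pattern, in plain Python and independent of the IPython code base:
```python
# When the formatter produced no representations (e.g. a widget that renders
# itself via display messages), skip the execute_result message entirely so
# the frontend never draws an empty Out[N] prompt.
def maybe_publish(msg, send):
    if msg['content']['data']:   # empty dict -> falsy -> nothing is sent
        send(msg)

maybe_publish({'content': {'data': {}}}, send=print)                    # prints nothing
maybe_publish({'content': {'data': {'text/plain': '42'}}}, send=print)  # publishes
```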
|
{"golden_diff": "diff --git a/IPython/kernel/zmq/displayhook.py b/IPython/kernel/zmq/displayhook.py\n--- a/IPython/kernel/zmq/displayhook.py\n+++ b/IPython/kernel/zmq/displayhook.py\n@@ -68,6 +68,7 @@\n \"\"\"Finish up all displayhook activities.\"\"\"\n sys.stdout.flush()\n sys.stderr.flush()\n- self.session.send(self.pub_socket, self.msg, ident=self.topic)\n+ if self.msg['content']['data']:\n+ self.session.send(self.pub_socket, self.msg, ident=self.topic)\n self.msg = None\n", "issue": "Displaying a widget using displayhook produces misaligned Out[N] prompt\n\n\nThis doesn't look right. @jdfreder, can you investigate?\n\n", "before_files": [{"content": "\"\"\"Replacements for sys.displayhook that publish over ZMQ.\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport sys\n\nfrom IPython.core.displayhook import DisplayHook\nfrom IPython.kernel.inprocess.socket import SocketABC\nfrom IPython.utils.jsonutil import encode_images\nfrom IPython.utils.py3compat import builtin_mod\nfrom IPython.utils.traitlets import Instance, Dict\nfrom .session import extract_header, Session\n\nclass ZMQDisplayHook(object):\n \"\"\"A simple displayhook that publishes the object's repr over a ZeroMQ\n socket.\"\"\"\n topic=b'execute_result'\n\n def __init__(self, session, pub_socket):\n self.session = session\n self.pub_socket = pub_socket\n self.parent_header = {}\n\n def __call__(self, obj):\n if obj is None:\n return\n\n builtin_mod._ = obj\n sys.stdout.flush()\n sys.stderr.flush()\n msg = self.session.send(self.pub_socket, u'execute_result', {u'data':repr(obj)},\n parent=self.parent_header, ident=self.topic)\n\n def set_parent(self, parent):\n self.parent_header = extract_header(parent)\n\n\nclass ZMQShellDisplayHook(DisplayHook):\n \"\"\"A displayhook subclass that publishes data using ZeroMQ. This is intended\n to work with an InteractiveShell instance. 
It sends a dict of different\n representations of the object.\"\"\"\n topic=None\n\n session = Instance(Session)\n pub_socket = Instance(SocketABC)\n parent_header = Dict({})\n\n def set_parent(self, parent):\n \"\"\"Set the parent for outbound messages.\"\"\"\n self.parent_header = extract_header(parent)\n\n def start_displayhook(self):\n self.msg = self.session.msg(u'execute_result', {\n 'data': {},\n 'metadata': {},\n }, parent=self.parent_header)\n\n def write_output_prompt(self):\n \"\"\"Write the output prompt.\"\"\"\n self.msg['content']['execution_count'] = self.prompt_count\n\n def write_format_data(self, format_dict, md_dict=None):\n self.msg['content']['data'] = encode_images(format_dict)\n self.msg['content']['metadata'] = md_dict\n\n def finish_displayhook(self):\n \"\"\"Finish up all displayhook activities.\"\"\"\n sys.stdout.flush()\n sys.stderr.flush()\n self.session.send(self.pub_socket, self.msg, ident=self.topic)\n self.msg = None\n\n", "path": "IPython/kernel/zmq/displayhook.py"}], "after_files": [{"content": "\"\"\"Replacements for sys.displayhook that publish over ZMQ.\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport sys\n\nfrom IPython.core.displayhook import DisplayHook\nfrom IPython.kernel.inprocess.socket import SocketABC\nfrom IPython.utils.jsonutil import encode_images\nfrom IPython.utils.py3compat import builtin_mod\nfrom IPython.utils.traitlets import Instance, Dict\nfrom .session import extract_header, Session\n\nclass ZMQDisplayHook(object):\n \"\"\"A simple displayhook that publishes the object's repr over a ZeroMQ\n socket.\"\"\"\n topic=b'execute_result'\n\n def __init__(self, session, pub_socket):\n self.session = session\n self.pub_socket = pub_socket\n self.parent_header = {}\n\n def __call__(self, obj):\n if obj is None:\n return\n\n builtin_mod._ = obj\n sys.stdout.flush()\n sys.stderr.flush()\n msg = self.session.send(self.pub_socket, u'execute_result', {u'data':repr(obj)},\n parent=self.parent_header, ident=self.topic)\n\n def set_parent(self, parent):\n self.parent_header = extract_header(parent)\n\n\nclass ZMQShellDisplayHook(DisplayHook):\n \"\"\"A displayhook subclass that publishes data using ZeroMQ. This is intended\n to work with an InteractiveShell instance. It sends a dict of different\n representations of the object.\"\"\"\n topic=None\n\n session = Instance(Session)\n pub_socket = Instance(SocketABC)\n parent_header = Dict({})\n\n def set_parent(self, parent):\n \"\"\"Set the parent for outbound messages.\"\"\"\n self.parent_header = extract_header(parent)\n\n def start_displayhook(self):\n self.msg = self.session.msg(u'execute_result', {\n 'data': {},\n 'metadata': {},\n }, parent=self.parent_header)\n\n def write_output_prompt(self):\n \"\"\"Write the output prompt.\"\"\"\n self.msg['content']['execution_count'] = self.prompt_count\n\n def write_format_data(self, format_dict, md_dict=None):\n self.msg['content']['data'] = encode_images(format_dict)\n self.msg['content']['metadata'] = md_dict\n\n def finish_displayhook(self):\n \"\"\"Finish up all displayhook activities.\"\"\"\n sys.stdout.flush()\n sys.stderr.flush()\n if self.msg['content']['data']:\n self.session.send(self.pub_socket, self.msg, ident=self.topic)\n self.msg = None\n\n", "path": "IPython/kernel/zmq/displayhook.py"}]}
| 1,033 | 123 |
gh_patches_debug_32522
|
rasdani/github-patches
|
git_diff
|
open-mmlab__mmcv-2149
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unexpected behaviour of `by_epoch` in DvcliveLoggerHook/AnyLogger
Thanks for reporting the unexpected results and we appreciate it a lot.
See also the DVClive issue: https://github.com/iterative/dvclive/issues/267
**Describe the Issue**
I was using the `dvclive` hook for mmcv and expected the `by_epoch` variable to mean something other than what it does right now: I expected to get one result per epoch. However, it seems to have no (or not the expected) effect.
**Reproduction**
1. What command, code, or script did you run? I added the following to my config.
```python
log_config = dict(
hooks=[
dict(
type="DvcliveLoggerHook",
path="{{ fileDirname }}/../live",
interval=1,
by_epoch=True,
),
],
)
```
2. Did you make any modifications on the code? Did you understand what you have modified?
I did not modify the code.
**Environment**
1. Please run `python -c "from mmcv.utils import collect_env; print(collect_env())"` to collect necessary environment information and paste it here.
<details><summary>Output</summary>
```none
{
'sys.platform': 'linux',
'Python': '3.8.8 (default, Feb 24 2021, 21: 46: 12) [GCC 7.3.0]',
'CUDA available': True,
'GPU 0, 1, 2, 3': 'NVIDIA TITAN RTX',
'CUDA_HOME': '/usr/local/cuda',
'NVCC': 'Cuda compilation tools,
release 11.2, V11.2.142',
'GCC': 'gcc (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0',
'PyTorch': '1.9.0a0+df837d0',
'PyTorch compiling details': 'PyTorch built with:\n - GCC 9.3\n - C++ Version: 201402\n - Intel(R) Math Kernel Library Version 2019.0.4 Product Build 20190411 for Intel(R) 64 architecture applications\n - Intel(R) MKL-DNN v1.7.0 (Git Hash N/A)\n - OpenMP 201511 (a.k.a. OpenMP 4.5)\n - NNPACK is enabled\n - CPU capability usage: AVX2\n - CUDA Runtime 11.2\n - NVCC architecture flags: -gencode;arch=compute_52,
code=sm_52;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_61,
code=sm_61;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,
code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,
code=sm_86;-gencode;arch=compute_86,code=compute_86\n - CuDNN 8.1.1\n - Magma 2.5.2\n - Build settings: BLAS_INFO=mkl,
BUILD_TYPE=Release,
CUDA_VERSION=11.2,
CUDNN_VERSION=8.1.1,
CXX_COMPILER=/usr/bin/c++,
CXX_FLAGS= -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type -Wno-stringop-overflow,
FORCE_FALLBACK_CUDA_MPI=1,
LAPACK_INFO=mkl,
PERF_WITH_AVX=1,
PERF_WITH_AVX2=1,
PERF_WITH_AVX512=1,
TORCH_VERSION=1.9.0,
USE_CUDA=ON,
USE_CUDNN=ON,
USE_EXCEPTION_PTR=1,
USE_GFLAGS=OFF,
USE_GLOG=OFF,
USE_MKL=ON,
USE_MKLDNN=ON,
USE_MPI=ON,
USE_NCCL=ON,
USE_NNPACK=ON,
USE_OPENMP=ON,
\n',
'TorchVision': '0.9.0a0',
'OpenCV': '3.4.11',
'MMCV': '1.5.0',
'MMCV Compiler': 'GCC 9.3',
'MMCV CUDA Compiler': '11.2'
}
```
</details>
2. You may add additional information that may be helpful for locating the problem, such as
- How you installed PyTorch [e.g., pip, conda, source]
- Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)
**Error traceback**
If applicable, paste the error traceback here.
n/a
**Bug fix**
If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated!
n/a
--- END ISSUE ---
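The expectation in the report can be made concrete with a small sketch; this is plain Python for illustration only, not mmcv or dvclive API, and the numbers are made up.
```python
# With by_epoch=True the reporter expected one logged step per epoch,
# not one step per training iteration.
def expected_steps(num_epochs, iters_per_epoch, by_epoch):
    if by_epoch:
        return list(range(1, num_epochs + 1))                 # one step per epoch
    return list(range(1, num_epochs * iters_per_epoch + 1))   # one step per iteration

print(expected_steps(3, 4, by_epoch=True))   # [1, 2, 3]
print(expected_steps(3, 4, by_epoch=False))  # [1, 2, ..., 12]
```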
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmcv/runner/hooks/logger/dvclive.py`
Content:
```
1 # Copyright (c) OpenMMLab. All rights reserved.
2 from pathlib import Path
3 from typing import Optional
4
5 from ...dist_utils import master_only
6 from ..hook import HOOKS
7 from .base import LoggerHook
8
9
10 @HOOKS.register_module()
11 class DvcliveLoggerHook(LoggerHook):
12 """Class to log metrics with dvclive.
13
14 It requires `dvclive`_ to be installed.
15
16 Args:
17 model_file (str): Default None. If not None, after each epoch the
18 model will be saved to {model_file}.
19 interval (int): Logging interval (every k iterations). Default 10.
20 ignore_last (bool): Ignore the log of last iterations in each epoch
21 if less than `interval`. Default: True.
22 reset_flag (bool): Whether to clear the output buffer after logging.
23 Default: False.
24 by_epoch (bool): Whether EpochBasedRunner is used. Default: True.
25 dvclive (Live, optional): An instance of the `Live`_ logger to use
26 instead of initializing a new one internally. Defaults to None.
27 kwargs: Arguments for instantiating `Live`_ (ignored if `dvclive` is
28 provided).
29
30 .. _dvclive:
31 https://dvc.org/doc/dvclive
32
33 .. _Live:
34 https://dvc.org/doc/dvclive/api-reference/live#parameters
35 """
36
37 def __init__(self,
38 model_file: Optional[str] = None,
39 interval: int = 10,
40 ignore_last: bool = True,
41 reset_flag: bool = False,
42 by_epoch: bool = True,
43 dvclive=None,
44 **kwargs):
45 super().__init__(interval, ignore_last, reset_flag, by_epoch)
46 self.model_file = model_file
47 self._import_dvclive(dvclive, **kwargs)
48
49 def _import_dvclive(self, dvclive=None, **kwargs) -> None:
50 try:
51 from dvclive import Live
52 except ImportError:
53 raise ImportError(
54 'Please run "pip install dvclive" to install dvclive')
55 self.dvclive = dvclive if dvclive is not None else Live(**kwargs)
56
57 @master_only
58 def log(self, runner) -> None:
59 tags = self.get_loggable_tags(runner)
60 if tags:
61 self.dvclive.set_step(self.get_iter(runner))
62 for k, v in tags.items():
63 self.dvclive.log(k, v)
64
65 @master_only
66 def after_train_epoch(self, runner) -> None:
67 super().after_train_epoch(runner)
68 if self.model_file is not None:
69 runner.save_checkpoint(
70 Path(self.model_file).parent,
71 filename_tmpl=Path(self.model_file).name,
72 create_symlink=False,
73 )
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mmcv/runner/hooks/logger/dvclive.py b/mmcv/runner/hooks/logger/dvclive.py
--- a/mmcv/runner/hooks/logger/dvclive.py
+++ b/mmcv/runner/hooks/logger/dvclive.py
@@ -21,7 +21,10 @@
if less than `interval`. Default: True.
reset_flag (bool): Whether to clear the output buffer after logging.
Default: False.
- by_epoch (bool): Whether EpochBasedRunner is used. Default: True.
+ by_epoch (bool): Whether EpochBasedRunner is used.
+ Determines whether `log` is called `after_train_iter` or
+ `after_train_epoch`.
+ Default: True.
dvclive (Live, optional): An instance of the `Live`_ logger to use
instead of initializing a new one internally. Defaults to None.
kwargs: Arguments for instantiating `Live`_ (ignored if `dvclive` is
@@ -58,16 +61,23 @@
def log(self, runner) -> None:
tags = self.get_loggable_tags(runner)
if tags:
- self.dvclive.set_step(self.get_iter(runner))
+ step = self.get_epoch(runner) if self.by_epoch else self.get_iter(
+ runner)
+ self.dvclive.set_step(step)
+
for k, v in tags.items():
self.dvclive.log(k, v)
- @master_only
def after_train_epoch(self, runner) -> None:
- super().after_train_epoch(runner)
if self.model_file is not None:
runner.save_checkpoint(
Path(self.model_file).parent,
filename_tmpl=Path(self.model_file).name,
create_symlink=False,
)
+ if self.by_epoch:
+ super().after_train_epoch(runner)
+
+ def after_train_iter(self, runner) -> None:
+ if not self.by_epoch:
+ super().after_train_iter(runner)
|
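A note on the patch above: it makes the DVCLive step advance per epoch when `by_epoch=True` (and per iteration otherwise), and it gates `after_train_epoch`/`after_train_iter` on the same flag so logging fires from the matching hook. A minimal sketch of that step selection, reusing the `get_epoch()`/`get_iter()` helpers the diff itself calls:

```python
def _dvclive_step(hook, runner):
    # Step selection as introduced by the patch: epoch-based runners report the
    # epoch index to DVCLive, iteration-based runners report the iteration.
    return hook.get_epoch(runner) if hook.by_epoch else hook.get_iter(runner)
```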
{"golden_diff": "diff --git a/mmcv/runner/hooks/logger/dvclive.py b/mmcv/runner/hooks/logger/dvclive.py\n--- a/mmcv/runner/hooks/logger/dvclive.py\n+++ b/mmcv/runner/hooks/logger/dvclive.py\n@@ -21,7 +21,10 @@\n if less than `interval`. Default: True.\n reset_flag (bool): Whether to clear the output buffer after logging.\n Default: False.\n- by_epoch (bool): Whether EpochBasedRunner is used. Default: True.\n+ by_epoch (bool): Whether EpochBasedRunner is used.\n+ Determines whether `log` is called `after_train_iter` or\n+ `after_train_epoch`.\n+ Default: True.\n dvclive (Live, optional): An instance of the `Live`_ logger to use\n instead of initializing a new one internally. Defaults to None.\n kwargs: Arguments for instantiating `Live`_ (ignored if `dvclive` is\n@@ -58,16 +61,23 @@\n def log(self, runner) -> None:\n tags = self.get_loggable_tags(runner)\n if tags:\n- self.dvclive.set_step(self.get_iter(runner))\n+ step = self.get_epoch(runner) if self.by_epoch else self.get_iter(\n+ runner)\n+ self.dvclive.set_step(step)\n+\n for k, v in tags.items():\n self.dvclive.log(k, v)\n \n- @master_only\n def after_train_epoch(self, runner) -> None:\n- super().after_train_epoch(runner)\n if self.model_file is not None:\n runner.save_checkpoint(\n Path(self.model_file).parent,\n filename_tmpl=Path(self.model_file).name,\n create_symlink=False,\n )\n+ if self.by_epoch:\n+ super().after_train_epoch(runner)\n+\n+ def after_train_iter(self, runner) -> None:\n+ if not self.by_epoch:\n+ super().after_train_iter(runner)\n", "issue": "Unexpected behaviour `by_epoch` DvcliveLoggerHook/AnyLogger\nThanks for reporting the unexpected results and we appreciate it a lot.\r\n\r\nSee also the DVClive issue: https://github.com/iterative/dvclive/issues/267\r\n\r\n**Describe the Issue**\r\n\r\nI was using the `dvclive` hook for mmcv and expected the `by_epoch` variable to mean something else then it is doing right now. I expected to get a result per epoch. However, it seems to have no (or not the expected) effect. \r\n\r\n**Reproduction**\r\n\r\n1. What command, code, or script did you run? I added the following to my config.\r\n\r\n```bash\r\nlog_config = dict(\r\n hooks=[\r\n dict(\r\n type=\"DvcliveLoggerHook\",\r\n path=\"{{ fileDirname }}/../live\",\r\n interval=1,\r\n by_epoch=True,\r\n ),\r\n ],\r\n)\r\n```\r\n\r\n2. Did you make any modifications on the code? Did you understand what you have modified?\r\n\r\nI did not modify the code.\r\n\r\n**Environment**\r\n\r\n1. Please run `python -c \"from mmcv.utils import collect_env; print(collect_env())\"` to collect necessary environment information and paste it here.\r\n\r\n<details><summary>Output</summary>\r\n\r\n```none\r\n{ \r\n 'sys.platform': 'linux', \r\n 'Python': '3.8.8 (default, Feb 24 2021, 21: 46: 12) [GCC 7.3.0]',\r\n 'CUDA available': True,\r\n 'GPU 0, 1, 2, 3': 'NVIDIA TITAN RTX',\r\n 'CUDA_HOME': '/usr/local/cuda',\r\n 'NVCC': 'Cuda compilation tools, \r\n release 11.2, V11.2.142',\r\n 'GCC': 'gcc (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0',\r\n 'PyTorch': '1.9.0a0+df837d0', \r\n 'PyTorch compiling details': 'PyTorch built with:\\n - GCC 9.3\\n - C++ Version: 201402\\n - Intel(R) Math Kernel Library Version 2019.0.4 Product Build 20190411 for Intel(R) 64 architecture applications\\n - Intel(R) MKL-DNN v1.7.0 (Git Hash N/A)\\n - OpenMP 201511 (a.k.a. 
OpenMP 4.5)\\n - NNPACK is enabled\\n - CPU capability usage: AVX2\\n - CUDA Runtime 11.2\\n - NVCC architecture flags: -gencode;arch=compute_52,\r\n code=sm_52;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_61,\r\n code=sm_61;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,\r\n code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,\r\n code=sm_86;-gencode;arch=compute_86,code=compute_86\\n - CuDNN 8.1.1\\n - Magma 2.5.2\\n - Build settings: BLAS_INFO=mkl,\r\n BUILD_TYPE=Release,\r\n CUDA_VERSION=11.2,\r\n CUDNN_VERSION=8.1.1,\r\n CXX_COMPILER=/usr/bin/c++,\r\n CXX_FLAGS= -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type -Wno-stringop-overflow,\r\n FORCE_FALLBACK_CUDA_MPI=1,\r\n LAPACK_INFO=mkl,\r\n PERF_WITH_AVX=1,\r\n PERF_WITH_AVX2=1,\r\n PERF_WITH_AVX512=1,\r\n TORCH_VERSION=1.9.0,\r\n USE_CUDA=ON,\r\n USE_CUDNN=ON,\r\n USE_EXCEPTION_PTR=1,\r\n USE_GFLAGS=OFF,\r\n USE_GLOG=OFF,\r\n USE_MKL=ON,\r\n USE_MKLDNN=ON,\r\n USE_MPI=ON,\r\n USE_NCCL=ON,\r\n USE_NNPACK=ON,\r\n USE_OPENMP=ON,\r\n \\n',\r\n 'TorchVision': '0.9.0a0',\r\n 'OpenCV': '3.4.11',\r\n 'MMCV': '1.5.0',\r\n 'MMCV Compiler': 'GCC 9.3',\r\n 'MMCV CUDA Compiler': '11.2'\r\n}\r\n```\r\n\r\n</details>\r\n\r\n\r\n2. You may add addition that may be helpful for locating the problem, such as\r\n - How you installed PyTorch \\[e.g., pip, conda, source\\]\r\n - Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)\r\n\r\n\r\n**Error traceback**\r\nIf applicable, paste the error traceback here.\r\n\r\nn/a\r\n\r\n**Bug fix**\r\nIf you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated!\r\n\r\nn/a\r\n\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom pathlib import Path\nfrom typing import Optional\n\nfrom ...dist_utils import master_only\nfrom ..hook import HOOKS\nfrom .base import LoggerHook\n\n\[email protected]_module()\nclass DvcliveLoggerHook(LoggerHook):\n \"\"\"Class to log metrics with dvclive.\n\n It requires `dvclive`_ to be installed.\n\n Args:\n model_file (str): Default None. If not None, after each epoch the\n model will be saved to {model_file}.\n interval (int): Logging interval (every k iterations). Default 10.\n ignore_last (bool): Ignore the log of last iterations in each epoch\n if less than `interval`. Default: True.\n reset_flag (bool): Whether to clear the output buffer after logging.\n Default: False.\n by_epoch (bool): Whether EpochBasedRunner is used. Default: True.\n dvclive (Live, optional): An instance of the `Live`_ logger to use\n instead of initializing a new one internally. 
Defaults to None.\n kwargs: Arguments for instantiating `Live`_ (ignored if `dvclive` is\n provided).\n\n .. _dvclive:\n https://dvc.org/doc/dvclive\n\n .. _Live:\n https://dvc.org/doc/dvclive/api-reference/live#parameters\n \"\"\"\n\n def __init__(self,\n model_file: Optional[str] = None,\n interval: int = 10,\n ignore_last: bool = True,\n reset_flag: bool = False,\n by_epoch: bool = True,\n dvclive=None,\n **kwargs):\n super().__init__(interval, ignore_last, reset_flag, by_epoch)\n self.model_file = model_file\n self._import_dvclive(dvclive, **kwargs)\n\n def _import_dvclive(self, dvclive=None, **kwargs) -> None:\n try:\n from dvclive import Live\n except ImportError:\n raise ImportError(\n 'Please run \"pip install dvclive\" to install dvclive')\n self.dvclive = dvclive if dvclive is not None else Live(**kwargs)\n\n @master_only\n def log(self, runner) -> None:\n tags = self.get_loggable_tags(runner)\n if tags:\n self.dvclive.set_step(self.get_iter(runner))\n for k, v in tags.items():\n self.dvclive.log(k, v)\n\n @master_only\n def after_train_epoch(self, runner) -> None:\n super().after_train_epoch(runner)\n if self.model_file is not None:\n runner.save_checkpoint(\n Path(self.model_file).parent,\n filename_tmpl=Path(self.model_file).name,\n create_symlink=False,\n )\n", "path": "mmcv/runner/hooks/logger/dvclive.py"}], "after_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom pathlib import Path\nfrom typing import Optional\n\nfrom ...dist_utils import master_only\nfrom ..hook import HOOKS\nfrom .base import LoggerHook\n\n\[email protected]_module()\nclass DvcliveLoggerHook(LoggerHook):\n \"\"\"Class to log metrics with dvclive.\n\n It requires `dvclive`_ to be installed.\n\n Args:\n model_file (str): Default None. If not None, after each epoch the\n model will be saved to {model_file}.\n interval (int): Logging interval (every k iterations). Default 10.\n ignore_last (bool): Ignore the log of last iterations in each epoch\n if less than `interval`. Default: True.\n reset_flag (bool): Whether to clear the output buffer after logging.\n Default: False.\n by_epoch (bool): Whether EpochBasedRunner is used.\n Determines whether `log` is called `after_train_iter` or\n `after_train_epoch`.\n Default: True.\n dvclive (Live, optional): An instance of the `Live`_ logger to use\n instead of initializing a new one internally. Defaults to None.\n kwargs: Arguments for instantiating `Live`_ (ignored if `dvclive` is\n provided).\n\n .. _dvclive:\n https://dvc.org/doc/dvclive\n\n .. 
_Live:\n https://dvc.org/doc/dvclive/api-reference/live#parameters\n \"\"\"\n\n def __init__(self,\n model_file: Optional[str] = None,\n interval: int = 10,\n ignore_last: bool = True,\n reset_flag: bool = False,\n by_epoch: bool = True,\n dvclive=None,\n **kwargs):\n super().__init__(interval, ignore_last, reset_flag, by_epoch)\n self.model_file = model_file\n self._import_dvclive(dvclive, **kwargs)\n\n def _import_dvclive(self, dvclive=None, **kwargs) -> None:\n try:\n from dvclive import Live\n except ImportError:\n raise ImportError(\n 'Please run \"pip install dvclive\" to install dvclive')\n self.dvclive = dvclive if dvclive is not None else Live(**kwargs)\n\n @master_only\n def log(self, runner) -> None:\n tags = self.get_loggable_tags(runner)\n if tags:\n step = self.get_epoch(runner) if self.by_epoch else self.get_iter(\n runner)\n self.dvclive.set_step(step)\n\n for k, v in tags.items():\n self.dvclive.log(k, v)\n\n def after_train_epoch(self, runner) -> None:\n if self.model_file is not None:\n runner.save_checkpoint(\n Path(self.model_file).parent,\n filename_tmpl=Path(self.model_file).name,\n create_symlink=False,\n )\n if self.by_epoch:\n super().after_train_epoch(runner)\n\n def after_train_iter(self, runner) -> None:\n if not self.by_epoch:\n super().after_train_iter(runner)\n", "path": "mmcv/runner/hooks/logger/dvclive.py"}]}
| 2,447 | 449 |
gh_patches_debug_10210
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-7188
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RangeSlider css_classes not being updated anymore
It seems in 0.12.7 the `css_classes` attribute of the RangeSlider is not being updated anymore by the callbacks. It was working in 0.12.6.
In the example below, both the text input and the slider starts with the `initial` class as expected. When the slider changes, both should gain the `sent-later` class, but only the text input is gaining it...
```
from bokeh.io import curdoc
from bokeh.models import TextInput, RangeSlider
from bokeh.layouts import widgetbox
from bokeh.layouts import column
from bokeh.models import Div
div1 = Div(text="""
<style>
.sent-later {
border-radius: 0.5em;
padding: 1em;
border: 3px solid #2397D8;
}
</style>
""")
def on_slider_change(attr, old, new):
slider.css_classes.append('sent-later')
text.css_classes.append('sent-later')
print(slider.css_classes)
print(text.css_classes)
text = TextInput(
value=str(2),
title='Population Beta:')
slider = RangeSlider(
start=0,
end=10,
value=(0, 2),
step=1,
title='Population Beta')
slider.css_classes = []
slider.css_classes.append('initial')
text.css_classes = []
text.css_classes.append('initial')
slider.on_change('value', on_slider_change)
curdoc().add_root(column(div1, widgetbox(text, slider)))
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bokeh/models/layouts.py`
Content:
```
1 ''' Various kinds of layout components.
2
3 '''
4 from __future__ import absolute_import
5
6 import logging
7 logger = logging.getLogger(__name__)
8
9 from ..core.enums import SizingMode
10 from ..core.has_props import abstract
11 from ..core.properties import Bool, Enum, Int, Instance, List, Seq, String
12 from ..core.validation import warning
13 from ..core.validation.warnings import BOTH_CHILD_AND_ROOT, EMPTY_LAYOUT
14 from ..model import Model
15
16 @abstract
17 class LayoutDOM(Model):
18 ''' An abstract base class for layout components.
19
20 '''
21
22 width = Int(help="""
23 An optional width for the component (in pixels).
24 """)
25
26 height = Int(help="""
27 An optional height for the component (in pixels).
28 """)
29
30 disabled = Bool(False, help="""
31 Whether the widget will be disabled when rendered. If ``True``,
32 the widget will be greyed-out, and not respond to UI events.
33 """)
34
35 sizing_mode = Enum(SizingMode, default="fixed", help="""
36 How the item being displayed should size itself. Possible values are
37 ``"fixed"``, ``"scale_width"``, ``"scale_height"``, ``"scale_both"``, and
38 ``"stretch_both"``.
39
40 ``"stretch_both"`` elements are completely responsive (independently in width and height) and
41 will resize to occupy all available space, even if this changes the aspect ratio of the element.
42 This is sometimes called outside-in, and is a typical behavior for desktop applications.
43
44 ``"fixed"`` elements are not responsive. They will retain their original width and height
45 regardless of any subsequent browser window resize events.
46
47 ``"scale_width"`` elements will responsively resize to fit to the width available, *while
48 maintaining the original aspect ratio*. This is a typical behavior for modern websites. For a
49 ``Plot``, the aspect ratio ``plot_width/plot_height`` is maintained.
50
51 ``"scale_height"`` elements will responsively resize to fit to the height available, *while
52 maintaining the original aspect ratio*. For a ``Plot``, the aspect ratio
53 ``plot_width/plot_height`` is maintained. A plot with ``"scale_height"`` mode needs
54 to be wrapped in a ``Row`` or ``Column`` to be responsive.
55
56 ``"scale_both"`` elements will responsively resize to for both the width and height available,
57 *while maintaining the original aspect ratio*.
58
59 """)
60
61 css_classes = Seq(String, help="""
62 A list of css class names to add to this DOM element. Note: the class names are
63 simply added as-is, no other guarantees are provided.
64 """)
65
66
67 class Spacer(LayoutDOM):
68 ''' A container for space used to fill an empty spot in a row or column.
69
70 '''
71
72
73 class WidgetBox(LayoutDOM):
74 ''' A container for widgets that are part of a layout.
75
76 '''
77 def __init__(self, *args, **kwargs):
78 if len(args) > 0 and "children" in kwargs:
79 raise ValueError("'children' keyword cannot be used with positional arguments")
80 elif len(args) > 0:
81 kwargs["children"] = list(args)
82 super(WidgetBox, self).__init__(**kwargs)
83
84 @warning(EMPTY_LAYOUT)
85 def _check_empty_layout(self):
86 from itertools import chain
87 if not list(chain(self.children)):
88 return str(self)
89
90 @warning(BOTH_CHILD_AND_ROOT)
91 def _check_child_is_also_root(self):
92 problems = []
93 for c in self.children:
94 if c.document is not None and c in c.document.roots:
95 problems.append(str(c))
96 if problems:
97 return ", ".join(problems)
98 else:
99 return None
100
101 children = List(Instance('bokeh.models.widgets.Widget'), help="""
102 The list of widgets to put in the layout box.
103 """)
104
105
106 @abstract
107 class Box(LayoutDOM):
108 ''' Abstract base class for Row and Column. Do not use directly.
109
110 '''
111
112 def __init__(self, *args, **kwargs):
113
114 if len(args) > 0 and "children" in kwargs:
115 raise ValueError("'children' keyword cannot be used with positional arguments")
116 elif len(args) > 0:
117 kwargs["children"] = list(args)
118
119 unwrapped_children = kwargs.get("children", [])
120 kwargs["children"] = self._wrap_children(unwrapped_children)
121 super(Box, self).__init__(**kwargs)
122
123 def _wrap_children(self, children):
124 ''' Wrap any Widgets of a list of child layouts in a WidgetBox.
125 This allows for the convenience of just spelling Row(button1, button2).
126 '''
127 from .widgets.widget import Widget
128 wrapped_children = []
129 for child in children:
130 if isinstance(child, Widget):
131 child = WidgetBox(
132 children=[child],
133 sizing_mode=child.sizing_mode,
134 width=child.width,
135 height=child.height,
136 disabled=child.disabled
137 )
138 wrapped_children.append(child)
139 return wrapped_children
140
141 @warning(EMPTY_LAYOUT)
142 def _check_empty_layout(self):
143 from itertools import chain
144 if not list(chain(self.children)):
145 return str(self)
146
147 @warning(BOTH_CHILD_AND_ROOT)
148 def _check_child_is_also_root(self):
149 problems = []
150 for c in self.children:
151 if c.document is not None and c in c.document.roots:
152 problems.append(str(c))
153 if problems:
154 return ", ".join(problems)
155 else:
156 return None
157
158 #TODO Debating the following instead to prevent people adding just a plain
159 # widget into a box, which sometimes works and sometimes looks disastrous
160 #children = List(
161 # Either(
162 # Instance('bokeh.models.layouts.Row'),
163 # Instance('bokeh.models.layouts.Column'),
164 # Instance('bokeh.models.plots.Plot'),
165 # Instance('bokeh.models.layouts.WidgetBox')
166 # ), help="""
167 # The list of children, which can be other components including plots, rows, columns, and widgets.
168 #""")
169 children = List(Instance(LayoutDOM), help="""
170 The list of children, which can be other components including plots, rows, columns, and widgets.
171 """)
172
173
174 class Row(Box):
175 ''' Lay out child components in a single horizontal row.
176
177 Children can be specified as positional arguments, as a single argument
178 that is a sequence, or using the ``children`` keyword argument.
179 '''
180
181
182 class Column(Box):
183 ''' Lay out child components in a single vertical row.
184
185 Children can be specified as positional arguments, as a single argument
186 that is a sequence, or using the ``children`` keyword argument.
187 '''
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bokeh/models/layouts.py b/bokeh/models/layouts.py
--- a/bokeh/models/layouts.py
+++ b/bokeh/models/layouts.py
@@ -58,10 +58,14 @@
""")
- css_classes = Seq(String, help="""
+ # List in order for in-place changes to trigger changes, ref: https://github.com/bokeh/bokeh/issues/6841
+ css_classes = List(String, help="""
A list of css class names to add to this DOM element. Note: the class names are
simply added as-is, no other guarantees are provided.
- """)
+
+ It is also permissible to assign from tuples, however these are adapted -- the
+ property will always contain a list.
+ """).accepts(Seq(String), lambda x: list(x))
class Spacer(LayoutDOM):
|
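A note on the change above: the diff swaps the `Seq(String)` property for `List(String)` so that, per its own comment, in-place changes trigger change events, while tuples are still accepted and adapted to lists. Until that fix is available, the callback from the issue can sidestep the problem by reassigning the property instead of mutating it in place; a hedged sketch using the issue's own names:

```python
def on_slider_change(attr, old, new):
    # Rebinding css_classes (rather than calling .append() on the existing
    # list) gives Bokeh a new value to diff against, so the change is synced.
    slider.css_classes = slider.css_classes + ['sent-later']
    text.css_classes = text.css_classes + ['sent-later']
```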
{"golden_diff": "diff --git a/bokeh/models/layouts.py b/bokeh/models/layouts.py\n--- a/bokeh/models/layouts.py\n+++ b/bokeh/models/layouts.py\n@@ -58,10 +58,14 @@\n \n \"\"\")\n \n- css_classes = Seq(String, help=\"\"\"\n+ # List in order for in-place changes to trigger changes, ref: https://github.com/bokeh/bokeh/issues/6841\n+ css_classes = List(String, help=\"\"\"\n A list of css class names to add to this DOM element. Note: the class names are\n simply added as-is, no other guarantees are provided.\n- \"\"\")\n+\n+ It is also permissible to assign from tuples, however these are adapted -- the\n+ property will always contain a list.\n+ \"\"\").accepts(Seq(String), lambda x: list(x))\n \n \n class Spacer(LayoutDOM):\n", "issue": "RangeSlider css_classes not being updated anymore\nIt seems in 0.12.7 the `css_classes` attribute of the RangeSlider is not being updated anymore by the callbacks. It was working in 0.12.6.\r\nIn the example below, both the text input and the slider starts with the `initial` class as expected. When the slider changes, both should gain the `sent-later` class, but only the text input is gaining it...\r\n\r\n```\r\nfrom bokeh.io import curdoc\r\nfrom bokeh.models import TextInput, RangeSlider\r\nfrom bokeh.layouts import widgetbox\r\nfrom bokeh.layouts import column\r\nfrom bokeh.models import Div\r\n\r\ndiv1 = Div(text=\"\"\"\r\n <style>\r\n .sent-later {\r\n border-radius: 0.5em;\r\n padding: 1em;\r\n border: 3px solid #2397D8;\r\n }\r\n </style>\r\n\"\"\")\r\n\r\n\r\ndef on_slider_change(attr, old, new):\r\n slider.css_classes.append('sent-later')\r\n text.css_classes.append('sent-later')\r\n print(slider.css_classes)\r\n print(text.css_classes)\r\n\r\n\r\ntext = TextInput(\r\n value=str(2),\r\n title='Population Beta:')\r\n\r\nslider = RangeSlider(\r\n start=0,\r\n end=10,\r\n value=(0, 2),\r\n step=1,\r\n title='Population Beta')\r\n\r\nslider.css_classes = []\r\nslider.css_classes.append('initial')\r\ntext.css_classes = []\r\ntext.css_classes.append('initial')\r\n\r\nslider.on_change('value', on_slider_change)\r\n\r\ncurdoc().add_root(column(div1, widgetbox(text, slider)))\r\n\r\n```\r\n\n", "before_files": [{"content": "''' Various kinds of layout components.\n\n'''\nfrom __future__ import absolute_import\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom ..core.enums import SizingMode\nfrom ..core.has_props import abstract\nfrom ..core.properties import Bool, Enum, Int, Instance, List, Seq, String\nfrom ..core.validation import warning\nfrom ..core.validation.warnings import BOTH_CHILD_AND_ROOT, EMPTY_LAYOUT\nfrom ..model import Model\n\n@abstract\nclass LayoutDOM(Model):\n ''' An abstract base class for layout components.\n\n '''\n\n width = Int(help=\"\"\"\n An optional width for the component (in pixels).\n \"\"\")\n\n height = Int(help=\"\"\"\n An optional height for the component (in pixels).\n \"\"\")\n\n disabled = Bool(False, help=\"\"\"\n Whether the widget will be disabled when rendered. If ``True``,\n the widget will be greyed-out, and not respond to UI events.\n \"\"\")\n\n sizing_mode = Enum(SizingMode, default=\"fixed\", help=\"\"\"\n How the item being displayed should size itself. 
Possible values are\n ``\"fixed\"``, ``\"scale_width\"``, ``\"scale_height\"``, ``\"scale_both\"``, and\n ``\"stretch_both\"``.\n\n ``\"stretch_both\"`` elements are completely responsive (independently in width and height) and\n will resize to occupy all available space, even if this changes the aspect ratio of the element.\n This is sometimes called outside-in, and is a typical behavior for desktop applications.\n\n ``\"fixed\"`` elements are not responsive. They will retain their original width and height\n regardless of any subsequent browser window resize events.\n\n ``\"scale_width\"`` elements will responsively resize to fit to the width available, *while\n maintaining the original aspect ratio*. This is a typical behavior for modern websites. For a\n ``Plot``, the aspect ratio ``plot_width/plot_height`` is maintained.\n\n ``\"scale_height\"`` elements will responsively resize to fit to the height available, *while\n maintaining the original aspect ratio*. For a ``Plot``, the aspect ratio\n ``plot_width/plot_height`` is maintained. A plot with ``\"scale_height\"`` mode needs\n to be wrapped in a ``Row`` or ``Column`` to be responsive.\n\n ``\"scale_both\"`` elements will responsively resize to for both the width and height available,\n *while maintaining the original aspect ratio*.\n\n \"\"\")\n\n css_classes = Seq(String, help=\"\"\"\n A list of css class names to add to this DOM element. Note: the class names are\n simply added as-is, no other guarantees are provided.\n \"\"\")\n\n\nclass Spacer(LayoutDOM):\n ''' A container for space used to fill an empty spot in a row or column.\n\n '''\n\n\nclass WidgetBox(LayoutDOM):\n ''' A container for widgets that are part of a layout.\n\n '''\n def __init__(self, *args, **kwargs):\n if len(args) > 0 and \"children\" in kwargs:\n raise ValueError(\"'children' keyword cannot be used with positional arguments\")\n elif len(args) > 0:\n kwargs[\"children\"] = list(args)\n super(WidgetBox, self).__init__(**kwargs)\n\n @warning(EMPTY_LAYOUT)\n def _check_empty_layout(self):\n from itertools import chain\n if not list(chain(self.children)):\n return str(self)\n\n @warning(BOTH_CHILD_AND_ROOT)\n def _check_child_is_also_root(self):\n problems = []\n for c in self.children:\n if c.document is not None and c in c.document.roots:\n problems.append(str(c))\n if problems:\n return \", \".join(problems)\n else:\n return None\n\n children = List(Instance('bokeh.models.widgets.Widget'), help=\"\"\"\n The list of widgets to put in the layout box.\n \"\"\")\n\n\n@abstract\nclass Box(LayoutDOM):\n ''' Abstract base class for Row and Column. 
Do not use directly.\n\n '''\n\n def __init__(self, *args, **kwargs):\n\n if len(args) > 0 and \"children\" in kwargs:\n raise ValueError(\"'children' keyword cannot be used with positional arguments\")\n elif len(args) > 0:\n kwargs[\"children\"] = list(args)\n\n unwrapped_children = kwargs.get(\"children\", [])\n kwargs[\"children\"] = self._wrap_children(unwrapped_children)\n super(Box, self).__init__(**kwargs)\n\n def _wrap_children(self, children):\n ''' Wrap any Widgets of a list of child layouts in a WidgetBox.\n This allows for the convenience of just spelling Row(button1, button2).\n '''\n from .widgets.widget import Widget\n wrapped_children = []\n for child in children:\n if isinstance(child, Widget):\n child = WidgetBox(\n children=[child],\n sizing_mode=child.sizing_mode,\n width=child.width,\n height=child.height,\n disabled=child.disabled\n )\n wrapped_children.append(child)\n return wrapped_children\n\n @warning(EMPTY_LAYOUT)\n def _check_empty_layout(self):\n from itertools import chain\n if not list(chain(self.children)):\n return str(self)\n\n @warning(BOTH_CHILD_AND_ROOT)\n def _check_child_is_also_root(self):\n problems = []\n for c in self.children:\n if c.document is not None and c in c.document.roots:\n problems.append(str(c))\n if problems:\n return \", \".join(problems)\n else:\n return None\n\n #TODO Debating the following instead to prevent people adding just a plain\n # widget into a box, which sometimes works and sometimes looks disastrous\n #children = List(\n # Either(\n # Instance('bokeh.models.layouts.Row'),\n # Instance('bokeh.models.layouts.Column'),\n # Instance('bokeh.models.plots.Plot'),\n # Instance('bokeh.models.layouts.WidgetBox')\n # ), help=\"\"\"\n # The list of children, which can be other components including plots, rows, columns, and widgets.\n #\"\"\")\n children = List(Instance(LayoutDOM), help=\"\"\"\n The list of children, which can be other components including plots, rows, columns, and widgets.\n \"\"\")\n\n\nclass Row(Box):\n ''' Lay out child components in a single horizontal row.\n\n Children can be specified as positional arguments, as a single argument\n that is a sequence, or using the ``children`` keyword argument.\n '''\n\n\nclass Column(Box):\n ''' Lay out child components in a single vertical row.\n\n Children can be specified as positional arguments, as a single argument\n that is a sequence, or using the ``children`` keyword argument.\n '''\n", "path": "bokeh/models/layouts.py"}], "after_files": [{"content": "''' Various kinds of layout components.\n\n'''\nfrom __future__ import absolute_import\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom ..core.enums import SizingMode\nfrom ..core.has_props import abstract\nfrom ..core.properties import Bool, Enum, Int, Instance, List, Seq, String\nfrom ..core.validation import warning\nfrom ..core.validation.warnings import BOTH_CHILD_AND_ROOT, EMPTY_LAYOUT\nfrom ..model import Model\n\n@abstract\nclass LayoutDOM(Model):\n ''' An abstract base class for layout components.\n\n '''\n\n width = Int(help=\"\"\"\n An optional width for the component (in pixels).\n \"\"\")\n\n height = Int(help=\"\"\"\n An optional height for the component (in pixels).\n \"\"\")\n\n disabled = Bool(False, help=\"\"\"\n Whether the widget will be disabled when rendered. If ``True``,\n the widget will be greyed-out, and not respond to UI events.\n \"\"\")\n\n sizing_mode = Enum(SizingMode, default=\"fixed\", help=\"\"\"\n How the item being displayed should size itself. 
Possible values are\n ``\"fixed\"``, ``\"scale_width\"``, ``\"scale_height\"``, ``\"scale_both\"``, and\n ``\"stretch_both\"``.\n\n ``\"stretch_both\"`` elements are completely responsive (independently in width and height) and\n will resize to occupy all available space, even if this changes the aspect ratio of the element.\n This is sometimes called outside-in, and is a typical behavior for desktop applications.\n\n ``\"fixed\"`` elements are not responsive. They will retain their original width and height\n regardless of any subsequent browser window resize events.\n\n ``\"scale_width\"`` elements will responsively resize to fit to the width available, *while\n maintaining the original aspect ratio*. This is a typical behavior for modern websites. For a\n ``Plot``, the aspect ratio ``plot_width/plot_height`` is maintained.\n\n ``\"scale_height\"`` elements will responsively resize to fit to the height available, *while\n maintaining the original aspect ratio*. For a ``Plot``, the aspect ratio\n ``plot_width/plot_height`` is maintained. A plot with ``\"scale_height\"`` mode needs\n to be wrapped in a ``Row`` or ``Column`` to be responsive.\n\n ``\"scale_both\"`` elements will responsively resize to for both the width and height available,\n *while maintaining the original aspect ratio*.\n\n \"\"\")\n\n # List in order for in-place changes to trigger changes, ref: https://github.com/bokeh/bokeh/issues/6841\n css_classes = List(String, help=\"\"\"\n A list of css class names to add to this DOM element. Note: the class names are\n simply added as-is, no other guarantees are provided.\n\n It is also permissible to assign from tuples, however these are adapted -- the\n property will always contain a list.\n \"\"\").accepts(Seq(String), lambda x: list(x))\n\n\nclass Spacer(LayoutDOM):\n ''' A container for space used to fill an empty spot in a row or column.\n\n '''\n\n\nclass WidgetBox(LayoutDOM):\n ''' A container for widgets that are part of a layout.\n\n '''\n def __init__(self, *args, **kwargs):\n if len(args) > 0 and \"children\" in kwargs:\n raise ValueError(\"'children' keyword cannot be used with positional arguments\")\n elif len(args) > 0:\n kwargs[\"children\"] = list(args)\n super(WidgetBox, self).__init__(**kwargs)\n\n @warning(EMPTY_LAYOUT)\n def _check_empty_layout(self):\n from itertools import chain\n if not list(chain(self.children)):\n return str(self)\n\n @warning(BOTH_CHILD_AND_ROOT)\n def _check_child_is_also_root(self):\n problems = []\n for c in self.children:\n if c.document is not None and c in c.document.roots:\n problems.append(str(c))\n if problems:\n return \", \".join(problems)\n else:\n return None\n\n children = List(Instance('bokeh.models.widgets.Widget'), help=\"\"\"\n The list of widgets to put in the layout box.\n \"\"\")\n\n\n@abstract\nclass Box(LayoutDOM):\n ''' Abstract base class for Row and Column. 
Do not use directly.\n\n '''\n\n def __init__(self, *args, **kwargs):\n\n if len(args) > 0 and \"children\" in kwargs:\n raise ValueError(\"'children' keyword cannot be used with positional arguments\")\n elif len(args) > 0:\n kwargs[\"children\"] = list(args)\n\n unwrapped_children = kwargs.get(\"children\", [])\n kwargs[\"children\"] = self._wrap_children(unwrapped_children)\n super(Box, self).__init__(**kwargs)\n\n def _wrap_children(self, children):\n ''' Wrap any Widgets of a list of child layouts in a WidgetBox.\n This allows for the convenience of just spelling Row(button1, button2).\n '''\n from .widgets.widget import Widget\n wrapped_children = []\n for child in children:\n if isinstance(child, Widget):\n child = WidgetBox(\n children=[child],\n sizing_mode=child.sizing_mode,\n width=child.width,\n height=child.height,\n disabled=child.disabled\n )\n wrapped_children.append(child)\n return wrapped_children\n\n @warning(EMPTY_LAYOUT)\n def _check_empty_layout(self):\n from itertools import chain\n if not list(chain(self.children)):\n return str(self)\n\n @warning(BOTH_CHILD_AND_ROOT)\n def _check_child_is_also_root(self):\n problems = []\n for c in self.children:\n if c.document is not None and c in c.document.roots:\n problems.append(str(c))\n if problems:\n return \", \".join(problems)\n else:\n return None\n\n #TODO Debating the following instead to prevent people adding just a plain\n # widget into a box, which sometimes works and sometimes looks disastrous\n #children = List(\n # Either(\n # Instance('bokeh.models.layouts.Row'),\n # Instance('bokeh.models.layouts.Column'),\n # Instance('bokeh.models.plots.Plot'),\n # Instance('bokeh.models.layouts.WidgetBox')\n # ), help=\"\"\"\n # The list of children, which can be other components including plots, rows, columns, and widgets.\n #\"\"\")\n children = List(Instance(LayoutDOM), help=\"\"\"\n The list of children, which can be other components including plots, rows, columns, and widgets.\n \"\"\")\n\n\nclass Row(Box):\n ''' Lay out child components in a single horizontal row.\n\n Children can be specified as positional arguments, as a single argument\n that is a sequence, or using the ``children`` keyword argument.\n '''\n\n\nclass Column(Box):\n ''' Lay out child components in a single vertical row.\n\n Children can be specified as positional arguments, as a single argument\n that is a sequence, or using the ``children`` keyword argument.\n '''\n", "path": "bokeh/models/layouts.py"}]}
| 2,520 | 196 |
gh_patches_debug_49247
|
rasdani/github-patches
|
git_diff
|
rlworkgroup__garage-1759
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incompatible dependencies in pipenv install
Hello,
When I try to install garage in a fresh pipenv as per the documentation, I get the following error:
```
[pipenv.exceptions.ResolutionFailure]: Warning: Your dependencies could not be resolved. You likely have a mismatch in your sub-dependencies.
First try clearing your dependency cache with $ pipenv lock --clear, then try the original command again.
Alternatively, you can use $ pipenv install --skip-lock to bypass this mechanism, then run $ pipenv graph to inspect the situation.
Hint: try $ pipenv lock --pre if it is a pre-release dependency.
ERROR: Could not find a version that matches cloudpickle==1.3,~=1.2.0 (from garage==2020.6.0->-r /tmp/pipenvprlocesvrequirements/pipenv-7gor6s43-constraints.txt (line 2))
Tried: 0.1.0, 0.1.0, 0.1.1, 0.1.1, 0.2.1, 0.2.1, 0.2.2, 0.2.2, 0.3.0, 0.3.0, 0.3.1, 0.3.1, 0.4.0, 0.4.0, 0.4.1, 0.4.1, 0.4.2, 0.4.2, 0.4.3, 0.4.3, 0.4.4, 0.4.4, 0.5.0, 0.5.0, 0.5.1, 0.5.1, 0.5.2, 0.5.2, 0.5.3, 0.5.3, 0.5.4, 0.5.4, 0.5.5, 0.5.5, 0.5.6, 0.5.6, 0.6.0, 0.6.0, 0.6.1, 0.6.1, 0.7.0, 0.7.0, 0.8.0, 0.8.0, 0.8.1, 0.8.1, 1.0.0, 1.0.0, 1.1.1, 1.1.1, 1.2.0, 1.2.0, 1.2.1, 1.2.1, 1.2.2, 1.2.2, 1.3.0, 1.3.0, 1.4.0, 1.4.0, 1.4.1, 1.4.1, 1.5.0, 1.5.0
There are incompatible versions in the resolved dependencies:
cloudpickle (from garage==2020.6.0->-r /tmp/pipenvprlocesvrequirements/pipenv-7gor6s43-constraints.txt (line 2))
cloudpickle==1.3 (from tensorflow-probability==0.10.1->garage==2020.6.0->-r /tmp/pipenvprlocesvrequirements/pipenv-7gor6s43-constraints.txt (line 2))
cloudpickle~=1.2.0 (from gym[atari,box2d,classic_control]==0.15.4->garage==2020.6.0->-r /tmp/pipenvprlocesvrequirements/pipenv-7gor6s43-constraints.txt (line 2))
```
This can be shortcutted by appending `--skip-lock` to the `pipenv install`, but that's obviously not ideal. Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """setuptools based setup module."""
2 import os
3
4 from setuptools import find_packages, setup
5
6 GARAGE_GH_TOKEN = os.environ.get('GARAGE_GH_TOKEN') or 'git'
7 GYM_VERSION = '0.15.4'
8
9 # Required dependencies
10 REQUIRED = [
11 # Please keep alphabetized
12 'akro',
13 'click>=2.0',
14 'cloudpickle<1.5',
15 'cma==2.7.0',
16 'dowel==0.0.3',
17 f'gym[atari,box2d,classic_control]=={GYM_VERSION}',
18 'numpy>=1.14.5',
19 'psutil',
20 # Pyglet 1.4.0 introduces some api change which breaks some
21 # gym environments
22 # See: https://github.com/openai/gym/issues/1588
23 'pyglet<1.4.0,>=1.3.0',
24 'python-dateutil',
25 'ray',
26 'scikit-image',
27 'scipy',
28 'setproctitle>=1.0',
29 'tensorflow>=1.14',
30 'tensorflow-probability',
31 'torch>=1.0.0,!=1.5.0',
32 'torchvision>=0.2.1',
33 ]
34
35 # Dependencies for optional features
36 EXTRAS = {}
37
38 EXTRAS['mujoco'] = [
39 'mujoco-py<2.1,>=2.0',
40 f'gym[all]=={GYM_VERSION}',
41 ]
42
43 EXTRAS['dm_control'] = [
44 # dm_control throws an error during install about not being able to
45 # find a build dependency (absl-py). Later pip executes the `install`
46 # command again and the install succeeds because absl-py has been
47 # installed. This is stupid, but harmless.
48 'dm_control==0.0.300771433',
49 ]
50
51 EXTRAS['bullet'] = ['mpi4py', 'pybullet']
52
53 EXTRAS['all'] = list(set(sum(EXTRAS.values(), [])))
54
55 # Development dependencies (*not* included in 'all')
56 EXTRAS['dev'] = [
57 # Please keep alphabetized
58 'flake8',
59 'flake8-docstrings>=1.5.0',
60 'flake8-import-order',
61 f'metaworld @ https://{GARAGE_GH_TOKEN}@api.github.com/repos/rlworkgroup/metaworld/tarball/861ae8d8c4bef80a7ed86f47f47acaa494d4ab77', # noqa: E501
62 'isort>=4.3.21,<5.0.0',
63 'pep8-naming==0.7.0',
64 'pre-commit',
65 'pycodestyle>=2.5.0',
66 'pydocstyle>=4.0.0',
67 'pylint>=2.5.3',
68 'pytest>=4.5.0', # Required for strict-markers
69 'pytest-cov',
70 'pytest-timeout',
71 'pytest-xdist',
72 'recommonmark',
73 'sphinx',
74 'sphinx-autoapi>=1.4.0',
75 'sphinx_rtd_theme',
76 'yapf==0.30.0',
77 ] # yapf: disable
78
79 with open('README.md') as f:
80 README = f.read()
81
82 # Get the package version dynamically
83 with open('VERSION') as v:
84 VERSION = v.read().strip()
85
86 setup(
87 name='garage',
88 version=VERSION,
89 author='Reinforcement Learning Working Group',
90 description='A toolkit for reproducible reinforcement learning research',
91 url='https://github.com/rlworkgroup/garage',
92 packages=find_packages(where='src'),
93 package_dir={'': 'src'},
94 scripts=['scripts/garage'],
95 python_requires='>=3.6',
96 install_requires=REQUIRED,
97 extras_require=EXTRAS,
98 license='MIT',
99 long_description=README,
100 long_description_content_type='text/markdown',
101 classifiers=[
102 'Development Status :: 4 - Beta',
103 'Intended Audience :: Developers',
104 'Intended Audience :: Education',
105 'Intended Audience :: Science/Research',
106 'License :: OSI Approved :: MIT License',
107 'Programming Language :: Python :: 3.6',
108 'Programming Language :: Python :: 3.7',
109 'Programming Language :: Python :: 3 :: Only',
110 'Topic :: Scientific/Engineering :: Artificial Intelligence',
111 'Topic :: Scientific/Engineering :: Mathematics',
112 'Topic :: Software Development :: Libraries',
113 ],
114 )
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -27,7 +27,7 @@
'scipy',
'setproctitle>=1.0',
'tensorflow>=1.14',
- 'tensorflow-probability',
+ 'tensorflow-probability<=0.10.0',
'torch>=1.0.0,!=1.5.0',
'torchvision>=0.2.1',
]
|
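A note on the pin above: the issue shows tensorflow-probability 0.10.1 requiring cloudpickle==1.3 while gym 0.15.4 requires cloudpickle~=1.2.0, so capping tensorflow-probability at 0.10.0 is what lets the resolver settle on a single cloudpickle version (presumably because the older release's cloudpickle requirement is looser). The relevant subset of `REQUIRED` after the patch, other entries omitted:

```python
# Condensed from setup.py after the patch; the tensorflow-probability cap
# removes the cloudpickle==1.3 constraint that clashed with gym's
# cloudpickle~=1.2.0 requirement.
REQUIRED = [
    'cloudpickle<1.5',
    'gym[atari,box2d,classic_control]==0.15.4',
    'tensorflow-probability<=0.10.0',
]
```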
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -27,7 +27,7 @@\n 'scipy',\n 'setproctitle>=1.0',\n 'tensorflow>=1.14',\n- 'tensorflow-probability',\n+ 'tensorflow-probability<=0.10.0',\n 'torch>=1.0.0,!=1.5.0',\n 'torchvision>=0.2.1',\n ]\n", "issue": "Incompatible dependencies in pipenv install\nHello,\r\n\r\nWhen I try to install garage in a fresh pipenv as per the documentation, I get the following error:\r\n\r\n```\r\n[pipenv.exceptions.ResolutionFailure]: Warning: Your dependencies could not be resolved. You likely have a mismatch in your sub-dependencies.\r\n First try clearing your dependency cache with $ pipenv lock --clear, then try the original command again.\r\n Alternatively, you can use $ pipenv install --skip-lock to bypass this mechanism, then run $ pipenv graph to inspect the situation.\r\n Hint: try $ pipenv lock --pre if it is a pre-release dependency.\r\nERROR: Could not find a version that matches cloudpickle==1.3,~=1.2.0 (from garage==2020.6.0->-r /tmp/pipenvprlocesvrequirements/pipenv-7gor6s43-constraints.txt (line 2))\r\nTried: 0.1.0, 0.1.0, 0.1.1, 0.1.1, 0.2.1, 0.2.1, 0.2.2, 0.2.2, 0.3.0, 0.3.0, 0.3.1, 0.3.1, 0.4.0, 0.4.0, 0.4.1, 0.4.1, 0.4.2, 0.4.2, 0.4.3, 0.4.3, 0.4.4, 0.4.4, 0.5.0, 0.5.0, 0.5.1, 0.5.1, 0.5.2, 0.5.2, 0.5.3, 0.5.3, 0.5.4, 0.5.4, 0.5.5, 0.5.5, 0.5.6, 0.5.6, 0.6.0, 0.6.0, 0.6.1, 0.6.1, 0.7.0, 0.7.0, 0.8.0, 0.8.0, 0.8.1, 0.8.1, 1.0.0, 1.0.0, 1.1.1, 1.1.1, 1.2.0, 1.2.0, 1.2.1, 1.2.1, 1.2.2, 1.2.2, 1.3.0, 1.3.0, 1.4.0, 1.4.0, 1.4.1, 1.4.1, 1.5.0, 1.5.0\r\nThere are incompatible versions in the resolved dependencies:\r\n cloudpickle (from garage==2020.6.0->-r /tmp/pipenvprlocesvrequirements/pipenv-7gor6s43-constraints.txt (line 2))\r\n cloudpickle==1.3 (from tensorflow-probability==0.10.1->garage==2020.6.0->-r /tmp/pipenvprlocesvrequirements/pipenv-7gor6s43-constraints.txt (line 2))\r\n cloudpickle~=1.2.0 (from gym[atari,box2d,classic_control]==0.15.4->garage==2020.6.0->-r /tmp/pipenvprlocesvrequirements/pipenv-7gor6s43-constraints.txt (line 2))\r\n```\r\nThis can be shortcutted by appending `--skip-lock` to the `pipenv install`, but that's obviously not ideal. Thanks! \n", "before_files": [{"content": "\"\"\"setuptools based setup module.\"\"\"\nimport os\n\nfrom setuptools import find_packages, setup\n\nGARAGE_GH_TOKEN = os.environ.get('GARAGE_GH_TOKEN') or 'git'\nGYM_VERSION = '0.15.4'\n\n# Required dependencies\nREQUIRED = [\n # Please keep alphabetized\n 'akro',\n 'click>=2.0',\n 'cloudpickle<1.5',\n 'cma==2.7.0',\n 'dowel==0.0.3',\n f'gym[atari,box2d,classic_control]=={GYM_VERSION}',\n 'numpy>=1.14.5',\n 'psutil',\n # Pyglet 1.4.0 introduces some api change which breaks some\n # gym environments\n # See: https://github.com/openai/gym/issues/1588\n 'pyglet<1.4.0,>=1.3.0',\n 'python-dateutil',\n 'ray',\n 'scikit-image',\n 'scipy',\n 'setproctitle>=1.0',\n 'tensorflow>=1.14',\n 'tensorflow-probability',\n 'torch>=1.0.0,!=1.5.0',\n 'torchvision>=0.2.1',\n]\n\n# Dependencies for optional features\nEXTRAS = {}\n\nEXTRAS['mujoco'] = [\n 'mujoco-py<2.1,>=2.0',\n f'gym[all]=={GYM_VERSION}',\n]\n\nEXTRAS['dm_control'] = [\n # dm_control throws an error during install about not being able to\n # find a build dependency (absl-py). Later pip executes the `install`\n # command again and the install succeeds because absl-py has been\n # installed. 
This is stupid, but harmless.\n 'dm_control==0.0.300771433',\n]\n\nEXTRAS['bullet'] = ['mpi4py', 'pybullet']\n\nEXTRAS['all'] = list(set(sum(EXTRAS.values(), [])))\n\n# Development dependencies (*not* included in 'all')\nEXTRAS['dev'] = [\n # Please keep alphabetized\n 'flake8',\n 'flake8-docstrings>=1.5.0',\n 'flake8-import-order',\n f'metaworld @ https://{GARAGE_GH_TOKEN}@api.github.com/repos/rlworkgroup/metaworld/tarball/861ae8d8c4bef80a7ed86f47f47acaa494d4ab77', # noqa: E501\n 'isort>=4.3.21,<5.0.0',\n 'pep8-naming==0.7.0',\n 'pre-commit',\n 'pycodestyle>=2.5.0',\n 'pydocstyle>=4.0.0',\n 'pylint>=2.5.3',\n 'pytest>=4.5.0', # Required for strict-markers\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-xdist',\n 'recommonmark',\n 'sphinx',\n 'sphinx-autoapi>=1.4.0',\n 'sphinx_rtd_theme',\n 'yapf==0.30.0',\n] # yapf: disable\n\nwith open('README.md') as f:\n README = f.read()\n\n# Get the package version dynamically\nwith open('VERSION') as v:\n VERSION = v.read().strip()\n\nsetup(\n name='garage',\n version=VERSION,\n author='Reinforcement Learning Working Group',\n description='A toolkit for reproducible reinforcement learning research',\n url='https://github.com/rlworkgroup/garage',\n packages=find_packages(where='src'),\n package_dir={'': 'src'},\n scripts=['scripts/garage'],\n python_requires='>=3.6',\n install_requires=REQUIRED,\n extras_require=EXTRAS,\n license='MIT',\n long_description=README,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"setuptools based setup module.\"\"\"\nimport os\n\nfrom setuptools import find_packages, setup\n\nGARAGE_GH_TOKEN = os.environ.get('GARAGE_GH_TOKEN') or 'git'\nGYM_VERSION = '0.15.4'\n\n# Required dependencies\nREQUIRED = [\n # Please keep alphabetized\n 'akro',\n 'click>=2.0',\n 'cloudpickle<1.5',\n 'cma==2.7.0',\n 'dowel==0.0.3',\n f'gym[atari,box2d,classic_control]=={GYM_VERSION}',\n 'numpy>=1.14.5',\n 'psutil',\n # Pyglet 1.4.0 introduces some api change which breaks some\n # gym environments\n # See: https://github.com/openai/gym/issues/1588\n 'pyglet<1.4.0,>=1.3.0',\n 'python-dateutil',\n 'ray',\n 'scikit-image',\n 'scipy',\n 'setproctitle>=1.0',\n 'tensorflow>=1.14',\n 'tensorflow-probability<=0.10.0',\n 'torch>=1.0.0,!=1.5.0',\n 'torchvision>=0.2.1',\n]\n\n# Dependencies for optional features\nEXTRAS = {}\n\nEXTRAS['mujoco'] = [\n 'mujoco-py<2.1,>=2.0',\n f'gym[all]=={GYM_VERSION}',\n]\n\nEXTRAS['dm_control'] = [\n # dm_control throws an error during install about not being able to\n # find a build dependency (absl-py). Later pip executes the `install`\n # command again and the install succeeds because absl-py has been\n # installed. 
This is stupid, but harmless.\n 'dm_control==0.0.300771433',\n]\n\nEXTRAS['bullet'] = ['mpi4py', 'pybullet']\n\nEXTRAS['all'] = list(set(sum(EXTRAS.values(), [])))\n\n# Development dependencies (*not* included in 'all')\nEXTRAS['dev'] = [\n # Please keep alphabetized\n 'flake8',\n 'flake8-docstrings>=1.5.0',\n 'flake8-import-order',\n f'metaworld @ https://{GARAGE_GH_TOKEN}@api.github.com/repos/rlworkgroup/metaworld/tarball/861ae8d8c4bef80a7ed86f47f47acaa494d4ab77', # noqa: E501\n 'isort>=4.3.21,<5.0.0',\n 'pep8-naming==0.7.0',\n 'pre-commit',\n 'pycodestyle>=2.5.0',\n 'pydocstyle>=4.0.0',\n 'pylint>=2.5.3',\n 'pytest>=4.5.0', # Required for strict-markers\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-xdist',\n 'recommonmark',\n 'sphinx',\n 'sphinx-autoapi>=1.4.0',\n 'sphinx_rtd_theme',\n 'yapf==0.30.0',\n] # yapf: disable\n\nwith open('README.md') as f:\n README = f.read()\n\n# Get the package version dynamically\nwith open('VERSION') as v:\n VERSION = v.read().strip()\n\nsetup(\n name='garage',\n version=VERSION,\n author='Reinforcement Learning Working Group',\n description='A toolkit for reproducible reinforcement learning research',\n url='https://github.com/rlworkgroup/garage',\n packages=find_packages(where='src'),\n package_dir={'': 'src'},\n scripts=['scripts/garage'],\n python_requires='>=3.6',\n install_requires=REQUIRED,\n extras_require=EXTRAS,\n license='MIT',\n long_description=README,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n", "path": "setup.py"}]}
| 2,390 | 109 |
gh_patches_debug_22432
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-5004
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
json serialization error with google analytics enabled
### What I'm trying to achieve
I want to enable google analytics of a docker image I build using the current docker files from the master repo. I am starting it in beanstalk and then the error occurs:
```
/var/log/eb-docker/containers/eb-current-app/eb-d5c249ad18da-stdouterr.log
-------------------------------------
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type generator is not JSON serializable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "./saleor/core/middleware.py", line 27, in middleware
client_id, path=path, language=language, headers=headers)
File "./saleor/core/analytics.py", line 68, in report_view
_report(client_id, payloads, extra_headers=extra_headers)
File "./saleor/core/analytics.py", line 36, in _report
**extra_data)
File "/usr/local/lib/python3.7/site-packages/celery/app/task.py", line 427, in delay
return self.apply_async(args, kwargs)
File "/usr/local/lib/python3.7/site-packages/celery/app/task.py", line 544, in apply_async
body, serializer,
File "/usr/local/lib/python3.7/site-packages/kombu/serialization.py", line 221, in dumps
payload = encoder(data)
File "/usr/local/lib/python3.7/contextlib.py", line 130, in __exit__
self.gen.throw(type, value, traceback)
File "/usr/local/lib/python3.7/site-packages/kombu/serialization.py", line 54, in _reraise_errors
reraise(wrapper, wrapper(exc), sys.exc_info()[2])
File "/usr/local/lib/python3.7/site-packages/vine/five.py", line 194, in reraise
raise value.with_traceback(tb)
File "/usr/local/lib/python3.7/site-packages/kombu/serialization.py", line 50, in _reraise_errors
yield
File "/usr/local/lib/python3.7/site-packages/kombu/serialization.py", line 221, in dumps
payload = encoder(data)
File "/usr/local/lib/python3.7/site-packages/kombu/utils/json.py", line 69, in dumps
**dict(default_kwargs, **kwargs))
File "/usr/local/lib/python3.7/json/__init__.py", line 238, in dumps
**kw).encode(obj)
File "/usr/local/lib/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/usr/local/lib/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/usr/local/lib/python3.7/site-packages/kombu/utils/json.py", line 59, in default
return super(JSONEncoder, self).default(o)
File "/usr/local/lib/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
kombu.exceptions.EncodeError: Object of type generator is not JSON serializable
ERROR saleor.core.middleware Unable to update analytics [PID:17:uWSGIWorker2Core0]
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/kombu/serialization.py", line 50, in _reraise_errors
yield
File "/usr/local/lib/python3.7/site-packages/kombu/serialization.py", line 221, in dumps
payload = encoder(data)
File "/usr/local/lib/python3.7/site-packages/kombu/utils/json.py", line 69, in dumps
**dict(default_kwargs, **kwargs))
File "/usr/local/lib/python3.7/json/__init__.py", line 238, in dumps
**kw).encode(obj)
File "/usr/local/lib/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/usr/local/lib/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/usr/local/lib/python3.7/site-packages/kombu/utils/json.py", line 59, in default
return super(JSONEncoder, self).default(o)
File "/usr/local/lib/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type generator is not JSON serializable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "./saleor/core/middleware.py", line 27, in middleware
client_id, path=path, language=language, headers=headers)
File "./saleor/core/analytics.py", line 68, in report_view
_report(client_id, payloads, extra_headers=extra_headers)
File "./saleor/core/analytics.py", line 36, in _report
**extra_data)
File "/usr/local/lib/python3.7/site-packages/celery/app/task.py", line 427, in delay
return self.apply_async(args, kwargs)
File "/usr/local/lib/python3.7/site-packages/celery/app/task.py", line 544, in apply_async
body, serializer,
File "/usr/local/lib/python3.7/site-packages/kombu/serialization.py", line 221, in dumps
payload = encoder(data)
File "/usr/local/lib/python3.7/contextlib.py", line 130, in __exit__
self.gen.throw(type, value, traceback)
File "/usr/local/lib/python3.7/site-packages/kombu/serialization.py", line 54, in _reraise_errors
reraise(wrapper, wrapper(exc), sys.exc_info()[2])
File "/usr/local/lib/python3.7/site-packages/vine/five.py", line 194, in reraise
raise value.with_traceback(tb)
File "/usr/local/lib/python3.7/site-packages/kombu/serialization.py", line 50, in _reraise_errors
yield
File "/usr/local/lib/python3.7/site-packages/kombu/serialization.py", line 221, in dumps
payload = encoder(data)
File "/usr/local/lib/python3.7/site-packages/kombu/utils/json.py", line 69, in dumps
**dict(default_kwargs, **kwargs))
File "/usr/local/lib/python3.7/json/__init__.py", line 238, in dumps
**kw).encode(obj)
File "/usr/local/lib/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/usr/local/lib/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/usr/local/lib/python3.7/site-packages/kombu/utils/json.py", line 59, in default
return super(JSONEncoder, self).default(o)
File "/usr/local/lib/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
kombu.exceptions.EncodeError: Object of type generator is not JSON serializable
UWSGI uwsgi "GET /de/cart/summary/ HTTP/1.1" 200 785 29ms [PID:17:Worker-2] [RSS:162MB]```
### Steps to reproduce the problem
1. Build the docker image
2. Try to start it in beanstalk with the minimal needed env variables set and google analytics id set too
3. Look into the logs and find the error in question
### What I expected to happen
Beanstalk starts the docker image with the minimal needed env variables and Google Analytics enabled.
There is a merge request related to this #3615
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/core/analytics.py`
Content:
```
1 import uuid
2
3 import google_measurement_protocol as ga
4 from django.conf import settings
5
6 from ..celeryconf import app
7
8 FINGERPRINT_PARTS = [
9 "HTTP_ACCEPT_ENCODING",
10 "HTTP_ACCEPT_LANGUAGE",
11 "HTTP_USER_AGENT",
12 "HTTP_X_FORWARDED_FOR",
13 "REMOTE_ADDR",
14 ]
15
16 UUID_NAMESPACE = uuid.UUID("fb4abc05-e2fb-4e3e-8b78-28037ef7d07f")
17
18
19 def get_client_id(request):
20 parts = [request.META.get(key, "") for key in FINGERPRINT_PARTS]
21 name = "_".join(parts)
22 return uuid.uuid5(UUID_NAMESPACE, name)
23
24
25 @app.task
26 def ga_report(tracking_id, client_id, payloads, extra_headers=None, **extra_data):
27 ga.report(
28 tracking_id, client_id, payloads, extra_headers=extra_headers, **extra_data
29 )
30
31
32 def _report(client_id, payloads, extra_headers=None, **extra_data):
33 tracking_id = getattr(settings, "GOOGLE_ANALYTICS_TRACKING_ID", None)
34 if tracking_id and client_id:
35 ga_report.delay(
36 tracking_id, client_id, payloads, extra_headers=extra_headers, **extra_data
37 )
38
39
40 def get_order_payloads(order):
41 items = [
42 ga.item(
43 str(order_line),
44 order_line.unit_price.gross,
45 quantity=order_line.quantity,
46 item_id=order_line.product_sku,
47 )
48 for order_line in order
49 ]
50 return ga.transaction(
51 order.id,
52 items,
53 revenue=order.total.gross,
54 tax=order.total.tax,
55 shipping=order.shipping_price.net,
56 )
57
58
59 def report_order(client_id, order):
60 payloads = get_order_payloads(order)
61 _report(client_id, payloads)
62
63
64 def get_view_payloads(path, language, headers):
65 host_name = headers.get("HTTP_HOST", None)
66 referrer = headers.get("HTTP_REFERER", None)
67 return ga.pageview(path, host_name=host_name, referrer=referrer, language=language)
68
69
70 def report_view(client_id, path, language, headers):
71 payloads = get_view_payloads(path, language, headers)
72 extra_headers = {}
73 user_agent = headers.get("HTTP_USER_AGENT", None)
74 if user_agent:
75 extra_headers["user-agent"] = user_agent
76 _report(client_id, payloads, extra_headers=extra_headers)
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/saleor/core/analytics.py b/saleor/core/analytics.py
--- a/saleor/core/analytics.py
+++ b/saleor/core/analytics.py
@@ -1,4 +1,5 @@
import uuid
+from typing import Generator, List
import google_measurement_protocol as ga
from django.conf import settings
@@ -23,17 +24,25 @@
@app.task
-def ga_report(tracking_id, client_id, payloads, extra_headers=None, **extra_data):
+def ga_report(
+ tracking_id, client_id, payloads: List[dict], extra_headers=None, **extra_data
+):
ga.report(
tracking_id, client_id, payloads, extra_headers=extra_headers, **extra_data
)
-def _report(client_id, payloads, extra_headers=None, **extra_data):
+def _report(
+ client_id, payloads: Generator[dict, None, None], extra_headers=None, **extra_data
+):
tracking_id = getattr(settings, "GOOGLE_ANALYTICS_TRACKING_ID", None)
if tracking_id and client_id:
ga_report.delay(
- tracking_id, client_id, payloads, extra_headers=extra_headers, **extra_data
+ tracking_id,
+ client_id,
+ list(payloads),
+ extra_headers=extra_headers,
+ **extra_data,
)
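
A minimal, self-contained sketch of the failure mode this diff addresses: Python's `json` encoder cannot serialize a generator object, which is what `ga.pageview()` hands to the Celery task, and materializing it with `list()` is what makes the task arguments serializable. The helper below is a made-up stand-in, not Saleor or google_measurement_protocol code.

```python
import json

def get_view_payloads():
    # Hypothetical stand-in for ga.pageview(), which yields payload dicts lazily.
    yield {"t": "pageview", "dp": "/de/cart/summary/"}

try:
    # Roughly what kombu's JSON serializer does with the task arguments.
    json.dumps([get_view_payloads()])
except TypeError as exc:
    print("generator fails:", exc)  # Object of type generator is not JSON serializable

# Materializing the generator first, as the patch does with list(payloads), works fine.
print("list works:", json.dumps([list(get_view_payloads())]))
```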
|
{"golden_diff": "diff --git a/saleor/core/analytics.py b/saleor/core/analytics.py\n--- a/saleor/core/analytics.py\n+++ b/saleor/core/analytics.py\n@@ -1,4 +1,5 @@\n import uuid\n+from typing import Generator, List\n \n import google_measurement_protocol as ga\n from django.conf import settings\n@@ -23,17 +24,25 @@\n \n \n @app.task\n-def ga_report(tracking_id, client_id, payloads, extra_headers=None, **extra_data):\n+def ga_report(\n+ tracking_id, client_id, payloads: List[dict], extra_headers=None, **extra_data\n+):\n ga.report(\n tracking_id, client_id, payloads, extra_headers=extra_headers, **extra_data\n )\n \n \n-def _report(client_id, payloads, extra_headers=None, **extra_data):\n+def _report(\n+ client_id, payloads: Generator[dict, None, None], extra_headers=None, **extra_data\n+):\n tracking_id = getattr(settings, \"GOOGLE_ANALYTICS_TRACKING_ID\", None)\n if tracking_id and client_id:\n ga_report.delay(\n- tracking_id, client_id, payloads, extra_headers=extra_headers, **extra_data\n+ tracking_id,\n+ client_id,\n+ list(payloads),\n+ extra_headers=extra_headers,\n+ **extra_data,\n )\n", "issue": "json serialization error with google analytics enabled\n### What I'm trying to achieve\r\nI want to enable google analytics of a docker image I build using the current docker files from the master repo. I am starting it in beanstalk and then the error occurs: \r\n```\r\n/var/log/eb-docker/containers/eb-current-app/eb-d5c249ad18da-stdouterr.log\r\n-------------------------------------\r\n raise TypeError(f'Object of type {o.__class__.__name__} '\r\nTypeError: Object of type generator is not JSON serializable\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"./saleor/core/middleware.py\", line 27, in middleware\r\n client_id, path=path, language=language, headers=headers)\r\n File \"./saleor/core/analytics.py\", line 68, in report_view\r\n _report(client_id, payloads, extra_headers=extra_headers)\r\n File \"./saleor/core/analytics.py\", line 36, in _report\r\n **extra_data)\r\n File \"/usr/local/lib/python3.7/site-packages/celery/app/task.py\", line 427, in delay\r\n return self.apply_async(args, kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/celery/app/task.py\", line 544, in apply_async\r\n body, serializer,\r\n File \"/usr/local/lib/python3.7/site-packages/kombu/serialization.py\", line 221, in dumps\r\n payload = encoder(data)\r\n File \"/usr/local/lib/python3.7/contextlib.py\", line 130, in __exit__\r\n self.gen.throw(type, value, traceback)\r\n File \"/usr/local/lib/python3.7/site-packages/kombu/serialization.py\", line 54, in _reraise_errors\r\n reraise(wrapper, wrapper(exc), sys.exc_info()[2])\r\n File \"/usr/local/lib/python3.7/site-packages/vine/five.py\", line 194, in reraise\r\n raise value.with_traceback(tb)\r\n File \"/usr/local/lib/python3.7/site-packages/kombu/serialization.py\", line 50, in _reraise_errors\r\n yield\r\n File \"/usr/local/lib/python3.7/site-packages/kombu/serialization.py\", line 221, in dumps\r\n payload = encoder(data)\r\n File \"/usr/local/lib/python3.7/site-packages/kombu/utils/json.py\", line 69, in dumps\r\n **dict(default_kwargs, **kwargs))\r\n File \"/usr/local/lib/python3.7/json/__init__.py\", line 238, in dumps\r\n **kw).encode(obj)\r\n File \"/usr/local/lib/python3.7/json/encoder.py\", line 199, in encode\r\n chunks = self.iterencode(o, _one_shot=True)\r\n File \"/usr/local/lib/python3.7/json/encoder.py\", line 257, in iterencode\r\n return _iterencode(o, 0)\r\n 
File \"/usr/local/lib/python3.7/site-packages/kombu/utils/json.py\", line 59, in default\r\n return super(JSONEncoder, self).default(o)\r\n File \"/usr/local/lib/python3.7/json/encoder.py\", line 179, in default\r\n raise TypeError(f'Object of type {o.__class__.__name__} '\r\nkombu.exceptions.EncodeError: Object of type generator is not JSON serializable\r\nERROR saleor.core.middleware Unable to update analytics [PID:17:uWSGIWorker2Core0]\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/site-packages/kombu/serialization.py\", line 50, in _reraise_errors\r\n yield\r\n File \"/usr/local/lib/python3.7/site-packages/kombu/serialization.py\", line 221, in dumps\r\n payload = encoder(data)\r\n File \"/usr/local/lib/python3.7/site-packages/kombu/utils/json.py\", line 69, in dumps\r\n **dict(default_kwargs, **kwargs))\r\n File \"/usr/local/lib/python3.7/json/__init__.py\", line 238, in dumps\r\n **kw).encode(obj)\r\n File \"/usr/local/lib/python3.7/json/encoder.py\", line 199, in encode\r\n chunks = self.iterencode(o, _one_shot=True)\r\n File \"/usr/local/lib/python3.7/json/encoder.py\", line 257, in iterencode\r\n return _iterencode(o, 0)\r\n File \"/usr/local/lib/python3.7/site-packages/kombu/utils/json.py\", line 59, in default\r\n return super(JSONEncoder, self).default(o)\r\n File \"/usr/local/lib/python3.7/json/encoder.py\", line 179, in default\r\n raise TypeError(f'Object of type {o.__class__.__name__} '\r\nTypeError: Object of type generator is not JSON serializable\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"./saleor/core/middleware.py\", line 27, in middleware\r\n client_id, path=path, language=language, headers=headers)\r\n File \"./saleor/core/analytics.py\", line 68, in report_view\r\n _report(client_id, payloads, extra_headers=extra_headers)\r\n File \"./saleor/core/analytics.py\", line 36, in _report\r\n **extra_data)\r\n File \"/usr/local/lib/python3.7/site-packages/celery/app/task.py\", line 427, in delay\r\n return self.apply_async(args, kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/celery/app/task.py\", line 544, in apply_async\r\n body, serializer,\r\n File \"/usr/local/lib/python3.7/site-packages/kombu/serialization.py\", line 221, in dumps\r\n payload = encoder(data)\r\n File \"/usr/local/lib/python3.7/contextlib.py\", line 130, in __exit__\r\n self.gen.throw(type, value, traceback)\r\n File \"/usr/local/lib/python3.7/site-packages/kombu/serialization.py\", line 54, in _reraise_errors\r\n reraise(wrapper, wrapper(exc), sys.exc_info()[2])\r\n File \"/usr/local/lib/python3.7/site-packages/vine/five.py\", line 194, in reraise\r\n raise value.with_traceback(tb)\r\n File \"/usr/local/lib/python3.7/site-packages/kombu/serialization.py\", line 50, in _reraise_errors\r\n yield\r\n File \"/usr/local/lib/python3.7/site-packages/kombu/serialization.py\", line 221, in dumps\r\n payload = encoder(data)\r\n File \"/usr/local/lib/python3.7/site-packages/kombu/utils/json.py\", line 69, in dumps\r\n **dict(default_kwargs, **kwargs))\r\n File \"/usr/local/lib/python3.7/json/__init__.py\", line 238, in dumps\r\n **kw).encode(obj)\r\n File \"/usr/local/lib/python3.7/json/encoder.py\", line 199, in encode\r\n chunks = self.iterencode(o, _one_shot=True)\r\n File \"/usr/local/lib/python3.7/json/encoder.py\", line 257, in iterencode\r\n return _iterencode(o, 0)\r\n File \"/usr/local/lib/python3.7/site-packages/kombu/utils/json.py\", line 59, in default\r\n return 
super(JSONEncoder, self).default(o)\r\n File \"/usr/local/lib/python3.7/json/encoder.py\", line 179, in default\r\n raise TypeError(f'Object of type {o.__class__.__name__} '\r\nkombu.exceptions.EncodeError: Object of type generator is not JSON serializable\r\nUWSGI uwsgi \"GET /de/cart/summary/ HTTP/1.1\" 200 785 29ms [PID:17:Worker-2] [RSS:162MB]```\r\n\r\n### Steps to reproduce the problem\r\n1. Build the docker image \r\n2. Try to start it in beanstalk with the minimal needed env variables set and google analytics id set too \r\n3. Look into the logs and find the error in question\r\n\r\n### What I expected to happen\r\nBeanstalk starts the dockerimage with the needed minimal env variables and google analytics enabled. \r\n \r\nThere is a merge request related to this #3615 \n", "before_files": [{"content": "import uuid\n\nimport google_measurement_protocol as ga\nfrom django.conf import settings\n\nfrom ..celeryconf import app\n\nFINGERPRINT_PARTS = [\n \"HTTP_ACCEPT_ENCODING\",\n \"HTTP_ACCEPT_LANGUAGE\",\n \"HTTP_USER_AGENT\",\n \"HTTP_X_FORWARDED_FOR\",\n \"REMOTE_ADDR\",\n]\n\nUUID_NAMESPACE = uuid.UUID(\"fb4abc05-e2fb-4e3e-8b78-28037ef7d07f\")\n\n\ndef get_client_id(request):\n parts = [request.META.get(key, \"\") for key in FINGERPRINT_PARTS]\n name = \"_\".join(parts)\n return uuid.uuid5(UUID_NAMESPACE, name)\n\n\[email protected]\ndef ga_report(tracking_id, client_id, payloads, extra_headers=None, **extra_data):\n ga.report(\n tracking_id, client_id, payloads, extra_headers=extra_headers, **extra_data\n )\n\n\ndef _report(client_id, payloads, extra_headers=None, **extra_data):\n tracking_id = getattr(settings, \"GOOGLE_ANALYTICS_TRACKING_ID\", None)\n if tracking_id and client_id:\n ga_report.delay(\n tracking_id, client_id, payloads, extra_headers=extra_headers, **extra_data\n )\n\n\ndef get_order_payloads(order):\n items = [\n ga.item(\n str(order_line),\n order_line.unit_price.gross,\n quantity=order_line.quantity,\n item_id=order_line.product_sku,\n )\n for order_line in order\n ]\n return ga.transaction(\n order.id,\n items,\n revenue=order.total.gross,\n tax=order.total.tax,\n shipping=order.shipping_price.net,\n )\n\n\ndef report_order(client_id, order):\n payloads = get_order_payloads(order)\n _report(client_id, payloads)\n\n\ndef get_view_payloads(path, language, headers):\n host_name = headers.get(\"HTTP_HOST\", None)\n referrer = headers.get(\"HTTP_REFERER\", None)\n return ga.pageview(path, host_name=host_name, referrer=referrer, language=language)\n\n\ndef report_view(client_id, path, language, headers):\n payloads = get_view_payloads(path, language, headers)\n extra_headers = {}\n user_agent = headers.get(\"HTTP_USER_AGENT\", None)\n if user_agent:\n extra_headers[\"user-agent\"] = user_agent\n _report(client_id, payloads, extra_headers=extra_headers)\n", "path": "saleor/core/analytics.py"}], "after_files": [{"content": "import uuid\nfrom typing import Generator, List\n\nimport google_measurement_protocol as ga\nfrom django.conf import settings\n\nfrom ..celeryconf import app\n\nFINGERPRINT_PARTS = [\n \"HTTP_ACCEPT_ENCODING\",\n \"HTTP_ACCEPT_LANGUAGE\",\n \"HTTP_USER_AGENT\",\n \"HTTP_X_FORWARDED_FOR\",\n \"REMOTE_ADDR\",\n]\n\nUUID_NAMESPACE = uuid.UUID(\"fb4abc05-e2fb-4e3e-8b78-28037ef7d07f\")\n\n\ndef get_client_id(request):\n parts = [request.META.get(key, \"\") for key in FINGERPRINT_PARTS]\n name = \"_\".join(parts)\n return uuid.uuid5(UUID_NAMESPACE, name)\n\n\[email protected]\ndef ga_report(\n tracking_id, client_id, payloads: List[dict], 
extra_headers=None, **extra_data\n):\n ga.report(\n tracking_id, client_id, payloads, extra_headers=extra_headers, **extra_data\n )\n\n\ndef _report(\n client_id, payloads: Generator[dict, None, None], extra_headers=None, **extra_data\n):\n tracking_id = getattr(settings, \"GOOGLE_ANALYTICS_TRACKING_ID\", None)\n if tracking_id and client_id:\n ga_report.delay(\n tracking_id,\n client_id,\n list(payloads),\n extra_headers=extra_headers,\n **extra_data,\n )\n\n\ndef get_order_payloads(order):\n items = [\n ga.item(\n str(order_line),\n order_line.unit_price.gross,\n quantity=order_line.quantity,\n item_id=order_line.product_sku,\n )\n for order_line in order\n ]\n return ga.transaction(\n order.id,\n items,\n revenue=order.total.gross,\n tax=order.total.tax,\n shipping=order.shipping_price.net,\n )\n\n\ndef report_order(client_id, order):\n payloads = get_order_payloads(order)\n _report(client_id, payloads)\n\n\ndef get_view_payloads(path, language, headers):\n host_name = headers.get(\"HTTP_HOST\", None)\n referrer = headers.get(\"HTTP_REFERER\", None)\n return ga.pageview(path, host_name=host_name, referrer=referrer, language=language)\n\n\ndef report_view(client_id, path, language, headers):\n payloads = get_view_payloads(path, language, headers)\n extra_headers = {}\n user_agent = headers.get(\"HTTP_USER_AGENT\", None)\n if user_agent:\n extra_headers[\"user-agent\"] = user_agent\n _report(client_id, payloads, extra_headers=extra_headers)\n", "path": "saleor/core/analytics.py"}]}
| 2,787 | 298 |
gh_patches_debug_61971
|
rasdani/github-patches
|
git_diff
|
crytic__slither-1110
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug-Candidate]: Phi-node print missing 'f' in f-string
### Describe the issue:
When printing a Phi-node, the string is not formatted.
There seems to be an 'f' missing ahead of the string literal in https://github.com/crytic/slither/blob/dev/slither/slithir/operations/phi.py#L36
### Code example to reproduce the issue:
slither tests/complex_func.sol --print slithir-ssa
### Version:
dev-branch dd91f770f61eaadc286e2af3c72fb5798e376c16
### Relevant log output:
```
Contract Increment
Function Increment.increaseBy1()
IRs:
{self.lvalue}({self.lvalue.type}) := ϕ({[str(v) for v in self._rvalues]})
Expression: i += 1
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/slithir/operations/phi.py`
Content:
```
1 from slither.slithir.operations.lvalue import OperationWithLValue
2 from slither.slithir.utils.utils import is_valid_lvalue
3
4
5 class Phi(OperationWithLValue):
6 def __init__(self, left_variable, nodes):
7 # When Phi operations are created the
8 # correct indexes of the variables are not yet computed
9 # We store the nodes where the variables are written
10 # so we can update the rvalues of the Phi operation
11 # after its instantiation
12 assert is_valid_lvalue(left_variable)
13 assert isinstance(nodes, set)
14 super().__init__()
15 self._lvalue = left_variable
16 self._rvalues = []
17 self._nodes = nodes
18
19 @property
20 def read(self):
21 return self.rvalues
22
23 @property
24 def rvalues(self):
25 return self._rvalues
26
27 @rvalues.setter
28 def rvalues(self, vals):
29 self._rvalues = vals
30
31 @property
32 def nodes(self):
33 return self._nodes
34
35 def __str__(self):
36 return "{self.lvalue}({self.lvalue.type}) := \u03D5({[str(v) for v in self._rvalues]})"
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/slither/slithir/operations/phi.py b/slither/slithir/operations/phi.py
--- a/slither/slithir/operations/phi.py
+++ b/slither/slithir/operations/phi.py
@@ -33,4 +33,4 @@
return self._nodes
def __str__(self):
- return "{self.lvalue}({self.lvalue.type}) := \u03D5({[str(v) for v in self._rvalues]})"
+ return f"{self.lvalue}({self.lvalue.type}) := \u03D5({[str(v) for v in self._rvalues]})"
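
A tiny standalone illustration (not slither code, with made-up SSA values) of why the leading `f` matters here: without it Python emits the braces literally, which is exactly the unformatted `{self.lvalue}({self.lvalue.type}) := ϕ(...)` line in the issue's log.

```python
class PhiDemo:
    def __init__(self):
        self.lvalue = "i_2"            # invented values, only to show the formatting difference
        self._rvalues = ["i_0", "i_1"]

    def broken(self):
        # Plain string: the braces are never interpolated.
        return "{self.lvalue} := \u03D5({[str(v) for v in self._rvalues]})"

    def fixed(self):
        # f-string: the expressions inside the braces are evaluated.
        return f"{self.lvalue} := \u03D5({[str(v) for v in self._rvalues]})"

demo = PhiDemo()
print(demo.broken())  # {self.lvalue} := ϕ({[str(v) for v in self._rvalues]})
print(demo.fixed())   # i_2 := ϕ(['i_0', 'i_1'])
```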
|
{"golden_diff": "diff --git a/slither/slithir/operations/phi.py b/slither/slithir/operations/phi.py\n--- a/slither/slithir/operations/phi.py\n+++ b/slither/slithir/operations/phi.py\n@@ -33,4 +33,4 @@\n return self._nodes\n \n def __str__(self):\n- return \"{self.lvalue}({self.lvalue.type}) := \\u03D5({[str(v) for v in self._rvalues]})\"\n+ return f\"{self.lvalue}({self.lvalue.type}) := \\u03D5({[str(v) for v in self._rvalues]})\"\n", "issue": "[Bug-Candidate]: Phi-node print missing 'f' in f-string\n### Describe the issue:\n\nWhen printing a Phi-node the string is not formatted.\r\nThere seems to be a 'f' missing ahead of the str in https://github.com/crytic/slither/blob/dev/slither/slithir/operations/phi.py#L36\n\n### Code example to reproduce the issue:\n\nslither tests/complex_func.sol --print slithir-ssa\n\n### Version:\n\ndev-branch dd91f770f61eaadc286e2af3c72fb5798e376c16\n\n### Relevant log output:\n\n```\r\nContract Increment\r\n Function Increment.increaseBy1()\r\n IRs:\r\n {self.lvalue}({self.lvalue.type}) := \u03d5({[str(v) for v in self._rvalues]})\r\n Expression: i += 1\r\n```\n", "before_files": [{"content": "from slither.slithir.operations.lvalue import OperationWithLValue\nfrom slither.slithir.utils.utils import is_valid_lvalue\n\n\nclass Phi(OperationWithLValue):\n def __init__(self, left_variable, nodes):\n # When Phi operations are created the\n # correct indexes of the variables are not yet computed\n # We store the nodes where the variables are written\n # so we can update the rvalues of the Phi operation\n # after its instantiation\n assert is_valid_lvalue(left_variable)\n assert isinstance(nodes, set)\n super().__init__()\n self._lvalue = left_variable\n self._rvalues = []\n self._nodes = nodes\n\n @property\n def read(self):\n return self.rvalues\n\n @property\n def rvalues(self):\n return self._rvalues\n\n @rvalues.setter\n def rvalues(self, vals):\n self._rvalues = vals\n\n @property\n def nodes(self):\n return self._nodes\n\n def __str__(self):\n return \"{self.lvalue}({self.lvalue.type}) := \\u03D5({[str(v) for v in self._rvalues]})\"\n", "path": "slither/slithir/operations/phi.py"}], "after_files": [{"content": "from slither.slithir.operations.lvalue import OperationWithLValue\nfrom slither.slithir.utils.utils import is_valid_lvalue\n\n\nclass Phi(OperationWithLValue):\n def __init__(self, left_variable, nodes):\n # When Phi operations are created the\n # correct indexes of the variables are not yet computed\n # We store the nodes where the variables are written\n # so we can update the rvalues of the Phi operation\n # after its instantiation\n assert is_valid_lvalue(left_variable)\n assert isinstance(nodes, set)\n super().__init__()\n self._lvalue = left_variable\n self._rvalues = []\n self._nodes = nodes\n\n @property\n def read(self):\n return self.rvalues\n\n @property\n def rvalues(self):\n return self._rvalues\n\n @rvalues.setter\n def rvalues(self, vals):\n self._rvalues = vals\n\n @property\n def nodes(self):\n return self._nodes\n\n def __str__(self):\n return f\"{self.lvalue}({self.lvalue.type}) := \\u03D5({[str(v) for v in self._rvalues]})\"\n", "path": "slither/slithir/operations/phi.py"}]}
| 788 | 148 |
gh_patches_debug_13611
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-center-index-5274
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[package] fontconfig/2.13.93: build fails on macos with bzip2:shared=True
### Package and Environment Details (include every applicable attribute)
* Package Name/Version: **fontconfig/2.13.93**
* Operating System+version: **MacOS 10.15**
* Compiler+version: **Apple clang 11.0**
* Conan version: **conan 1.35.1**
* Python version: **Python 3.9.4**
### Conan profile
```
Configuration for profile macos-xcode11-x86_64:
[settings]
os=Macos
os_build=Macos
arch=x86_64
arch_build=x86_64
compiler=apple-clang
compiler.version=11.0
compiler.libcxx=libc++
build_type=Release
os.version=10.13
compiler.cppstd=17
[options]
boost:header_only=True
brotli:shared=True
bzip2:shared=True
cairo:enable_fc=True
cairo:enable_ft=True
cairo:enable_glib=False
cairo:shared=True
ccdcsqlite3:enable_explain_comments=True
ccdcsqlite3:enable_fts3=True
ccdcsqlite3:enable_fts4=True
ccdcsqlite3:enable_fts5=True
ccdcsqlite3:enable_json1=True
ccdcsqlite3:enable_null_trim=True
ccdcsqlite3:max_column=10000
ccdcsqlite3:shared=True
csdprotobufs:shared=True
expat:shared=True
fontconfig:shared=True
freetype:shared=True
gtest:shared=True
inchi:shared=True
jasper:shared=True
jbig:shared=True
lexactivator:shared=True
lexfloatclient:shared=True
libarchive:shared=True
libarchive:with_bzip2=True
libarchive:with_zlib=True
libarchive:with_zstd=True
libcurl:darwin_ssl=True
libcurl:shared=True
libcurl:with_openssl=False
libiconv:shared=False
libjpeg:shared=True
libpng:shared=True
libtiff:shared=True
libuuid:shared=True
libwebp:shared=True
libxl:shared=True
libxml2:shared=True
openscenegraph:shared=True
openssl:shared=True
protobuf:shared=True
protobuf:with_zlib=True
xz_utils:shared=True
zlib:shared=True
zstd:shared=True
[build_requires]
[env]
```
### Steps to reproduce (Include if Applicable)
build fails in conan package stage
### Logs (Include/Attach if Applicable)
<details><summary>Click to expand log</summary>
```
if [ -f source_subfolder/fonts.conf ]; then \
echo " /usr/bin/install -c -m 644 source_subfolder/fonts.conf /Users/vagrant/.conan/data/fontconfig/2.13.93/_/_/package/a1eeda7123576b54b8790256237bfc4e70516c86/bin/etc/fonts/fonts.conf"; \
/usr/bin/install -c -m 644 source_subfolder/fonts.conf /Users/vagrant/.conan/data/fontconfig/2.13.93/_/_/package/a1eeda7123576b54b8790256237bfc4e70516c86/bin/etc/fonts/fonts.conf; \
else if [ -f fonts.conf ]; then \
echo " /usr/bin/install -c -m 644 fonts.conf /Users/vagrant/.conan/data/fontconfig/2.13.93/_/_/package/a1eeda7123576b54b8790256237bfc4e70516c86/bin/etc/fonts/fonts.conf"; \
/usr/bin/install -c -m 644 fonts.conf /Users/vagrant/.conan/data/fontconfig/2.13.93/_/_/package/a1eeda7123576b54b8790256237bfc4e70516c86/bin/etc/fonts/fonts.conf; \
fi; fi
/usr/bin/install -c -m 644 fonts.conf /Users/vagrant/.conan/data/fontconfig/2.13.93/_/_/package/a1eeda7123576b54b8790256237bfc4e70516c86/bin/etc/fonts/fonts.conf
fc-cache -s -f -v
/usr/bin/install -c -m 644 fontconfig.pc '/Users/vagrant/.conan/data/fontconfig/2.13.93/_/_/package/a1eeda7123576b54b8790256237bfc4e70516c86/lib/pkgconfig'
/usr/bin/install -c -m 644 source_subfolder/fonts.dtd '/Users/vagrant/.conan/data/fontconfig/2.13.93/_/_/package/a1eeda7123576b54b8790256237bfc4e70516c86/bin/share/xml/fontconfig'
dyld: Library not loaded: libbz2.1.dylib
Referenced from: /Users/vagrant/.conan/data/fontconfig/2.13.93/_/_/build/a1eeda7123576b54b8790256237bfc4e70516c86/fc-cache/.libs/fc-cache
Reason: image not found
/bin/sh: line 1: 35414 Abort trap: 6 fc-cache/fc-cache -s -f -v
make[2]: *** [install-data-local] Error 134
make[1]: *** [install-am] Error 2
make: *** [install-recursive] Error 1
ERROR: fontconfig/2.13.93: Error in package() method, line 87
autotools.install()
ConanException: Error 2 while executing make install -j12
```
</details>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/fontconfig/all/conanfile.py`
Content:
```
1 import os
2 import glob
3
4 from conans import ConanFile, tools, AutoToolsBuildEnvironment
5 from conans.errors import ConanInvalidConfiguration
6
7
8 class FontconfigConan(ConanFile):
9 name = "fontconfig"
10 license = "MIT"
11 url = "https://github.com/conan-io/conan-center-index"
12 description = "Fontconfig is a library for configuring and customizing font access"
13 homepage = "https://gitlab.freedesktop.org/fontconfig/fontconfig"
14 topics = ("conan", "fontconfig", "fonts", "freedesktop")
15 settings = "os", "compiler", "build_type", "arch"
16 options = {"shared": [True, False], "fPIC": [True, False]}
17 default_options = {"shared": False, "fPIC": True}
18 generators = "pkg_config"
19
20 _autotools = None
21
22 @property
23 def _source_subfolder(self):
24 return "source_subfolder"
25
26 def config_options(self):
27 if self.settings.os == "Windows":
28 del self.options.fPIC
29
30 def configure(self):
31 if self.settings.compiler == "Visual Studio":
32 raise ConanInvalidConfiguration("Visual Studio builds are not supported.")
33 if self.options.shared:
34 del self.options.fPIC
35 del self.settings.compiler.libcxx
36 del self.settings.compiler.cppstd
37
38 def requirements(self):
39 self.requires("freetype/2.10.4")
40 self.requires("expat/2.2.10")
41 if self.settings.os == "Linux":
42 self.requires("libuuid/1.0.3")
43 elif self.settings.os == "Macos":
44 self.requires("libgettext/0.20.1")
45
46 def build_requirements(self):
47 self.build_requires("gperf/3.1")
48 self.build_requires("pkgconf/1.7.3")
49 if tools.os_info.is_windows and not tools.get_env("CONAN_BASH_PATH"):
50 self.build_requires("msys2/20200517")
51
52 def source(self):
53 tools.get(**self.conan_data["sources"][self.version])
54 extrated_dir = self.name + "-" + self.version
55 os.rename(extrated_dir, self._source_subfolder)
56
57 def _configure_autotools(self):
58 if not self._autotools:
59 args = ["--enable-static=%s" % ("no" if self.options.shared else "yes"),
60 "--enable-shared=%s" % ("yes" if self.options.shared else "no"),
61 "--disable-docs"]
62 args.append("--sysconfdir=%s" % tools.unix_path(os.path.join(self.package_folder, "bin", "etc")))
63 args.append("--datadir=%s" % tools.unix_path(os.path.join(self.package_folder, "bin", "share")))
64 args.append("--datarootdir=%s" % tools.unix_path(os.path.join(self.package_folder, "bin", "share")))
65 args.append("--localstatedir=%s" % tools.unix_path(os.path.join(self.package_folder, "bin", "var")))
66 self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
67 self._autotools.libs = []
68 self._autotools.configure(configure_dir=self._source_subfolder, args=args)
69 tools.replace_in_file("Makefile", "po-conf test", "po-conf")
70 return self._autotools
71
72 def _patch_files(self):
73 # - fontconfig requires libtool version number, change it for the corresponding freetype one
74 tools.replace_in_file(os.path.join(self._source_subfolder, 'configure'), '21.0.15', '2.8.1')
75
76 def build(self):
77 # Patch files from dependencies
78 self._patch_files()
79 with tools.run_environment(self):
80 autotools = self._configure_autotools()
81 autotools.make()
82
83 def package(self):
84 self.copy("COPYING", dst="licenses", src=self._source_subfolder)
85 with tools.run_environment(self):
86 autotools = self._configure_autotools()
87 autotools.install()
88 os.unlink(os.path.join(self.package_folder, "lib", "libfontconfig.la"))
89 tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
90 for f in glob.glob(os.path.join(self.package_folder, "bin", "etc", "fonts", "conf.d", "*.conf")):
91 if os.path.islink(f):
92 os.unlink(f)
93 for def_file in glob.glob(os.path.join(self.package_folder, "lib", "*.def")):
94 os.remove(def_file)
95
96 def package_info(self):
97 self.cpp_info.libs = ["fontconfig"]
98 if self.settings.os in ["Linux", "FreeBSD"]:
99 self.cpp_info.system_libs.extend(["m", "pthread"])
100 self.cpp_info.names["cmake_find_package"] = "Fontconfig"
101 self.cpp_info.names["cmake_find_package_multi"] = "Fontconfig"
102
103 fontconfig_file = os.path.join(self.package_folder, "bin", "etc", "fonts", "fonts.conf")
104 self.output.info("Creating FONTCONFIG_FILE environment variable: {}".format(fontconfig_file))
105 self.env_info.FONTCONFIG_FILE = fontconfig_file
106 fontconfig_path = os.path.join(self.package_folder, "bin", "etc", "fonts")
107 self.output.info("Creating FONTCONFIG_PATH environment variable: {}".format(fontconfig_path))
108 self.env_info.FONTCONFIG_PATH = fontconfig_path
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/recipes/fontconfig/all/conanfile.py b/recipes/fontconfig/all/conanfile.py
--- a/recipes/fontconfig/all/conanfile.py
+++ b/recipes/fontconfig/all/conanfile.py
@@ -72,6 +72,12 @@
def _patch_files(self):
# - fontconfig requires libtool version number, change it for the corresponding freetype one
tools.replace_in_file(os.path.join(self._source_subfolder, 'configure'), '21.0.15', '2.8.1')
+ # disable fc-cache test to enable cross compilation but also builds with shared libraries on MacOS
+ tools.replace_in_file(
+ os.path.join(self._source_subfolder, 'Makefile.in'),
+ '@CROSS_COMPILING_TRUE@RUN_FC_CACHE_TEST = false',
+ 'RUN_FC_CACHE_TEST=false'
+ )
def build(self):
# Patch files from dependencies
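
For context, a rough standalone sketch (plain Python, outside Conan's tooling) of what the added `tools.replace_in_file` call does to fontconfig's generated `Makefile.in`: it hard-codes `RUN_FC_CACHE_TEST=false`, so `make install` never executes the freshly built `fc-cache`, which is the step that aborted when the binary could not load the shared `libbz2` on macOS. The local path is hypothetical.

```python
from pathlib import Path

# Hypothetical local copy of fontconfig's Makefile.in, only to illustrate the substitution.
makefile = Path("source_subfolder/Makefile.in")
text = makefile.read_text()

# Same strings the recipe patch targets: forcing RUN_FC_CACHE_TEST=false skips the
# fc-cache run during "make install", so the build host never has to load libbz2.
text = text.replace(
    "@CROSS_COMPILING_TRUE@RUN_FC_CACHE_TEST = false",
    "RUN_FC_CACHE_TEST=false",
)
makefile.write_text(text)
```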
|
{"golden_diff": "diff --git a/recipes/fontconfig/all/conanfile.py b/recipes/fontconfig/all/conanfile.py\n--- a/recipes/fontconfig/all/conanfile.py\n+++ b/recipes/fontconfig/all/conanfile.py\n@@ -72,6 +72,12 @@\n def _patch_files(self):\n # - fontconfig requires libtool version number, change it for the corresponding freetype one\n tools.replace_in_file(os.path.join(self._source_subfolder, 'configure'), '21.0.15', '2.8.1')\n+ # disable fc-cache test to enable cross compilation but also builds with shared libraries on MacOS\n+ tools.replace_in_file(\n+ os.path.join(self._source_subfolder, 'Makefile.in'),\n+ '@CROSS_COMPILING_TRUE@RUN_FC_CACHE_TEST = false',\n+ 'RUN_FC_CACHE_TEST=false'\n+ )\n \n def build(self):\n # Patch files from dependencies\n", "issue": "[package] fontconfig/2.13.93: build fails on macos with bzip2:shared=True\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **fontconfig/2.13.93**\r\n * Operating System+version: **MacOS 10.15**\r\n * Compiler+version: **Apple clang 11.0**\r\n * Conan version: **conan 1.35.1**\r\n * Python version: **Python 3.9.4**\r\n\r\n\r\n### Conan profile \r\n```\r\nConfiguration for profile macos-xcode11-x86_64:\r\n\r\n[settings]\r\nos=Macos\r\nos_build=Macos\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=apple-clang\r\ncompiler.version=11.0\r\ncompiler.libcxx=libc++\r\nbuild_type=Release\r\nos.version=10.13\r\ncompiler.cppstd=17\r\n[options]\r\nboost:header_only=True\r\nbrotli:shared=True\r\nbzip2:shared=True\r\ncairo:enable_fc=True\r\ncairo:enable_ft=True\r\ncairo:enable_glib=False\r\ncairo:shared=True\r\nccdcsqlite3:enable_explain_comments=True\r\nccdcsqlite3:enable_fts3=True\r\nccdcsqlite3:enable_fts4=True\r\nccdcsqlite3:enable_fts5=True\r\nccdcsqlite3:enable_json1=True\r\nccdcsqlite3:enable_null_trim=True\r\nccdcsqlite3:max_column=10000\r\nccdcsqlite3:shared=True\r\ncsdprotobufs:shared=True\r\nexpat:shared=True\r\nfontconfig:shared=True\r\nfreetype:shared=True\r\ngtest:shared=True\r\ninchi:shared=True\r\njasper:shared=True\r\njbig:shared=True\r\nlexactivator:shared=True\r\nlexfloatclient:shared=True\r\nlibarchive:shared=True\r\nlibarchive:with_bzip2=True\r\nlibarchive:with_zlib=True\r\nlibarchive:with_zstd=True\r\nlibcurl:darwin_ssl=True\r\nlibcurl:shared=True\r\nlibcurl:with_openssl=False\r\nlibiconv:shared=False\r\nlibjpeg:shared=True\r\nlibpng:shared=True\r\nlibtiff:shared=True\r\nlibuuid:shared=True\r\nlibwebp:shared=True\r\nlibxl:shared=True\r\nlibxml2:shared=True\r\nopenscenegraph:shared=True\r\nopenssl:shared=True\r\nprotobuf:shared=True\r\nprotobuf:with_zlib=True\r\nxz_utils:shared=True\r\nzlib:shared=True\r\nzstd:shared=True\r\n[build_requires]\r\n[env]\r\n\r\n```\r\n\r\n\r\n### Steps to reproduce (Include if Applicable)\r\n\r\nbuild fails in conan package stage\r\n\r\n\r\n### Logs (Include/Attach if Applicable)\r\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\nif [ -f source_subfolder/fonts.conf ]; then \\\r\n\t echo \" /usr/bin/install -c -m 644 source_subfolder/fonts.conf /Users/vagrant/.conan/data/fontconfig/2.13.93/_/_/package/a1eeda7123576b54b8790256237bfc4e70516c86/bin/etc/fonts/fonts.conf\"; \\\r\n\t /usr/bin/install -c -m 644 source_subfolder/fonts.conf /Users/vagrant/.conan/data/fontconfig/2.13.93/_/_/package/a1eeda7123576b54b8790256237bfc4e70516c86/bin/etc/fonts/fonts.conf; \\\r\n\telse if [ -f fonts.conf ]; then \\\r\n\t echo \" /usr/bin/install -c -m 644 fonts.conf 
/Users/vagrant/.conan/data/fontconfig/2.13.93/_/_/package/a1eeda7123576b54b8790256237bfc4e70516c86/bin/etc/fonts/fonts.conf\"; \\\r\n\t /usr/bin/install -c -m 644 fonts.conf /Users/vagrant/.conan/data/fontconfig/2.13.93/_/_/package/a1eeda7123576b54b8790256237bfc4e70516c86/bin/etc/fonts/fonts.conf; \\\r\n\tfi; fi\r\n /usr/bin/install -c -m 644 fonts.conf /Users/vagrant/.conan/data/fontconfig/2.13.93/_/_/package/a1eeda7123576b54b8790256237bfc4e70516c86/bin/etc/fonts/fonts.conf\r\nfc-cache -s -f -v\r\n /usr/bin/install -c -m 644 fontconfig.pc '/Users/vagrant/.conan/data/fontconfig/2.13.93/_/_/package/a1eeda7123576b54b8790256237bfc4e70516c86/lib/pkgconfig'\r\n /usr/bin/install -c -m 644 source_subfolder/fonts.dtd '/Users/vagrant/.conan/data/fontconfig/2.13.93/_/_/package/a1eeda7123576b54b8790256237bfc4e70516c86/bin/share/xml/fontconfig'\r\ndyld: Library not loaded: libbz2.1.dylib\r\n Referenced from: /Users/vagrant/.conan/data/fontconfig/2.13.93/_/_/build/a1eeda7123576b54b8790256237bfc4e70516c86/fc-cache/.libs/fc-cache\r\n Reason: image not found\r\n/bin/sh: line 1: 35414 Abort trap: 6 fc-cache/fc-cache -s -f -v\r\nmake[2]: *** [install-data-local] Error 134\r\nmake[1]: *** [install-am] Error 2\r\nmake: *** [install-recursive] Error 1\r\nERROR: fontconfig/2.13.93: Error in package() method, line 87\r\n\tautotools.install()\r\n\tConanException: Error 2 while executing make install -j12\r\n\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "import os\nimport glob\n\nfrom conans import ConanFile, tools, AutoToolsBuildEnvironment\nfrom conans.errors import ConanInvalidConfiguration\n\n\nclass FontconfigConan(ConanFile):\n name = \"fontconfig\"\n license = \"MIT\"\n url = \"https://github.com/conan-io/conan-center-index\"\n description = \"Fontconfig is a library for configuring and customizing font access\"\n homepage = \"https://gitlab.freedesktop.org/fontconfig/fontconfig\"\n topics = (\"conan\", \"fontconfig\", \"fonts\", \"freedesktop\")\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n generators = \"pkg_config\"\n\n _autotools = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.settings.compiler == \"Visual Studio\":\n raise ConanInvalidConfiguration(\"Visual Studio builds are not supported.\")\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def requirements(self):\n self.requires(\"freetype/2.10.4\")\n self.requires(\"expat/2.2.10\")\n if self.settings.os == \"Linux\":\n self.requires(\"libuuid/1.0.3\")\n elif self.settings.os == \"Macos\":\n self.requires(\"libgettext/0.20.1\")\n\n def build_requirements(self):\n self.build_requires(\"gperf/3.1\")\n self.build_requires(\"pkgconf/1.7.3\")\n if tools.os_info.is_windows and not tools.get_env(\"CONAN_BASH_PATH\"):\n self.build_requires(\"msys2/20200517\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extrated_dir = self.name + \"-\" + self.version\n os.rename(extrated_dir, self._source_subfolder)\n\n def _configure_autotools(self):\n if not self._autotools:\n args = [\"--enable-static=%s\" % (\"no\" if self.options.shared else \"yes\"),\n \"--enable-shared=%s\" % (\"yes\" if self.options.shared else \"no\"),\n \"--disable-docs\"]\n 
args.append(\"--sysconfdir=%s\" % tools.unix_path(os.path.join(self.package_folder, \"bin\", \"etc\")))\n args.append(\"--datadir=%s\" % tools.unix_path(os.path.join(self.package_folder, \"bin\", \"share\")))\n args.append(\"--datarootdir=%s\" % tools.unix_path(os.path.join(self.package_folder, \"bin\", \"share\")))\n args.append(\"--localstatedir=%s\" % tools.unix_path(os.path.join(self.package_folder, \"bin\", \"var\")))\n self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)\n self._autotools.libs = []\n self._autotools.configure(configure_dir=self._source_subfolder, args=args)\n tools.replace_in_file(\"Makefile\", \"po-conf test\", \"po-conf\")\n return self._autotools\n\n def _patch_files(self):\n # - fontconfig requires libtool version number, change it for the corresponding freetype one\n tools.replace_in_file(os.path.join(self._source_subfolder, 'configure'), '21.0.15', '2.8.1')\n\n def build(self):\n # Patch files from dependencies\n self._patch_files()\n with tools.run_environment(self):\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n self.copy(\"COPYING\", dst=\"licenses\", src=self._source_subfolder)\n with tools.run_environment(self):\n autotools = self._configure_autotools()\n autotools.install()\n os.unlink(os.path.join(self.package_folder, \"lib\", \"libfontconfig.la\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n for f in glob.glob(os.path.join(self.package_folder, \"bin\", \"etc\", \"fonts\", \"conf.d\", \"*.conf\")):\n if os.path.islink(f):\n os.unlink(f)\n for def_file in glob.glob(os.path.join(self.package_folder, \"lib\", \"*.def\")):\n os.remove(def_file)\n\n def package_info(self):\n self.cpp_info.libs = [\"fontconfig\"]\n if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n self.cpp_info.system_libs.extend([\"m\", \"pthread\"])\n self.cpp_info.names[\"cmake_find_package\"] = \"Fontconfig\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Fontconfig\"\n\n fontconfig_file = os.path.join(self.package_folder, \"bin\", \"etc\", \"fonts\", \"fonts.conf\")\n self.output.info(\"Creating FONTCONFIG_FILE environment variable: {}\".format(fontconfig_file))\n self.env_info.FONTCONFIG_FILE = fontconfig_file\n fontconfig_path = os.path.join(self.package_folder, \"bin\", \"etc\", \"fonts\")\n self.output.info(\"Creating FONTCONFIG_PATH environment variable: {}\".format(fontconfig_path))\n self.env_info.FONTCONFIG_PATH = fontconfig_path\n", "path": "recipes/fontconfig/all/conanfile.py"}], "after_files": [{"content": "import os\nimport glob\n\nfrom conans import ConanFile, tools, AutoToolsBuildEnvironment\nfrom conans.errors import ConanInvalidConfiguration\n\n\nclass FontconfigConan(ConanFile):\n name = \"fontconfig\"\n license = \"MIT\"\n url = \"https://github.com/conan-io/conan-center-index\"\n description = \"Fontconfig is a library for configuring and customizing font access\"\n homepage = \"https://gitlab.freedesktop.org/fontconfig/fontconfig\"\n topics = (\"conan\", \"fontconfig\", \"fonts\", \"freedesktop\")\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n generators = \"pkg_config\"\n\n _autotools = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.settings.compiler == \"Visual 
Studio\":\n raise ConanInvalidConfiguration(\"Visual Studio builds are not supported.\")\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def requirements(self):\n self.requires(\"freetype/2.10.4\")\n self.requires(\"expat/2.2.10\")\n if self.settings.os == \"Linux\":\n self.requires(\"libuuid/1.0.3\")\n elif self.settings.os == \"Macos\":\n self.requires(\"libgettext/0.20.1\")\n\n def build_requirements(self):\n self.build_requires(\"gperf/3.1\")\n self.build_requires(\"pkgconf/1.7.3\")\n if tools.os_info.is_windows and not tools.get_env(\"CONAN_BASH_PATH\"):\n self.build_requires(\"msys2/20200517\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extrated_dir = self.name + \"-\" + self.version\n os.rename(extrated_dir, self._source_subfolder)\n\n def _configure_autotools(self):\n if not self._autotools:\n args = [\"--enable-static=%s\" % (\"no\" if self.options.shared else \"yes\"),\n \"--enable-shared=%s\" % (\"yes\" if self.options.shared else \"no\"),\n \"--disable-docs\"]\n args.append(\"--sysconfdir=%s\" % tools.unix_path(os.path.join(self.package_folder, \"bin\", \"etc\")))\n args.append(\"--datadir=%s\" % tools.unix_path(os.path.join(self.package_folder, \"bin\", \"share\")))\n args.append(\"--datarootdir=%s\" % tools.unix_path(os.path.join(self.package_folder, \"bin\", \"share\")))\n args.append(\"--localstatedir=%s\" % tools.unix_path(os.path.join(self.package_folder, \"bin\", \"var\")))\n self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)\n self._autotools.libs = []\n self._autotools.configure(configure_dir=self._source_subfolder, args=args)\n tools.replace_in_file(\"Makefile\", \"po-conf test\", \"po-conf\")\n return self._autotools\n\n def _patch_files(self):\n # - fontconfig requires libtool version number, change it for the corresponding freetype one\n tools.replace_in_file(os.path.join(self._source_subfolder, 'configure'), '21.0.15', '2.8.1')\n # disable fc-cache test to enable cross compilation but also builds with shared libraries on MacOS\n tools.replace_in_file(\n os.path.join(self._source_subfolder, 'Makefile.in'),\n '@CROSS_COMPILING_TRUE@RUN_FC_CACHE_TEST = false',\n 'RUN_FC_CACHE_TEST=false'\n )\n\n def build(self):\n # Patch files from dependencies\n self._patch_files()\n with tools.run_environment(self):\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n self.copy(\"COPYING\", dst=\"licenses\", src=self._source_subfolder)\n with tools.run_environment(self):\n autotools = self._configure_autotools()\n autotools.install()\n os.unlink(os.path.join(self.package_folder, \"lib\", \"libfontconfig.la\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n for f in glob.glob(os.path.join(self.package_folder, \"bin\", \"etc\", \"fonts\", \"conf.d\", \"*.conf\")):\n if os.path.islink(f):\n os.unlink(f)\n for def_file in glob.glob(os.path.join(self.package_folder, \"lib\", \"*.def\")):\n os.remove(def_file)\n\n def package_info(self):\n self.cpp_info.libs = [\"fontconfig\"]\n if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n self.cpp_info.system_libs.extend([\"m\", \"pthread\"])\n self.cpp_info.names[\"cmake_find_package\"] = \"Fontconfig\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Fontconfig\"\n\n fontconfig_file = os.path.join(self.package_folder, \"bin\", \"etc\", \"fonts\", \"fonts.conf\")\n self.output.info(\"Creating FONTCONFIG_FILE environment variable: 
{}\".format(fontconfig_file))\n self.env_info.FONTCONFIG_FILE = fontconfig_file\n fontconfig_path = os.path.join(self.package_folder, \"bin\", \"etc\", \"fonts\")\n self.output.info(\"Creating FONTCONFIG_PATH environment variable: {}\".format(fontconfig_path))\n self.env_info.FONTCONFIG_PATH = fontconfig_path\n", "path": "recipes/fontconfig/all/conanfile.py"}]}
| 3,111 | 206 |
gh_patches_debug_31305
|
rasdani/github-patches
|
git_diff
|
sosreport__sos-2660
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[ssh] Can ssh plugin check the permissions set for /home/*/.ssh files?
Hello!
When users set wrong permissions on files in their ~/.ssh/ folder, e.g. they grant write permission on `~/.ssh/authorized_keys` to `other`, the SSH server will refuse to accept connections for this user.
I think it would be nice for the [ssh] plugin to check whether the permissions set on files in the `.ssh` folders of system users are correct, or whether they are corrupted in some way.
A very simple solution for that would be to just run `ls -l .ssh/` in every home directory. Would it be OK to extend the ssh plugin to do this? Would it be better to have a separate plugin do this?
Let me know what you think, and I'll give it a try if that's ok :)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sos/report/plugins/ssh.py`
Content:
```
1 # Copyright (C) 2007 Red Hat, Inc., Eugene Teo <[email protected]>
2
3 # This file is part of the sos project: https://github.com/sosreport/sos
4 #
5 # This copyrighted material is made available to anyone wishing to use,
6 # modify, copy, or redistribute it subject to the terms and conditions of
7 # version 2 of the GNU General Public License.
8 #
9 # See the LICENSE file in the source distribution for further information.
10
11 from sos.report.plugins import Plugin, IndependentPlugin
12
13
14 class Ssh(Plugin, IndependentPlugin):
15
16 short_desc = 'Secure shell service'
17
18 plugin_name = 'ssh'
19 profiles = ('services', 'security', 'system', 'identity')
20
21 def setup(self):
22
23 self.add_file_tags({
24 '/etc/ssh/sshd_config': 'sshd_config',
25 '/etc/ssh/ssh_config': 'ssh_config'
26 })
27
28 sshcfgs = [
29 "/etc/ssh/ssh_config",
30 "/etc/ssh/sshd_config"
31 ]
32
33 # Include main config files
34 self.add_copy_spec(sshcfgs)
35
36 # Read configs for any includes and copy those
37 try:
38 for sshcfg in sshcfgs:
39 tag = sshcfg.split('/')[-1]
40 with open(sshcfg, 'r') as cfgfile:
41 for line in cfgfile:
42 # skip empty lines and comments
43 if len(line.split()) == 0 or line.startswith('#'):
44 continue
45 # ssh_config keywords are allowed as case-insensitive
46 if line.lower().startswith('include'):
47 confarg = line.split()
48 self.add_copy_spec(confarg[1], tags=tag)
49 except Exception:
50 pass
51
52
53 # vim: set et ts=4 sw=4 :
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sos/report/plugins/ssh.py b/sos/report/plugins/ssh.py
--- a/sos/report/plugins/ssh.py
+++ b/sos/report/plugins/ssh.py
@@ -9,6 +9,7 @@
# See the LICENSE file in the source distribution for further information.
from sos.report.plugins import Plugin, IndependentPlugin
+import os.path
class Ssh(Plugin, IndependentPlugin):
@@ -33,6 +34,10 @@
# Include main config files
self.add_copy_spec(sshcfgs)
+ self.included_configs(sshcfgs)
+ self.user_ssh_files_permissions()
+
+ def included_configs(self, sshcfgs):
# Read configs for any includes and copy those
try:
for sshcfg in sshcfgs:
@@ -49,5 +54,33 @@
except Exception:
pass
+ def user_ssh_files_permissions(self):
+ """
+ Iterate over .ssh folders in user homes to see their permissions.
+
+ Bad permissions can prevent SSH from allowing access to given user.
+ """
+ users_data = self.exec_cmd('getent passwd')
+
+ if users_data['status']:
+ # If getent fails, fallback to just reading /etc/passwd
+ try:
+ with open('/etc/passwd') as passwd_file:
+ users_data_lines = passwd_file.readlines()
+ except Exception:
+ # If we can't read /etc/passwd, then there's something wrong.
+ self._log_error("Couldn't read /etc/passwd")
+ return
+ else:
+ users_data_lines = users_data['output'].splitlines()
+
+ # Read the home paths of users in the system and check the ~/.ssh dirs
+ for usr_line in users_data_lines:
+ try:
+ home_dir = os.path.join(usr_line.split(':')[5], '.ssh')
+ if self.path_isdir(home_dir):
+ self.add_cmd_output('ls -laZ {}'.format(home_dir))
+ except IndexError:
+ pass
# vim: set et ts=4 sw=4 :
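
A self-contained sketch of the same idea outside the sos plugin API: read the home directories from /etc/passwd, locate each user's `~/.ssh`, run the same `ls -laZ` listing the patch collects, and additionally flag group/other-writable files, the modes that make sshd (with StrictModes) reject the user's `authorized_keys`. Everything beyond the passwd parsing and the `ls -laZ` call is illustrative only.

```python
import os
import stat
import subprocess

def ssh_dirs_from_passwd(passwd_path="/etc/passwd"):
    """Yield each user's ~/.ssh directory taken from the passwd home-directory field."""
    with open(passwd_path) as fh:
        for line in fh:
            fields = line.rstrip("\n").split(":")
            if len(fields) >= 6 and fields[5]:
                candidate = os.path.join(fields[5], ".ssh")
                if os.path.isdir(candidate):
                    yield candidate

for ssh_dir in ssh_dirs_from_passwd():
    # Same listing the plugin adds; -Z also shows the SELinux context on Linux.
    listing = subprocess.run(["ls", "-laZ", ssh_dir], capture_output=True, text=True)
    print(listing.stdout)
    for name in os.listdir(ssh_dir):
        path = os.path.join(ssh_dir, name)
        mode = stat.S_IMODE(os.stat(path).st_mode)
        if mode & 0o022:  # writable by group or other -- sshd refuses such authorized_keys
            print("suspicious mode {} on {}".format(oct(mode), path))
```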
|
{"golden_diff": "diff --git a/sos/report/plugins/ssh.py b/sos/report/plugins/ssh.py\n--- a/sos/report/plugins/ssh.py\n+++ b/sos/report/plugins/ssh.py\n@@ -9,6 +9,7 @@\n # See the LICENSE file in the source distribution for further information.\n \n from sos.report.plugins import Plugin, IndependentPlugin\n+import os.path\n \n \n class Ssh(Plugin, IndependentPlugin):\n@@ -33,6 +34,10 @@\n # Include main config files\n self.add_copy_spec(sshcfgs)\n \n+ self.included_configs(sshcfgs)\n+ self.user_ssh_files_permissions()\n+\n+ def included_configs(self, sshcfgs):\n # Read configs for any includes and copy those\n try:\n for sshcfg in sshcfgs:\n@@ -49,5 +54,33 @@\n except Exception:\n pass\n \n+ def user_ssh_files_permissions(self):\n+ \"\"\"\n+ Iterate over .ssh folders in user homes to see their permissions.\n+\n+ Bad permissions can prevent SSH from allowing access to given user.\n+ \"\"\"\n+ users_data = self.exec_cmd('getent passwd')\n+\n+ if users_data['status']:\n+ # If getent fails, fallback to just reading /etc/passwd\n+ try:\n+ with open('/etc/passwd') as passwd_file:\n+ users_data_lines = passwd_file.readlines()\n+ except Exception:\n+ # If we can't read /etc/passwd, then there's something wrong.\n+ self._log_error(\"Couldn't read /etc/passwd\")\n+ return\n+ else:\n+ users_data_lines = users_data['output'].splitlines()\n+\n+ # Read the home paths of users in the system and check the ~/.ssh dirs\n+ for usr_line in users_data_lines:\n+ try:\n+ home_dir = os.path.join(usr_line.split(':')[5], '.ssh')\n+ if self.path_isdir(home_dir):\n+ self.add_cmd_output('ls -laZ {}'.format(home_dir))\n+ except IndexError:\n+ pass\n \n # vim: set et ts=4 sw=4 :\n", "issue": "[ssh] Can ssh plugin check the permissions set for /home/*/.ssh files?\nHello!\r\n\r\nWhen users set wrong permissions to files in their ~/.ssh/ folder, i.e. they set write permission for `~/.ssh/authenticated_keys` for `other`, the SSH server will refuse to accept connections for this user.\r\n\r\nI think it would be nice for the [ssh] plugin to check, if the permissions set for files in the `.ssh` folders of system users are correct, or if they are corrupted in some way. \r\n\r\nA very simple solution for that would be to just run `ls -l .ssh/` in every home directory. Would it be OK to extend the ssh plugin to do this? 
Would it be better to have a separate plugin do this?\r\n\r\nLet me know what you think, and I'll give it a try if that's ok :)\n", "before_files": [{"content": "# Copyright (C) 2007 Red Hat, Inc., Eugene Teo <[email protected]>\n\n# This file is part of the sos project: https://github.com/sosreport/sos\n#\n# This copyrighted material is made available to anyone wishing to use,\n# modify, copy, or redistribute it subject to the terms and conditions of\n# version 2 of the GNU General Public License.\n#\n# See the LICENSE file in the source distribution for further information.\n\nfrom sos.report.plugins import Plugin, IndependentPlugin\n\n\nclass Ssh(Plugin, IndependentPlugin):\n\n short_desc = 'Secure shell service'\n\n plugin_name = 'ssh'\n profiles = ('services', 'security', 'system', 'identity')\n\n def setup(self):\n\n self.add_file_tags({\n '/etc/ssh/sshd_config': 'sshd_config',\n '/etc/ssh/ssh_config': 'ssh_config'\n })\n\n sshcfgs = [\n \"/etc/ssh/ssh_config\",\n \"/etc/ssh/sshd_config\"\n ]\n\n # Include main config files\n self.add_copy_spec(sshcfgs)\n\n # Read configs for any includes and copy those\n try:\n for sshcfg in sshcfgs:\n tag = sshcfg.split('/')[-1]\n with open(sshcfg, 'r') as cfgfile:\n for line in cfgfile:\n # skip empty lines and comments\n if len(line.split()) == 0 or line.startswith('#'):\n continue\n # ssh_config keywords are allowed as case-insensitive\n if line.lower().startswith('include'):\n confarg = line.split()\n self.add_copy_spec(confarg[1], tags=tag)\n except Exception:\n pass\n\n\n# vim: set et ts=4 sw=4 :\n", "path": "sos/report/plugins/ssh.py"}], "after_files": [{"content": "# Copyright (C) 2007 Red Hat, Inc., Eugene Teo <[email protected]>\n\n# This file is part of the sos project: https://github.com/sosreport/sos\n#\n# This copyrighted material is made available to anyone wishing to use,\n# modify, copy, or redistribute it subject to the terms and conditions of\n# version 2 of the GNU General Public License.\n#\n# See the LICENSE file in the source distribution for further information.\n\nfrom sos.report.plugins import Plugin, IndependentPlugin\nimport os.path\n\n\nclass Ssh(Plugin, IndependentPlugin):\n\n short_desc = 'Secure shell service'\n\n plugin_name = 'ssh'\n profiles = ('services', 'security', 'system', 'identity')\n\n def setup(self):\n\n self.add_file_tags({\n '/etc/ssh/sshd_config': 'sshd_config',\n '/etc/ssh/ssh_config': 'ssh_config'\n })\n\n sshcfgs = [\n \"/etc/ssh/ssh_config\",\n \"/etc/ssh/sshd_config\"\n ]\n\n # Include main config files\n self.add_copy_spec(sshcfgs)\n\n self.included_configs(sshcfgs)\n self.user_ssh_files_permissions()\n\n def included_configs(self, sshcfgs):\n # Read configs for any includes and copy those\n try:\n for sshcfg in sshcfgs:\n tag = sshcfg.split('/')[-1]\n with open(sshcfg, 'r') as cfgfile:\n for line in cfgfile:\n # skip empty lines and comments\n if len(line.split()) == 0 or line.startswith('#'):\n continue\n # ssh_config keywords are allowed as case-insensitive\n if line.lower().startswith('include'):\n confarg = line.split()\n self.add_copy_spec(confarg[1], tags=tag)\n except Exception:\n pass\n\n def user_ssh_files_permissions(self):\n \"\"\"\n Iterate over .ssh folders in user homes to see their permissions.\n\n Bad permissions can prevent SSH from allowing access to given user.\n \"\"\"\n users_data = self.exec_cmd('getent passwd')\n\n if users_data['status']:\n # If getent fails, fallback to just reading /etc/passwd\n try:\n with open('/etc/passwd') as passwd_file:\n users_data_lines = 
passwd_file.readlines()\n except Exception:\n # If we can't read /etc/passwd, then there's something wrong.\n self._log_error(\"Couldn't read /etc/passwd\")\n return\n else:\n users_data_lines = users_data['output'].splitlines()\n\n # Read the home paths of users in the system and check the ~/.ssh dirs\n for usr_line in users_data_lines:\n try:\n home_dir = os.path.join(usr_line.split(':')[5], '.ssh')\n if self.path_isdir(home_dir):\n self.add_cmd_output('ls -laZ {}'.format(home_dir))\n except IndexError:\n pass\n\n# vim: set et ts=4 sw=4 :\n", "path": "sos/report/plugins/ssh.py"}]}
| 925 | 465 |
gh_patches_debug_23795
|
rasdani/github-patches
|
git_diff
|
rasterio__rasterio-1935
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MemoryFile and seek
# Expected behavior and actual behavior.
I am trying to download a file from AWS to a MemoryFile using boto3 (a Sentinel-2 JP2). I would like to use `client.download_fileobj()` because it gives me finer control over the download:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-example-download-file.html
## Steps to reproduce the problem.
```
import rasterio.io
import boto3
with rasterio.io.MemoryFile() as memfile:
boto3.client('s3').download_fileobj(
Fileobj=memfile,
Bucket='sentinel-s2-l1c',
Key='tiles/15/R/TN/2019/3/28/0/B02.jp2',
ExtraArgs={'RequestPayer': 'requester'},
Config=None)
```
```
Raises:
```
ValueError: seek position past end of file: 8388608
```
This works fine with io.BytesIO or the GCP storage client `blob.download_to_file()`.
MemoryFile handles seeks to positions past the current end of the data differently from `io.BytesIO` (the problem is probably linked to the multithreading in the AWS case, where boto3 seeks to chunk offsets that have not been written yet).
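A minimal sketch of that difference (not part of the original report beyond the traceback above), assuming rasterio 1.1.4:

```python
import io

import rasterio.io

# io.BytesIO happily seeks past the current end of the buffer
buf = io.BytesIO()
print(buf.seek(8388608))      # -> 8388608

# MemoryFile rejects the same seek, as reported above
with rasterio.io.MemoryFile() as memfile:
    memfile.seek(8388608)     # ValueError: seek position past end of file: 8388608
```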
## Operating system
Ubuntu 18
## Rasterio version and provenance
rasterio-1.1.4-cp38-cp38-manylinux1_x86_64.whl
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/io.py`
Content:
```
1 """Classes capable of reading and writing datasets
2
3 Instances of these classes are called dataset objects.
4 """
5
6 import logging
7
8 from rasterio._base import (
9 get_dataset_driver, driver_can_create, driver_can_create_copy)
10 from rasterio._io import (
11 DatasetReaderBase, DatasetWriterBase, BufferedDatasetWriterBase,
12 MemoryFileBase)
13 from rasterio.windows import WindowMethodsMixin
14 from rasterio.env import ensure_env, env_ctx_if_needed
15 from rasterio.transform import TransformMethodsMixin
16 from rasterio.path import UnparsedPath
17
18
19 log = logging.getLogger(__name__)
20
21
22 class DatasetReader(DatasetReaderBase, WindowMethodsMixin,
23 TransformMethodsMixin):
24 """An unbuffered data and metadata reader"""
25
26 def __repr__(self):
27 return "<{} DatasetReader name='{}' mode='{}'>".format(
28 self.closed and 'closed' or 'open', self.name, self.mode)
29
30
31 class DatasetWriter(DatasetWriterBase, WindowMethodsMixin,
32 TransformMethodsMixin):
33 """An unbuffered data and metadata writer. Its methods write data
34 directly to disk.
35 """
36
37 def __repr__(self):
38 return "<{} DatasetWriter name='{}' mode='{}'>".format(
39 self.closed and 'closed' or 'open', self.name, self.mode)
40
41
42 class BufferedDatasetWriter(BufferedDatasetWriterBase, WindowMethodsMixin,
43 TransformMethodsMixin):
44 """Maintains data and metadata in a buffer, writing to disk or
45 network only when `close()` is called.
46
47 This allows incremental updates to datasets using formats that don't
48 otherwise support updates, such as JPEG.
49 """
50
51 def __repr__(self):
52 return "<{} BufferedDatasetWriter name='{}' mode='{}'>".format(
53 self.closed and 'closed' or 'open', self.name, self.mode)
54
55
56 class MemoryFile(MemoryFileBase):
57 """A BytesIO-like object, backed by an in-memory file.
58
59 This allows formatted files to be read and written without I/O.
60
61 A MemoryFile created with initial bytes becomes immutable. A
62 MemoryFile created without initial bytes may be written to using
63 either file-like or dataset interfaces.
64
65 Examples
66 --------
67
68 A GeoTIFF can be loaded in memory and accessed using the GeoTIFF
69 format driver
70
71 >>> with open('tests/data/RGB.byte.tif', 'rb') as f, MemoryFile(f) as memfile:
72 ... with memfile.open() as src:
73 ... pprint.pprint(src.profile)
74 ...
75 {'count': 3,
76 'crs': CRS({'init': 'epsg:32618'}),
77 'driver': 'GTiff',
78 'dtype': 'uint8',
79 'height': 718,
80 'interleave': 'pixel',
81 'nodata': 0.0,
82 'tiled': False,
83 'transform': Affine(300.0379266750948, 0.0, 101985.0,
84 0.0, -300.041782729805, 2826915.0),
85 'width': 791}
86
87 """
88 def __init__(self, file_or_bytes=None, filename=None, ext=''):
89 """Create a new file in memory
90
91 Parameters
92 ----------
93 file_or_bytes : file-like object or bytes, optional
94 File or bytes holding initial data.
95 filename : str, optional
96 An optional filename. A unique one will otherwise be generated.
97 ext : str, optional
98 An optional extension.
99
100 Returns
101 -------
102 MemoryFile
103 """
104 super(MemoryFile, self).__init__(
105 file_or_bytes=file_or_bytes, filename=filename, ext=ext)
106
107 @ensure_env
108 def open(self, driver=None, width=None, height=None, count=None, crs=None,
109 transform=None, dtype=None, nodata=None, sharing=False, **kwargs):
110 """Open the file and return a Rasterio dataset object.
111
112 If data has already been written, the file is opened in 'r'
113 mode. Otherwise, the file is opened in 'w' mode.
114
115 Parameters
116 ----------
117 Note well that there is no `path` parameter: a `MemoryFile`
118 contains a single dataset and there is no need to specify a
119 path.
120
121 Other parameters are optional and have the same semantics as the
122 parameters of `rasterio.open()`.
123 """
124 mempath = UnparsedPath(self.name)
125
126 if self.closed:
127 raise IOError("I/O operation on closed file.")
128 if self.exists():
129 log.debug("VSI path: {}".format(mempath.path))
130 return DatasetReader(mempath, driver=driver, sharing=sharing, **kwargs)
131 else:
132 writer = get_writer_for_driver(driver)
133 return writer(mempath, 'w+', driver=driver, width=width,
134 height=height, count=count, crs=crs,
135 transform=transform, dtype=dtype,
136 nodata=nodata, sharing=sharing, **kwargs)
137
138 def __enter__(self):
139 self._env = env_ctx_if_needed()
140 self._env.__enter__()
141 return self
142
143 def __exit__(self, *args, **kwargs):
144 self._env.__exit__()
145 self.close()
146
147
148 class ZipMemoryFile(MemoryFile):
149 """A read-only BytesIO-like object backed by an in-memory zip file.
150
151 This allows a zip file containing formatted files to be read
152 without I/O.
153 """
154
155 def __init__(self, file_or_bytes=None):
156 super(ZipMemoryFile, self).__init__(file_or_bytes, ext='zip')
157
158 @ensure_env
159 def open(self, path, driver=None, sharing=False, **kwargs):
160 """Open a dataset within the zipped stream.
161
162 Parameters
163 ----------
164 path : str
165 Path to a dataset in the zip file, relative to the root of the
166 archive.
167
168 Other parameters are optional and have the same semantics as the
169 parameters of `rasterio.open()`.
170
171 Returns
172 -------
173 A Rasterio dataset object
174 """
175 zippath = UnparsedPath('/vsizip{0}/{1}'.format(self.name, path.lstrip('/')))
176
177 if self.closed:
178 raise IOError("I/O operation on closed file.")
179 return DatasetReader(zippath, driver=driver, sharing=sharing, **kwargs)
180
181
182 def get_writer_for_driver(driver):
183 """Return the writer class appropriate for the specified driver."""
184 if not driver:
185 raise ValueError("'driver' is required to write dataset.")
186 cls = None
187 if driver_can_create(driver):
188 cls = DatasetWriter
189 elif driver_can_create_copy(driver): # pragma: no branch
190 cls = BufferedDatasetWriter
191 return cls
192
193
194 def get_writer_for_path(path, driver=None):
195 """Return the writer class appropriate for the existing dataset."""
196 if not driver:
197 driver = get_dataset_driver(path)
198 return get_writer_for_driver(driver)
199
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rasterio/io.py b/rasterio/io.py
--- a/rasterio/io.py
+++ b/rasterio/io.py
@@ -85,7 +85,7 @@
'width': 791}
"""
- def __init__(self, file_or_bytes=None, filename=None, ext=''):
+ def __init__(self, file_or_bytes=None, dirname=None, filename=None, ext=''):
"""Create a new file in memory
Parameters
@@ -102,7 +102,7 @@
MemoryFile
"""
super(MemoryFile, self).__init__(
- file_or_bytes=file_or_bytes, filename=filename, ext=ext)
+ file_or_bytes=file_or_bytes, dirname=dirname, filename=filename, ext=ext)
@ensure_env
def open(self, driver=None, width=None, height=None, count=None, crs=None,
@@ -125,7 +125,7 @@
if self.closed:
raise IOError("I/O operation on closed file.")
- if self.exists():
+ if len(self) > 0:
log.debug("VSI path: {}".format(mempath.path))
return DatasetReader(mempath, driver=driver, sharing=sharing, **kwargs)
else:
|
{"golden_diff": "diff --git a/rasterio/io.py b/rasterio/io.py\n--- a/rasterio/io.py\n+++ b/rasterio/io.py\n@@ -85,7 +85,7 @@\n 'width': 791}\n \n \"\"\"\n- def __init__(self, file_or_bytes=None, filename=None, ext=''):\n+ def __init__(self, file_or_bytes=None, dirname=None, filename=None, ext=''):\n \"\"\"Create a new file in memory\n \n Parameters\n@@ -102,7 +102,7 @@\n MemoryFile\n \"\"\"\n super(MemoryFile, self).__init__(\n- file_or_bytes=file_or_bytes, filename=filename, ext=ext)\n+ file_or_bytes=file_or_bytes, dirname=dirname, filename=filename, ext=ext)\n \n @ensure_env\n def open(self, driver=None, width=None, height=None, count=None, crs=None,\n@@ -125,7 +125,7 @@\n \n if self.closed:\n raise IOError(\"I/O operation on closed file.\")\n- if self.exists():\n+ if len(self) > 0:\n log.debug(\"VSI path: {}\".format(mempath.path))\n return DatasetReader(mempath, driver=driver, sharing=sharing, **kwargs)\n else:\n", "issue": "MemoryFile and seek\n# Expected behavior and actual behavior.\r\n\r\nI am trying to download a file from AWS to a MemoryFile using boto3 (Sentinel 2 JP2). I would like to use `client.download_fileobj()` because I have finer control over the download : \r\n\r\nhttps://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-example-download-file.html\r\n\r\n## Steps to reproduce the problem.\r\n\r\n```\r\nimport rasterio.io\r\nimport boto3\r\n\r\nwith rasterio.io.MemoryFile() as memfile:\r\n boto3.client('s3').download_fileobj(\r\n Fileobj=memfile,\r\n Bucket='sentinel-s2-l1c',\r\n Key='tiles/15/R/TN/2019/3/28/0/B02.jp2',\r\n ExtraArgs={'RequestPayer': 'requester'},\r\n Config=None)\r\n```\r\nRaises:\r\n\r\n```\r\nValueError: seek position past end of file: 8388608\r\n```\r\n\r\nThis works fine with io.BytesIO or the GCP storage client `blob.download_to_file()`.\r\nThere is a difference of behavior regarding the seek on positions w.r.t. BytesIO (the problem is probably linked to the multithreading in the AWS case?).\r\n\r\n## Operating system\r\nUbuntu 18\r\n\r\n## Rasterio version and provenance\r\nrasterio-1.1.4-cp38-cp38-manylinux1_x86_64.whl\r\n\n", "before_files": [{"content": "\"\"\"Classes capable of reading and writing datasets\n\nInstances of these classes are called dataset objects.\n\"\"\"\n\nimport logging\n\nfrom rasterio._base import (\n get_dataset_driver, driver_can_create, driver_can_create_copy)\nfrom rasterio._io import (\n DatasetReaderBase, DatasetWriterBase, BufferedDatasetWriterBase,\n MemoryFileBase)\nfrom rasterio.windows import WindowMethodsMixin\nfrom rasterio.env import ensure_env, env_ctx_if_needed\nfrom rasterio.transform import TransformMethodsMixin\nfrom rasterio.path import UnparsedPath\n\n\nlog = logging.getLogger(__name__)\n\n\nclass DatasetReader(DatasetReaderBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"An unbuffered data and metadata reader\"\"\"\n\n def __repr__(self):\n return \"<{} DatasetReader name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass DatasetWriter(DatasetWriterBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"An unbuffered data and metadata writer. 
Its methods write data\n directly to disk.\n \"\"\"\n\n def __repr__(self):\n return \"<{} DatasetWriter name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass BufferedDatasetWriter(BufferedDatasetWriterBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"Maintains data and metadata in a buffer, writing to disk or\n network only when `close()` is called.\n\n This allows incremental updates to datasets using formats that don't\n otherwise support updates, such as JPEG.\n \"\"\"\n\n def __repr__(self):\n return \"<{} BufferedDatasetWriter name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass MemoryFile(MemoryFileBase):\n \"\"\"A BytesIO-like object, backed by an in-memory file.\n\n This allows formatted files to be read and written without I/O.\n\n A MemoryFile created with initial bytes becomes immutable. A\n MemoryFile created without initial bytes may be written to using\n either file-like or dataset interfaces.\n\n Examples\n --------\n\n A GeoTIFF can be loaded in memory and accessed using the GeoTIFF\n format driver\n\n >>> with open('tests/data/RGB.byte.tif', 'rb') as f, MemoryFile(f) as memfile:\n ... with memfile.open() as src:\n ... pprint.pprint(src.profile)\n ...\n {'count': 3,\n 'crs': CRS({'init': 'epsg:32618'}),\n 'driver': 'GTiff',\n 'dtype': 'uint8',\n 'height': 718,\n 'interleave': 'pixel',\n 'nodata': 0.0,\n 'tiled': False,\n 'transform': Affine(300.0379266750948, 0.0, 101985.0,\n 0.0, -300.041782729805, 2826915.0),\n 'width': 791}\n\n \"\"\"\n def __init__(self, file_or_bytes=None, filename=None, ext=''):\n \"\"\"Create a new file in memory\n\n Parameters\n ----------\n file_or_bytes : file-like object or bytes, optional\n File or bytes holding initial data.\n filename : str, optional\n An optional filename. A unique one will otherwise be generated.\n ext : str, optional\n An optional extension.\n\n Returns\n -------\n MemoryFile\n \"\"\"\n super(MemoryFile, self).__init__(\n file_or_bytes=file_or_bytes, filename=filename, ext=ext)\n\n @ensure_env\n def open(self, driver=None, width=None, height=None, count=None, crs=None,\n transform=None, dtype=None, nodata=None, sharing=False, **kwargs):\n \"\"\"Open the file and return a Rasterio dataset object.\n\n If data has already been written, the file is opened in 'r'\n mode. 
Otherwise, the file is opened in 'w' mode.\n\n Parameters\n ----------\n Note well that there is no `path` parameter: a `MemoryFile`\n contains a single dataset and there is no need to specify a\n path.\n\n Other parameters are optional and have the same semantics as the\n parameters of `rasterio.open()`.\n \"\"\"\n mempath = UnparsedPath(self.name)\n\n if self.closed:\n raise IOError(\"I/O operation on closed file.\")\n if self.exists():\n log.debug(\"VSI path: {}\".format(mempath.path))\n return DatasetReader(mempath, driver=driver, sharing=sharing, **kwargs)\n else:\n writer = get_writer_for_driver(driver)\n return writer(mempath, 'w+', driver=driver, width=width,\n height=height, count=count, crs=crs,\n transform=transform, dtype=dtype,\n nodata=nodata, sharing=sharing, **kwargs)\n\n def __enter__(self):\n self._env = env_ctx_if_needed()\n self._env.__enter__()\n return self\n\n def __exit__(self, *args, **kwargs):\n self._env.__exit__()\n self.close()\n\n\nclass ZipMemoryFile(MemoryFile):\n \"\"\"A read-only BytesIO-like object backed by an in-memory zip file.\n\n This allows a zip file containing formatted files to be read\n without I/O.\n \"\"\"\n\n def __init__(self, file_or_bytes=None):\n super(ZipMemoryFile, self).__init__(file_or_bytes, ext='zip')\n\n @ensure_env\n def open(self, path, driver=None, sharing=False, **kwargs):\n \"\"\"Open a dataset within the zipped stream.\n\n Parameters\n ----------\n path : str\n Path to a dataset in the zip file, relative to the root of the\n archive.\n\n Other parameters are optional and have the same semantics as the\n parameters of `rasterio.open()`.\n\n Returns\n -------\n A Rasterio dataset object\n \"\"\"\n zippath = UnparsedPath('/vsizip{0}/{1}'.format(self.name, path.lstrip('/')))\n\n if self.closed:\n raise IOError(\"I/O operation on closed file.\")\n return DatasetReader(zippath, driver=driver, sharing=sharing, **kwargs)\n\n\ndef get_writer_for_driver(driver):\n \"\"\"Return the writer class appropriate for the specified driver.\"\"\"\n if not driver:\n raise ValueError(\"'driver' is required to write dataset.\")\n cls = None\n if driver_can_create(driver):\n cls = DatasetWriter\n elif driver_can_create_copy(driver): # pragma: no branch\n cls = BufferedDatasetWriter\n return cls\n\n\ndef get_writer_for_path(path, driver=None):\n \"\"\"Return the writer class appropriate for the existing dataset.\"\"\"\n if not driver:\n driver = get_dataset_driver(path)\n return get_writer_for_driver(driver)\n", "path": "rasterio/io.py"}], "after_files": [{"content": "\"\"\"Classes capable of reading and writing datasets\n\nInstances of these classes are called dataset objects.\n\"\"\"\n\nimport logging\n\nfrom rasterio._base import (\n get_dataset_driver, driver_can_create, driver_can_create_copy)\nfrom rasterio._io import (\n DatasetReaderBase, DatasetWriterBase, BufferedDatasetWriterBase,\n MemoryFileBase)\nfrom rasterio.windows import WindowMethodsMixin\nfrom rasterio.env import ensure_env, env_ctx_if_needed\nfrom rasterio.transform import TransformMethodsMixin\nfrom rasterio.path import UnparsedPath\n\n\nlog = logging.getLogger(__name__)\n\n\nclass DatasetReader(DatasetReaderBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"An unbuffered data and metadata reader\"\"\"\n\n def __repr__(self):\n return \"<{} DatasetReader name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass DatasetWriter(DatasetWriterBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"An unbuffered data and 
metadata writer. Its methods write data\n directly to disk.\n \"\"\"\n\n def __repr__(self):\n return \"<{} DatasetWriter name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass BufferedDatasetWriter(BufferedDatasetWriterBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"Maintains data and metadata in a buffer, writing to disk or\n network only when `close()` is called.\n\n This allows incremental updates to datasets using formats that don't\n otherwise support updates, such as JPEG.\n \"\"\"\n\n def __repr__(self):\n return \"<{} BufferedDatasetWriter name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass MemoryFile(MemoryFileBase):\n \"\"\"A BytesIO-like object, backed by an in-memory file.\n\n This allows formatted files to be read and written without I/O.\n\n A MemoryFile created with initial bytes becomes immutable. A\n MemoryFile created without initial bytes may be written to using\n either file-like or dataset interfaces.\n\n Examples\n --------\n\n A GeoTIFF can be loaded in memory and accessed using the GeoTIFF\n format driver\n\n >>> with open('tests/data/RGB.byte.tif', 'rb') as f, MemoryFile(f) as memfile:\n ... with memfile.open() as src:\n ... pprint.pprint(src.profile)\n ...\n {'count': 3,\n 'crs': CRS({'init': 'epsg:32618'}),\n 'driver': 'GTiff',\n 'dtype': 'uint8',\n 'height': 718,\n 'interleave': 'pixel',\n 'nodata': 0.0,\n 'tiled': False,\n 'transform': Affine(300.0379266750948, 0.0, 101985.0,\n 0.0, -300.041782729805, 2826915.0),\n 'width': 791}\n\n \"\"\"\n def __init__(self, file_or_bytes=None, dirname=None, filename=None, ext=''):\n \"\"\"Create a new file in memory\n\n Parameters\n ----------\n file_or_bytes : file-like object or bytes, optional\n File or bytes holding initial data.\n filename : str, optional\n An optional filename. A unique one will otherwise be generated.\n ext : str, optional\n An optional extension.\n\n Returns\n -------\n MemoryFile\n \"\"\"\n super(MemoryFile, self).__init__(\n file_or_bytes=file_or_bytes, dirname=dirname, filename=filename, ext=ext)\n\n @ensure_env\n def open(self, driver=None, width=None, height=None, count=None, crs=None,\n transform=None, dtype=None, nodata=None, sharing=False, **kwargs):\n \"\"\"Open the file and return a Rasterio dataset object.\n\n If data has already been written, the file is opened in 'r'\n mode. 
Otherwise, the file is opened in 'w' mode.\n\n Parameters\n ----------\n Note well that there is no `path` parameter: a `MemoryFile`\n contains a single dataset and there is no need to specify a\n path.\n\n Other parameters are optional and have the same semantics as the\n parameters of `rasterio.open()`.\n \"\"\"\n mempath = UnparsedPath(self.name)\n\n if self.closed:\n raise IOError(\"I/O operation on closed file.\")\n if len(self) > 0:\n log.debug(\"VSI path: {}\".format(mempath.path))\n return DatasetReader(mempath, driver=driver, sharing=sharing, **kwargs)\n else:\n writer = get_writer_for_driver(driver)\n return writer(mempath, 'w+', driver=driver, width=width,\n height=height, count=count, crs=crs,\n transform=transform, dtype=dtype,\n nodata=nodata, sharing=sharing, **kwargs)\n\n def __enter__(self):\n self._env = env_ctx_if_needed()\n self._env.__enter__()\n return self\n\n def __exit__(self, *args, **kwargs):\n self._env.__exit__()\n self.close()\n\n\nclass ZipMemoryFile(MemoryFile):\n \"\"\"A read-only BytesIO-like object backed by an in-memory zip file.\n\n This allows a zip file containing formatted files to be read\n without I/O.\n \"\"\"\n\n def __init__(self, file_or_bytes=None):\n super(ZipMemoryFile, self).__init__(file_or_bytes, ext='zip')\n\n @ensure_env\n def open(self, path, driver=None, sharing=False, **kwargs):\n \"\"\"Open a dataset within the zipped stream.\n\n Parameters\n ----------\n path : str\n Path to a dataset in the zip file, relative to the root of the\n archive.\n\n Other parameters are optional and have the same semantics as the\n parameters of `rasterio.open()`.\n\n Returns\n -------\n A Rasterio dataset object\n \"\"\"\n zippath = UnparsedPath('/vsizip{0}/{1}'.format(self.name, path.lstrip('/')))\n\n if self.closed:\n raise IOError(\"I/O operation on closed file.\")\n return DatasetReader(zippath, driver=driver, sharing=sharing, **kwargs)\n\n\ndef get_writer_for_driver(driver):\n \"\"\"Return the writer class appropriate for the specified driver.\"\"\"\n if not driver:\n raise ValueError(\"'driver' is required to write dataset.\")\n cls = None\n if driver_can_create(driver):\n cls = DatasetWriter\n elif driver_can_create_copy(driver): # pragma: no branch\n cls = BufferedDatasetWriter\n return cls\n\n\ndef get_writer_for_path(path, driver=None):\n \"\"\"Return the writer class appropriate for the existing dataset.\"\"\"\n if not driver:\n driver = get_dataset_driver(path)\n return get_writer_for_driver(driver)\n", "path": "rasterio/io.py"}]}
| 2,599 | 284 |
gh_patches_debug_14701
|
rasdani/github-patches
|
git_diff
|
hi-primus__optimus-1104
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Profiling bug when creating a dataframe from a dictionary using dask
**Describe the bug**
Calling `df.profile("*")` on a dataframe created using `op.create.dataframe` fails with `KeyError: 'id'`.
**To Reproduce**
```
df = op.create.dataframe({"id":[5,6,10,11,79,100]})
df.profile("*")
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optimus/engines/base/dask/dataframe.py`
Content:
```
1 from abc import abstractmethod
2
3 import dask
4 import humanize
5 from dask.distributed import Variable
6 from dask.utils import parse_bytes
7
8 from optimus.engines.base.basedataframe import BaseDataFrame
9 from optimus.engines.pandas.dataframe import PandasDataFrame
10 from optimus.helpers.functions import random_int
11 from optimus.helpers.raiseit import RaiseIt
12 from optimus.infer import is_one_element
13
14
15 class DaskBaseDataFrame(BaseDataFrame):
16
17 def __init__(self, root, data):
18 super().__init__(root, data)
19
20 def _assign(self, kw_columns):
21
22 dfd = self.root.data
23
24 if dfd.known_divisions:
25 for key in kw_columns:
26 kw_column = kw_columns[key]
27 if not is_one_element(kw_column) and not callable(kw_column) and not kw_column.known_divisions:
28 kw_columns[key] = kw_column.reset_index().set_index('index')[key]
29 return dfd.assign(**kw_columns)
30
31 @staticmethod
32 @abstractmethod
33 def _base_to_dfd(df, n_partitions):
34 """
35 Convert a dataframe from the dataframe base (pandas, cudf) in a distributed engine dataframe (dask, dask_cudf)
36 :param pdf:
37 :param n_partitions:
38 :return:
39 """
40 pass
41
42 def execute(self):
43 self.data = self.data.persist()
44 return self
45
46 def compute(self):
47 df = self.data
48 return df.compute()
49
50 def visualize(self):
51 return display(self.data.visualize())
52
53 def export(self):
54 """
55 Helper function to export all the dataframe in text format. Aimed to be used in test functions
56 :return:
57 """
58 df = self.root
59 df_data = df.to_json()
60 df_schema = df.cols.dtypes()
61
62 return f"{df_schema}, {df_data}"
63
64 def _reset_buffer(self):
65 if self.buffer:
66 Variable(self.buffer).delete()
67 self.buffer = None
68
69 def get_buffer(self):
70 return Variable(self.buffer).get() if self.buffer else None
71
72 def _buffer_window(self, input_cols, lower_bound, upper_bound):
73 def func(value):
74 return value[lower_bound:upper_bound]
75
76 return PandasDataFrame(self.data[input_cols].partitions[0].map_partitions(func).compute())
77
78 def sample(self, n=10, random=False):
79 """
80 Return a n number of sample from a dataFrame
81 :param n: Number of samples
82 :param random: if true get a semi random sample
83 :return:
84 """
85 df = self.root
86 if random is True:
87 seed = random_int()
88 elif random is False:
89 seed = 0
90 else:
91 RaiseIt.value_error(random, ["True", "False"])
92
93 rows_count = df.rows.count()
94 if n < rows_count:
95 # n/rows_count can return a number that represent less the total number we expect. multiply by 1.1
96 fraction = (n / rows_count) * 1.1
97 else:
98 fraction = 1.0
99 return self.root.new(df.data.sample(frac=fraction, random_state=seed))
100
101 def stratified_sample(self, col_name, seed: int = 1):
102 """
103 Stratified Sampling
104 :param col_name:
105 :param seed:
106 :return:
107 """
108 df = self.data
109 n = min(5, df[col_name].value_counts().min())
110 df = df.groupby(col_name).apply(lambda x: x.sample(2))
111 # df_.index = df_.index.droplevel(0)
112 return self.root.new(df)
113
114 @staticmethod
115 def pivot(index, column, values):
116 """
117 Return reshaped DataFrame organized by given index / column values.
118 :param index: Column to use to make new frame's index.
119 :param column: Column to use to make new frame's columns.
120 :param values: Column(s) to use for populating new frame's values.
121 :return:
122 """
123 raise NotImplementedError
124
125 @staticmethod
126 def melt(id_vars, value_vars, var_name="variable", value_name="value", data_type="str"):
127 """
128 Convert DataFrame from wide to long format.
129 :param id_vars: column with unique values
130 :param value_vars: Column names that are going to be converted to columns values
131 :param var_name: Column name for vars
132 :param value_name: Column name for values
133 :param data_type: All columns must have the same type. It will transform all columns to this data type.
134 :return:
135 """
136
137 raise NotImplementedError
138
139 def size(self, deep=False, format=None):
140 """
141 Get the size of a dask in bytes
142 :return:
143 """
144 df = self.data
145 result = df.memory_usage(index=True, deep=deep).sum().compute()
146 if format == "human":
147 result = humanize.naturalsize(result)
148
149 return result
150
151 def run(self):
152 """
153 This method is a very useful function to break lineage of transformations. By default Spark uses the lazy
154 evaluation approach in processing data: transformation functions are not computed into an action is called.
155 Sometimes when transformations are numerous, the computations are very extensive because the high number of
156 operations that spark needs to run in order to get the results.
157
158 Other important thing is that Apache Spark save task but not result of dataFrame, so tasks are
159 accumulated and the same situation happens.
160
161 :return:
162 """
163 df = self.data
164 df.cache().count()
165 return df
166
167 @staticmethod
168 def query(sql_expression):
169 raise NotImplementedError
170
171 def partitions(self):
172 return self.data.npartitions
173
174 @staticmethod
175 def partitioner():
176 print("Dask not support custom partitioner")
177 raise NotImplementedError
178
179 def repartition(self, n=None, *args, **kwargs):
180 dfd = self.data
181 df = self
182 if n == "auto":
183 # Follow a heuristic for partitioning a mentioned
184 # https://docs.dask.org/en/latest/best-practices.html#avoid-very-large-partitions
185 client = dask.distributed.get_client()
186 worker_memory = parse_bytes(client.cluster.worker_spec[0]["options"]["memory_limit"])
187 nthreads = client.cluster.worker_spec[0]["options"]["nthreads"]
188
189 part_recommended_size = worker_memory / nthreads / 10
190 n = int(df.size() / part_recommended_size)
191
192 # Partition can not be lower than 1
193 n = n if n < 0 else 1
194 # TODO .repartition(partition_size="100MB"). https://stackoverflow.com/questions/44657631/strategy-for-partitioning-dask-dataframes-efficiently
195 dfd = dfd.repartition(npartitions=n, *args, **kwargs)
196
197 return self.new(dfd, meta=self.meta)
198
199 @staticmethod
200 def debug():
201 """
202
203 :return:
204 """
205 raise NotImplementedError
206
207 @staticmethod
208 def create_id(column="id"):
209 """
210 Create a unique id for every row.
211 :param column: Columns to be processed
212 :return:
213 """
214
215 raise NotImplementedError
216
217 def to_dict(self, orient="records", index=True):
218 """
219 Create a dict
220 :param orient:
221 :param index: Return the series index
222 :return:
223 """
224
225 series = self.data
226 if index is True:
227 return series.compute().to_dict(orient)
228 else:
229 return series.compute().to_list()
230
231 def to_pandas(self):
232 return self.data.compute()
233
234 @property
235 def constants(self):
236 from optimus.engines.base.dask.constants import constants
237 return constants(self)
238
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/optimus/engines/base/dask/dataframe.py b/optimus/engines/base/dask/dataframe.py
--- a/optimus/engines/base/dask/dataframe.py
+++ b/optimus/engines/base/dask/dataframe.py
@@ -25,7 +25,13 @@
for key in kw_columns:
kw_column = kw_columns[key]
if not is_one_element(kw_column) and not callable(kw_column) and not kw_column.known_divisions:
- kw_columns[key] = kw_column.reset_index().set_index('index')[key]
+ _dfd = kw_column.reset_index().set_index('index')
+ if key in _dfd:
+ # the incoming series has the same column key
+ kw_columns[key] = _dfd[key]
+ else:
+ # the incoming series has no column key
+ kw_columns[key] = _dfd[0]
return dfd.assign(**kw_columns)
@staticmethod
|
{"golden_diff": "diff --git a/optimus/engines/base/dask/dataframe.py b/optimus/engines/base/dask/dataframe.py\n--- a/optimus/engines/base/dask/dataframe.py\n+++ b/optimus/engines/base/dask/dataframe.py\n@@ -25,7 +25,13 @@\n for key in kw_columns:\n kw_column = kw_columns[key]\n if not is_one_element(kw_column) and not callable(kw_column) and not kw_column.known_divisions:\n- kw_columns[key] = kw_column.reset_index().set_index('index')[key]\n+ _dfd = kw_column.reset_index().set_index('index')\n+ if key in _dfd:\n+ # the incoming series has the same column key\n+ kw_columns[key] = _dfd[key]\n+ else:\n+ # the incoming series has no column key\n+ kw_columns[key] = _dfd[0]\n return dfd.assign(**kw_columns)\n \n @staticmethod\n", "issue": "Profiling bug when creating a dataframe from a dictionary using dask\n**Describe the bug**\r\ncalling `df.profile(\"*\")` for a dataframe created using `op.create.dataframe` causes an error `KeyError: 'id'`.\r\n\r\n**To Reproduce**\r\n```\r\ndf = op.create.dataframe({\"id\":[5,6,10,11,79,100]})\r\ndf.profile(\"*\")\r\n```\r\n\n", "before_files": [{"content": "from abc import abstractmethod\n\nimport dask\nimport humanize\nfrom dask.distributed import Variable\nfrom dask.utils import parse_bytes\n\nfrom optimus.engines.base.basedataframe import BaseDataFrame\nfrom optimus.engines.pandas.dataframe import PandasDataFrame\nfrom optimus.helpers.functions import random_int\nfrom optimus.helpers.raiseit import RaiseIt\nfrom optimus.infer import is_one_element\n\n\nclass DaskBaseDataFrame(BaseDataFrame):\n\n def __init__(self, root, data):\n super().__init__(root, data)\n\n def _assign(self, kw_columns):\n\n dfd = self.root.data\n\n if dfd.known_divisions:\n for key in kw_columns:\n kw_column = kw_columns[key]\n if not is_one_element(kw_column) and not callable(kw_column) and not kw_column.known_divisions:\n kw_columns[key] = kw_column.reset_index().set_index('index')[key]\n return dfd.assign(**kw_columns)\n\n @staticmethod\n @abstractmethod\n def _base_to_dfd(df, n_partitions):\n \"\"\"\n Convert a dataframe from the dataframe base (pandas, cudf) in a distributed engine dataframe (dask, dask_cudf)\n :param pdf:\n :param n_partitions:\n :return:\n \"\"\"\n pass\n\n def execute(self):\n self.data = self.data.persist()\n return self\n\n def compute(self):\n df = self.data\n return df.compute()\n\n def visualize(self):\n return display(self.data.visualize())\n\n def export(self):\n \"\"\"\n Helper function to export all the dataframe in text format. 
Aimed to be used in test functions\n :return:\n \"\"\"\n df = self.root\n df_data = df.to_json()\n df_schema = df.cols.dtypes()\n\n return f\"{df_schema}, {df_data}\"\n\n def _reset_buffer(self):\n if self.buffer:\n Variable(self.buffer).delete()\n self.buffer = None\n\n def get_buffer(self):\n return Variable(self.buffer).get() if self.buffer else None\n\n def _buffer_window(self, input_cols, lower_bound, upper_bound):\n def func(value):\n return value[lower_bound:upper_bound]\n\n return PandasDataFrame(self.data[input_cols].partitions[0].map_partitions(func).compute())\n\n def sample(self, n=10, random=False):\n \"\"\"\n Return a n number of sample from a dataFrame\n :param n: Number of samples\n :param random: if true get a semi random sample\n :return:\n \"\"\"\n df = self.root\n if random is True:\n seed = random_int()\n elif random is False:\n seed = 0\n else:\n RaiseIt.value_error(random, [\"True\", \"False\"])\n\n rows_count = df.rows.count()\n if n < rows_count:\n # n/rows_count can return a number that represent less the total number we expect. multiply by 1.1\n fraction = (n / rows_count) * 1.1\n else:\n fraction = 1.0\n return self.root.new(df.data.sample(frac=fraction, random_state=seed))\n\n def stratified_sample(self, col_name, seed: int = 1):\n \"\"\"\n Stratified Sampling\n :param col_name:\n :param seed:\n :return:\n \"\"\"\n df = self.data\n n = min(5, df[col_name].value_counts().min())\n df = df.groupby(col_name).apply(lambda x: x.sample(2))\n # df_.index = df_.index.droplevel(0)\n return self.root.new(df)\n\n @staticmethod\n def pivot(index, column, values):\n \"\"\"\n Return reshaped DataFrame organized by given index / column values.\n :param index: Column to use to make new frame's index.\n :param column: Column to use to make new frame's columns.\n :param values: Column(s) to use for populating new frame's values.\n :return:\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def melt(id_vars, value_vars, var_name=\"variable\", value_name=\"value\", data_type=\"str\"):\n \"\"\"\n Convert DataFrame from wide to long format.\n :param id_vars: column with unique values\n :param value_vars: Column names that are going to be converted to columns values\n :param var_name: Column name for vars\n :param value_name: Column name for values\n :param data_type: All columns must have the same type. It will transform all columns to this data type.\n :return:\n \"\"\"\n\n raise NotImplementedError\n\n def size(self, deep=False, format=None):\n \"\"\"\n Get the size of a dask in bytes\n :return:\n \"\"\"\n df = self.data\n result = df.memory_usage(index=True, deep=deep).sum().compute()\n if format == \"human\":\n result = humanize.naturalsize(result)\n\n return result\n\n def run(self):\n \"\"\"\n This method is a very useful function to break lineage of transformations. 
By default Spark uses the lazy\n evaluation approach in processing data: transformation functions are not computed into an action is called.\n Sometimes when transformations are numerous, the computations are very extensive because the high number of\n operations that spark needs to run in order to get the results.\n\n Other important thing is that Apache Spark save task but not result of dataFrame, so tasks are\n accumulated and the same situation happens.\n\n :return:\n \"\"\"\n df = self.data\n df.cache().count()\n return df\n\n @staticmethod\n def query(sql_expression):\n raise NotImplementedError\n\n def partitions(self):\n return self.data.npartitions\n\n @staticmethod\n def partitioner():\n print(\"Dask not support custom partitioner\")\n raise NotImplementedError\n\n def repartition(self, n=None, *args, **kwargs):\n dfd = self.data\n df = self\n if n == \"auto\":\n # Follow a heuristic for partitioning a mentioned\n # https://docs.dask.org/en/latest/best-practices.html#avoid-very-large-partitions\n client = dask.distributed.get_client()\n worker_memory = parse_bytes(client.cluster.worker_spec[0][\"options\"][\"memory_limit\"])\n nthreads = client.cluster.worker_spec[0][\"options\"][\"nthreads\"]\n\n part_recommended_size = worker_memory / nthreads / 10\n n = int(df.size() / part_recommended_size)\n\n # Partition can not be lower than 1\n n = n if n < 0 else 1\n # TODO .repartition(partition_size=\"100MB\"). https://stackoverflow.com/questions/44657631/strategy-for-partitioning-dask-dataframes-efficiently\n dfd = dfd.repartition(npartitions=n, *args, **kwargs)\n\n return self.new(dfd, meta=self.meta)\n\n @staticmethod\n def debug():\n \"\"\"\n\n :return:\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def create_id(column=\"id\"):\n \"\"\"\n Create a unique id for every row.\n :param column: Columns to be processed\n :return:\n \"\"\"\n\n raise NotImplementedError\n\n def to_dict(self, orient=\"records\", index=True):\n \"\"\"\n Create a dict\n :param orient:\n :param index: Return the series index\n :return:\n \"\"\"\n\n series = self.data\n if index is True:\n return series.compute().to_dict(orient)\n else:\n return series.compute().to_list()\n\n def to_pandas(self):\n return self.data.compute()\n\n @property\n def constants(self):\n from optimus.engines.base.dask.constants import constants\n return constants(self)\n", "path": "optimus/engines/base/dask/dataframe.py"}], "after_files": [{"content": "from abc import abstractmethod\n\nimport dask\nimport humanize\nfrom dask.distributed import Variable\nfrom dask.utils import parse_bytes\n\nfrom optimus.engines.base.basedataframe import BaseDataFrame\nfrom optimus.engines.pandas.dataframe import PandasDataFrame\nfrom optimus.helpers.functions import random_int\nfrom optimus.helpers.raiseit import RaiseIt\nfrom optimus.infer import is_one_element\n\n\nclass DaskBaseDataFrame(BaseDataFrame):\n\n def __init__(self, root, data):\n super().__init__(root, data)\n\n def _assign(self, kw_columns):\n\n dfd = self.root.data\n\n if dfd.known_divisions:\n for key in kw_columns:\n kw_column = kw_columns[key]\n if not is_one_element(kw_column) and not callable(kw_column) and not kw_column.known_divisions:\n _dfd = kw_column.reset_index().set_index('index')\n if key in _dfd:\n # the incoming series has the same column key\n kw_columns[key] = _dfd[key]\n else:\n # the incoming series has no column key\n kw_columns[key] = _dfd[0]\n return dfd.assign(**kw_columns)\n\n @staticmethod\n @abstractmethod\n def _base_to_dfd(df, n_partitions):\n 
\"\"\"\n Convert a dataframe from the dataframe base (pandas, cudf) in a distributed engine dataframe (dask, dask_cudf)\n :param pdf:\n :param n_partitions:\n :return:\n \"\"\"\n pass\n\n def execute(self):\n self.data = self.data.persist()\n return self\n\n def compute(self):\n df = self.data\n return df.compute()\n\n def visualize(self):\n return display(self.data.visualize())\n\n def export(self):\n \"\"\"\n Helper function to export all the dataframe in text format. Aimed to be used in test functions\n :return:\n \"\"\"\n df = self.root\n df_data = df.to_json()\n df_schema = df.cols.dtypes()\n\n return f\"{df_schema}, {df_data}\"\n\n def _reset_buffer(self):\n if self.buffer:\n Variable(self.buffer).delete()\n self.buffer = None\n\n def get_buffer(self):\n return Variable(self.buffer).get() if self.buffer else None\n\n def _buffer_window(self, input_cols, lower_bound, upper_bound):\n def func(value):\n return value[lower_bound:upper_bound]\n\n return PandasDataFrame(self.data[input_cols].partitions[0].map_partitions(func).compute())\n\n def sample(self, n=10, random=False):\n \"\"\"\n Return a n number of sample from a dataFrame\n :param n: Number of samples\n :param random: if true get a semi random sample\n :return:\n \"\"\"\n df = self.root\n if random is True:\n seed = random_int()\n elif random is False:\n seed = 0\n else:\n RaiseIt.value_error(random, [\"True\", \"False\"])\n\n rows_count = df.rows.count()\n if n < rows_count:\n # n/rows_count can return a number that represent less the total number we expect. multiply by 1.1\n fraction = (n / rows_count) * 1.1\n else:\n fraction = 1.0\n return self.root.new(df.data.sample(frac=fraction, random_state=seed))\n\n def stratified_sample(self, col_name, seed: int = 1):\n \"\"\"\n Stratified Sampling\n :param col_name:\n :param seed:\n :return:\n \"\"\"\n df = self.data\n n = min(5, df[col_name].value_counts().min())\n df = df.groupby(col_name).apply(lambda x: x.sample(2))\n # df_.index = df_.index.droplevel(0)\n return self.root.new(df)\n\n @staticmethod\n def pivot(index, column, values):\n \"\"\"\n Return reshaped DataFrame organized by given index / column values.\n :param index: Column to use to make new frame's index.\n :param column: Column to use to make new frame's columns.\n :param values: Column(s) to use for populating new frame's values.\n :return:\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def melt(id_vars, value_vars, var_name=\"variable\", value_name=\"value\", data_type=\"str\"):\n \"\"\"\n Convert DataFrame from wide to long format.\n :param id_vars: column with unique values\n :param value_vars: Column names that are going to be converted to columns values\n :param var_name: Column name for vars\n :param value_name: Column name for values\n :param data_type: All columns must have the same type. It will transform all columns to this data type.\n :return:\n \"\"\"\n\n raise NotImplementedError\n\n def size(self, deep=False, format=None):\n \"\"\"\n Get the size of a dask in bytes\n :return:\n \"\"\"\n df = self.data\n result = df.memory_usage(index=True, deep=deep).sum().compute()\n if format == \"human\":\n result = humanize.naturalsize(result)\n\n return result\n\n def run(self):\n \"\"\"\n This method is a very useful function to break lineage of transformations. 
By default Spark uses the lazy\n evaluation approach in processing data: transformation functions are not computed into an action is called.\n Sometimes when transformations are numerous, the computations are very extensive because the high number of\n operations that spark needs to run in order to get the results.\n\n Other important thing is that Apache Spark save task but not result of dataFrame, so tasks are\n accumulated and the same situation happens.\n\n :return:\n \"\"\"\n df = self.data\n df.cache().count()\n return df\n\n @staticmethod\n def query(sql_expression):\n raise NotImplementedError\n\n def partitions(self):\n return self.data.npartitions\n\n @staticmethod\n def partitioner():\n print(\"Dask not support custom partitioner\")\n raise NotImplementedError\n\n def repartition(self, n=None, *args, **kwargs):\n dfd = self.data\n df = self\n if n == \"auto\":\n # Follow a heuristic for partitioning a mentioned\n # https://docs.dask.org/en/latest/best-practices.html#avoid-very-large-partitions\n client = dask.distributed.get_client()\n worker_memory = parse_bytes(client.cluster.worker_spec[0][\"options\"][\"memory_limit\"])\n nthreads = client.cluster.worker_spec[0][\"options\"][\"nthreads\"]\n\n part_recommended_size = worker_memory / nthreads / 10\n n = int(df.size() / part_recommended_size)\n\n # Partition can not be lower than 1\n n = n if n < 0 else 1\n # TODO .repartition(partition_size=\"100MB\"). https://stackoverflow.com/questions/44657631/strategy-for-partitioning-dask-dataframes-efficiently\n dfd = dfd.repartition(npartitions=n, *args, **kwargs)\n\n return self.new(dfd, meta=self.meta)\n\n @staticmethod\n def debug():\n \"\"\"\n\n :return:\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def create_id(column=\"id\"):\n \"\"\"\n Create a unique id for every row.\n :param column: Columns to be processed\n :return:\n \"\"\"\n\n raise NotImplementedError\n\n def to_dict(self, orient=\"records\", index=True):\n \"\"\"\n Create a dict\n :param orient:\n :param index: Return the series index\n :return:\n \"\"\"\n\n series = self.data\n if index is True:\n return series.compute().to_dict(orient)\n else:\n return series.compute().to_list()\n\n def to_pandas(self):\n return self.data.compute()\n\n @property\n def constants(self):\n from optimus.engines.base.dask.constants import constants\n return constants(self)\n", "path": "optimus/engines/base/dask/dataframe.py"}]}
| 2,665 | 220 |
gh_patches_debug_18995
|
rasdani/github-patches
|
git_diff
|
mars-project__mars-3323
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] mars.tensor.int is not compatible with numpy since v1.24
<!--
Thank you for your contribution!
Please review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.
-->
**Describe the bug**
Since numpy v1.24, `numpy.int`, `numpy.float`, `numpy.object` and `numpy.bool` can no longer be imported, yet `mt.int` is still used in places. The deprecation warnings that PR #2788 silenced have finally turned into hard removals.
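The underlying change can be reproduced with numpy alone (a minimal sketch, assuming numpy >= 1.24):

```python
import numpy as np

np.int_   # still available: the underscore-suffixed scalar aliases were never removed
np.int    # raises AttributeError: module 'numpy' has no attribute 'int'
```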
**To Reproduce**
To help us reproducing this bug, please provide information below:
1. Your Python version: v3.8.13
2. The version of Mars you use: master
3. Versions of crucial packages, numpy v1.24.1
4. Full stack of the error.
AttributeError: module 'mars.tensor' has no attribute 'int'
5. Minimized code to reproduce the error.
```
import mars.tensor as mt
mt.int
```
**Expected behavior**
AttributeError: module 'mars.tensor' has no attribute 'int'
**Additional context**
- [Using the aliases of builtin types like np.int is deprecated](https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations)
- [NumPy 1.24 Release Notes](https://numpy.org/devdocs/release/1.24.0-notes.html)
- [DEP: Next step in scalar type alias deprecations/futurewarnings by seberg · Pull Request #22607 · numpy/numpy](https://github.com/numpy/numpy/pull/22607)
- [NumPy 1.22 dropped Python 3.7](https://numpy.org/devdocs/release/1.22.0-notes.html)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mars/tensor/__init__.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # Copyright 1999-2021 Alibaba Group Holding Ltd.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17
18 from .datasource import (
19 tensor,
20 array,
21 asarray,
22 ascontiguousarray,
23 asfortranarray,
24 scalar,
25 empty,
26 empty_like,
27 ones,
28 ones_like,
29 zeros,
30 zeros_like,
31 full,
32 full_like,
33 arange,
34 diag,
35 diagflat,
36 eye,
37 identity,
38 linspace,
39 meshgrid,
40 indices,
41 tril,
42 triu,
43 fromtiledb,
44 fromtiledb as from_tiledb,
45 from_dataframe,
46 fromhdf5,
47 fromhdf5 as from_hdf5,
48 fromzarr,
49 fromzarr as from_zarr,
50 fromvineyard,
51 fromvineyard as from_vineyard,
52 )
53 from .datastore import (
54 totiledb,
55 totiledb as to_tiledb,
56 tohdf5,
57 tohdf5 as to_hdf5,
58 tozarr,
59 tozarr as to_zarr,
60 tovineyard,
61 tovineyard as to_vineyard,
62 ) # pylint: disable=reimported
63 from .base import (
64 result_type,
65 ndim,
66 copyto,
67 transpose,
68 where,
69 broadcast_to,
70 broadcast_arrays,
71 expand_dims,
72 rollaxis,
73 swapaxes,
74 moveaxis,
75 ravel,
76 atleast_1d,
77 atleast_2d,
78 atleast_3d,
79 argwhere,
80 array_split,
81 split,
82 hsplit,
83 vsplit,
84 dsplit,
85 roll,
86 squeeze,
87 diff,
88 ediff1d,
89 flip,
90 flipud,
91 fliplr,
92 repeat,
93 tile,
94 isin,
95 searchsorted,
96 unique,
97 sort,
98 argsort,
99 partition,
100 argpartition,
101 topk,
102 argtopk,
103 copy,
104 trapz,
105 shape,
106 insert,
107 delete,
108 in1d,
109 setdiff1d,
110 )
111 from .arithmetic import (
112 add,
113 subtract,
114 multiply,
115 divide,
116 truediv as true_divide,
117 floordiv as floor_divide,
118 mod,
119 power,
120 float_power,
121 fmod,
122 sqrt,
123 around,
124 round_,
125 round_ as round,
126 logaddexp,
127 logaddexp2,
128 negative,
129 positive,
130 absolute,
131 fabs,
132 absolute as abs,
133 rint,
134 sign,
135 degrees,
136 radians,
137 conj,
138 conjugate,
139 exp,
140 exp2,
141 log,
142 log2,
143 log10,
144 expm1,
145 log1p,
146 square,
147 cbrt,
148 reciprocal,
149 equal,
150 not_equal,
151 less,
152 less_equal,
153 greater,
154 greater_equal,
155 sin,
156 cos,
157 tan,
158 arcsin,
159 arccos,
160 arctan,
161 arctan2,
162 hypot,
163 sinh,
164 cosh,
165 tanh,
166 arcsinh,
167 arccosh,
168 arctanh,
169 deg2rad,
170 rad2deg,
171 bitand as bitwise_and,
172 bitor as bitwise_or,
173 bitxor as bitwise_xor,
174 invert,
175 invert as bitwise_not,
176 lshift as left_shift,
177 rshift as right_shift,
178 logical_and,
179 logical_or,
180 logical_xor,
181 logical_not,
182 maximum,
183 minimum,
184 floor,
185 ceil,
186 trunc,
187 remainder,
188 fmax,
189 fmin,
190 isfinite,
191 isinf,
192 isnan,
193 signbit,
194 copysign,
195 nextafter,
196 spacing,
197 clip,
198 isclose,
199 ldexp,
200 frexp,
201 modf,
202 angle,
203 isreal,
204 iscomplex,
205 real,
206 imag,
207 fix,
208 i0,
209 sinc,
210 nan_to_num,
211 tree_add,
212 tree_multiply,
213 )
214 from .statistics import (
215 average,
216 bincount,
217 cov,
218 corrcoef,
219 digitize,
220 ptp,
221 histogram_bin_edges,
222 histogram,
223 median,
224 quantile,
225 percentile,
226 )
227 from .linalg.tensordot import tensordot
228 from .linalg.dot import dot
229 from .linalg.inner import inner, innerproduct
230 from .linalg.vdot import vdot
231 from .linalg.matmul import matmul
232 from .reduction import (
233 sum,
234 nansum,
235 prod,
236 prod as product,
237 nanprod,
238 max,
239 max as amax,
240 nanmax,
241 min,
242 min as amin,
243 nanmin,
244 all,
245 any,
246 mean,
247 nanmean,
248 argmax,
249 nanargmax,
250 argmin,
251 nanargmin,
252 cumsum,
253 cumprod,
254 var,
255 std,
256 nanvar,
257 nanstd,
258 nancumsum,
259 nancumprod,
260 count_nonzero,
261 allclose,
262 array_equal,
263 )
264 from .reshape import reshape
265 from .merge import (
266 concatenate,
267 stack,
268 hstack,
269 vstack,
270 dstack,
271 column_stack,
272 union1d,
273 block,
274 append,
275 )
276 from .indexing import (
277 take,
278 compress,
279 extract,
280 choose,
281 unravel_index,
282 nonzero,
283 flatnonzero,
284 fill_diagonal,
285 )
286 from .rechunk import rechunk
287 from .einsum import einsum
288 from .images import imread
289
290 # noinspection PyUnresolvedReferences
291 from .lib.index_tricks import mgrid, ogrid, ndindex, r_, c_
292
293 from . import random
294 from . import fft
295 from . import linalg
296 from . import lib
297 from . import special
298 from . import stats
299
300 # types
301 from .core import Tensor
302
303 # noinspection PyUnresolvedReferences
304 from ..core import ExecutableTuple
305
306 # noinspection PyUnresolvedReferences
307 from numpy import (
308 newaxis,
309 AxisError,
310 inf,
311 Inf,
312 NINF,
313 nan,
314 NAN,
315 NaN,
316 pi,
317 e,
318 errstate,
319 geterr,
320 seterr,
321 )
322
323 # import numpy types
324 # noinspection PyUnresolvedReferences
325 from numpy import (
326 dtype,
327 number,
328 inexact,
329 floating,
330 complexfloating,
331 integer,
332 signedinteger,
333 unsignedinteger,
334 character,
335 generic,
336 flexible,
337 int_,
338 bool_,
339 float_,
340 cfloat,
341 bytes_,
342 unicode_,
343 void,
344 object_,
345 intc,
346 intp,
347 int8,
348 int16,
349 int32,
350 int64,
351 uint8,
352 uint16,
353 uint32,
354 uint64,
355 uint,
356 float16,
357 float32,
358 float64,
359 double,
360 complex64,
361 complex128,
362 datetime64,
363 timedelta64,
364 )
365
366 # noinspection PyUnresolvedReferences
367 from numpy import finfo
368
369 # register fuse op and fetch op
370 from .fuse import TensorFuseChunk, TensorCpFuseChunk, TensorNeFuseChunk
371 from .fetch import TensorFetch, TensorFetchShuffle
372 from . import ufunc
373
374 try:
375 import warnings
376
377 # suppress numpy warnings on types
378 with warnings.catch_warnings():
379 warnings.simplefilter("ignore", DeprecationWarning)
380 warnings.simplefilter("ignore", FutureWarning)
381 # noinspection PyUnresolvedReferences
382 from numpy import object, int, bool, float
383 except ImportError: # pragma: no cover
384 pass
385 finally:
386 del warnings
387
388 del (
389 TensorFuseChunk,
390 TensorCpFuseChunk,
391 TensorNeFuseChunk,
392 TensorFetch,
393 TensorFetchShuffle,
394 ufunc,
395 )
396
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mars/tensor/__init__.py b/mars/tensor/__init__.py
--- a/mars/tensor/__init__.py
+++ b/mars/tensor/__init__.py
@@ -334,14 +334,14 @@
character,
generic,
flexible,
- int_,
- bool_,
- float_,
+ int_ as int,
+ bool_ as bool,
+ float_ as float,
cfloat,
bytes_,
unicode_,
void,
- object_,
+ object_ as object,
intc,
intp,
int8,
@@ -371,20 +371,6 @@
from .fetch import TensorFetch, TensorFetchShuffle
from . import ufunc
-try:
- import warnings
-
- # suppress numpy warnings on types
- with warnings.catch_warnings():
- warnings.simplefilter("ignore", DeprecationWarning)
- warnings.simplefilter("ignore", FutureWarning)
- # noinspection PyUnresolvedReferences
- from numpy import object, int, bool, float
-except ImportError: # pragma: no cover
- pass
-finally:
- del warnings
-
del (
TensorFuseChunk,
TensorCpFuseChunk,
|
{"golden_diff": "diff --git a/mars/tensor/__init__.py b/mars/tensor/__init__.py\n--- a/mars/tensor/__init__.py\n+++ b/mars/tensor/__init__.py\n@@ -334,14 +334,14 @@\n character,\n generic,\n flexible,\n- int_,\n- bool_,\n- float_,\n+ int_ as int,\n+ bool_ as bool,\n+ float_ as float,\n cfloat,\n bytes_,\n unicode_,\n void,\n- object_,\n+ object_ as object,\n intc,\n intp,\n int8,\n@@ -371,20 +371,6 @@\n from .fetch import TensorFetch, TensorFetchShuffle\n from . import ufunc\n \n-try:\n- import warnings\n-\n- # suppress numpy warnings on types\n- with warnings.catch_warnings():\n- warnings.simplefilter(\"ignore\", DeprecationWarning)\n- warnings.simplefilter(\"ignore\", FutureWarning)\n- # noinspection PyUnresolvedReferences\n- from numpy import object, int, bool, float\n-except ImportError: # pragma: no cover\n- pass\n-finally:\n- del warnings\n-\n del (\n TensorFuseChunk,\n TensorCpFuseChunk,\n", "issue": "[BUG] mars.tensor.int is not compatible with numpy since v1.24\n<!--\r\nThank you for your contribution!\r\n\r\nPlease review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.\r\n-->\r\n\r\n**Describe the bug**\r\nSince numpy v1.24, numpy.int, numpy.float, numpy.object, numpy.bool is no longer able to be imported, while cases are that mt.int is used. Deprecate warnings removed by PR #2788 finally come into reality.\r\n\r\n**To Reproduce**\r\nTo help us reproducing this bug, please provide information below:\r\n1. Your Python version: v3.8.13\r\n2. The version of Mars you use: master\r\n3. Versions of crucial packages, numpy v1.24.1\r\n4. Full stack of the error.\r\nAttributeError: module 'mars.tensor' has no attribute 'int'\r\n5. Minimized code to reproduce the error.\r\n> import mars.tensor as mt\r\n> mt.int\r\n\r\n**Expected behavior**\r\nAttributeError: module 'mars.tensor' has no attribute 'int'\r\n\r\n**Additional context**\r\n- [Using the aliases of builtin types like np.int is deprecated](https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations)\r\n- [NumPy 1.24 Release Notes](https://numpy.org/devdocs/release/1.24.0-notes.html)\r\n- [DEP: Next step in scalar type alias deprecations/futurewarnings by seberg \u00b7 Pull Request #22607 \u00b7 numpy/numpy](https://github.com/numpy/numpy/pull/22607)\r\n- [NumPy 1.22 dropped Python 3.7](https://numpy.org/devdocs/release/1.22.0-notes.html)\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom .datasource import (\n tensor,\n array,\n asarray,\n ascontiguousarray,\n asfortranarray,\n scalar,\n empty,\n empty_like,\n ones,\n ones_like,\n zeros,\n zeros_like,\n full,\n full_like,\n arange,\n diag,\n diagflat,\n eye,\n identity,\n linspace,\n meshgrid,\n indices,\n tril,\n triu,\n fromtiledb,\n fromtiledb as from_tiledb,\n from_dataframe,\n fromhdf5,\n fromhdf5 as from_hdf5,\n fromzarr,\n fromzarr as from_zarr,\n fromvineyard,\n fromvineyard as 
from_vineyard,\n)\nfrom .datastore import (\n totiledb,\n totiledb as to_tiledb,\n tohdf5,\n tohdf5 as to_hdf5,\n tozarr,\n tozarr as to_zarr,\n tovineyard,\n tovineyard as to_vineyard,\n) # pylint: disable=reimported\nfrom .base import (\n result_type,\n ndim,\n copyto,\n transpose,\n where,\n broadcast_to,\n broadcast_arrays,\n expand_dims,\n rollaxis,\n swapaxes,\n moveaxis,\n ravel,\n atleast_1d,\n atleast_2d,\n atleast_3d,\n argwhere,\n array_split,\n split,\n hsplit,\n vsplit,\n dsplit,\n roll,\n squeeze,\n diff,\n ediff1d,\n flip,\n flipud,\n fliplr,\n repeat,\n tile,\n isin,\n searchsorted,\n unique,\n sort,\n argsort,\n partition,\n argpartition,\n topk,\n argtopk,\n copy,\n trapz,\n shape,\n insert,\n delete,\n in1d,\n setdiff1d,\n)\nfrom .arithmetic import (\n add,\n subtract,\n multiply,\n divide,\n truediv as true_divide,\n floordiv as floor_divide,\n mod,\n power,\n float_power,\n fmod,\n sqrt,\n around,\n round_,\n round_ as round,\n logaddexp,\n logaddexp2,\n negative,\n positive,\n absolute,\n fabs,\n absolute as abs,\n rint,\n sign,\n degrees,\n radians,\n conj,\n conjugate,\n exp,\n exp2,\n log,\n log2,\n log10,\n expm1,\n log1p,\n square,\n cbrt,\n reciprocal,\n equal,\n not_equal,\n less,\n less_equal,\n greater,\n greater_equal,\n sin,\n cos,\n tan,\n arcsin,\n arccos,\n arctan,\n arctan2,\n hypot,\n sinh,\n cosh,\n tanh,\n arcsinh,\n arccosh,\n arctanh,\n deg2rad,\n rad2deg,\n bitand as bitwise_and,\n bitor as bitwise_or,\n bitxor as bitwise_xor,\n invert,\n invert as bitwise_not,\n lshift as left_shift,\n rshift as right_shift,\n logical_and,\n logical_or,\n logical_xor,\n logical_not,\n maximum,\n minimum,\n floor,\n ceil,\n trunc,\n remainder,\n fmax,\n fmin,\n isfinite,\n isinf,\n isnan,\n signbit,\n copysign,\n nextafter,\n spacing,\n clip,\n isclose,\n ldexp,\n frexp,\n modf,\n angle,\n isreal,\n iscomplex,\n real,\n imag,\n fix,\n i0,\n sinc,\n nan_to_num,\n tree_add,\n tree_multiply,\n)\nfrom .statistics import (\n average,\n bincount,\n cov,\n corrcoef,\n digitize,\n ptp,\n histogram_bin_edges,\n histogram,\n median,\n quantile,\n percentile,\n)\nfrom .linalg.tensordot import tensordot\nfrom .linalg.dot import dot\nfrom .linalg.inner import inner, innerproduct\nfrom .linalg.vdot import vdot\nfrom .linalg.matmul import matmul\nfrom .reduction import (\n sum,\n nansum,\n prod,\n prod as product,\n nanprod,\n max,\n max as amax,\n nanmax,\n min,\n min as amin,\n nanmin,\n all,\n any,\n mean,\n nanmean,\n argmax,\n nanargmax,\n argmin,\n nanargmin,\n cumsum,\n cumprod,\n var,\n std,\n nanvar,\n nanstd,\n nancumsum,\n nancumprod,\n count_nonzero,\n allclose,\n array_equal,\n)\nfrom .reshape import reshape\nfrom .merge import (\n concatenate,\n stack,\n hstack,\n vstack,\n dstack,\n column_stack,\n union1d,\n block,\n append,\n)\nfrom .indexing import (\n take,\n compress,\n extract,\n choose,\n unravel_index,\n nonzero,\n flatnonzero,\n fill_diagonal,\n)\nfrom .rechunk import rechunk\nfrom .einsum import einsum\nfrom .images import imread\n\n# noinspection PyUnresolvedReferences\nfrom .lib.index_tricks import mgrid, ogrid, ndindex, r_, c_\n\nfrom . import random\nfrom . import fft\nfrom . import linalg\nfrom . import lib\nfrom . import special\nfrom . 
import stats\n\n# types\nfrom .core import Tensor\n\n# noinspection PyUnresolvedReferences\nfrom ..core import ExecutableTuple\n\n# noinspection PyUnresolvedReferences\nfrom numpy import (\n newaxis,\n AxisError,\n inf,\n Inf,\n NINF,\n nan,\n NAN,\n NaN,\n pi,\n e,\n errstate,\n geterr,\n seterr,\n)\n\n# import numpy types\n# noinspection PyUnresolvedReferences\nfrom numpy import (\n dtype,\n number,\n inexact,\n floating,\n complexfloating,\n integer,\n signedinteger,\n unsignedinteger,\n character,\n generic,\n flexible,\n int_,\n bool_,\n float_,\n cfloat,\n bytes_,\n unicode_,\n void,\n object_,\n intc,\n intp,\n int8,\n int16,\n int32,\n int64,\n uint8,\n uint16,\n uint32,\n uint64,\n uint,\n float16,\n float32,\n float64,\n double,\n complex64,\n complex128,\n datetime64,\n timedelta64,\n)\n\n# noinspection PyUnresolvedReferences\nfrom numpy import finfo\n\n# register fuse op and fetch op\nfrom .fuse import TensorFuseChunk, TensorCpFuseChunk, TensorNeFuseChunk\nfrom .fetch import TensorFetch, TensorFetchShuffle\nfrom . import ufunc\n\ntry:\n import warnings\n\n # suppress numpy warnings on types\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", DeprecationWarning)\n warnings.simplefilter(\"ignore\", FutureWarning)\n # noinspection PyUnresolvedReferences\n from numpy import object, int, bool, float\nexcept ImportError: # pragma: no cover\n pass\nfinally:\n del warnings\n\ndel (\n TensorFuseChunk,\n TensorCpFuseChunk,\n TensorNeFuseChunk,\n TensorFetch,\n TensorFetchShuffle,\n ufunc,\n)\n", "path": "mars/tensor/__init__.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom .datasource import (\n tensor,\n array,\n asarray,\n ascontiguousarray,\n asfortranarray,\n scalar,\n empty,\n empty_like,\n ones,\n ones_like,\n zeros,\n zeros_like,\n full,\n full_like,\n arange,\n diag,\n diagflat,\n eye,\n identity,\n linspace,\n meshgrid,\n indices,\n tril,\n triu,\n fromtiledb,\n fromtiledb as from_tiledb,\n from_dataframe,\n fromhdf5,\n fromhdf5 as from_hdf5,\n fromzarr,\n fromzarr as from_zarr,\n fromvineyard,\n fromvineyard as from_vineyard,\n)\nfrom .datastore import (\n totiledb,\n totiledb as to_tiledb,\n tohdf5,\n tohdf5 as to_hdf5,\n tozarr,\n tozarr as to_zarr,\n tovineyard,\n tovineyard as to_vineyard,\n) # pylint: disable=reimported\nfrom .base import (\n result_type,\n ndim,\n copyto,\n transpose,\n where,\n broadcast_to,\n broadcast_arrays,\n expand_dims,\n rollaxis,\n swapaxes,\n moveaxis,\n ravel,\n atleast_1d,\n atleast_2d,\n atleast_3d,\n argwhere,\n array_split,\n split,\n hsplit,\n vsplit,\n dsplit,\n roll,\n squeeze,\n diff,\n ediff1d,\n flip,\n flipud,\n fliplr,\n repeat,\n tile,\n isin,\n searchsorted,\n unique,\n sort,\n argsort,\n partition,\n argpartition,\n topk,\n argtopk,\n copy,\n trapz,\n shape,\n insert,\n delete,\n in1d,\n setdiff1d,\n)\nfrom .arithmetic import (\n add,\n subtract,\n multiply,\n divide,\n 
truediv as true_divide,\n floordiv as floor_divide,\n mod,\n power,\n float_power,\n fmod,\n sqrt,\n around,\n round_,\n round_ as round,\n logaddexp,\n logaddexp2,\n negative,\n positive,\n absolute,\n fabs,\n absolute as abs,\n rint,\n sign,\n degrees,\n radians,\n conj,\n conjugate,\n exp,\n exp2,\n log,\n log2,\n log10,\n expm1,\n log1p,\n square,\n cbrt,\n reciprocal,\n equal,\n not_equal,\n less,\n less_equal,\n greater,\n greater_equal,\n sin,\n cos,\n tan,\n arcsin,\n arccos,\n arctan,\n arctan2,\n hypot,\n sinh,\n cosh,\n tanh,\n arcsinh,\n arccosh,\n arctanh,\n deg2rad,\n rad2deg,\n bitand as bitwise_and,\n bitor as bitwise_or,\n bitxor as bitwise_xor,\n invert,\n invert as bitwise_not,\n lshift as left_shift,\n rshift as right_shift,\n logical_and,\n logical_or,\n logical_xor,\n logical_not,\n maximum,\n minimum,\n floor,\n ceil,\n trunc,\n remainder,\n fmax,\n fmin,\n isfinite,\n isinf,\n isnan,\n signbit,\n copysign,\n nextafter,\n spacing,\n clip,\n isclose,\n ldexp,\n frexp,\n modf,\n angle,\n isreal,\n iscomplex,\n real,\n imag,\n fix,\n i0,\n sinc,\n nan_to_num,\n tree_add,\n tree_multiply,\n)\nfrom .statistics import (\n average,\n bincount,\n cov,\n corrcoef,\n digitize,\n ptp,\n histogram_bin_edges,\n histogram,\n median,\n quantile,\n percentile,\n)\nfrom .linalg.tensordot import tensordot\nfrom .linalg.dot import dot\nfrom .linalg.inner import inner, innerproduct\nfrom .linalg.vdot import vdot\nfrom .linalg.matmul import matmul\nfrom .reduction import (\n sum,\n nansum,\n prod,\n prod as product,\n nanprod,\n max,\n max as amax,\n nanmax,\n min,\n min as amin,\n nanmin,\n all,\n any,\n mean,\n nanmean,\n argmax,\n nanargmax,\n argmin,\n nanargmin,\n cumsum,\n cumprod,\n var,\n std,\n nanvar,\n nanstd,\n nancumsum,\n nancumprod,\n count_nonzero,\n allclose,\n array_equal,\n)\nfrom .reshape import reshape\nfrom .merge import (\n concatenate,\n stack,\n hstack,\n vstack,\n dstack,\n column_stack,\n union1d,\n block,\n append,\n)\nfrom .indexing import (\n take,\n compress,\n extract,\n choose,\n unravel_index,\n nonzero,\n flatnonzero,\n fill_diagonal,\n)\nfrom .rechunk import rechunk\nfrom .einsum import einsum\nfrom .images import imread\n\n# noinspection PyUnresolvedReferences\nfrom .lib.index_tricks import mgrid, ogrid, ndindex, r_, c_\n\nfrom . import random\nfrom . import fft\nfrom . import linalg\nfrom . import lib\nfrom . import special\nfrom . import stats\n\n# types\nfrom .core import Tensor\n\n# noinspection PyUnresolvedReferences\nfrom ..core import ExecutableTuple\n\n# noinspection PyUnresolvedReferences\nfrom numpy import (\n newaxis,\n AxisError,\n inf,\n Inf,\n NINF,\n nan,\n NAN,\n NaN,\n pi,\n e,\n errstate,\n geterr,\n seterr,\n)\n\n# import numpy types\n# noinspection PyUnresolvedReferences\nfrom numpy import (\n dtype,\n number,\n inexact,\n floating,\n complexfloating,\n integer,\n signedinteger,\n unsignedinteger,\n character,\n generic,\n flexible,\n int_ as int,\n bool_ as bool,\n float_ as float,\n cfloat,\n bytes_,\n unicode_,\n void,\n object_ as object,\n intc,\n intp,\n int8,\n int16,\n int32,\n int64,\n uint8,\n uint16,\n uint32,\n uint64,\n uint,\n float16,\n float32,\n float64,\n double,\n complex64,\n complex128,\n datetime64,\n timedelta64,\n)\n\n# noinspection PyUnresolvedReferences\nfrom numpy import finfo\n\n# register fuse op and fetch op\nfrom .fuse import TensorFuseChunk, TensorCpFuseChunk, TensorNeFuseChunk\nfrom .fetch import TensorFetch, TensorFetchShuffle\nfrom . 
import ufunc\n\ndel (\n TensorFuseChunk,\n TensorCpFuseChunk,\n TensorNeFuseChunk,\n TensorFetch,\n TensorFetchShuffle,\n ufunc,\n)\n", "path": "mars/tensor/__init__.py"}]}
| 3,546 | 281 |
gh_patches_debug_54607
|
rasdani/github-patches
|
git_diff
|
zulip__zulip-13067
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Clean up `update-locked-requirements` and `requirements.in` files to remove `-e` hackery.
It looks like https://github.com/jazzband/pip-tools/pull/807 was included in the latest `pip-tools` release 12 days ago. I think this may mean we can get rid of our semantically incorrect usage of `-e` in our requirements files, which in turn may mean we can remove most of the messy code in `tools/update-locked-requirements` related to hackily removing the `-e` lines.
See `compile_requirements` in that file for details.
My guess is that this means if we upgrade pip-tools, we can delete 50% of the code in `update-locked-requirements` and clean up our `requirements.in` files to not use `-e`.
@hackerkid this might be a good project for you.
Clean up `update-locked-requirements` and `requirements.in` files to remove `-e` hackery.
It looks like https://github.com/jazzband/pip-tools/pull/807 was included in the latest `pip-tools` release 12 days ago. I think this may mean we can get rid of our semantically incorrect usage of `-e` in our requirements files, which in turn may mean we can remove most of the messy code in `tools/update-locked-requirements` related to hackily removing the `-e` lines.
See `compile_requirements` in that file for details.
My guess is that this means if we upgrade pip-tools, we can delete 50% of the code in `update-locked-requirements` and clean up our `requirements.in` files to not use `-e`.
@hackerkid this might be a good project for you.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `version.py`
Content:
```
1 import os
2
3 ZULIP_VERSION = "2.0.4+git"
4 # Add information on number of commits and commit hash to version, if available
5 zulip_git_version_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'zulip-git-version')
6 if os.path.exists(zulip_git_version_file):
7 with open(zulip_git_version_file) as f:
8 version = f.read().strip()
9 if version:
10 ZULIP_VERSION = version
11
12 LATEST_MAJOR_VERSION = "2.0"
13 LATEST_RELEASE_VERSION = "2.0.4"
14 LATEST_RELEASE_ANNOUNCEMENT = "https://blog.zulip.org/2019/03/01/zulip-2-0-released/"
15
16 # Bump the minor PROVISION_VERSION to indicate that folks should provision
17 # only when going from an old version of the code to a newer version. Bump
18 # the major version to indicate that folks should provision in both
19 # directions.
20
21 # Typically,
22 # * adding a dependency only requires a minor version bump;
23 # * removing a dependency requires a major version bump;
24 # * upgrading a dependency requires a major version bump, unless the
25 # upgraded dependency is backwards compatible with all of our
26 # historical commits sharing the same major version, in which case a
27 # minor version bump suffices.
28
29 PROVISION_VERSION = '49.1'
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -26,4 +26,4 @@
# historical commits sharing the same major version, in which case a
# minor version bump suffices.
-PROVISION_VERSION = '49.1'
+PROVISION_VERSION = '49.2'
|
{"golden_diff": "diff --git a/version.py b/version.py\n--- a/version.py\n+++ b/version.py\n@@ -26,4 +26,4 @@\n # historical commits sharing the same major version, in which case a\n # minor version bump suffices.\n \n-PROVISION_VERSION = '49.1'\n+PROVISION_VERSION = '49.2'\n", "issue": "Clean up `update-locked-requirements` and `requirements.in` files to remove `-e` hackery.\nIt looks like https://github.com/jazzband/pip-tools/pull/807 was included in the latest `pip-tools` release 12 days ago. I think this may mean we can get rid of our semantically incorrect usage of `-e` in our requirements files, which in turn may mean we can remove most of the messy code in `tools/update-locked-requirements` related to hackily removing the `-e` lines. \r\n See `compile_requirements` in that file for details. \r\n\r\nMy guess is that this means if we upgrade pip-tools, we can delete 50% of the code in `update-locked-requirements` and clean up our `requirements.in` files to not use `-e`. \r\n\r\n@hackerkid this might be a good project for you.\nClean up `update-locked-requirements` and `requirements.in` files to remove `-e` hackery.\nIt looks like https://github.com/jazzband/pip-tools/pull/807 was included in the latest `pip-tools` release 12 days ago. I think this may mean we can get rid of our semantically incorrect usage of `-e` in our requirements files, which in turn may mean we can remove most of the messy code in `tools/update-locked-requirements` related to hackily removing the `-e` lines. \r\n See `compile_requirements` in that file for details. \r\n\r\nMy guess is that this means if we upgrade pip-tools, we can delete 50% of the code in `update-locked-requirements` and clean up our `requirements.in` files to not use `-e`. \r\n\r\n@hackerkid this might be a good project for you.\n", "before_files": [{"content": "import os\n\nZULIP_VERSION = \"2.0.4+git\"\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'zulip-git-version')\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n version = f.read().strip()\n if version:\n ZULIP_VERSION = version\n\nLATEST_MAJOR_VERSION = \"2.0\"\nLATEST_RELEASE_VERSION = \"2.0.4\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.org/2019/03/01/zulip-2-0-released/\"\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. 
Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically,\n# * adding a dependency only requires a minor version bump;\n# * removing a dependency requires a major version bump;\n# * upgrading a dependency requires a major version bump, unless the\n# upgraded dependency is backwards compatible with all of our\n# historical commits sharing the same major version, in which case a\n# minor version bump suffices.\n\nPROVISION_VERSION = '49.1'\n", "path": "version.py"}], "after_files": [{"content": "import os\n\nZULIP_VERSION = \"2.0.4+git\"\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'zulip-git-version')\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n version = f.read().strip()\n if version:\n ZULIP_VERSION = version\n\nLATEST_MAJOR_VERSION = \"2.0\"\nLATEST_RELEASE_VERSION = \"2.0.4\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.org/2019/03/01/zulip-2-0-released/\"\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically,\n# * adding a dependency only requires a minor version bump;\n# * removing a dependency requires a major version bump;\n# * upgrading a dependency requires a major version bump, unless the\n# upgraded dependency is backwards compatible with all of our\n# historical commits sharing the same major version, in which case a\n# minor version bump suffices.\n\nPROVISION_VERSION = '49.2'\n", "path": "version.py"}]}
| 986 | 78 |
gh_patches_debug_9751
|
rasdani/github-patches
|
git_diff
|
liqd__a4-meinberlin-481
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
project list does not show text if there are no matching projects
It should show something like "No projects could be found". Note that the text should work for two cases: "there are no projects" and "there are no projects matching the filters".
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/contrib/templatetags/contrib_tags.py`
Content:
```
1 from django import template
2 from django.template.loader import render_to_string
3
4 register = template.Library()
5
6
7 @register.assignment_tag
8 def include_template_string(template, **kwargs):
9 rendered_template = render_to_string(template, kwargs)
10 return str(rendered_template)
11
12
13 @register.assignment_tag
14 def combined_url_parameter(request_query_dict, **kwargs):
15 combined_query_dict = request_query_dict.copy()
16 for key in kwargs:
17 combined_query_dict.setlist(key, [kwargs[key]])
18 encoded_parameter = '?' + combined_query_dict.urlencode()
19 return encoded_parameter
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/apps/contrib/templatetags/contrib_tags.py b/apps/contrib/templatetags/contrib_tags.py
--- a/apps/contrib/templatetags/contrib_tags.py
+++ b/apps/contrib/templatetags/contrib_tags.py
@@ -17,3 +17,14 @@
combined_query_dict.setlist(key, [kwargs[key]])
encoded_parameter = '?' + combined_query_dict.urlencode()
return encoded_parameter
+
+
[email protected]_tag
+def filter_has_perm(perm, user, objects):
+ """Filter a list of objects based on user permissions."""
+ if not hasattr(user, 'has_perm'):
+ # If the swapped user model does not support permissions, all objects
+ # will be returned. This is taken from rules.templatetags.has_perm.
+ return objects
+ else:
+ return (obj for obj in objects if user.has_perm(perm, obj))
|
{"golden_diff": "diff --git a/apps/contrib/templatetags/contrib_tags.py b/apps/contrib/templatetags/contrib_tags.py\n--- a/apps/contrib/templatetags/contrib_tags.py\n+++ b/apps/contrib/templatetags/contrib_tags.py\n@@ -17,3 +17,14 @@\n combined_query_dict.setlist(key, [kwargs[key]])\n encoded_parameter = '?' + combined_query_dict.urlencode()\n return encoded_parameter\n+\n+\[email protected]_tag\n+def filter_has_perm(perm, user, objects):\n+ \"\"\"Filter a list of objects based on user permissions.\"\"\"\n+ if not hasattr(user, 'has_perm'):\n+ # If the swapped user model does not support permissions, all objects\n+ # will be returned. This is taken from rules.templatetags.has_perm.\n+ return objects\n+ else:\n+ return (obj for obj in objects if user.has_perm(perm, obj))\n", "issue": "project list does not show text if there are no matching projects\nIt should show something like \"No projects could be found\". Note that the text should work for two cases: \"there are no projects\" and \"there are no projects matching the filters\".\n", "before_files": [{"content": "from django import template\nfrom django.template.loader import render_to_string\n\nregister = template.Library()\n\n\[email protected]_tag\ndef include_template_string(template, **kwargs):\n rendered_template = render_to_string(template, kwargs)\n return str(rendered_template)\n\n\[email protected]_tag\ndef combined_url_parameter(request_query_dict, **kwargs):\n combined_query_dict = request_query_dict.copy()\n for key in kwargs:\n combined_query_dict.setlist(key, [kwargs[key]])\n encoded_parameter = '?' + combined_query_dict.urlencode()\n return encoded_parameter\n", "path": "apps/contrib/templatetags/contrib_tags.py"}], "after_files": [{"content": "from django import template\nfrom django.template.loader import render_to_string\n\nregister = template.Library()\n\n\[email protected]_tag\ndef include_template_string(template, **kwargs):\n rendered_template = render_to_string(template, kwargs)\n return str(rendered_template)\n\n\[email protected]_tag\ndef combined_url_parameter(request_query_dict, **kwargs):\n combined_query_dict = request_query_dict.copy()\n for key in kwargs:\n combined_query_dict.setlist(key, [kwargs[key]])\n encoded_parameter = '?' + combined_query_dict.urlencode()\n return encoded_parameter\n\n\[email protected]_tag\ndef filter_has_perm(perm, user, objects):\n \"\"\"Filter a list of objects based on user permissions.\"\"\"\n if not hasattr(user, 'has_perm'):\n # If the swapped user model does not support permissions, all objects\n # will be returned. This is taken from rules.templatetags.has_perm.\n return objects\n else:\n return (obj for obj in objects if user.has_perm(perm, obj))\n", "path": "apps/contrib/templatetags/contrib_tags.py"}]}
| 465 | 209 |
gh_patches_debug_8368
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-2488
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Keyerror when sending password reset email
When sending a password reset email, I'm getting an internal error
I'll just share the raven error - hopefully that doesn't review all of the site secrets (probably does)
https://app.getsentry.com/share/issue/37343334302e313233323439393235/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/wagtailadmin/templatetags/wagtailuserbar.py`
Content:
```
1 from __future__ import absolute_import, unicode_literals
2
3 from django import template
4 from django.template.loader import render_to_string
5
6 from wagtail.wagtailadmin.userbar import (
7 AddPageItem, AdminItem, ApproveModerationEditPageItem, EditPageItem, ExplorePageItem,
8 RejectModerationEditPageItem)
9 from wagtail.wagtailcore import hooks
10 from wagtail.wagtailcore.models import PAGE_TEMPLATE_VAR, Page, PageRevision
11
12 # from django.contrib.auth.decorators import permission_required
13
14
15 register = template.Library()
16
17
18 def get_page_instance(context):
19 """
20 Given a template context, try and find a Page variable in the common
21 places. Returns None if a page can not be found.
22 """
23 possible_names = [PAGE_TEMPLATE_VAR, 'self']
24 for name in possible_names:
25 if name in context:
26 page = context[name]
27 if isinstance(page, Page):
28 return page
29
30
31 @register.simple_tag(takes_context=True)
32 def wagtailuserbar(context, position='bottom-right'):
33 # Find request object
34 request = context['request']
35
36
37 # Don't render if user doesn't have permission to access the admin area
38 if not request.user.has_perm('wagtailadmin.access_admin'):
39 return ''
40
41 # Only render if the context contains a variable referencing a saved page
42 page = get_page_instance(context)
43 if page is None:
44 return ''
45
46 # Dont render anything if the page has not been saved - i.e. a preview
47 if page.pk is None:
48 return ''
49
50 try:
51 revision_id = request.revision_id
52 except AttributeError:
53 revision_id = None
54
55 if revision_id is None:
56 items = [
57 AdminItem(),
58 ExplorePageItem(Page.objects.get(id=page.id)),
59 EditPageItem(Page.objects.get(id=page.id)),
60 AddPageItem(Page.objects.get(id=page.id)),
61 ]
62 else:
63 items = [
64 AdminItem(),
65 ExplorePageItem(PageRevision.objects.get(id=revision_id).page),
66 EditPageItem(PageRevision.objects.get(id=revision_id).page),
67 AddPageItem(PageRevision.objects.get(id=revision_id).page),
68 ApproveModerationEditPageItem(PageRevision.objects.get(id=revision_id)),
69 RejectModerationEditPageItem(PageRevision.objects.get(id=revision_id)),
70 ]
71
72 for fn in hooks.get_hooks('construct_wagtail_userbar'):
73 fn(request, items)
74
75 # Render the items
76 rendered_items = [item.render(request) for item in items]
77
78 # Remove any unrendered items
79 rendered_items = [item for item in rendered_items if item]
80
81 # Render the userbar items
82 return render_to_string('wagtailadmin/userbar/base.html', {
83 'request': request,
84 'items': rendered_items,
85 'position': position,
86 'page': page,
87 'revision_id': revision_id
88 })
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wagtail/wagtailadmin/templatetags/wagtailuserbar.py b/wagtail/wagtailadmin/templatetags/wagtailuserbar.py
--- a/wagtail/wagtailadmin/templatetags/wagtailuserbar.py
+++ b/wagtail/wagtailadmin/templatetags/wagtailuserbar.py
@@ -31,8 +31,10 @@
@register.simple_tag(takes_context=True)
def wagtailuserbar(context, position='bottom-right'):
# Find request object
- request = context['request']
-
+ try:
+ request = context['request']
+ except KeyError:
+ return ''
# Don't render if user doesn't have permission to access the admin area
if not request.user.has_perm('wagtailadmin.access_admin'):
|
{"golden_diff": "diff --git a/wagtail/wagtailadmin/templatetags/wagtailuserbar.py b/wagtail/wagtailadmin/templatetags/wagtailuserbar.py\n--- a/wagtail/wagtailadmin/templatetags/wagtailuserbar.py\n+++ b/wagtail/wagtailadmin/templatetags/wagtailuserbar.py\n@@ -31,8 +31,10 @@\n @register.simple_tag(takes_context=True)\n def wagtailuserbar(context, position='bottom-right'):\n # Find request object\n- request = context['request']\n-\n+ try:\n+ request = context['request']\n+ except KeyError:\n+ return ''\n \n # Don't render if user doesn't have permission to access the admin area\n if not request.user.has_perm('wagtailadmin.access_admin'):\n", "issue": "Keyerror when sending password reset email\nWhen sending a password reset email, I'm getting an internal error\n\nI'll just share the raven error - hopefully that doesn't review all of the site secrets (probably does)\n\nhttps://app.getsentry.com/share/issue/37343334302e313233323439393235/\n\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom django import template\nfrom django.template.loader import render_to_string\n\nfrom wagtail.wagtailadmin.userbar import (\n AddPageItem, AdminItem, ApproveModerationEditPageItem, EditPageItem, ExplorePageItem,\n RejectModerationEditPageItem)\nfrom wagtail.wagtailcore import hooks\nfrom wagtail.wagtailcore.models import PAGE_TEMPLATE_VAR, Page, PageRevision\n\n# from django.contrib.auth.decorators import permission_required\n\n\nregister = template.Library()\n\n\ndef get_page_instance(context):\n \"\"\"\n Given a template context, try and find a Page variable in the common\n places. Returns None if a page can not be found.\n \"\"\"\n possible_names = [PAGE_TEMPLATE_VAR, 'self']\n for name in possible_names:\n if name in context:\n page = context[name]\n if isinstance(page, Page):\n return page\n\n\[email protected]_tag(takes_context=True)\ndef wagtailuserbar(context, position='bottom-right'):\n # Find request object\n request = context['request']\n\n\n # Don't render if user doesn't have permission to access the admin area\n if not request.user.has_perm('wagtailadmin.access_admin'):\n return ''\n\n # Only render if the context contains a variable referencing a saved page\n page = get_page_instance(context)\n if page is None:\n return ''\n\n # Dont render anything if the page has not been saved - i.e. 
a preview\n if page.pk is None:\n return ''\n\n try:\n revision_id = request.revision_id\n except AttributeError:\n revision_id = None\n\n if revision_id is None:\n items = [\n AdminItem(),\n ExplorePageItem(Page.objects.get(id=page.id)),\n EditPageItem(Page.objects.get(id=page.id)),\n AddPageItem(Page.objects.get(id=page.id)),\n ]\n else:\n items = [\n AdminItem(),\n ExplorePageItem(PageRevision.objects.get(id=revision_id).page),\n EditPageItem(PageRevision.objects.get(id=revision_id).page),\n AddPageItem(PageRevision.objects.get(id=revision_id).page),\n ApproveModerationEditPageItem(PageRevision.objects.get(id=revision_id)),\n RejectModerationEditPageItem(PageRevision.objects.get(id=revision_id)),\n ]\n\n for fn in hooks.get_hooks('construct_wagtail_userbar'):\n fn(request, items)\n\n # Render the items\n rendered_items = [item.render(request) for item in items]\n\n # Remove any unrendered items\n rendered_items = [item for item in rendered_items if item]\n\n # Render the userbar items\n return render_to_string('wagtailadmin/userbar/base.html', {\n 'request': request,\n 'items': rendered_items,\n 'position': position,\n 'page': page,\n 'revision_id': revision_id\n })\n", "path": "wagtail/wagtailadmin/templatetags/wagtailuserbar.py"}], "after_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom django import template\nfrom django.template.loader import render_to_string\n\nfrom wagtail.wagtailadmin.userbar import (\n AddPageItem, AdminItem, ApproveModerationEditPageItem, EditPageItem, ExplorePageItem,\n RejectModerationEditPageItem)\nfrom wagtail.wagtailcore import hooks\nfrom wagtail.wagtailcore.models import PAGE_TEMPLATE_VAR, Page, PageRevision\n\n# from django.contrib.auth.decorators import permission_required\n\n\nregister = template.Library()\n\n\ndef get_page_instance(context):\n \"\"\"\n Given a template context, try and find a Page variable in the common\n places. Returns None if a page can not be found.\n \"\"\"\n possible_names = [PAGE_TEMPLATE_VAR, 'self']\n for name in possible_names:\n if name in context:\n page = context[name]\n if isinstance(page, Page):\n return page\n\n\[email protected]_tag(takes_context=True)\ndef wagtailuserbar(context, position='bottom-right'):\n # Find request object\n try:\n request = context['request']\n except KeyError:\n return ''\n\n # Don't render if user doesn't have permission to access the admin area\n if not request.user.has_perm('wagtailadmin.access_admin'):\n return ''\n\n # Only render if the context contains a variable referencing a saved page\n page = get_page_instance(context)\n if page is None:\n return ''\n\n # Dont render anything if the page has not been saved - i.e. 
a preview\n if page.pk is None:\n return ''\n\n try:\n revision_id = request.revision_id\n except AttributeError:\n revision_id = None\n\n if revision_id is None:\n items = [\n AdminItem(),\n ExplorePageItem(Page.objects.get(id=page.id)),\n EditPageItem(Page.objects.get(id=page.id)),\n AddPageItem(Page.objects.get(id=page.id)),\n ]\n else:\n items = [\n AdminItem(),\n ExplorePageItem(PageRevision.objects.get(id=revision_id).page),\n EditPageItem(PageRevision.objects.get(id=revision_id).page),\n AddPageItem(PageRevision.objects.get(id=revision_id).page),\n ApproveModerationEditPageItem(PageRevision.objects.get(id=revision_id)),\n RejectModerationEditPageItem(PageRevision.objects.get(id=revision_id)),\n ]\n\n for fn in hooks.get_hooks('construct_wagtail_userbar'):\n fn(request, items)\n\n # Render the items\n rendered_items = [item.render(request) for item in items]\n\n # Remove any unrendered items\n rendered_items = [item for item in rendered_items if item]\n\n # Render the userbar items\n return render_to_string('wagtailadmin/userbar/base.html', {\n 'request': request,\n 'items': rendered_items,\n 'position': position,\n 'page': page,\n 'revision_id': revision_id\n })\n", "path": "wagtail/wagtailadmin/templatetags/wagtailuserbar.py"}]}
| 1,158 | 187 |
gh_patches_debug_13485
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-1814
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pre-commit install fails on Windows network mount drive
Hi, I'm trying to help some team members set up pre-commit on a Windows network mount drive, and they're encountering an issue with paths. Seems related to [this comment](https://github.com/pre-commit/pre-commit/issues/1610#issuecomment-719774326) in #1610, which seems to be a different issue than the one the original issue, as we are on the most recent version of pre-commit and git, and the fix that was merged in #1727 for that doesn't seem to address this issue. I also tried other versions of Git for Windows <2.25, and the issue still seemed to persist.
I tested the solution that @christopherdoyle proposed, and that does seems to fix this issue with network mount drives, though I saw that @asottile would prefer not to use `pathlib`. I am not able to propose a fix right now, but I wanted to raise this as an issue that still exists.
Full error below:
```
### version information
```
pre-commit version: 2.10.1
sys.version:
3.9.1 | packaged by conda-forge | (default, Jan 26 2021, 01:29:07) [MSC v.1916 64 bit (AMD64)]
sys.executable: C:\Users\roderick\.conda\envs\nmt\python.exe
os.name: nt
sys.platform: win32
```
### error information
```
An unexpected error has occurred: ValueError: path is on mount 'S:', start on mount '\\\\MyServer\Directory'
```
```
Traceback (most recent call last):
File "C:\Users\roderick\.conda\envs\nmt\lib\site-packages\pre_commit\error_handler.py", line 65, in error_handler
yield
File "C:\Users\roderick\.conda\envs\nmt\lib\site-packages\pre_commit\main.py", line 333, in main
_adjust_args_and_chdir(args)
File "C:\Users\roderick\.conda\envs\nmt\lib\site-packages\pre_commit\main.py", line 153, in _adjust_args_and_chdir
args.config = os.path.relpath(args.config)
File "C:\Users\roderick\.conda\envs\nmt\lib\ntpath.py", line 703, in relpath
raise ValueError("path is on mount %r, start on mount %r" % (
ValueError: path is on mount 'S:', start on mount '\\\\MyServer\Directory'
```
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/git.py`
Content:
```
1 import logging
2 import os.path
3 import sys
4 from typing import Dict
5 from typing import List
6 from typing import MutableMapping
7 from typing import Optional
8 from typing import Set
9
10 from pre_commit.errors import FatalError
11 from pre_commit.util import CalledProcessError
12 from pre_commit.util import cmd_output
13 from pre_commit.util import cmd_output_b
14
15
16 logger = logging.getLogger(__name__)
17
18
19 def zsplit(s: str) -> List[str]:
20 s = s.strip('\0')
21 if s:
22 return s.split('\0')
23 else:
24 return []
25
26
27 def no_git_env(
28 _env: Optional[MutableMapping[str, str]] = None,
29 ) -> Dict[str, str]:
30 # Too many bugs dealing with environment variables and GIT:
31 # https://github.com/pre-commit/pre-commit/issues/300
32 # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running
33 # pre-commit hooks
34 # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE
35 # while running pre-commit hooks in submodules.
36 # GIT_DIR: Causes git clone to clone wrong thing
37 # GIT_INDEX_FILE: Causes 'error invalid object ...' during commit
38 _env = _env if _env is not None else os.environ
39 return {
40 k: v for k, v in _env.items()
41 if not k.startswith('GIT_') or
42 k in {
43 'GIT_EXEC_PATH', 'GIT_SSH', 'GIT_SSH_COMMAND', 'GIT_SSL_CAINFO',
44 'GIT_SSL_NO_VERIFY',
45 }
46 }
47
48
49 def get_root() -> str:
50 # Git 2.25 introduced a change to "rev-parse --show-toplevel" that exposed
51 # underlying volumes for Windows drives mapped with SUBST. We use
52 # "rev-parse --show-cdup" to get the appropriate path, but must perform
53 # an extra check to see if we are in the .git directory.
54 try:
55 root = os.path.realpath(
56 cmd_output('git', 'rev-parse', '--show-cdup')[1].strip(),
57 )
58 git_dir = os.path.realpath(get_git_dir())
59 except CalledProcessError:
60 raise FatalError(
61 'git failed. Is it installed, and are you in a Git repository '
62 'directory?',
63 )
64 if os.path.samefile(root, git_dir):
65 raise FatalError(
66 'git toplevel unexpectedly empty! make sure you are not '
67 'inside the `.git` directory of your repository.',
68 )
69 return root
70
71
72 def get_git_dir(git_root: str = '.') -> str:
73 opts = ('--git-common-dir', '--git-dir')
74 _, out, _ = cmd_output('git', 'rev-parse', *opts, cwd=git_root)
75 for line, opt in zip(out.splitlines(), opts):
76 if line != opt: # pragma: no branch (git < 2.5)
77 return os.path.normpath(os.path.join(git_root, line))
78 else:
79 raise AssertionError('unreachable: no git dir')
80
81
82 def get_remote_url(git_root: str) -> str:
83 _, out, _ = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)
84 return out.strip()
85
86
87 def is_in_merge_conflict() -> bool:
88 git_dir = get_git_dir('.')
89 return (
90 os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and
91 os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))
92 )
93
94
95 def parse_merge_msg_for_conflicts(merge_msg: bytes) -> List[str]:
96 # Conflicted files start with tabs
97 return [
98 line.lstrip(b'#').strip().decode()
99 for line in merge_msg.splitlines()
100 # '#\t' for git 2.4.1
101 if line.startswith((b'\t', b'#\t'))
102 ]
103
104
105 def get_conflicted_files() -> Set[str]:
106 logger.info('Checking merge-conflict files only.')
107 # Need to get the conflicted files from the MERGE_MSG because they could
108 # have resolved the conflict by choosing one side or the other
109 with open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb') as f:
110 merge_msg = f.read()
111 merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)
112
113 # This will get the rest of the changes made after the merge.
114 # If they resolved the merge conflict by choosing a mesh of both sides
115 # this will also include the conflicted files
116 tree_hash = cmd_output('git', 'write-tree')[1].strip()
117 merge_diff_filenames = zsplit(
118 cmd_output(
119 'git', 'diff', '--name-only', '--no-ext-diff', '-z',
120 '-m', tree_hash, 'HEAD', 'MERGE_HEAD',
121 )[1],
122 )
123 return set(merge_conflict_filenames) | set(merge_diff_filenames)
124
125
126 def get_staged_files(cwd: Optional[str] = None) -> List[str]:
127 return zsplit(
128 cmd_output(
129 'git', 'diff', '--staged', '--name-only', '--no-ext-diff', '-z',
130 # Everything except for D
131 '--diff-filter=ACMRTUXB',
132 cwd=cwd,
133 )[1],
134 )
135
136
137 def intent_to_add_files() -> List[str]:
138 _, stdout, _ = cmd_output(
139 'git', 'status', '--ignore-submodules', '--porcelain', '-z',
140 )
141 parts = list(reversed(zsplit(stdout)))
142 intent_to_add = []
143 while parts:
144 line = parts.pop()
145 status, filename = line[:3], line[3:]
146 if status[0] in {'C', 'R'}: # renames / moves have an additional arg
147 parts.pop()
148 if status[1] == 'A':
149 intent_to_add.append(filename)
150 return intent_to_add
151
152
153 def get_all_files() -> List[str]:
154 return zsplit(cmd_output('git', 'ls-files', '-z')[1])
155
156
157 def get_changed_files(old: str, new: str) -> List[str]:
158 return zsplit(
159 cmd_output(
160 'git', 'diff', '--name-only', '--no-ext-diff', '-z',
161 f'{old}...{new}',
162 )[1],
163 )
164
165
166 def head_rev(remote: str) -> str:
167 _, out, _ = cmd_output('git', 'ls-remote', '--exit-code', remote, 'HEAD')
168 return out.split()[0]
169
170
171 def has_diff(*args: str, repo: str = '.') -> bool:
172 cmd = ('git', 'diff', '--quiet', '--no-ext-diff', *args)
173 return cmd_output_b(*cmd, cwd=repo, retcode=None)[0] == 1
174
175
176 def has_core_hookpaths_set() -> bool:
177 _, out, _ = cmd_output_b('git', 'config', 'core.hooksPath', retcode=None)
178 return bool(out.strip())
179
180
181 def init_repo(path: str, remote: str) -> None:
182 if os.path.isdir(remote):
183 remote = os.path.abspath(remote)
184
185 env = no_git_env()
186 # avoid the user's template so that hooks do not recurse
187 cmd_output_b('git', 'init', '--template=', path, env=env)
188 cmd_output_b('git', 'remote', 'add', 'origin', remote, cwd=path, env=env)
189
190
191 def commit(repo: str = '.') -> None:
192 env = no_git_env()
193 name, email = 'pre-commit', '[email protected]'
194 env['GIT_AUTHOR_NAME'] = env['GIT_COMMITTER_NAME'] = name
195 env['GIT_AUTHOR_EMAIL'] = env['GIT_COMMITTER_EMAIL'] = email
196 cmd = ('git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')
197 cmd_output_b(*cmd, cwd=repo, env=env)
198
199
200 def git_path(name: str, repo: str = '.') -> str:
201 _, out, _ = cmd_output('git', 'rev-parse', '--git-path', name, cwd=repo)
202 return os.path.join(repo, out.strip())
203
204
205 def check_for_cygwin_mismatch() -> None:
206 """See https://github.com/pre-commit/pre-commit/issues/354"""
207 if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)
208 is_cygwin_python = sys.platform == 'cygwin'
209 try:
210 toplevel = get_root()
211 except FatalError: # skip the check if we're not in a git repo
212 return
213 is_cygwin_git = toplevel.startswith('/')
214
215 if is_cygwin_python ^ is_cygwin_git:
216 exe_type = {True: '(cygwin)', False: '(windows)'}
217 logger.warn(
218 f'pre-commit has detected a mix of cygwin python / git\n'
219 f'This combination is not supported, it is likely you will '
220 f'receive an error later in the program.\n'
221 f'Make sure to use cygwin git+python while using cygwin\n'
222 f'These can be installed through the cygwin installer.\n'
223 f' - python {exe_type[is_cygwin_python]}\n'
224 f' - git {exe_type[is_cygwin_git]}\n',
225 )
226
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/git.py b/pre_commit/git.py
--- a/pre_commit/git.py
+++ b/pre_commit/git.py
@@ -52,10 +52,10 @@
# "rev-parse --show-cdup" to get the appropriate path, but must perform
# an extra check to see if we are in the .git directory.
try:
- root = os.path.realpath(
+ root = os.path.abspath(
cmd_output('git', 'rev-parse', '--show-cdup')[1].strip(),
)
- git_dir = os.path.realpath(get_git_dir())
+ git_dir = os.path.abspath(get_git_dir())
except CalledProcessError:
raise FatalError(
'git failed. Is it installed, and are you in a Git repository '
|
{"golden_diff": "diff --git a/pre_commit/git.py b/pre_commit/git.py\n--- a/pre_commit/git.py\n+++ b/pre_commit/git.py\n@@ -52,10 +52,10 @@\n # \"rev-parse --show-cdup\" to get the appropriate path, but must perform\n # an extra check to see if we are in the .git directory.\n try:\n- root = os.path.realpath(\n+ root = os.path.abspath(\n cmd_output('git', 'rev-parse', '--show-cdup')[1].strip(),\n )\n- git_dir = os.path.realpath(get_git_dir())\n+ git_dir = os.path.abspath(get_git_dir())\n except CalledProcessError:\n raise FatalError(\n 'git failed. Is it installed, and are you in a Git repository '\n", "issue": "Pre-commit install fails on Windows network mount drive\nHi, I'm trying to help some team members set up pre-commit on a Windows network mount drive, and they're encountering an issue with paths. Seems related to [this comment](https://github.com/pre-commit/pre-commit/issues/1610#issuecomment-719774326) in #1610, which seems to be a different issue than the one the original issue, as we are on the most recent version of pre-commit and git, and the fix that was merged in #1727 for that doesn't seem to address this issue. I also tried other versions of Git for Windows <2.25, and the issue still seemed to persist.\r\n\r\nI tested the solution that @christopherdoyle proposed, and that does seems to fix this issue with network mount drives, though I saw that @asottile would prefer not to use `pathlib`. I am not able to propose a fix right now, but I wanted to raise this as an issue that still exists.\r\n\r\nFull error below:\r\n\r\n```\r\n ### version information\r\n \r\n ```\r\n pre-commit version: 2.10.1\r\n sys.version:\r\n 3.9.1 | packaged by conda-forge | (default, Jan 26 2021, 01:29:07) [MSC v.1916 64 bit (AMD64)]\r\n sys.executable: C:\\Users\\roderick\\.conda\\envs\\nmt\\python.exe\r\n os.name: nt\r\n sys.platform: win32\r\n ```\r\n \r\n ### error information\r\n \r\n ```\r\n An unexpected error has occurred: ValueError: path is on mount 'S:', start on mount '\\\\\\\\MyServer\\Directory'\r\n ```\r\n \r\n ```\r\n Traceback (most recent call last):\r\n File \"C:\\Users\\roderick\\.conda\\envs\\nmt\\lib\\site-packages\\pre_commit\\error_handler.py\", line 65, in error_handler\r\n yield\r\n File \"C:\\Users\\roderick\\.conda\\envs\\nmt\\lib\\site-packages\\pre_commit\\main.py\", line 333, in main\r\n _adjust_args_and_chdir(args)\r\n File \"C:\\Users\\roderick\\.conda\\envs\\nmt\\lib\\site-packages\\pre_commit\\main.py\", line 153, in _adjust_args_and_chdir\r\n args.config = os.path.relpath(args.config)\r\n File \"C:\\Users\\roderick\\.conda\\envs\\nmt\\lib\\ntpath.py\", line 703, in relpath\r\n raise ValueError(\"path is on mount %r, start on mount %r\" % (\r\n ValueError: path is on mount 'S:', start on mount '\\\\\\\\MyServer\\Directory'\r\n```\r\n\r\n```\n", "before_files": [{"content": "import logging\nimport os.path\nimport sys\nfrom typing import Dict\nfrom typing import List\nfrom typing import MutableMapping\nfrom typing import Optional\nfrom typing import Set\n\nfrom pre_commit.errors import FatalError\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef zsplit(s: str) -> List[str]:\n s = s.strip('\\0')\n if s:\n return s.split('\\0')\n else:\n return []\n\n\ndef no_git_env(\n _env: Optional[MutableMapping[str, str]] = None,\n) -> Dict[str, str]:\n # Too many bugs dealing with environment variables and GIT:\n # 
https://github.com/pre-commit/pre-commit/issues/300\n # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running\n # pre-commit hooks\n # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE\n # while running pre-commit hooks in submodules.\n # GIT_DIR: Causes git clone to clone wrong thing\n # GIT_INDEX_FILE: Causes 'error invalid object ...' during commit\n _env = _env if _env is not None else os.environ\n return {\n k: v for k, v in _env.items()\n if not k.startswith('GIT_') or\n k in {\n 'GIT_EXEC_PATH', 'GIT_SSH', 'GIT_SSH_COMMAND', 'GIT_SSL_CAINFO',\n 'GIT_SSL_NO_VERIFY',\n }\n }\n\n\ndef get_root() -> str:\n # Git 2.25 introduced a change to \"rev-parse --show-toplevel\" that exposed\n # underlying volumes for Windows drives mapped with SUBST. We use\n # \"rev-parse --show-cdup\" to get the appropriate path, but must perform\n # an extra check to see if we are in the .git directory.\n try:\n root = os.path.realpath(\n cmd_output('git', 'rev-parse', '--show-cdup')[1].strip(),\n )\n git_dir = os.path.realpath(get_git_dir())\n except CalledProcessError:\n raise FatalError(\n 'git failed. Is it installed, and are you in a Git repository '\n 'directory?',\n )\n if os.path.samefile(root, git_dir):\n raise FatalError(\n 'git toplevel unexpectedly empty! make sure you are not '\n 'inside the `.git` directory of your repository.',\n )\n return root\n\n\ndef get_git_dir(git_root: str = '.') -> str:\n opts = ('--git-common-dir', '--git-dir')\n _, out, _ = cmd_output('git', 'rev-parse', *opts, cwd=git_root)\n for line, opt in zip(out.splitlines(), opts):\n if line != opt: # pragma: no branch (git < 2.5)\n return os.path.normpath(os.path.join(git_root, line))\n else:\n raise AssertionError('unreachable: no git dir')\n\n\ndef get_remote_url(git_root: str) -> str:\n _, out, _ = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)\n return out.strip()\n\n\ndef is_in_merge_conflict() -> bool:\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg: bytes) -> List[str]:\n # Conflicted files start with tabs\n return [\n line.lstrip(b'#').strip().decode()\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith((b'\\t', b'#\\t'))\n ]\n\n\ndef get_conflicted_files() -> Set[str]:\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n with open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb') as f:\n merge_msg = f.read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = zsplit(\n cmd_output(\n 'git', 'diff', '--name-only', '--no-ext-diff', '-z',\n '-m', tree_hash, 'HEAD', 'MERGE_HEAD',\n )[1],\n )\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\ndef get_staged_files(cwd: Optional[str] = None) -> List[str]:\n return zsplit(\n cmd_output(\n 'git', 'diff', '--staged', '--name-only', '--no-ext-diff', '-z',\n # Everything except for D\n '--diff-filter=ACMRTUXB',\n cwd=cwd,\n )[1],\n )\n\n\ndef intent_to_add_files() -> List[str]:\n _, stdout, _ = 
cmd_output(\n 'git', 'status', '--ignore-submodules', '--porcelain', '-z',\n )\n parts = list(reversed(zsplit(stdout)))\n intent_to_add = []\n while parts:\n line = parts.pop()\n status, filename = line[:3], line[3:]\n if status[0] in {'C', 'R'}: # renames / moves have an additional arg\n parts.pop()\n if status[1] == 'A':\n intent_to_add.append(filename)\n return intent_to_add\n\n\ndef get_all_files() -> List[str]:\n return zsplit(cmd_output('git', 'ls-files', '-z')[1])\n\n\ndef get_changed_files(old: str, new: str) -> List[str]:\n return zsplit(\n cmd_output(\n 'git', 'diff', '--name-only', '--no-ext-diff', '-z',\n f'{old}...{new}',\n )[1],\n )\n\n\ndef head_rev(remote: str) -> str:\n _, out, _ = cmd_output('git', 'ls-remote', '--exit-code', remote, 'HEAD')\n return out.split()[0]\n\n\ndef has_diff(*args: str, repo: str = '.') -> bool:\n cmd = ('git', 'diff', '--quiet', '--no-ext-diff', *args)\n return cmd_output_b(*cmd, cwd=repo, retcode=None)[0] == 1\n\n\ndef has_core_hookpaths_set() -> bool:\n _, out, _ = cmd_output_b('git', 'config', 'core.hooksPath', retcode=None)\n return bool(out.strip())\n\n\ndef init_repo(path: str, remote: str) -> None:\n if os.path.isdir(remote):\n remote = os.path.abspath(remote)\n\n env = no_git_env()\n # avoid the user's template so that hooks do not recurse\n cmd_output_b('git', 'init', '--template=', path, env=env)\n cmd_output_b('git', 'remote', 'add', 'origin', remote, cwd=path, env=env)\n\n\ndef commit(repo: str = '.') -> None:\n env = no_git_env()\n name, email = 'pre-commit', '[email protected]'\n env['GIT_AUTHOR_NAME'] = env['GIT_COMMITTER_NAME'] = name\n env['GIT_AUTHOR_EMAIL'] = env['GIT_COMMITTER_EMAIL'] = email\n cmd = ('git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')\n cmd_output_b(*cmd, cwd=repo, env=env)\n\n\ndef git_path(name: str, repo: str = '.') -> str:\n _, out, _ = cmd_output('git', 'rev-parse', '--git-path', name, cwd=repo)\n return os.path.join(repo, out.strip())\n\n\ndef check_for_cygwin_mismatch() -> None:\n \"\"\"See https://github.com/pre-commit/pre-commit/issues/354\"\"\"\n if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)\n is_cygwin_python = sys.platform == 'cygwin'\n try:\n toplevel = get_root()\n except FatalError: # skip the check if we're not in a git repo\n return\n is_cygwin_git = toplevel.startswith('/')\n\n if is_cygwin_python ^ is_cygwin_git:\n exe_type = {True: '(cygwin)', False: '(windows)'}\n logger.warn(\n f'pre-commit has detected a mix of cygwin python / git\\n'\n f'This combination is not supported, it is likely you will '\n f'receive an error later in the program.\\n'\n f'Make sure to use cygwin git+python while using cygwin\\n'\n f'These can be installed through the cygwin installer.\\n'\n f' - python {exe_type[is_cygwin_python]}\\n'\n f' - git {exe_type[is_cygwin_git]}\\n',\n )\n", "path": "pre_commit/git.py"}], "after_files": [{"content": "import logging\nimport os.path\nimport sys\nfrom typing import Dict\nfrom typing import List\nfrom typing import MutableMapping\nfrom typing import Optional\nfrom typing import Set\n\nfrom pre_commit.errors import FatalError\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef zsplit(s: str) -> List[str]:\n s = s.strip('\\0')\n if s:\n return s.split('\\0')\n else:\n return []\n\n\ndef no_git_env(\n _env: Optional[MutableMapping[str, str]] = None,\n) -> Dict[str, str]:\n # Too many bugs dealing with environment 
variables and GIT:\n # https://github.com/pre-commit/pre-commit/issues/300\n # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running\n # pre-commit hooks\n # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE\n # while running pre-commit hooks in submodules.\n # GIT_DIR: Causes git clone to clone wrong thing\n # GIT_INDEX_FILE: Causes 'error invalid object ...' during commit\n _env = _env if _env is not None else os.environ\n return {\n k: v for k, v in _env.items()\n if not k.startswith('GIT_') or\n k in {\n 'GIT_EXEC_PATH', 'GIT_SSH', 'GIT_SSH_COMMAND', 'GIT_SSL_CAINFO',\n 'GIT_SSL_NO_VERIFY',\n }\n }\n\n\ndef get_root() -> str:\n # Git 2.25 introduced a change to \"rev-parse --show-toplevel\" that exposed\n # underlying volumes for Windows drives mapped with SUBST. We use\n # \"rev-parse --show-cdup\" to get the appropriate path, but must perform\n # an extra check to see if we are in the .git directory.\n try:\n root = os.path.abspath(\n cmd_output('git', 'rev-parse', '--show-cdup')[1].strip(),\n )\n git_dir = os.path.abspath(get_git_dir())\n except CalledProcessError:\n raise FatalError(\n 'git failed. Is it installed, and are you in a Git repository '\n 'directory?',\n )\n if os.path.samefile(root, git_dir):\n raise FatalError(\n 'git toplevel unexpectedly empty! make sure you are not '\n 'inside the `.git` directory of your repository.',\n )\n return root\n\n\ndef get_git_dir(git_root: str = '.') -> str:\n opts = ('--git-common-dir', '--git-dir')\n _, out, _ = cmd_output('git', 'rev-parse', *opts, cwd=git_root)\n for line, opt in zip(out.splitlines(), opts):\n if line != opt: # pragma: no branch (git < 2.5)\n return os.path.normpath(os.path.join(git_root, line))\n else:\n raise AssertionError('unreachable: no git dir')\n\n\ndef get_remote_url(git_root: str) -> str:\n _, out, _ = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)\n return out.strip()\n\n\ndef is_in_merge_conflict() -> bool:\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg: bytes) -> List[str]:\n # Conflicted files start with tabs\n return [\n line.lstrip(b'#').strip().decode()\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith((b'\\t', b'#\\t'))\n ]\n\n\ndef get_conflicted_files() -> Set[str]:\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n with open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb') as f:\n merge_msg = f.read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = zsplit(\n cmd_output(\n 'git', 'diff', '--name-only', '--no-ext-diff', '-z',\n '-m', tree_hash, 'HEAD', 'MERGE_HEAD',\n )[1],\n )\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\ndef get_staged_files(cwd: Optional[str] = None) -> List[str]:\n return zsplit(\n cmd_output(\n 'git', 'diff', '--staged', '--name-only', '--no-ext-diff', '-z',\n # Everything except for D\n '--diff-filter=ACMRTUXB',\n cwd=cwd,\n )[1],\n )\n\n\ndef intent_to_add_files() -> List[str]:\n _, 
stdout, _ = cmd_output(\n 'git', 'status', '--ignore-submodules', '--porcelain', '-z',\n )\n parts = list(reversed(zsplit(stdout)))\n intent_to_add = []\n while parts:\n line = parts.pop()\n status, filename = line[:3], line[3:]\n if status[0] in {'C', 'R'}: # renames / moves have an additional arg\n parts.pop()\n if status[1] == 'A':\n intent_to_add.append(filename)\n return intent_to_add\n\n\ndef get_all_files() -> List[str]:\n return zsplit(cmd_output('git', 'ls-files', '-z')[1])\n\n\ndef get_changed_files(old: str, new: str) -> List[str]:\n return zsplit(\n cmd_output(\n 'git', 'diff', '--name-only', '--no-ext-diff', '-z',\n f'{old}...{new}',\n )[1],\n )\n\n\ndef head_rev(remote: str) -> str:\n _, out, _ = cmd_output('git', 'ls-remote', '--exit-code', remote, 'HEAD')\n return out.split()[0]\n\n\ndef has_diff(*args: str, repo: str = '.') -> bool:\n cmd = ('git', 'diff', '--quiet', '--no-ext-diff', *args)\n return cmd_output_b(*cmd, cwd=repo, retcode=None)[0] == 1\n\n\ndef has_core_hookpaths_set() -> bool:\n _, out, _ = cmd_output_b('git', 'config', 'core.hooksPath', retcode=None)\n return bool(out.strip())\n\n\ndef init_repo(path: str, remote: str) -> None:\n if os.path.isdir(remote):\n remote = os.path.abspath(remote)\n\n env = no_git_env()\n # avoid the user's template so that hooks do not recurse\n cmd_output_b('git', 'init', '--template=', path, env=env)\n cmd_output_b('git', 'remote', 'add', 'origin', remote, cwd=path, env=env)\n\n\ndef commit(repo: str = '.') -> None:\n env = no_git_env()\n name, email = 'pre-commit', '[email protected]'\n env['GIT_AUTHOR_NAME'] = env['GIT_COMMITTER_NAME'] = name\n env['GIT_AUTHOR_EMAIL'] = env['GIT_COMMITTER_EMAIL'] = email\n cmd = ('git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')\n cmd_output_b(*cmd, cwd=repo, env=env)\n\n\ndef git_path(name: str, repo: str = '.') -> str:\n _, out, _ = cmd_output('git', 'rev-parse', '--git-path', name, cwd=repo)\n return os.path.join(repo, out.strip())\n\n\ndef check_for_cygwin_mismatch() -> None:\n \"\"\"See https://github.com/pre-commit/pre-commit/issues/354\"\"\"\n if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)\n is_cygwin_python = sys.platform == 'cygwin'\n try:\n toplevel = get_root()\n except FatalError: # skip the check if we're not in a git repo\n return\n is_cygwin_git = toplevel.startswith('/')\n\n if is_cygwin_python ^ is_cygwin_git:\n exe_type = {True: '(cygwin)', False: '(windows)'}\n logger.warn(\n f'pre-commit has detected a mix of cygwin python / git\\n'\n f'This combination is not supported, it is likely you will '\n f'receive an error later in the program.\\n'\n f'Make sure to use cygwin git+python while using cygwin\\n'\n f'These can be installed through the cygwin installer.\\n'\n f' - python {exe_type[is_cygwin_python]}\\n'\n f' - git {exe_type[is_cygwin_git]}\\n',\n )\n", "path": "pre_commit/git.py"}]}
| 3,520 | 170 |
gh_patches_debug_23270
|
rasdani/github-patches
|
git_diff
|
spyder-ide__spyder-11838
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Activation of kernel env fails when using anaconda shortcut (FileNotFoundError on kernel start in 4.1.0)
## Problem Description
Spyder 4.1.0 IPython kernel startup error.
Closing Spyder and restarting does not resolve the problem.
### What steps reproduce the problem?
After upgrading Spyder 4.0.1 to 4.1.0, an IPython kernel startup error occurs when starting Spyder.
The following command was used to upgrade inside a virtual environment:
conda install spyder=4.1.0
```python-traceback
An error occurred while starting the IPython kernel
Error:
Traceback (most recent call last):
File "C:\Users\Admin\Anaconda3\envs\python37\lib\site‑packages\spyder\plugins\ipythonconsole\plugin.py", line 1209, in create_kernel_manager_and_kernel_client
kernel_manager.start_kernel(stderr=stderr_handle, **kwargs)
File "C:\Users\Admin\Anaconda3\envs\python37\lib\site‑packages\jupyter_client\manager.py", line 259, in start_kernel
**kw)
File "C:\Users\Admin\Anaconda3\envs\python37\lib\site‑packages\jupyter_client\manager.py", line 204, in _launch_kernel
return launch_kernel(kernel_cmd, **kw)
File "C:\Users\Admin\Anaconda3\envs\python37\lib\site‑packages\jupyter_client\launcher.py", line 138, in launch_kernel
proc = Popen(cmd, **kwargs)
File "C:\Users\Admin\Anaconda3\envs\python37\lib\subprocess.py", line 800, in __init__
restore_signals, start_new_session)
File "C:\Users\Admin\Anaconda3\envs\python37\lib\subprocess.py", line 1207, in _execute_child
startupinfo)
FileNotFoundError: [WinError 2] The system cannot find the file specified.
```
## Versions
* Spyder version: 4.1.0
* Python version: Python3.7.5
* Qt version: 5.9.6
* PyQt version: 5.9.2
* Operating System name/version: win10
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright © Spyder Project Contributors
4 # Licensed under the terms of the MIT License
5 # (see spyder/__init__.py for details)
6
7 """
8 Spyder
9 ======
10
11 The Scientific Python Development Environment
12
13 Spyder is a powerful scientific environment written in Python, for Python,
14 and designed by and for scientists, engineers and data analysts.
15
16 It features a unique combination of the advanced editing, analysis, debugging
17 and profiling functionality of a comprehensive development tool with the data
18 exploration, interactive execution, deep inspection and beautiful visualization
19 capabilities of a scientific package.
20 """
21
22 from __future__ import print_function
23
24 import io
25 import os
26 import os.path as osp
27 import subprocess
28 import sys
29 import shutil
30
31 from distutils.core import setup
32 from distutils.command.install_data import install_data
33
34
35 #==============================================================================
36 # Check for Python 3
37 #==============================================================================
38 PY3 = sys.version_info[0] == 3
39
40
41 #==============================================================================
42 # Minimal Python version sanity check
43 # Taken from the notebook setup.py -- Modified BSD License
44 #==============================================================================
45 v = sys.version_info
46 if v[:2] < (2, 7) or (v[0] >= 3 and v[:2] < (3, 5)):
47 error = "ERROR: Spyder requires Python version 2.7 or 3.5 and above."
48 print(error, file=sys.stderr)
49 sys.exit(1)
50
51
52 #==============================================================================
53 # Constants
54 #==============================================================================
55 NAME = 'spyder'
56 LIBNAME = 'spyder'
57 from spyder import __version__, __website_url__ #analysis:ignore
58
59
60 #==============================================================================
61 # Auxiliary functions
62 #==============================================================================
63 def get_package_data(name, extlist):
64 """Return data files for package *name* with extensions in *extlist*"""
65 flist = []
66 # Workaround to replace os.path.relpath (not available until Python 2.6):
67 offset = len(name)+len(os.pathsep)
68 for dirpath, _dirnames, filenames in os.walk(name):
69 if 'tests' not in dirpath:
70 for fname in filenames:
71 if (not fname.startswith('.') and
72 osp.splitext(fname)[1] in extlist):
73 flist.append(osp.join(dirpath, fname)[offset:])
74 return flist
75
76
77 def get_subpackages(name):
78 """Return subpackages of package *name*"""
79 splist = []
80 for dirpath, _dirnames, _filenames in os.walk(name):
81 if 'tests' not in dirpath:
82 if osp.isfile(osp.join(dirpath, '__init__.py')):
83 splist.append(".".join(dirpath.split(os.sep)))
84 return splist
85
86
87 def get_data_files():
88 """Return data_files in a platform dependent manner"""
89 if sys.platform.startswith('linux'):
90 if PY3:
91 data_files = [('share/applications', ['scripts/spyder3.desktop']),
92 ('share/icons', ['img_src/spyder3.png']),
93 ('share/metainfo', ['scripts/spyder3.appdata.xml'])]
94 else:
95 data_files = [('share/applications', ['scripts/spyder.desktop']),
96 ('share/icons', ['img_src/spyder.png'])]
97 elif os.name == 'nt':
98 data_files = [('scripts', ['img_src/spyder.ico',
99 'img_src/spyder_reset.ico'])]
100 else:
101 data_files = []
102 return data_files
103
104
105 def get_packages():
106 """Return package list"""
107 packages = get_subpackages(LIBNAME)
108 return packages
109
110
111 #==============================================================================
112 # Make Linux detect Spyder desktop file
113 #==============================================================================
114 class MyInstallData(install_data):
115 def run(self):
116 install_data.run(self)
117 if sys.platform.startswith('linux'):
118 try:
119 subprocess.call(['update-desktop-database'])
120 except:
121 print("ERROR: unable to update desktop database",
122 file=sys.stderr)
123 CMDCLASS = {'install_data': MyInstallData}
124
125
126 #==============================================================================
127 # Main scripts
128 #==============================================================================
129 # NOTE: the '[...]_win_post_install.py' script is installed even on non-Windows
130 # platforms due to a bug in pip installation process
131 # See spyder-ide/spyder#1158.
132 SCRIPTS = ['%s_win_post_install.py' % NAME]
133 if PY3 and sys.platform.startswith('linux'):
134 SCRIPTS.append('spyder3')
135 else:
136 SCRIPTS.append('spyder')
137
138
139 #==============================================================================
140 # Files added to the package
141 #==============================================================================
142 EXTLIST = ['.pot', '.po', '.mo', '.svg', '.png', '.css', '.html', '.js',
143 '.ini', '.txt', '.qss', '.ttf', '.json', '.rst', '.bloom']
144 if os.name == 'nt':
145 SCRIPTS += ['spyder.bat']
146 EXTLIST += ['.ico']
147
148
149 #==============================================================================
150 # Use Readme for long description
151 #==============================================================================
152 with io.open('README.md', encoding='utf-8') as f:
153 LONG_DESCRIPTION = f.read()
154
155
156 #==============================================================================
157 # Setup arguments
158 #==============================================================================
159 setup_args = dict(
160 name=NAME,
161 version=__version__,
162 description='The Scientific Python Development Environment',
163 long_description=LONG_DESCRIPTION,
164 long_description_content_type='text/markdown',
165 download_url=__website_url__ + "#fh5co-download",
166 author="The Spyder Project Contributors",
167 author_email="[email protected]",
168 url=__website_url__,
169 license='MIT',
170 keywords='PyQt5 editor console widgets IDE science data analysis IPython',
171 platforms=["Windows", "Linux", "Mac OS-X"],
172 packages=get_packages(),
173 package_data={LIBNAME: get_package_data(LIBNAME, EXTLIST)},
174 scripts=[osp.join('scripts', fname) for fname in SCRIPTS],
175 data_files=get_data_files(),
176 classifiers=['License :: OSI Approved :: MIT License',
177 'Operating System :: MacOS',
178 'Operating System :: Microsoft :: Windows',
179 'Operating System :: POSIX :: Linux',
180 'Programming Language :: Python :: 2',
181 'Programming Language :: Python :: 2.7',
182 'Programming Language :: Python :: 3',
183 'Programming Language :: Python :: 3.4',
184 'Programming Language :: Python :: 3.5',
185 'Programming Language :: Python :: 3.6',
186 'Programming Language :: Python :: 3.7',
187 'Development Status :: 5 - Production/Stable',
188 'Intended Audience :: Education',
189 'Intended Audience :: Science/Research',
190 'Intended Audience :: Developers',
191 'Topic :: Scientific/Engineering',
192 'Topic :: Software Development :: Widget Sets'],
193 cmdclass=CMDCLASS)
194
195
196 #==============================================================================
197 # Setuptools deps
198 #==============================================================================
199 if any(arg == 'bdist_wheel' for arg in sys.argv):
200 import setuptools # analysis:ignore
201
202 install_requires = [
203 'applaunchservices>=0.1.7;platform_system=="Darwin"',
204 'atomicwrites>=1.2.0',
205 'chardet>=2.0.0',
206 'cloudpickle>=0.5.0',
207 'diff-match-patch>=20181111',
208 'intervaltree',
209 'ipython>=4.0',
210 # This is here until Jedi 0.15+ fixes completions for
211 # Numpy and Pandas
212 'jedi==0.15.2',
213 # Don't require keyring for Python 2 and Linux
214 # because it depends on system packages
215 'keyring;sys_platform!="linux2"',
216 'nbconvert>=4.0',
217 'numpydoc>=0.6.0',
218 # Required to get SSH connections to remote kernels
219 'paramiko>=2.4.0;platform_system=="Windows"',
220 'parso==0.5.2',
221 'pexpect>=4.4.0',
222 'pickleshare>=0.4',
223 'psutil>=5.3',
224 'pygments>=2.0',
225 'pylint>=0.25',
226 'pyqt5<5.13;python_version>="3"',
227 'pyqtwebengine<5.13;python_version>="3"',
228 'python-language-server[all]>=0.31.2,<0.32.0',
229 'pyxdg>=0.26;platform_system=="Linux"',
230 'pyzmq>=17',
231 'qdarkstyle>=2.8',
232 'qtawesome>=0.5.7',
233 'qtconsole>=4.6.0',
234 'qtpy>=1.5.0',
235 'sphinx>=0.6.6',
236 'spyder-kernels>=1.9.0,<1.10.0',
237 'watchdog',
238 ]
239
240 extras_require = {
241 'test:python_version == "2.7"': ['mock'],
242 'test:platform_system == "Linux"': ['pytest-xvfb'],
243 'test:platform_system == "Windows"': ['pywin32'],
244 'test': [
245 'coverage<5.0',
246 'cython',
247 'flaky',
248 'matplotlib',
249 'mock',
250 'pandas',
251 'pillow',
252 'pytest<5.0',
253 'pytest-cov',
254 'pytest-faulthandler<2.0',
255 'pytest-lazy-fixture',
256 'pytest-mock',
257 'pytest-ordering',
258 'pytest-qt',
259 'pyyaml',
260 'scipy',
261 'sympy',
262 ],
263 }
264
265 if 'setuptools' in sys.modules:
266 setup_args['install_requires'] = install_requires
267 setup_args['extras_require'] = extras_require
268
269 setup_args['entry_points'] = {
270 'gui_scripts': [
271 '{} = spyder.app.start:main'.format(
272 'spyder3' if PY3 else 'spyder')
273 ]
274 }
275
276 setup_args.pop('scripts', None)
277
278
279 #==============================================================================
280 # Main setup
281 #==============================================================================
282 setup(**setup_args)
283
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -130,20 +130,21 @@
# platforms due to a bug in pip installation process
# See spyder-ide/spyder#1158.
SCRIPTS = ['%s_win_post_install.py' % NAME]
+
if PY3 and sys.platform.startswith('linux'):
SCRIPTS.append('spyder3')
else:
SCRIPTS.append('spyder')
+if os.name == 'nt':
+ SCRIPTS += ['spyder.bat']
#==============================================================================
# Files added to the package
#==============================================================================
EXTLIST = ['.pot', '.po', '.mo', '.svg', '.png', '.css', '.html', '.js',
- '.ini', '.txt', '.qss', '.ttf', '.json', '.rst', '.bloom']
-if os.name == 'nt':
- SCRIPTS += ['spyder.bat']
- EXTLIST += ['.ico']
+ '.ini', '.txt', '.qss', '.ttf', '.json', '.rst', '.bloom',
+ '.ico', '.gif', '.mp3', '.ogg', '.sfd', '.bat', '.sh']
#==============================================================================
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -130,20 +130,21 @@\n # platforms due to a bug in pip installation process\n # See spyder-ide/spyder#1158.\n SCRIPTS = ['%s_win_post_install.py' % NAME]\n+\n if PY3 and sys.platform.startswith('linux'):\n SCRIPTS.append('spyder3')\n else:\n SCRIPTS.append('spyder')\n \n+if os.name == 'nt':\n+ SCRIPTS += ['spyder.bat']\n \n #==============================================================================\n # Files added to the package\n #==============================================================================\n EXTLIST = ['.pot', '.po', '.mo', '.svg', '.png', '.css', '.html', '.js',\n- '.ini', '.txt', '.qss', '.ttf', '.json', '.rst', '.bloom']\n-if os.name == 'nt':\n- SCRIPTS += ['spyder.bat']\n- EXTLIST += ['.ico']\n+ '.ini', '.txt', '.qss', '.ttf', '.json', '.rst', '.bloom',\n+ '.ico', '.gif', '.mp3', '.ogg', '.sfd', '.bat', '.sh']\n \n \n #==============================================================================\n", "issue": "Activation of kernel env fails when using anaconda shortcut (FileNotFound error kernel start 4.1.0)\n<!--- **PLEASE READ:** When submitting here, please ensure you've completed the following checklist and checked the boxes to confirm. Issue reports without it may be closed. Thanks! --->\r\n## Problem Description\r\n\r\nSpyder4.1.0 IPython kernel startup error\r\nClose spyder and restart, the problem persists.\r\n### What steps reproduce the problem?\r\n\r\nAfter upgrading spyder4.0.1 to 4.1.0, IPython kernel startup error occurs when starting spyder.\r\nThe following command is used to upgrade in a virtual environment:\r\nconda install spyder=4.1.0\r\n```python-traceback\r\n\u542f\u52a8 IPython \u5185\u6838\u65f6\u53d1\u751f\u9519\u8bef\uff08An error occurred while starting the IPython kernel\uff09\r\n\u9519\u8bef\uff1a\uff08Error\uff09\r\nTraceback (most recent call last):\r\nFile \"C:\\Users\\Admin\\Anaconda3\\envs\\python37\\lib\\site\u2011packages\\spyder\\plugins\\ipythonconsole\\plugin.py\", line 1209, in create_kernel_manager_and_kernel_client\r\nkernel_manager.start_kernel(stderr=stderr_handle, **kwargs)\r\nFile \"C:\\Users\\Admin\\Anaconda3\\envs\\python37\\lib\\site\u2011packages\\jupyter_client\\manager.py\", line 259, in start_kernel\r\n**kw)\r\nFile \"C:\\Users\\Admin\\Anaconda3\\envs\\python37\\lib\\site\u2011packages\\jupyter_client\\manager.py\", line 204, in _launch_kernel\r\nreturn launch_kernel(kernel_cmd, **kw)\r\nFile \"C:\\Users\\Admin\\Anaconda3\\envs\\python37\\lib\\site\u2011packages\\jupyter_client\\launcher.py\", line 138, in launch_kernel\r\nproc = Popen(cmd, **kwargs)\r\nFile \"C:\\Users\\Admin\\Anaconda3\\envs\\python37\\lib\\subprocess.py\", line 800, in __init__\r\nrestore_signals, start_new_session)\r\nFile \"C:\\Users\\Admin\\Anaconda3\\envs\\python37\\lib\\subprocess.py\", line 1207, in _execute_child\r\nstartupinfo)\r\nFileNotFoundError: [WinError 2] \u7cfb\u7edf\u627e\u4e0d\u5230\u6307\u5b9a\u7684\u6587\u4ef6\uff08The system can not find the file specified\uff09\u3002\r\n```\r\n\r\n## Versions\r\n<!--- You can get this information from Help > About Spyder...\r\nor (if Spyder won't launch) the \"conda list\" command\r\nfrom the Anaconda Prompt/Terminal/command line. 
--->\r\n\r\n* Spyder version: 4.1.0\r\n* Python version: Python3.7.5\r\n* Qt version: 5.9.6\r\n* PyQt version: 5.9.2\r\n* Operating System name/version: win10\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"\nSpyder\n======\n\nThe Scientific Python Development Environment\n\nSpyder is a powerful scientific environment written in Python, for Python,\nand designed by and for scientists, engineers and data analysts.\n\nIt features a unique combination of the advanced editing, analysis, debugging\nand profiling functionality of a comprehensive development tool with the data\nexploration, interactive execution, deep inspection and beautiful visualization\ncapabilities of a scientific package.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport io\nimport os\nimport os.path as osp\nimport subprocess\nimport sys\nimport shutil\n\nfrom distutils.core import setup\nfrom distutils.command.install_data import install_data\n\n\n#==============================================================================\n# Check for Python 3\n#==============================================================================\nPY3 = sys.version_info[0] == 3\n\n\n#==============================================================================\n# Minimal Python version sanity check\n# Taken from the notebook setup.py -- Modified BSD License\n#==============================================================================\nv = sys.version_info\nif v[:2] < (2, 7) or (v[0] >= 3 and v[:2] < (3, 5)):\n error = \"ERROR: Spyder requires Python version 2.7 or 3.5 and above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\n\n#==============================================================================\n# Constants\n#==============================================================================\nNAME = 'spyder'\nLIBNAME = 'spyder'\nfrom spyder import __version__, __website_url__ #analysis:ignore\n\n\n#==============================================================================\n# Auxiliary functions\n#==============================================================================\ndef get_package_data(name, extlist):\n \"\"\"Return data files for package *name* with extensions in *extlist*\"\"\"\n flist = []\n # Workaround to replace os.path.relpath (not available until Python 2.6):\n offset = len(name)+len(os.pathsep)\n for dirpath, _dirnames, filenames in os.walk(name):\n if 'tests' not in dirpath:\n for fname in filenames:\n if (not fname.startswith('.') and\n osp.splitext(fname)[1] in extlist):\n flist.append(osp.join(dirpath, fname)[offset:])\n return flist\n\n\ndef get_subpackages(name):\n \"\"\"Return subpackages of package *name*\"\"\"\n splist = []\n for dirpath, _dirnames, _filenames in os.walk(name):\n if 'tests' not in dirpath:\n if osp.isfile(osp.join(dirpath, '__init__.py')):\n splist.append(\".\".join(dirpath.split(os.sep)))\n return splist\n\n\ndef get_data_files():\n \"\"\"Return data_files in a platform dependent manner\"\"\"\n if sys.platform.startswith('linux'):\n if PY3:\n data_files = [('share/applications', ['scripts/spyder3.desktop']),\n ('share/icons', ['img_src/spyder3.png']),\n ('share/metainfo', ['scripts/spyder3.appdata.xml'])]\n else:\n data_files = [('share/applications', ['scripts/spyder.desktop']),\n ('share/icons', ['img_src/spyder.png'])]\n elif os.name == 'nt':\n data_files = [('scripts', ['img_src/spyder.ico',\n 
'img_src/spyder_reset.ico'])]\n else:\n data_files = []\n return data_files\n\n\ndef get_packages():\n \"\"\"Return package list\"\"\"\n packages = get_subpackages(LIBNAME)\n return packages\n\n\n#==============================================================================\n# Make Linux detect Spyder desktop file\n#==============================================================================\nclass MyInstallData(install_data):\n def run(self):\n install_data.run(self)\n if sys.platform.startswith('linux'):\n try:\n subprocess.call(['update-desktop-database'])\n except:\n print(\"ERROR: unable to update desktop database\",\n file=sys.stderr)\nCMDCLASS = {'install_data': MyInstallData}\n\n\n#==============================================================================\n# Main scripts\n#==============================================================================\n# NOTE: the '[...]_win_post_install.py' script is installed even on non-Windows\n# platforms due to a bug in pip installation process\n# See spyder-ide/spyder#1158.\nSCRIPTS = ['%s_win_post_install.py' % NAME]\nif PY3 and sys.platform.startswith('linux'):\n SCRIPTS.append('spyder3')\nelse:\n SCRIPTS.append('spyder')\n\n\n#==============================================================================\n# Files added to the package\n#==============================================================================\nEXTLIST = ['.pot', '.po', '.mo', '.svg', '.png', '.css', '.html', '.js',\n '.ini', '.txt', '.qss', '.ttf', '.json', '.rst', '.bloom']\nif os.name == 'nt':\n SCRIPTS += ['spyder.bat']\n EXTLIST += ['.ico']\n\n\n#==============================================================================\n# Use Readme for long description\n#==============================================================================\nwith io.open('README.md', encoding='utf-8') as f:\n LONG_DESCRIPTION = f.read()\n\n\n#==============================================================================\n# Setup arguments\n#==============================================================================\nsetup_args = dict(\n name=NAME,\n version=__version__,\n description='The Scientific Python Development Environment',\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n download_url=__website_url__ + \"#fh5co-download\",\n author=\"The Spyder Project Contributors\",\n author_email=\"[email protected]\",\n url=__website_url__,\n license='MIT',\n keywords='PyQt5 editor console widgets IDE science data analysis IPython',\n platforms=[\"Windows\", \"Linux\", \"Mac OS-X\"],\n packages=get_packages(),\n package_data={LIBNAME: get_package_data(LIBNAME, EXTLIST)},\n scripts=[osp.join('scripts', fname) for fname in SCRIPTS],\n data_files=get_data_files(),\n classifiers=['License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Software Development :: Widget Sets'],\n 
cmdclass=CMDCLASS)\n\n\n#==============================================================================\n# Setuptools deps\n#==============================================================================\nif any(arg == 'bdist_wheel' for arg in sys.argv):\n import setuptools # analysis:ignore\n\ninstall_requires = [\n 'applaunchservices>=0.1.7;platform_system==\"Darwin\"',\n 'atomicwrites>=1.2.0',\n 'chardet>=2.0.0',\n 'cloudpickle>=0.5.0',\n 'diff-match-patch>=20181111',\n 'intervaltree',\n 'ipython>=4.0',\n # This is here until Jedi 0.15+ fixes completions for\n # Numpy and Pandas\n 'jedi==0.15.2',\n # Don't require keyring for Python 2 and Linux\n # because it depends on system packages\n 'keyring;sys_platform!=\"linux2\"',\n 'nbconvert>=4.0',\n 'numpydoc>=0.6.0',\n # Required to get SSH connections to remote kernels\n 'paramiko>=2.4.0;platform_system==\"Windows\"',\n 'parso==0.5.2',\n 'pexpect>=4.4.0',\n 'pickleshare>=0.4',\n 'psutil>=5.3',\n 'pygments>=2.0',\n 'pylint>=0.25',\n 'pyqt5<5.13;python_version>=\"3\"',\n 'pyqtwebengine<5.13;python_version>=\"3\"',\n 'python-language-server[all]>=0.31.2,<0.32.0',\n 'pyxdg>=0.26;platform_system==\"Linux\"',\n 'pyzmq>=17',\n 'qdarkstyle>=2.8',\n 'qtawesome>=0.5.7',\n 'qtconsole>=4.6.0',\n 'qtpy>=1.5.0',\n 'sphinx>=0.6.6',\n 'spyder-kernels>=1.9.0,<1.10.0',\n 'watchdog',\n]\n\nextras_require = {\n 'test:python_version == \"2.7\"': ['mock'],\n 'test:platform_system == \"Linux\"': ['pytest-xvfb'],\n 'test:platform_system == \"Windows\"': ['pywin32'],\n 'test': [\n 'coverage<5.0',\n 'cython',\n 'flaky',\n 'matplotlib',\n 'mock',\n 'pandas',\n 'pillow',\n 'pytest<5.0',\n 'pytest-cov',\n 'pytest-faulthandler<2.0',\n 'pytest-lazy-fixture',\n 'pytest-mock',\n 'pytest-ordering',\n 'pytest-qt',\n 'pyyaml',\n 'scipy',\n 'sympy',\n ],\n}\n\nif 'setuptools' in sys.modules:\n setup_args['install_requires'] = install_requires\n setup_args['extras_require'] = extras_require\n\n setup_args['entry_points'] = {\n 'gui_scripts': [\n '{} = spyder.app.start:main'.format(\n 'spyder3' if PY3 else 'spyder')\n ]\n }\n\n setup_args.pop('scripts', None)\n\n\n#==============================================================================\n# Main setup\n#==============================================================================\nsetup(**setup_args)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"\nSpyder\n======\n\nThe Scientific Python Development Environment\n\nSpyder is a powerful scientific environment written in Python, for Python,\nand designed by and for scientists, engineers and data analysts.\n\nIt features a unique combination of the advanced editing, analysis, debugging\nand profiling functionality of a comprehensive development tool with the data\nexploration, interactive execution, deep inspection and beautiful visualization\ncapabilities of a scientific package.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport io\nimport os\nimport os.path as osp\nimport subprocess\nimport sys\nimport shutil\n\nfrom distutils.core import setup\nfrom distutils.command.install_data import install_data\n\n\n#==============================================================================\n# Check for Python 3\n#==============================================================================\nPY3 = sys.version_info[0] == 
3\n\n\n#==============================================================================\n# Minimal Python version sanity check\n# Taken from the notebook setup.py -- Modified BSD License\n#==============================================================================\nv = sys.version_info\nif v[:2] < (2, 7) or (v[0] >= 3 and v[:2] < (3, 5)):\n error = \"ERROR: Spyder requires Python version 2.7 or 3.5 and above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\n\n#==============================================================================\n# Constants\n#==============================================================================\nNAME = 'spyder'\nLIBNAME = 'spyder'\nfrom spyder import __version__, __website_url__ #analysis:ignore\n\n\n#==============================================================================\n# Auxiliary functions\n#==============================================================================\ndef get_package_data(name, extlist):\n \"\"\"Return data files for package *name* with extensions in *extlist*\"\"\"\n flist = []\n # Workaround to replace os.path.relpath (not available until Python 2.6):\n offset = len(name)+len(os.pathsep)\n for dirpath, _dirnames, filenames in os.walk(name):\n if 'tests' not in dirpath:\n for fname in filenames:\n if (not fname.startswith('.') and\n osp.splitext(fname)[1] in extlist):\n flist.append(osp.join(dirpath, fname)[offset:])\n return flist\n\n\ndef get_subpackages(name):\n \"\"\"Return subpackages of package *name*\"\"\"\n splist = []\n for dirpath, _dirnames, _filenames in os.walk(name):\n if 'tests' not in dirpath:\n if osp.isfile(osp.join(dirpath, '__init__.py')):\n splist.append(\".\".join(dirpath.split(os.sep)))\n return splist\n\n\ndef get_data_files():\n \"\"\"Return data_files in a platform dependent manner\"\"\"\n if sys.platform.startswith('linux'):\n if PY3:\n data_files = [('share/applications', ['scripts/spyder3.desktop']),\n ('share/icons', ['img_src/spyder3.png']),\n ('share/metainfo', ['scripts/spyder3.appdata.xml'])]\n else:\n data_files = [('share/applications', ['scripts/spyder.desktop']),\n ('share/icons', ['img_src/spyder.png'])]\n elif os.name == 'nt':\n data_files = [('scripts', ['img_src/spyder.ico',\n 'img_src/spyder_reset.ico'])]\n else:\n data_files = []\n return data_files\n\n\ndef get_packages():\n \"\"\"Return package list\"\"\"\n packages = get_subpackages(LIBNAME)\n return packages\n\n\n#==============================================================================\n# Make Linux detect Spyder desktop file\n#==============================================================================\nclass MyInstallData(install_data):\n def run(self):\n install_data.run(self)\n if sys.platform.startswith('linux'):\n try:\n subprocess.call(['update-desktop-database'])\n except:\n print(\"ERROR: unable to update desktop database\",\n file=sys.stderr)\nCMDCLASS = {'install_data': MyInstallData}\n\n\n#==============================================================================\n# Main scripts\n#==============================================================================\n# NOTE: the '[...]_win_post_install.py' script is installed even on non-Windows\n# platforms due to a bug in pip installation process\n# See spyder-ide/spyder#1158.\nSCRIPTS = ['%s_win_post_install.py' % NAME]\n\nif PY3 and sys.platform.startswith('linux'):\n SCRIPTS.append('spyder3')\nelse:\n SCRIPTS.append('spyder')\n\nif os.name == 'nt':\n SCRIPTS += 
['spyder.bat']\n\n#==============================================================================\n# Files added to the package\n#==============================================================================\nEXTLIST = ['.pot', '.po', '.mo', '.svg', '.png', '.css', '.html', '.js',\n '.ini', '.txt', '.qss', '.ttf', '.json', '.rst', '.bloom',\n '.ico', '.gif', '.mp3', '.ogg', '.sfd', '.bat', '.sh']\n\n\n#==============================================================================\n# Use Readme for long description\n#==============================================================================\nwith io.open('README.md', encoding='utf-8') as f:\n LONG_DESCRIPTION = f.read()\n\n\n#==============================================================================\n# Setup arguments\n#==============================================================================\nsetup_args = dict(\n name=NAME,\n version=__version__,\n description='The Scientific Python Development Environment',\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n download_url=__website_url__ + \"#fh5co-download\",\n author=\"The Spyder Project Contributors\",\n author_email=\"[email protected]\",\n url=__website_url__,\n license='MIT',\n keywords='PyQt5 editor console widgets IDE science data analysis IPython',\n platforms=[\"Windows\", \"Linux\", \"Mac OS-X\"],\n packages=get_packages(),\n package_data={LIBNAME: get_package_data(LIBNAME, EXTLIST)},\n scripts=[osp.join('scripts', fname) for fname in SCRIPTS],\n data_files=get_data_files(),\n classifiers=['License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Software Development :: Widget Sets'],\n cmdclass=CMDCLASS)\n\n\n#==============================================================================\n# Setuptools deps\n#==============================================================================\nif any(arg == 'bdist_wheel' for arg in sys.argv):\n import setuptools # analysis:ignore\n\ninstall_requires = [\n 'applaunchservices>=0.1.7;platform_system==\"Darwin\"',\n 'atomicwrites>=1.2.0',\n 'chardet>=2.0.0',\n 'cloudpickle>=0.5.0',\n 'diff-match-patch>=20181111',\n 'intervaltree',\n 'ipython>=4.0',\n # This is here until Jedi 0.15+ fixes completions for\n # Numpy and Pandas\n 'jedi==0.15.2',\n # Don't require keyring for Python 2 and Linux\n # because it depends on system packages\n 'keyring;sys_platform!=\"linux2\"',\n 'nbconvert>=4.0',\n 'numpydoc>=0.6.0',\n # Required to get SSH connections to remote kernels\n 'paramiko>=2.4.0;platform_system==\"Windows\"',\n 'parso==0.5.2',\n 'pexpect>=4.4.0',\n 'pickleshare>=0.4',\n 'psutil>=5.3',\n 'pygments>=2.0',\n 'pylint>=0.25',\n 'pyqt5<5.13;python_version>=\"3\"',\n 'pyqtwebengine<5.13;python_version>=\"3\"',\n 'python-language-server[all]>=0.31.2,<0.32.0',\n 'pyxdg>=0.26;platform_system==\"Linux\"',\n 'pyzmq>=17',\n 'qdarkstyle>=2.8',\n 'qtawesome>=0.5.7',\n 'qtconsole>=4.6.0',\n 
'qtpy>=1.5.0',\n 'sphinx>=0.6.6',\n 'spyder-kernels>=1.9.0,<1.10.0',\n 'watchdog',\n]\n\nextras_require = {\n 'test:python_version == \"2.7\"': ['mock'],\n 'test:platform_system == \"Linux\"': ['pytest-xvfb'],\n 'test:platform_system == \"Windows\"': ['pywin32'],\n 'test': [\n 'coverage<5.0',\n 'cython',\n 'flaky',\n 'matplotlib',\n 'mock',\n 'pandas',\n 'pillow',\n 'pytest<5.0',\n 'pytest-cov',\n 'pytest-faulthandler<2.0',\n 'pytest-lazy-fixture',\n 'pytest-mock',\n 'pytest-ordering',\n 'pytest-qt',\n 'pyyaml',\n 'scipy',\n 'sympy',\n ],\n}\n\nif 'setuptools' in sys.modules:\n setup_args['install_requires'] = install_requires\n setup_args['extras_require'] = extras_require\n\n setup_args['entry_points'] = {\n 'gui_scripts': [\n '{} = spyder.app.start:main'.format(\n 'spyder3' if PY3 else 'spyder')\n ]\n }\n\n setup_args.pop('scripts', None)\n\n\n#==============================================================================\n# Main setup\n#==============================================================================\nsetup(**setup_args)\n", "path": "setup.py"}]}
| 3,775 | 274 |
gh_patches_debug_36472
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-621
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Exception when using `quote_ident` in psycopg2
**Describe the bug**:
If you use the function `psycopg2.extensions.quote_ident` [docs](http://initd.org/psycopg/docs/extensions.html#psycopg2.extensions.quote_ident), a `TypeError` exception is thrown. This happens because the cursor object, when under instrumentation from ES-APM, is an instance of `PGCursorProxy` rather than the actual cursor, and `quote_ident` rejects it because the argument type is checked in the C code [link](https://github.com/psycopg/psycopg2/blob/2_7_6_1/psycopg/psycopgmodule.c#L181), failing with `TypeError: argument 2 must be a connection or a cursor`. Inspecting the `cur` object at a debug breakpoint shows that it is the proxy object (a minimal sketch of the failure follows the inspection output below):
```
>>> cur
<PGCursorProxy at 0x7fd7f70f9a88 for NamedTupleCursor at 0x7fd7f70f0148>
>>> type(cur)
<class 'elasticapm.instrumentation.packages.psycopg2.PGCursorProxy'>
>>> type(cur.__wrapped__)
<class 'psycopg2.extras.NamedTupleCursor'>
```
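
A minimal sketch of the failure, assuming a plain `wrapt.ObjectProxy` as a stand-in for the agent's `PGCursorProxy` and a placeholder DSN (both are assumptions for illustration, not the agent's actual setup):
```python
import psycopg2
import psycopg2.extensions
import wrapt

conn = psycopg2.connect("dbname=test")   # placeholder DSN, adjust as needed
curs = conn.cursor()
proxied = wrapt.ObjectProxy(curs)        # stand-in for elasticapm's PGCursorProxy

psycopg2.extensions.quote_ident("column_name", curs)                 # works: real cursor
psycopg2.extensions.quote_ident("column_name", proxied.__wrapped__)  # works: unwrapped cursor
try:
    psycopg2.extensions.quote_ident("column_name", proxied)          # proxy fails the C-level type check
except TypeError as exc:
    print(exc)  # argument 2 must be a connection or a cursor
```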
**To Reproduce**
```python
from psycopg2.extensions import quote_ident
....
....
with psycopg2.connect(DSN) as conn:
    with conn.cursor() as curs:
        ident = quote_ident("column_name", curs)
        curs.execute(f"SELECT {ident} FROM data.table;")
        data = curs.fetchall()
```
Passing the underlying wrapped cursor works:
```python
from psycopg2.extensions import quote_ident
....
....
with psycopg2.connect(DSN) as conn:
    with conn.cursor() as curs:
        ident = quote_ident("column_name", curs.__wrapped__)
        curs.execute(f"SELECT {ident} FROM data.table;")
        data = curs.fetchall()
```
**Environment (please complete the following information)**
- OS: Linux
- Python version: 3.6.4
- Agent version: 5.2.2
**Additional context**
It looks like the same problem was encountered in https://github.com/DataDog/dd-trace-py/issues/474, where it was fixed by also patching `quote_ident` to pass the `__wrapped__` object. Testing this out with a basic in-module proxy function worked, but a patch right at the top level from the apm module would sort it out properly (a sketch of wiring this up follows the snippet below):
```python
def quote_ident(string, cursor):
    return psycopg2.extensions.quote_ident(string, cursor.__wrapped__)
```
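
A hedged sketch of how such a top-level patch could be wired up with `wrapt`; the wrapper and the registration call below are illustrative assumptions, not elasticapm's actual instrumentation API:
```python
import wrapt

def _unwrap_proxy_args(wrapped, instance, args, kwargs):
    # Swap any proxied connection/cursor for the underlying object before
    # psycopg2's C-level type check sees it; plain values pass through unchanged.
    args = tuple(getattr(a, "__wrapped__", a) for a in args)
    kwargs = {k: getattr(v, "__wrapped__", v) for k, v in kwargs.items()}
    return wrapped(*args, **kwargs)

# Patch psycopg2.extensions.quote_ident once, at instrumentation time.
wrapt.wrap_function_wrapper("psycopg2.extensions", "quote_ident", _unwrap_proxy_args)
```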
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/instrumentation/register.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31
32 from elasticapm.utils.module_import import import_string
33
34 _cls_register = {
35 "elasticapm.instrumentation.packages.botocore.BotocoreInstrumentation",
36 "elasticapm.instrumentation.packages.jinja2.Jinja2Instrumentation",
37 "elasticapm.instrumentation.packages.psycopg2.Psycopg2Instrumentation",
38 "elasticapm.instrumentation.packages.psycopg2.Psycopg2RegisterTypeInstrumentation",
39 "elasticapm.instrumentation.packages.mysql.MySQLInstrumentation",
40 "elasticapm.instrumentation.packages.pylibmc.PyLibMcInstrumentation",
41 "elasticapm.instrumentation.packages.pymongo.PyMongoInstrumentation",
42 "elasticapm.instrumentation.packages.pymongo.PyMongoBulkInstrumentation",
43 "elasticapm.instrumentation.packages.pymongo.PyMongoCursorInstrumentation",
44 "elasticapm.instrumentation.packages.python_memcached.PythonMemcachedInstrumentation",
45 "elasticapm.instrumentation.packages.redis.RedisInstrumentation",
46 "elasticapm.instrumentation.packages.redis.RedisPipelineInstrumentation",
47 "elasticapm.instrumentation.packages.requests.RequestsInstrumentation",
48 "elasticapm.instrumentation.packages.sqlite.SQLiteInstrumentation",
49 "elasticapm.instrumentation.packages.urllib3.Urllib3Instrumentation",
50 "elasticapm.instrumentation.packages.elasticsearch.ElasticsearchConnectionInstrumentation",
51 "elasticapm.instrumentation.packages.elasticsearch.ElasticsearchInstrumentation",
52 "elasticapm.instrumentation.packages.cassandra.CassandraInstrumentation",
53 "elasticapm.instrumentation.packages.pymssql.PyMSSQLInstrumentation",
54 "elasticapm.instrumentation.packages.pyodbc.PyODBCInstrumentation",
55 "elasticapm.instrumentation.packages.django.template.DjangoTemplateInstrumentation",
56 "elasticapm.instrumentation.packages.django.template.DjangoTemplateSourceInstrumentation",
57 "elasticapm.instrumentation.packages.urllib.UrllibInstrumentation",
58 }
59
60
61 def register(cls):
62 _cls_register.add(cls)
63
64
65 _instrumentation_singletons = {}
66
67
68 def get_instrumentation_objects():
69 for cls_str in _cls_register:
70 if cls_str not in _instrumentation_singletons:
71 cls = import_string(cls_str)
72 _instrumentation_singletons[cls_str] = cls()
73
74 obj = _instrumentation_singletons[cls_str]
75 yield obj
76
```
Path: `elasticapm/instrumentation/packages/psycopg2.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 from elasticapm.instrumentation.packages.dbapi2 import (
32 ConnectionProxy,
33 CursorProxy,
34 DbApi2Instrumentation,
35 extract_signature,
36 )
37 from elasticapm.traces import capture_span
38 from elasticapm.utils import default_ports
39
40
41 class PGCursorProxy(CursorProxy):
42 provider_name = "postgresql"
43
44 def _bake_sql(self, sql):
45 # if this is a Composable object, use its `as_string` method
46 # see http://initd.org/psycopg/docs/sql.html
47 if hasattr(sql, "as_string"):
48 return sql.as_string(self.__wrapped__)
49 return sql
50
51 def extract_signature(self, sql):
52 return extract_signature(sql)
53
54 def __enter__(self):
55 return PGCursorProxy(self.__wrapped__.__enter__())
56
57
58 class PGConnectionProxy(ConnectionProxy):
59 cursor_proxy = PGCursorProxy
60
61 def __enter__(self):
62 return PGConnectionProxy(self.__wrapped__.__enter__())
63
64
65 class Psycopg2Instrumentation(DbApi2Instrumentation):
66 name = "psycopg2"
67
68 instrument_list = [("psycopg2", "connect")]
69
70 def call(self, module, method, wrapped, instance, args, kwargs):
71 signature = "psycopg2.connect"
72
73 host = kwargs.get("host")
74 if host:
75 signature += " " + str(host)
76
77 port = kwargs.get("port")
78 if port:
79 port = str(port)
80 if int(port) != default_ports.get("postgresql"):
81 signature += ":" + port
82 else:
83 # Parse connection string and extract host/port
84 pass
85
86 with capture_span(signature, span_type="db", span_subtype="postgresql", span_action="connect"):
87 return PGConnectionProxy(wrapped(*args, **kwargs))
88
89
90 class Psycopg2RegisterTypeInstrumentation(DbApi2Instrumentation):
91 name = "psycopg2-register-type"
92
93 instrument_list = [
94 ("psycopg2.extensions", "register_type"),
95 # specifically instrument `register_json` as it bypasses `register_type`
96 ("psycopg2._json", "register_json"),
97 ]
98
99 def call(self, module, method, wrapped, instance, args, kwargs):
100 if "conn_or_curs" in kwargs and hasattr(kwargs["conn_or_curs"], "__wrapped__"):
101 kwargs["conn_or_curs"] = kwargs["conn_or_curs"].__wrapped__
102 # register_type takes the connection as second argument
103 elif len(args) == 2 and hasattr(args[1], "__wrapped__"):
104 args = (args[0], args[1].__wrapped__)
105 # register_json takes the connection as first argument, and can have
106 # several more arguments
107 elif method == "register_json":
108 if args and hasattr(args[0], "__wrapped__"):
109 args = (args[0].__wrapped__,) + args[1:]
110
111 return wrapped(*args, **kwargs)
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/elasticapm/instrumentation/packages/psycopg2.py b/elasticapm/instrumentation/packages/psycopg2.py
--- a/elasticapm/instrumentation/packages/psycopg2.py
+++ b/elasticapm/instrumentation/packages/psycopg2.py
@@ -87,13 +87,21 @@
return PGConnectionProxy(wrapped(*args, **kwargs))
-class Psycopg2RegisterTypeInstrumentation(DbApi2Instrumentation):
- name = "psycopg2-register-type"
+class Psycopg2ExtensionsInstrumentation(DbApi2Instrumentation):
+ """
+ Some extensions do a type check on the Connection/Cursor in C-code, which our
+ proxy fails. For these extensions, we need to ensure that the unwrapped
+ Connection/Cursor is passed.
+ """
+
+ name = "psycopg2"
instrument_list = [
("psycopg2.extensions", "register_type"),
# specifically instrument `register_json` as it bypasses `register_type`
("psycopg2._json", "register_json"),
+ ("psycopg2.extensions", "quote_ident"),
+ ("psycopg2.extensions", "encrypt_password"),
]
def call(self, module, method, wrapped, instance, args, kwargs):
@@ -108,4 +116,11 @@
if args and hasattr(args[0], "__wrapped__"):
args = (args[0].__wrapped__,) + args[1:]
+ elif method == "encrypt_password":
+ # connection/cursor is either 3rd argument, or "scope" keyword argument
+ if len(args) >= 3 and hasattr(args[2], "__wrapped__"):
+ args = args[:2] + (args[2].__wrapped__,) + args[3:]
+ elif "scope" in kwargs and hasattr(kwargs["scope"], "__wrapped__"):
+ kwargs["scope"] = kwargs["scope"].__wrapped__
+
return wrapped(*args, **kwargs)
diff --git a/elasticapm/instrumentation/register.py b/elasticapm/instrumentation/register.py
--- a/elasticapm/instrumentation/register.py
+++ b/elasticapm/instrumentation/register.py
@@ -35,7 +35,7 @@
"elasticapm.instrumentation.packages.botocore.BotocoreInstrumentation",
"elasticapm.instrumentation.packages.jinja2.Jinja2Instrumentation",
"elasticapm.instrumentation.packages.psycopg2.Psycopg2Instrumentation",
- "elasticapm.instrumentation.packages.psycopg2.Psycopg2RegisterTypeInstrumentation",
+ "elasticapm.instrumentation.packages.psycopg2.Psycopg2ExtensionsInstrumentation",
"elasticapm.instrumentation.packages.mysql.MySQLInstrumentation",
"elasticapm.instrumentation.packages.pylibmc.PyLibMcInstrumentation",
"elasticapm.instrumentation.packages.pymongo.PyMongoInstrumentation",
|
{"golden_diff": "diff --git a/elasticapm/instrumentation/packages/psycopg2.py b/elasticapm/instrumentation/packages/psycopg2.py\n--- a/elasticapm/instrumentation/packages/psycopg2.py\n+++ b/elasticapm/instrumentation/packages/psycopg2.py\n@@ -87,13 +87,21 @@\n return PGConnectionProxy(wrapped(*args, **kwargs))\n \n \n-class Psycopg2RegisterTypeInstrumentation(DbApi2Instrumentation):\n- name = \"psycopg2-register-type\"\n+class Psycopg2ExtensionsInstrumentation(DbApi2Instrumentation):\n+ \"\"\"\n+ Some extensions do a type check on the Connection/Cursor in C-code, which our\n+ proxy fails. For these extensions, we need to ensure that the unwrapped\n+ Connection/Cursor is passed.\n+ \"\"\"\n+\n+ name = \"psycopg2\"\n \n instrument_list = [\n (\"psycopg2.extensions\", \"register_type\"),\n # specifically instrument `register_json` as it bypasses `register_type`\n (\"psycopg2._json\", \"register_json\"),\n+ (\"psycopg2.extensions\", \"quote_ident\"),\n+ (\"psycopg2.extensions\", \"encrypt_password\"),\n ]\n \n def call(self, module, method, wrapped, instance, args, kwargs):\n@@ -108,4 +116,11 @@\n if args and hasattr(args[0], \"__wrapped__\"):\n args = (args[0].__wrapped__,) + args[1:]\n \n+ elif method == \"encrypt_password\":\n+ # connection/cursor is either 3rd argument, or \"scope\" keyword argument\n+ if len(args) >= 3 and hasattr(args[2], \"__wrapped__\"):\n+ args = args[:2] + (args[2].__wrapped__,) + args[3:]\n+ elif \"scope\" in kwargs and hasattr(kwargs[\"scope\"], \"__wrapped__\"):\n+ kwargs[\"scope\"] = kwargs[\"scope\"].__wrapped__\n+\n return wrapped(*args, **kwargs)\ndiff --git a/elasticapm/instrumentation/register.py b/elasticapm/instrumentation/register.py\n--- a/elasticapm/instrumentation/register.py\n+++ b/elasticapm/instrumentation/register.py\n@@ -35,7 +35,7 @@\n \"elasticapm.instrumentation.packages.botocore.BotocoreInstrumentation\",\n \"elasticapm.instrumentation.packages.jinja2.Jinja2Instrumentation\",\n \"elasticapm.instrumentation.packages.psycopg2.Psycopg2Instrumentation\",\n- \"elasticapm.instrumentation.packages.psycopg2.Psycopg2RegisterTypeInstrumentation\",\n+ \"elasticapm.instrumentation.packages.psycopg2.Psycopg2ExtensionsInstrumentation\",\n \"elasticapm.instrumentation.packages.mysql.MySQLInstrumentation\",\n \"elasticapm.instrumentation.packages.pylibmc.PyLibMcInstrumentation\",\n \"elasticapm.instrumentation.packages.pymongo.PyMongoInstrumentation\",\n", "issue": "Exception when using `quote_ident` in psycopg2\n**Describe the bug**:\r\nIf you make use of the function `psycopg2.extensions.quote_ident` [docs](http://initd.org/psycopg/docs/extensions.html#psycopg2.extensions.quote_ident), a `TypeError` exception is thrown. This is because the cursor object, when under instrumentation from ES-APM, is an instance of `PGCursorProxy`, not the actual cursor, and `quote_ident` does not allow this because the type is checked in the C code [link](https://github.com/psycopg/psycopg2/blob/2_7_6_1/psycopg/psycopgmodule.c#L181). With the error message saying `TypeError: argument 2 must be a connection or a cursor`. 
Inspecting the cur object at a debug breakpoint we can see it is the proxy object:\r\n\r\n```\r\n>>> cur\r\n<PGCursorProxy at 0x7fd7f70f9a88 for NamedTupleCursor at 0x7fd7f70f0148>\r\n>>> type(cur)\r\n<class 'elasticapm.instrumentation.packages.psycopg2.PGCursorProxy'>\r\n>>> type(cur.__wrapped__)\r\n<class 'psycopg2.extras.NamedTupleCursor'>\r\n```\r\n\r\n**To Reproduce**\r\n\r\n```python\r\nfrom psycopg2.extensions import quote_ident\r\n....\r\n....\r\nwith psycopg2.connect(DSN) as conn:\r\n with conn.cursor() as curs:\r\n ident = quote_ident(\"column_name\", cur)\r\n curs.execute(f\"SELECT {column_name} FROM data.table;\")\r\n data = curs.fetchall()\r\n```\r\npassing the underlying wrapped cursor works:\r\n```python\r\nfrom psycopg2.extensions import quote_ident\r\n....\r\n....\r\nwith psycopg2.connect(DSN) as conn:\r\n with conn.cursor() as curs:\r\n ident = quote_ident(\"column_name\", cur.__wrapped__)\r\n curs.execute(f\"SELECT {column_name} FROM data.table;\")\r\n data = curs.fetchall()\r\n```\r\n\r\n**Environment (please complete the following information)**\r\n- OS: Linux\r\n- Python version: 3.6.4\r\n- Agent version: 5.2.2\r\n\r\n\r\n**Additional context**\r\nLooks like the same problem was encountered here https://github.com/DataDog/dd-trace-py/issues/474, and was fixed by also patching quote_ident to pass the `__wrapped__` object. Testing this out with a basic in-module proxy function worked, but obviously a patch right at the top level from the apm module would sort it out.\r\n\r\n```python\r\ndef quote_ident(string, cursor):\r\n return psycopg2.extensions.quote_ident(string, cursor.__wrapped__)\r\n```\r\n\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nfrom elasticapm.utils.module_import import import_string\n\n_cls_register = {\n \"elasticapm.instrumentation.packages.botocore.BotocoreInstrumentation\",\n \"elasticapm.instrumentation.packages.jinja2.Jinja2Instrumentation\",\n \"elasticapm.instrumentation.packages.psycopg2.Psycopg2Instrumentation\",\n \"elasticapm.instrumentation.packages.psycopg2.Psycopg2RegisterTypeInstrumentation\",\n \"elasticapm.instrumentation.packages.mysql.MySQLInstrumentation\",\n \"elasticapm.instrumentation.packages.pylibmc.PyLibMcInstrumentation\",\n \"elasticapm.instrumentation.packages.pymongo.PyMongoInstrumentation\",\n \"elasticapm.instrumentation.packages.pymongo.PyMongoBulkInstrumentation\",\n \"elasticapm.instrumentation.packages.pymongo.PyMongoCursorInstrumentation\",\n \"elasticapm.instrumentation.packages.python_memcached.PythonMemcachedInstrumentation\",\n \"elasticapm.instrumentation.packages.redis.RedisInstrumentation\",\n \"elasticapm.instrumentation.packages.redis.RedisPipelineInstrumentation\",\n \"elasticapm.instrumentation.packages.requests.RequestsInstrumentation\",\n \"elasticapm.instrumentation.packages.sqlite.SQLiteInstrumentation\",\n \"elasticapm.instrumentation.packages.urllib3.Urllib3Instrumentation\",\n \"elasticapm.instrumentation.packages.elasticsearch.ElasticsearchConnectionInstrumentation\",\n \"elasticapm.instrumentation.packages.elasticsearch.ElasticsearchInstrumentation\",\n \"elasticapm.instrumentation.packages.cassandra.CassandraInstrumentation\",\n \"elasticapm.instrumentation.packages.pymssql.PyMSSQLInstrumentation\",\n \"elasticapm.instrumentation.packages.pyodbc.PyODBCInstrumentation\",\n \"elasticapm.instrumentation.packages.django.template.DjangoTemplateInstrumentation\",\n \"elasticapm.instrumentation.packages.django.template.DjangoTemplateSourceInstrumentation\",\n \"elasticapm.instrumentation.packages.urllib.UrllibInstrumentation\",\n}\n\n\ndef register(cls):\n _cls_register.add(cls)\n\n\n_instrumentation_singletons = {}\n\n\ndef get_instrumentation_objects():\n for cls_str in _cls_register:\n if cls_str not in _instrumentation_singletons:\n cls = import_string(cls_str)\n _instrumentation_singletons[cls_str] = cls()\n\n obj = _instrumentation_singletons[cls_str]\n yield obj\n", "path": "elasticapm/instrumentation/register.py"}, {"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or 
promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom elasticapm.instrumentation.packages.dbapi2 import (\n ConnectionProxy,\n CursorProxy,\n DbApi2Instrumentation,\n extract_signature,\n)\nfrom elasticapm.traces import capture_span\nfrom elasticapm.utils import default_ports\n\n\nclass PGCursorProxy(CursorProxy):\n provider_name = \"postgresql\"\n\n def _bake_sql(self, sql):\n # if this is a Composable object, use its `as_string` method\n # see http://initd.org/psycopg/docs/sql.html\n if hasattr(sql, \"as_string\"):\n return sql.as_string(self.__wrapped__)\n return sql\n\n def extract_signature(self, sql):\n return extract_signature(sql)\n\n def __enter__(self):\n return PGCursorProxy(self.__wrapped__.__enter__())\n\n\nclass PGConnectionProxy(ConnectionProxy):\n cursor_proxy = PGCursorProxy\n\n def __enter__(self):\n return PGConnectionProxy(self.__wrapped__.__enter__())\n\n\nclass Psycopg2Instrumentation(DbApi2Instrumentation):\n name = \"psycopg2\"\n\n instrument_list = [(\"psycopg2\", \"connect\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n signature = \"psycopg2.connect\"\n\n host = kwargs.get(\"host\")\n if host:\n signature += \" \" + str(host)\n\n port = kwargs.get(\"port\")\n if port:\n port = str(port)\n if int(port) != default_ports.get(\"postgresql\"):\n signature += \":\" + port\n else:\n # Parse connection string and extract host/port\n pass\n\n with capture_span(signature, span_type=\"db\", span_subtype=\"postgresql\", span_action=\"connect\"):\n return PGConnectionProxy(wrapped(*args, **kwargs))\n\n\nclass Psycopg2RegisterTypeInstrumentation(DbApi2Instrumentation):\n name = \"psycopg2-register-type\"\n\n instrument_list = [\n (\"psycopg2.extensions\", \"register_type\"),\n # specifically instrument `register_json` as it bypasses `register_type`\n (\"psycopg2._json\", \"register_json\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n if \"conn_or_curs\" in kwargs and hasattr(kwargs[\"conn_or_curs\"], \"__wrapped__\"):\n kwargs[\"conn_or_curs\"] = kwargs[\"conn_or_curs\"].__wrapped__\n # register_type takes the connection as second argument\n elif len(args) == 2 and hasattr(args[1], \"__wrapped__\"):\n args = (args[0], args[1].__wrapped__)\n # register_json takes the connection as first argument, and can have\n # several more arguments\n elif method == \"register_json\":\n if args and hasattr(args[0], \"__wrapped__\"):\n args = (args[0].__wrapped__,) + args[1:]\n\n return wrapped(*args, **kwargs)\n", "path": "elasticapm/instrumentation/packages/psycopg2.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and 
use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nfrom elasticapm.utils.module_import import import_string\n\n_cls_register = {\n \"elasticapm.instrumentation.packages.botocore.BotocoreInstrumentation\",\n \"elasticapm.instrumentation.packages.jinja2.Jinja2Instrumentation\",\n \"elasticapm.instrumentation.packages.psycopg2.Psycopg2Instrumentation\",\n \"elasticapm.instrumentation.packages.psycopg2.Psycopg2ExtensionsInstrumentation\",\n \"elasticapm.instrumentation.packages.mysql.MySQLInstrumentation\",\n \"elasticapm.instrumentation.packages.pylibmc.PyLibMcInstrumentation\",\n \"elasticapm.instrumentation.packages.pymongo.PyMongoInstrumentation\",\n \"elasticapm.instrumentation.packages.pymongo.PyMongoBulkInstrumentation\",\n \"elasticapm.instrumentation.packages.pymongo.PyMongoCursorInstrumentation\",\n \"elasticapm.instrumentation.packages.python_memcached.PythonMemcachedInstrumentation\",\n \"elasticapm.instrumentation.packages.redis.RedisInstrumentation\",\n \"elasticapm.instrumentation.packages.redis.RedisPipelineInstrumentation\",\n \"elasticapm.instrumentation.packages.requests.RequestsInstrumentation\",\n \"elasticapm.instrumentation.packages.sqlite.SQLiteInstrumentation\",\n \"elasticapm.instrumentation.packages.urllib3.Urllib3Instrumentation\",\n \"elasticapm.instrumentation.packages.elasticsearch.ElasticsearchConnectionInstrumentation\",\n \"elasticapm.instrumentation.packages.elasticsearch.ElasticsearchInstrumentation\",\n \"elasticapm.instrumentation.packages.cassandra.CassandraInstrumentation\",\n \"elasticapm.instrumentation.packages.pymssql.PyMSSQLInstrumentation\",\n \"elasticapm.instrumentation.packages.pyodbc.PyODBCInstrumentation\",\n \"elasticapm.instrumentation.packages.django.template.DjangoTemplateInstrumentation\",\n \"elasticapm.instrumentation.packages.django.template.DjangoTemplateSourceInstrumentation\",\n \"elasticapm.instrumentation.packages.urllib.UrllibInstrumentation\",\n}\n\n\ndef register(cls):\n _cls_register.add(cls)\n\n\n_instrumentation_singletons = {}\n\n\ndef get_instrumentation_objects():\n for cls_str in _cls_register:\n if cls_str not in 
_instrumentation_singletons:\n cls = import_string(cls_str)\n _instrumentation_singletons[cls_str] = cls()\n\n obj = _instrumentation_singletons[cls_str]\n yield obj\n", "path": "elasticapm/instrumentation/register.py"}, {"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom elasticapm.instrumentation.packages.dbapi2 import (\n ConnectionProxy,\n CursorProxy,\n DbApi2Instrumentation,\n extract_signature,\n)\nfrom elasticapm.traces import capture_span\nfrom elasticapm.utils import default_ports\n\n\nclass PGCursorProxy(CursorProxy):\n provider_name = \"postgresql\"\n\n def _bake_sql(self, sql):\n # if this is a Composable object, use its `as_string` method\n # see http://initd.org/psycopg/docs/sql.html\n if hasattr(sql, \"as_string\"):\n return sql.as_string(self.__wrapped__)\n return sql\n\n def extract_signature(self, sql):\n return extract_signature(sql)\n\n def __enter__(self):\n return PGCursorProxy(self.__wrapped__.__enter__())\n\n\nclass PGConnectionProxy(ConnectionProxy):\n cursor_proxy = PGCursorProxy\n\n def __enter__(self):\n return PGConnectionProxy(self.__wrapped__.__enter__())\n\n\nclass Psycopg2Instrumentation(DbApi2Instrumentation):\n name = \"psycopg2\"\n\n instrument_list = [(\"psycopg2\", \"connect\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n signature = \"psycopg2.connect\"\n\n host = kwargs.get(\"host\")\n if host:\n signature += \" \" + str(host)\n\n port = kwargs.get(\"port\")\n if port:\n port = str(port)\n if int(port) != default_ports.get(\"postgresql\"):\n signature += \":\" + port\n else:\n # Parse connection string and extract host/port\n pass\n\n with capture_span(signature, span_type=\"db\", span_subtype=\"postgresql\", span_action=\"connect\"):\n return PGConnectionProxy(wrapped(*args, **kwargs))\n\n\nclass Psycopg2ExtensionsInstrumentation(DbApi2Instrumentation):\n \"\"\"\n Some extensions do a type check on the Connection/Cursor in C-code, which our\n proxy fails. 
For these extensions, we need to ensure that the unwrapped\n Connection/Cursor is passed.\n \"\"\"\n\n name = \"psycopg2\"\n\n instrument_list = [\n (\"psycopg2.extensions\", \"register_type\"),\n # specifically instrument `register_json` as it bypasses `register_type`\n (\"psycopg2._json\", \"register_json\"),\n (\"psycopg2.extensions\", \"quote_ident\"),\n (\"psycopg2.extensions\", \"encrypt_password\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n if \"conn_or_curs\" in kwargs and hasattr(kwargs[\"conn_or_curs\"], \"__wrapped__\"):\n kwargs[\"conn_or_curs\"] = kwargs[\"conn_or_curs\"].__wrapped__\n # register_type takes the connection as second argument\n elif len(args) == 2 and hasattr(args[1], \"__wrapped__\"):\n args = (args[0], args[1].__wrapped__)\n # register_json takes the connection as first argument, and can have\n # several more arguments\n elif method == \"register_json\":\n if args and hasattr(args[0], \"__wrapped__\"):\n args = (args[0].__wrapped__,) + args[1:]\n\n elif method == \"encrypt_password\":\n # connection/cursor is either 3rd argument, or \"scope\" keyword argument\n if len(args) >= 3 and hasattr(args[2], \"__wrapped__\"):\n args = args[:2] + (args[2].__wrapped__,) + args[3:]\n elif \"scope\" in kwargs and hasattr(kwargs[\"scope\"], \"__wrapped__\"):\n kwargs[\"scope\"] = kwargs[\"scope\"].__wrapped__\n\n return wrapped(*args, **kwargs)\n", "path": "elasticapm/instrumentation/packages/psycopg2.py"}]}
| 3,037 | 651 |
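For readers skimming the record above: the accepted diff unwraps the APM proxy before psycopg2's C-level helpers see it. A minimal caller-side sketch of the same unwrapping idea, based on the workaround quoted in the issue (the `safe_quote_ident` helper is hypothetical, not part of elastic-apm-python):

```python
# Hypothetical helper mirroring the issue's workaround: psycopg2's C-level type
# check rejects the instrumentation proxy, so pass the wrapped cursor through.
from psycopg2.extensions import quote_ident

def safe_quote_ident(name, cur):
    # elasticapm's PGCursorProxy exposes the real cursor as __wrapped__;
    # plain cursors pass through unchanged.
    scope = getattr(cur, "__wrapped__", cur)
    return quote_ident(name, scope)
```

The library-side fix in the golden diff makes such a helper unnecessary by instrumenting `quote_ident` and `encrypt_password` themselves and unwrapping their connection/cursor arguments.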
gh_patches_debug_34730
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-8435
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
text_align attribute in NumberFormatter not doing anything
When making a `datatable`, I want to right align numerical values in the table, but when I set the `text_align` attribute in `NumberFormatter`, the values continue to remain left aligned. Here is my environment:
* Python 2.7.12 :: Anaconda 4.1.1 (64-bit)
* numpy==1.11.1
* pandas==0.18.1
* bokeh==0.12.4
* Windows 7, Chrome
And here is a code snippet:
```
import pandas as pd
import numpy as np
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import DataTable, NumberFormatter, TableColumn
from bokeh.plotting import show
df = []
for ii in range(1, 11):
df.append({'x': ii, 'y': 1000 * np.random.rand()})
df = pd.DataFrame(df)
source = ColumnDataSource(data=df)
columns = [
TableColumn(field='x', title='Col 1'),
TableColumn(field='y', title='Col 2',
formatter=NumberFormatter(format='$0,0.00',
text_align='right')),
]
dt = DataTable(source=source, columns=columns, width=500, height=200, row_headers=False)
show(dt)
```
Here is the output I am getting in my Jupyter Notebook:

I would expect that the dollar amounts in `Col 2` would be right aligned, but they aren't.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/integration/widgets/data_table_customization.py`
Content:
```
1 from bokeh.io import save
2 from bokeh.models import ColumnDataSource
3 from bokeh.models.widgets import DataTable, TableColumn, HTMLTemplateFormatter
4
5 from bokeh.sampledata.periodic_table import elements
6
7 elements['name_lower'] = elements['name'].str.lower()
8 source = ColumnDataSource(elements)
9
10 html_font_template = '<font color="<%= CPK %>"><%= value %></font>'
11 html_image_template = """
12 <a href="http://images-of-elements.com/<%= value %>.php" target="_blank">
13 <img src="http://images-of-elements.com/<%= value %>.jpg" style="width:40px;height:40px;border:0">
14 </a>
15 """
16 columns = [
17 TableColumn(field='atomic number', title='Atomic Number'),
18 TableColumn(field='symbol', title='Symbol'),
19 TableColumn(field='name', title='Name',
20 formatter=HTMLTemplateFormatter(template=html_font_template)),
21 TableColumn(field='name_lower', title='Image',
22 formatter=HTMLTemplateFormatter(template=html_image_template))
23 ]
24 data_table = DataTable(source=source, columns=columns, editable=False, row_height=45)
25
26 save(data_table)
27
```
Path: `examples/app/dash/main.py`
Content:
```
1 from collections import Counter
2 from math import pi
3
4 import numpy as np
5 import pandas as pd
6
7 from bokeh.io import curdoc
8 from bokeh.layouts import column
9 from bokeh.models import ColumnDataSource, DataTable, RangeTool, TableColumn
10 from bokeh.palettes import Spectral11
11 from bokeh.plotting import figure
12 from bokeh.transform import cumsum
13 from bokeh.sampledata.autompg2 import autompg2 as mpg
14 from bokeh.sampledata.stocks import AAPL
15
16 # Timeseries
17
18 dates = np.array(AAPL['date'], dtype=np.datetime64)
19 source = ColumnDataSource(data=dict(date=dates, close=AAPL['adj_close']))
20
21 p = figure(plot_height=110, tools="", toolbar_location=None, #name="line",
22 x_axis_type="datetime", x_range=(dates[1500], dates[2500]), sizing_mode="scale_width")
23
24 p.line('date', 'close', source=source, line_width=2, alpha=0.7)
25 p.yaxis.axis_label = 'Traffic'
26 p.background_fill_color="#f5f5f5"
27 p.grid.grid_line_color="white"
28
29 select = figure(plot_height=50, plot_width=800, y_range=p.y_range,
30 x_axis_type="datetime", y_axis_type=None,
31 tools="", toolbar_location=None, sizing_mode="scale_width")
32
33 range_rool = RangeTool(x_range=p.x_range)
34 range_rool.overlay.fill_color = "navy"
35 range_rool.overlay.fill_alpha = 0.2
36
37 select.line('date', 'close', source=source)
38 select.ygrid.grid_line_color = None
39 select.add_tools(range_rool)
40 select.toolbar.active_multi = range_rool
41 select.background_fill_color="#f5f5f5"
42 select.grid.grid_line_color="white"
43 select.x_range.range_padding = 0.01
44
45 layout = column(p, select, sizing_mode="scale_width", name="line")
46
47 curdoc().add_root(layout)
48
49 # Donut chart
50
51 x = Counter({ 'United States': 157, 'United Kingdom': 93, 'Japan': 89, 'China': 63,
52 'Germany': 44, 'India': 42, 'Italy': 40, 'Australia': 35, 'Brazil': 32,
53 'France': 31, 'Taiwan': 31 })
54
55 data = pd.DataFrame.from_dict(dict(x), orient='index').reset_index().rename(index=str, columns={0:'value', 'index':'country'})
56 data['angle'] = data['value']/sum(x.values()) * 2*pi
57 data['color'] = Spectral11
58
59 region = figure(plot_height=350, toolbar_location=None, outline_line_color=None, sizing_mode="scale_both", name="region", x_range=(-0.4, 1))
60
61 region.annular_wedge(x=-0, y=1, inner_radius=0.2, outer_radius=0.32,
62 start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'),
63 line_color="white", fill_color='color', legend='country', source=data)
64
65 region.axis.axis_label=None
66 region.axis.visible=False
67 region.grid.grid_line_color = None
68 region.legend.label_text_font_size = "0.7em"
69 region.legend.spacing = 1
70 region.legend.glyph_height = 15
71 region.legend.label_height = 15
72
73 curdoc().add_root(region)
74
75 # Bar chart
76
77 plats = ("IOS", "Android", "OSX", "Windows", "Other")
78 values = (35, 22, 13, 26, 4)
79 platform = figure(plot_height=350, toolbar_location=None, outline_line_color=None, sizing_mode="scale_both", name="platform",
80 y_range=list(reversed(plats)), x_axis_location="above")
81 platform.x_range.start = 0
82 platform.ygrid.grid_line_color = None
83 platform.axis.minor_tick_line_color = None
84 platform.outline_line_color = None
85
86 platform.hbar(left=0, right=values, y=plats, height=0.8)
87
88 curdoc().add_root(platform)
89
90 # Table
91
92 source = ColumnDataSource(data=mpg[:6])
93 columns = [
94 TableColumn(field="cyl", title="Counts"),
95 TableColumn(field="cty", title="Uniques"),
96 TableColumn(field="hwy", title="Rating"),
97 ]
98 table = DataTable(source=source, columns=columns, height=210, width=330, name="table", sizing_mode="scale_both")
99
100 curdoc().add_root(table)
101
102 # Setup
103
104 curdoc().title = "Bokeh Dashboard"
105 curdoc().template_variables['stats_names'] = ['users', 'new_users', 'time', 'sessions', 'sales']
106 curdoc().template_variables['stats'] = {
107 'users' : {'icon': 'user', 'value': 11200, 'change': 4 , 'label': 'Total Users'},
108 'new_users' : {'icon': 'user', 'value': 350, 'change': 1.2 , 'label': 'New Users'},
109 'time' : {'icon': 'clock-o', 'value': 5.6, 'change': -2.3 , 'label': 'Total Time'},
110 'sessions' : {'icon': 'user', 'value': 27300, 'change': 0.5 , 'label': 'Total Sessions'},
111 'sales' : {'icon': 'dollar-sign', 'value': 8700, 'change': -0.2 , 'label': 'Average Sales'},
112 }
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/app/dash/main.py b/examples/app/dash/main.py
--- a/examples/app/dash/main.py
+++ b/examples/app/dash/main.py
@@ -6,7 +6,8 @@
from bokeh.io import curdoc
from bokeh.layouts import column
-from bokeh.models import ColumnDataSource, DataTable, RangeTool, TableColumn
+from bokeh.models import ColumnDataSource, DataTable, RangeTool, TableColumn, \
+ NumberFormatter, StringFormatter
from bokeh.palettes import Spectral11
from bokeh.plotting import figure
from bokeh.transform import cumsum
@@ -92,8 +93,10 @@
source = ColumnDataSource(data=mpg[:6])
columns = [
TableColumn(field="cyl", title="Counts"),
- TableColumn(field="cty", title="Uniques"),
- TableColumn(field="hwy", title="Rating"),
+ TableColumn(field="cty", title="Uniques",
+ formatter=StringFormatter(text_align="center")),
+ TableColumn(field="hwy", title="Rating",
+ formatter=NumberFormatter(text_align="right")),
]
table = DataTable(source=source, columns=columns, height=210, width=330, name="table", sizing_mode="scale_both")
diff --git a/examples/integration/widgets/data_table_customization.py b/examples/integration/widgets/data_table_customization.py
--- a/examples/integration/widgets/data_table_customization.py
+++ b/examples/integration/widgets/data_table_customization.py
@@ -1,5 +1,5 @@
from bokeh.io import save
-from bokeh.models import ColumnDataSource
+from bokeh.models import ColumnDataSource, NumberFormatter, StringFormatter
from bokeh.models.widgets import DataTable, TableColumn, HTMLTemplateFormatter
from bokeh.sampledata.periodic_table import elements
@@ -14,8 +14,10 @@
</a>
"""
columns = [
- TableColumn(field='atomic number', title='Atomic Number'),
- TableColumn(field='symbol', title='Symbol'),
+ TableColumn(field='atomic number', title='Atomic Number',
+ formatter=NumberFormatter(text_align="right")),
+ TableColumn(field='symbol', title='Symbol',
+ formatter=StringFormatter(text_align="center")),
TableColumn(field='name', title='Name',
formatter=HTMLTemplateFormatter(template=html_font_template)),
TableColumn(field='name_lower', title='Image',
|
{"golden_diff": "diff --git a/examples/app/dash/main.py b/examples/app/dash/main.py\n--- a/examples/app/dash/main.py\n+++ b/examples/app/dash/main.py\n@@ -6,7 +6,8 @@\n \n from bokeh.io import curdoc\n from bokeh.layouts import column\n-from bokeh.models import ColumnDataSource, DataTable, RangeTool, TableColumn\n+from bokeh.models import ColumnDataSource, DataTable, RangeTool, TableColumn, \\\n+ NumberFormatter, StringFormatter\n from bokeh.palettes import Spectral11\n from bokeh.plotting import figure\n from bokeh.transform import cumsum\n@@ -92,8 +93,10 @@\n source = ColumnDataSource(data=mpg[:6])\n columns = [\n TableColumn(field=\"cyl\", title=\"Counts\"),\n- TableColumn(field=\"cty\", title=\"Uniques\"),\n- TableColumn(field=\"hwy\", title=\"Rating\"),\n+ TableColumn(field=\"cty\", title=\"Uniques\",\n+ formatter=StringFormatter(text_align=\"center\")),\n+ TableColumn(field=\"hwy\", title=\"Rating\",\n+ formatter=NumberFormatter(text_align=\"right\")),\n ]\n table = DataTable(source=source, columns=columns, height=210, width=330, name=\"table\", sizing_mode=\"scale_both\")\n \ndiff --git a/examples/integration/widgets/data_table_customization.py b/examples/integration/widgets/data_table_customization.py\n--- a/examples/integration/widgets/data_table_customization.py\n+++ b/examples/integration/widgets/data_table_customization.py\n@@ -1,5 +1,5 @@\n from bokeh.io import save\n-from bokeh.models import ColumnDataSource\n+from bokeh.models import ColumnDataSource, NumberFormatter, StringFormatter\n from bokeh.models.widgets import DataTable, TableColumn, HTMLTemplateFormatter\n \n from bokeh.sampledata.periodic_table import elements\n@@ -14,8 +14,10 @@\n </a>\n \"\"\"\n columns = [\n- TableColumn(field='atomic number', title='Atomic Number'),\n- TableColumn(field='symbol', title='Symbol'),\n+ TableColumn(field='atomic number', title='Atomic Number',\n+ formatter=NumberFormatter(text_align=\"right\")),\n+ TableColumn(field='symbol', title='Symbol',\n+ formatter=StringFormatter(text_align=\"center\")),\n TableColumn(field='name', title='Name',\n formatter=HTMLTemplateFormatter(template=html_font_template)),\n TableColumn(field='name_lower', title='Image',\n", "issue": "text_align attribute in NumberFormatter not doing anything\nWhen making a `datatable`, I want to right align numerical values in the table, but when I set the `text_align` attribute in `NumberFormatter`, the values continue to remain left aligned. 
Here is my enviornment:\r\n* Python 2.7.12 :: Anaconda 4.1.1 (64-bit)\r\n* numpy==1.11.1\r\n* pandas==0.18.1\r\n* bokeh==0.12.4\r\n* Windows 7, Chrome\r\n\r\nAnd here is a code snippet:\r\n```\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom bokeh.models import ColumnDataSource\r\nfrom bokeh.models.widgets import DataTable, NumberFormatter, TableColumn\r\nfrom bokeh.plotting import show\r\n\r\ndf = []\r\nfor ii in range(1, 11):\r\n df.append({'x': ii, 'y': 1000 * np.random.rand()})\r\ndf = pd.DataFrame(df)\r\n\r\nsource = ColumnDataSource(data=df)\r\n\r\ncolumns = [\r\n TableColumn(field='x', title='Col 1'),\r\n TableColumn(field='y', title='Col 2',\r\n formatter=NumberFormatter(format='$0,0.00',\r\n text_align='right')),\r\n]\r\n\r\ndt = DataTable(source=source, columns=columns, width=500, height=200, row_headers=False)\r\n\r\nshow(dt)\r\n```\r\n\r\nHere is the output I am getting in my Jupyter Notebook:\r\n\r\n\r\nI would expect that the dollar amounts in `Col 2` would be right aligned, but they aren't.\n", "before_files": [{"content": "from bokeh.io import save\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.models.widgets import DataTable, TableColumn, HTMLTemplateFormatter\n\nfrom bokeh.sampledata.periodic_table import elements\n\nelements['name_lower'] = elements['name'].str.lower()\nsource = ColumnDataSource(elements)\n\nhtml_font_template = '<font color=\"<%= CPK %>\"><%= value %></font>'\nhtml_image_template = \"\"\"\n<a href=\"http://images-of-elements.com/<%= value %>.php\" target=\"_blank\">\n <img src=\"http://images-of-elements.com/<%= value %>.jpg\" style=\"width:40px;height:40px;border:0\">\n</a>\n\"\"\"\ncolumns = [\n TableColumn(field='atomic number', title='Atomic Number'),\n TableColumn(field='symbol', title='Symbol'),\n TableColumn(field='name', title='Name',\n formatter=HTMLTemplateFormatter(template=html_font_template)),\n TableColumn(field='name_lower', title='Image',\n formatter=HTMLTemplateFormatter(template=html_image_template))\n]\ndata_table = DataTable(source=source, columns=columns, editable=False, row_height=45)\n\nsave(data_table)\n", "path": "examples/integration/widgets/data_table_customization.py"}, {"content": "from collections import Counter\nfrom math import pi\n\nimport numpy as np\nimport pandas as pd\n\nfrom bokeh.io import curdoc\nfrom bokeh.layouts import column\nfrom bokeh.models import ColumnDataSource, DataTable, RangeTool, TableColumn\nfrom bokeh.palettes import Spectral11\nfrom bokeh.plotting import figure\nfrom bokeh.transform import cumsum\nfrom bokeh.sampledata.autompg2 import autompg2 as mpg\nfrom bokeh.sampledata.stocks import AAPL\n\n# Timeseries\n\ndates = np.array(AAPL['date'], dtype=np.datetime64)\nsource = ColumnDataSource(data=dict(date=dates, close=AAPL['adj_close']))\n\np = figure(plot_height=110, tools=\"\", toolbar_location=None, #name=\"line\",\n x_axis_type=\"datetime\", x_range=(dates[1500], dates[2500]), sizing_mode=\"scale_width\")\n\np.line('date', 'close', source=source, line_width=2, alpha=0.7)\np.yaxis.axis_label = 'Traffic'\np.background_fill_color=\"#f5f5f5\"\np.grid.grid_line_color=\"white\"\n\nselect = figure(plot_height=50, plot_width=800, y_range=p.y_range,\n x_axis_type=\"datetime\", y_axis_type=None,\n tools=\"\", toolbar_location=None, sizing_mode=\"scale_width\")\n\nrange_rool = RangeTool(x_range=p.x_range)\nrange_rool.overlay.fill_color = \"navy\"\nrange_rool.overlay.fill_alpha = 0.2\n\nselect.line('date', 'close', source=source)\nselect.ygrid.grid_line_color = 
None\nselect.add_tools(range_rool)\nselect.toolbar.active_multi = range_rool\nselect.background_fill_color=\"#f5f5f5\"\nselect.grid.grid_line_color=\"white\"\nselect.x_range.range_padding = 0.01\n\nlayout = column(p, select, sizing_mode=\"scale_width\", name=\"line\")\n\ncurdoc().add_root(layout)\n\n# Donut chart\n\nx = Counter({ 'United States': 157, 'United Kingdom': 93, 'Japan': 89, 'China': 63,\n 'Germany': 44, 'India': 42, 'Italy': 40, 'Australia': 35, 'Brazil': 32,\n 'France': 31, 'Taiwan': 31 })\n\ndata = pd.DataFrame.from_dict(dict(x), orient='index').reset_index().rename(index=str, columns={0:'value', 'index':'country'})\ndata['angle'] = data['value']/sum(x.values()) * 2*pi\ndata['color'] = Spectral11\n\nregion = figure(plot_height=350, toolbar_location=None, outline_line_color=None, sizing_mode=\"scale_both\", name=\"region\", x_range=(-0.4, 1))\n\nregion.annular_wedge(x=-0, y=1, inner_radius=0.2, outer_radius=0.32,\n start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'),\n line_color=\"white\", fill_color='color', legend='country', source=data)\n\nregion.axis.axis_label=None\nregion.axis.visible=False\nregion.grid.grid_line_color = None\nregion.legend.label_text_font_size = \"0.7em\"\nregion.legend.spacing = 1\nregion.legend.glyph_height = 15\nregion.legend.label_height = 15\n\ncurdoc().add_root(region)\n\n# Bar chart\n\nplats = (\"IOS\", \"Android\", \"OSX\", \"Windows\", \"Other\")\nvalues = (35, 22, 13, 26, 4)\nplatform = figure(plot_height=350, toolbar_location=None, outline_line_color=None, sizing_mode=\"scale_both\", name=\"platform\",\n y_range=list(reversed(plats)), x_axis_location=\"above\")\nplatform.x_range.start = 0\nplatform.ygrid.grid_line_color = None\nplatform.axis.minor_tick_line_color = None\nplatform.outline_line_color = None\n\nplatform.hbar(left=0, right=values, y=plats, height=0.8)\n\ncurdoc().add_root(platform)\n\n# Table\n\nsource = ColumnDataSource(data=mpg[:6])\ncolumns = [\n TableColumn(field=\"cyl\", title=\"Counts\"),\n TableColumn(field=\"cty\", title=\"Uniques\"),\n TableColumn(field=\"hwy\", title=\"Rating\"),\n]\ntable = DataTable(source=source, columns=columns, height=210, width=330, name=\"table\", sizing_mode=\"scale_both\")\n\ncurdoc().add_root(table)\n\n# Setup\n\ncurdoc().title = \"Bokeh Dashboard\"\ncurdoc().template_variables['stats_names'] = ['users', 'new_users', 'time', 'sessions', 'sales']\ncurdoc().template_variables['stats'] = {\n 'users' : {'icon': 'user', 'value': 11200, 'change': 4 , 'label': 'Total Users'},\n 'new_users' : {'icon': 'user', 'value': 350, 'change': 1.2 , 'label': 'New Users'},\n 'time' : {'icon': 'clock-o', 'value': 5.6, 'change': -2.3 , 'label': 'Total Time'},\n 'sessions' : {'icon': 'user', 'value': 27300, 'change': 0.5 , 'label': 'Total Sessions'},\n 'sales' : {'icon': 'dollar-sign', 'value': 8700, 'change': -0.2 , 'label': 'Average Sales'},\n}\n", "path": "examples/app/dash/main.py"}], "after_files": [{"content": "from bokeh.io import save\nfrom bokeh.models import ColumnDataSource, NumberFormatter, StringFormatter\nfrom bokeh.models.widgets import DataTable, TableColumn, HTMLTemplateFormatter\n\nfrom bokeh.sampledata.periodic_table import elements\n\nelements['name_lower'] = elements['name'].str.lower()\nsource = ColumnDataSource(elements)\n\nhtml_font_template = '<font color=\"<%= CPK %>\"><%= value %></font>'\nhtml_image_template = \"\"\"\n<a href=\"http://images-of-elements.com/<%= value %>.php\" target=\"_blank\">\n <img src=\"http://images-of-elements.com/<%= value %>.jpg\" 
style=\"width:40px;height:40px;border:0\">\n</a>\n\"\"\"\ncolumns = [\n TableColumn(field='atomic number', title='Atomic Number',\n formatter=NumberFormatter(text_align=\"right\")),\n TableColumn(field='symbol', title='Symbol',\n formatter=StringFormatter(text_align=\"center\")),\n TableColumn(field='name', title='Name',\n formatter=HTMLTemplateFormatter(template=html_font_template)),\n TableColumn(field='name_lower', title='Image',\n formatter=HTMLTemplateFormatter(template=html_image_template))\n]\ndata_table = DataTable(source=source, columns=columns, editable=False, row_height=45)\n\nsave(data_table)\n", "path": "examples/integration/widgets/data_table_customization.py"}, {"content": "from collections import Counter\nfrom math import pi\n\nimport numpy as np\nimport pandas as pd\n\nfrom bokeh.io import curdoc\nfrom bokeh.layouts import column\nfrom bokeh.models import ColumnDataSource, DataTable, RangeTool, TableColumn, \\\n NumberFormatter, StringFormatter\nfrom bokeh.palettes import Spectral11\nfrom bokeh.plotting import figure\nfrom bokeh.transform import cumsum\nfrom bokeh.sampledata.autompg2 import autompg2 as mpg\nfrom bokeh.sampledata.stocks import AAPL\n\n# Timeseries\n\ndates = np.array(AAPL['date'], dtype=np.datetime64)\nsource = ColumnDataSource(data=dict(date=dates, close=AAPL['adj_close']))\n\np = figure(plot_height=110, tools=\"\", toolbar_location=None, #name=\"line\",\n x_axis_type=\"datetime\", x_range=(dates[1500], dates[2500]), sizing_mode=\"scale_width\")\n\np.line('date', 'close', source=source, line_width=2, alpha=0.7)\np.yaxis.axis_label = 'Traffic'\np.background_fill_color=\"#f5f5f5\"\np.grid.grid_line_color=\"white\"\n\nselect = figure(plot_height=50, plot_width=800, y_range=p.y_range,\n x_axis_type=\"datetime\", y_axis_type=None,\n tools=\"\", toolbar_location=None, sizing_mode=\"scale_width\")\n\nrange_rool = RangeTool(x_range=p.x_range)\nrange_rool.overlay.fill_color = \"navy\"\nrange_rool.overlay.fill_alpha = 0.2\n\nselect.line('date', 'close', source=source)\nselect.ygrid.grid_line_color = None\nselect.add_tools(range_rool)\nselect.toolbar.active_multi = range_rool\nselect.background_fill_color=\"#f5f5f5\"\nselect.grid.grid_line_color=\"white\"\nselect.x_range.range_padding = 0.01\n\nlayout = column(p, select, sizing_mode=\"scale_width\", name=\"line\")\n\ncurdoc().add_root(layout)\n\n# Donut chart\n\nx = Counter({ 'United States': 157, 'United Kingdom': 93, 'Japan': 89, 'China': 63,\n 'Germany': 44, 'India': 42, 'Italy': 40, 'Australia': 35, 'Brazil': 32,\n 'France': 31, 'Taiwan': 31 })\n\ndata = pd.DataFrame.from_dict(dict(x), orient='index').reset_index().rename(index=str, columns={0:'value', 'index':'country'})\ndata['angle'] = data['value']/sum(x.values()) * 2*pi\ndata['color'] = Spectral11\n\nregion = figure(plot_height=350, toolbar_location=None, outline_line_color=None, sizing_mode=\"scale_both\", name=\"region\", x_range=(-0.4, 1))\n\nregion.annular_wedge(x=-0, y=1, inner_radius=0.2, outer_radius=0.32,\n start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'),\n line_color=\"white\", fill_color='color', legend='country', source=data)\n\nregion.axis.axis_label=None\nregion.axis.visible=False\nregion.grid.grid_line_color = None\nregion.legend.label_text_font_size = \"0.7em\"\nregion.legend.spacing = 1\nregion.legend.glyph_height = 15\nregion.legend.label_height = 15\n\ncurdoc().add_root(region)\n\n# Bar chart\n\nplats = (\"IOS\", \"Android\", \"OSX\", \"Windows\", \"Other\")\nvalues = (35, 22, 13, 26, 4)\nplatform = 
figure(plot_height=350, toolbar_location=None, outline_line_color=None, sizing_mode=\"scale_both\", name=\"platform\",\n y_range=list(reversed(plats)), x_axis_location=\"above\")\nplatform.x_range.start = 0\nplatform.ygrid.grid_line_color = None\nplatform.axis.minor_tick_line_color = None\nplatform.outline_line_color = None\n\nplatform.hbar(left=0, right=values, y=plats, height=0.8)\n\ncurdoc().add_root(platform)\n\n# Table\n\nsource = ColumnDataSource(data=mpg[:6])\ncolumns = [\n TableColumn(field=\"cyl\", title=\"Counts\"),\n TableColumn(field=\"cty\", title=\"Uniques\",\n formatter=StringFormatter(text_align=\"center\")),\n TableColumn(field=\"hwy\", title=\"Rating\",\n formatter=NumberFormatter(text_align=\"right\")),\n]\ntable = DataTable(source=source, columns=columns, height=210, width=330, name=\"table\", sizing_mode=\"scale_both\")\n\ncurdoc().add_root(table)\n\n# Setup\n\ncurdoc().title = \"Bokeh Dashboard\"\ncurdoc().template_variables['stats_names'] = ['users', 'new_users', 'time', 'sessions', 'sales']\ncurdoc().template_variables['stats'] = {\n 'users' : {'icon': 'user', 'value': 11200, 'change': 4 , 'label': 'Total Users'},\n 'new_users' : {'icon': 'user', 'value': 350, 'change': 1.2 , 'label': 'New Users'},\n 'time' : {'icon': 'clock-o', 'value': 5.6, 'change': -2.3 , 'label': 'Total Time'},\n 'sessions' : {'icon': 'user', 'value': 27300, 'change': 0.5 , 'label': 'Total Sessions'},\n 'sales' : {'icon': 'dollar-sign', 'value': 8700, 'change': -0.2 , 'label': 'Average Sales'},\n}\n", "path": "examples/app/dash/main.py"}]}
| 2,451 | 510 |
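As a standalone illustration of the pattern the diff above introduces into the examples — attaching `StringFormatter`/`NumberFormatter` with `text_align` to each `TableColumn` — here is a minimal, self-contained sketch; the data values are made up, and whether the alignment renders as requested depends on the Bokeh version in use (the original report was against 0.12.4):

```python
# Minimal sketch of per-column alignment via table formatters (illustrative data).
from bokeh.io import save
from bokeh.models import ColumnDataSource, NumberFormatter, StringFormatter
from bokeh.models.widgets import DataTable, TableColumn

source = ColumnDataSource(data=dict(cty=[18, 21, 20], hwy=[29.0, 29.0, 31.0]))
columns = [
    TableColumn(field="cty", title="Uniques",
                formatter=StringFormatter(text_align="center")),
    TableColumn(field="hwy", title="Rating",
                formatter=NumberFormatter(format="0.0", text_align="right")),
]
save(DataTable(source=source, columns=columns, width=330, height=210),
     filename="data_table_alignment.html")
```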
gh_patches_debug_6702
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-1256
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
WARNING - thermal_production: PALENQUE is missing from the DO plant mapping
--- END ISSUE ---
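The warning quoted above is emitted by `thermal_production` in the parser listed below whenever a plant name from the source table has no entry in the `thermal_plants` mapping. A minimal sketch of the kind of change that addresses it follows; the fuel type for PALENQUE is not given in this record, so `'unknown'` is used as a placeholder until it can be confirmed against the generation reports the parser links to:

```python
# Hypothetical addition to the thermal_plants mapping in parsers/DO.py;
# 'unknown' is a placeholder -- the plant's real fuel type still needs verifying.
thermal_plants[u'PALENQUE'] = 'unknown'
```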
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/DO.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import arrow
4 from bs4 import BeautifulSoup
5 from collections import defaultdict
6 from math import isnan
7 import numpy as np
8 from operator import itemgetter
9 import pandas as pd
10 import requests
11
12 try:
13 unicode # Python 2
14 except NameError:
15 unicode = str # Python 3
16
17 try:
18 xrange # Python 2
19 except NameError:
20 xrange = range # Python 3
21
22
23 # This parser gets hourly electricity generation data from oc.org.do for the Dominican Republic.
24 # The data is in MWh but since it is updated hourly we can view it as MW.
25 # Solar generation has no data available currently but multiple projects are planned/under construction.
26
27 url = 'http://184.168.74.190:81/ReportesGraficos/ReportePostdespacho.aspx'
28
29 total_mapping = {
30 u'Total T\xe9rmico': 'Thermal',
31 u'Total E\xf3lico': 'Wind',
32 u'Total Hidroel\xe9ctrica': 'Hydro',
33 'Total Generado': 'Generated'
34 }
35
36 # Power plant types
37 # http://www.sie.gob.do/images/Estadisticas/MEM/GeneracionDiariaEnero2017/
38 # Reporte_diario_de_generacion_31_enero_2017_merged2.pdf
39
40 thermal_plants = {
41 u'AES ANDRES': 'gas',
42 u'BARAHONA CARBON': 'coal',
43 u'BERSAL': 'unknown',
44 u'CEPP 1': 'oil',
45 u'CEPP 2': 'oil',
46 u'CESPM 1': 'oil',
47 u'CESPM 2': 'oil',
48 u'CESPM 3': 'oil',
49 u'ESTRELLA DEL MAR 2 CFO': 'oil',
50 u'ESTRELLA DEL MAR 2 CGN': 'gas',
51 u'ESTRELLA DEL MAR 2 SFO': 'oil',
52 u'ESTRELLA DEL MAR 2 SGN': 'gas',
53 u'HAINA TG': 'oil',
54 u'INCA KM22': 'oil',
55 u'ITABO 1': 'coal',
56 u'ITABO 2': 'coal',
57 u'LA VEGA': 'oil',
58 u'LOS MINA 5': 'gas',
59 u'LOS MINA 6': 'gas',
60 u'LOS MINA 7': 'gas',
61 u'LOS OR\xcdGENES POWER PLANT FUEL OIL': 'oil',
62 u'LOS OR\xcdGENES POWER PLANT GAS NATURAL': 'gas',
63 u'METALDOM': 'oil',
64 u'MONTE PLATA SOLAR': 'solar',
65 u'MONTE RIO': 'oil',
66 u'PALAMARA': 'oil',
67 u'PARQUE ENERGETICO LOS MINA CC PARCIAL': 'gas',
68 u'PARQUE ENERGETICO LOS MINA CC TOTAL': 'gas',
69 u'PIMENTEL 1': 'oil',
70 u'PIMENTEL 2': 'oil',
71 u'PIMENTEL 3': 'oil',
72 u'QUISQUEYA 1': 'gas',
73 u'QUISQUEYA 2': 'gas',
74 u'RIO SAN JUAN': 'oil',
75 u'SAN FELIPE': 'oil',
76 u'SAN FELIPE CC': 'gas',
77 u'SAN FELIPE VAP': 'oil',
78 u'SAN LORENZO 1': 'gas',
79 u'SAN PEDRO BIO-ENERGY': 'biomass',
80 u'SAN PEDRO VAPOR': 'oil',
81 u'SULTANA DEL ESTE': 'oil'
82 }
83
84
85 def get_data(session=None):
86 """
87 Makes a request to source url.
88 Finds main table and creates a list of all table elements in unicode string format.
89 Returns a list.
90 """
91
92 data = []
93 s = session or requests.Session()
94 data_req = s.get(url)
95 soup = BeautifulSoup(data_req.content, 'lxml')
96
97 tbs = soup.find("table", id="PostdespachoUnidadesTermicasGrid_DXMainTable")
98 rows = tbs.find_all("td")
99
100 for row in rows:
101 num = row.getText().strip()
102 data.append(unicode(num))
103
104 return data
105
106
107 def floater(item):
108 """
109 Attempts to convert any item given to a float. Returns item if it fails.
110 """
111
112 try:
113 return float(item)
114 except ValueError:
115 return item
116
117
118 def chunker(big_lst):
119 """
120 Breaks a big list into a list of lists. Removes any list with no data then turns remaining
121 lists into key: value pairs with first element from the list being the key.
122 Returns a dictionary.
123 """
124
125 chunks = [big_lst[x:x + 27] for x in xrange(0, len(big_lst), 27)]
126
127 # Remove the list if it contains no data.
128 for chunk in chunks:
129 if any(chunk):
130 continue
131 else:
132 chunks.remove(chunk)
133
134 chunked_list = {words[0]: words[1:] for words in chunks}
135
136 return chunked_list
137
138
139 def data_formatter(data):
140 """
141 Takes data and finds relevant sections. Formats and breaks data into usable parts.
142 Returns a nested dictionary.
143 """
144
145 find_thermal_index = data.index(u'GRUPO: T\xe9rmica')
146 find_totals_index = data.index(u'Total T\xe9rmico')
147 find_totals_end = data.index(u'Total Programado')
148
149 ufthermal = data[find_thermal_index + 3:find_totals_index - 59]
150 total_data = data[find_totals_index:find_totals_end]
151
152 # Remove all company names.
153 for val in ufthermal:
154 if ':' in val:
155 i = ufthermal.index(val)
156 del ufthermal[i:i + 3]
157
158 formatted_thermal = chunker([floater(item) for item in ufthermal])
159 mapped_totals = [total_mapping.get(x, x) for x in total_data]
160 formatted_totals = chunker([floater(item) for item in mapped_totals])
161
162 return {'totals': formatted_totals, 'thermal': formatted_thermal}
163
164
165 def data_parser(formatted_data):
166 """
167 Converts formatted data into a pandas dataframe. Removes any empty rows.
168 Returns a DataFrame.
169 """
170
171 hours = list(range(1, 24)) + [0] + [25, 26]
172 dft = pd.DataFrame(formatted_data, index=hours)
173
174 dft = dft.drop(dft.index[[-1, -2]])
175 dft = dft.replace(u'', np.nan)
176 dft = dft.dropna(how='all')
177
178 return dft
179
180
181 def thermal_production(df, logger):
182 """
183 Takes DataFrame and finds thermal generation for each hour.
184 Removes any non generating plants then maps plants to type.
185 Sums type instances and returns a dictionary.
186 """
187
188 therms = []
189 unmapped = set()
190 for hour in df.index.values:
191 dt = hour
192 currentt = df.loc[[hour]]
193
194 # Create current plant output.
195 tp = {}
196 for item in list(df):
197 v = currentt.iloc[0][item]
198 tp[item] = v
199
200 current_plants = {k: tp[k] for k in tp if not isnan(tp[k])}
201
202 for plant in current_plants.keys():
203 if plant not in thermal_plants.keys():
204 unmapped.add(plant)
205
206 mapped_plants = [(thermal_plants.get(plant, 'unknown'), val) for plant, val in current_plants.items()]
207
208 thermalDict = defaultdict(lambda: 0.0)
209
210 # Sum values for duplicate keys.
211 for key, val in mapped_plants:
212 thermalDict[key] += val
213
214 thermalDict['datetime'] = dt
215 thermalDict = dict(thermalDict)
216 therms.append(thermalDict)
217
218 for plant in unmapped:
219 logger.warning(
220 '{} is missing from the DO plant mapping!'.format(plant),
221 extra={'key': 'DO'})
222
223 return therms
224
225
226 def total_production(df):
227 """
228 Takes DataFrame and finds generation totals for each hour.
229 Returns a dictionary.
230 """
231
232 vals = []
233 # The Dominican Republic does not observe daylight savings time.
234 for hour in df.index.values:
235 dt = hour
236 current = df.loc[[hour]]
237 hydro = current.iloc[0]['Hydro']
238 wind = current.iloc[0]['Wind']
239 if wind > -10:
240 wind = max(wind, 0)
241
242 # Wind and hydro totals do not always update exactly on the new hour.
243 # In this case we set them to None because they are unknown rather than zero.
244 if isnan(wind):
245 wind = None
246 if isnan(hydro):
247 hydro = None
248
249 prod = {'wind': wind, 'hydro': hydro, 'datetime': dt}
250 vals.append(prod)
251
252 return vals
253
254
255 def merge_production(thermal, total):
256 """
257 Takes thermal generation and total generation and merges them using 'datetime' key.
258 Returns a defaultdict.
259 """
260
261 d = defaultdict(dict)
262 for each in (thermal, total):
263 for elem in each:
264 d[elem['datetime']].update(elem)
265
266 final = sorted(d.values(), key=itemgetter("datetime"))
267
268 def get_datetime(hour):
269 at = arrow.now('America/Dominica').floor('day')
270 dt = (at.shift(hours=int(hour) - 1)).datetime
271 return dt
272
273 for item in final:
274 i = item['datetime']
275 j = get_datetime(i)
276 item['datetime'] = j
277
278 return final
279
280
281 def fetch_production(zone_key='DO', session=None, target_datetime=None, logger=None):
282 """
283 Requests the last known production mix (in MW) of a given country
284 Arguments:
285 zone_key (optional) -- used in case a parser is able to fetch multiple countries
286 Return:
287 A dictionary in the form:
288 {
289 'zoneKey': 'FR',
290 'datetime': '2017-01-01T00:00:00Z',
291 'production': {
292 'biomass': 0.0,
293 'coal': 0.0,
294 'gas': 0.0,
295 'hydro': 0.0,
296 'nuclear': null,
297 'oil': 0.0,
298 'solar': 0.0,
299 'wind': 0.0,
300 'geothermal': 0.0,
301 'unknown': 0.0
302 },
303 'storage': {
304 'hydro': -10.0,
305 },
306 'source': 'mysource.com'
307 }
308 """
309 if target_datetime:
310 raise NotImplementedError('This parser is not yet able to parse past dates')
311
312 dat = data_formatter(get_data(session=None))
313 tot = data_parser(dat['totals'])
314 th = data_parser(dat['thermal'])
315 thermal = thermal_production(th, logger)
316 total = total_production(tot)
317 merge = merge_production(thermal, total)
318
319 production_mix_by_hour = []
320 for hour in merge:
321 production_mix = {
322 'zoneKey': zone_key,
323 'datetime': hour['datetime'],
324 'production': {
325 'biomass': hour.get('biomass', 0.0),
326 'coal': hour.get('coal', 0.0),
327 'gas': hour.get('gas', 0.0),
328 'hydro': hour.get('hydro', 0.0),
329 'nuclear': 0.0,
330 'oil': hour.get('oil', 0.0),
331 'solar': None,
332 'wind': hour.get('wind', 0.0),
333 'geothermal': 0.0,
334 'unknown': hour.get('unknown', 0.0)
335 },
336 'storage': {
337 'hydro': None,
338 },
339 'source': 'oc.org.do'
340 }
341 production_mix_by_hour.append(production_mix)
342
343 return production_mix_by_hour
344
345
346 if __name__ == '__main__':
347 """Main method, never used by the Electricity Map backend, but handy for testing."""
348
349 print('fetch_production() ->')
350 print(fetch_production())
351
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsers/DO.py b/parsers/DO.py
--- a/parsers/DO.py
+++ b/parsers/DO.py
@@ -64,6 +64,7 @@
u'MONTE PLATA SOLAR': 'solar',
u'MONTE RIO': 'oil',
u'PALAMARA': 'oil',
+ u'PALENQUE': 'oil',
u'PARQUE ENERGETICO LOS MINA CC PARCIAL': 'gas',
u'PARQUE ENERGETICO LOS MINA CC TOTAL': 'gas',
u'PIMENTEL 1': 'oil',
|
{"golden_diff": "diff --git a/parsers/DO.py b/parsers/DO.py\n--- a/parsers/DO.py\n+++ b/parsers/DO.py\n@@ -64,6 +64,7 @@\n u'MONTE PLATA SOLAR': 'solar',\n u'MONTE RIO': 'oil',\n u'PALAMARA': 'oil',\n+ u'PALENQUE': 'oil',\n u'PARQUE ENERGETICO LOS MINA CC PARCIAL': 'gas',\n u'PARQUE ENERGETICO LOS MINA CC TOTAL': 'gas',\n u'PIMENTEL 1': 'oil',\n", "issue": "WARNING - thermal_production: PALENQUE is missing from the DO plant mapping\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport arrow\nfrom bs4 import BeautifulSoup\nfrom collections import defaultdict\nfrom math import isnan\nimport numpy as np\nfrom operator import itemgetter\nimport pandas as pd\nimport requests\n\ntry:\n unicode # Python 2\nexcept NameError:\n unicode = str # Python 3\n\ntry:\n xrange # Python 2\nexcept NameError:\n xrange = range # Python 3\n\n\n# This parser gets hourly electricity generation data from oc.org.do for the Dominican Republic.\n# The data is in MWh but since it is updated hourly we can view it as MW.\n# Solar generation has no data available currently but multiple projects are planned/under construction.\n\nurl = 'http://184.168.74.190:81/ReportesGraficos/ReportePostdespacho.aspx'\n\ntotal_mapping = {\n u'Total T\\xe9rmico': 'Thermal',\n u'Total E\\xf3lico': 'Wind',\n u'Total Hidroel\\xe9ctrica': 'Hydro',\n 'Total Generado': 'Generated'\n }\n\n# Power plant types\n# http://www.sie.gob.do/images/Estadisticas/MEM/GeneracionDiariaEnero2017/\n# Reporte_diario_de_generacion_31_enero_2017_merged2.pdf\n\nthermal_plants = {\n u'AES ANDRES': 'gas',\n u'BARAHONA CARBON': 'coal',\n u'BERSAL': 'unknown',\n u'CEPP 1': 'oil',\n u'CEPP 2': 'oil',\n u'CESPM 1': 'oil',\n u'CESPM 2': 'oil',\n u'CESPM 3': 'oil',\n u'ESTRELLA DEL MAR 2 CFO': 'oil',\n u'ESTRELLA DEL MAR 2 CGN': 'gas',\n u'ESTRELLA DEL MAR 2 SFO': 'oil',\n u'ESTRELLA DEL MAR 2 SGN': 'gas',\n u'HAINA TG': 'oil',\n u'INCA KM22': 'oil',\n u'ITABO 1': 'coal',\n u'ITABO 2': 'coal',\n u'LA VEGA': 'oil',\n u'LOS MINA 5': 'gas',\n u'LOS MINA 6': 'gas',\n u'LOS MINA 7': 'gas',\n u'LOS OR\\xcdGENES POWER PLANT FUEL OIL': 'oil',\n u'LOS OR\\xcdGENES POWER PLANT GAS NATURAL': 'gas',\n u'METALDOM': 'oil',\n u'MONTE PLATA SOLAR': 'solar',\n u'MONTE RIO': 'oil',\n u'PALAMARA': 'oil',\n u'PARQUE ENERGETICO LOS MINA CC PARCIAL': 'gas',\n u'PARQUE ENERGETICO LOS MINA CC TOTAL': 'gas',\n u'PIMENTEL 1': 'oil',\n u'PIMENTEL 2': 'oil',\n u'PIMENTEL 3': 'oil',\n u'QUISQUEYA 1': 'gas',\n u'QUISQUEYA 2': 'gas',\n u'RIO SAN JUAN': 'oil',\n u'SAN FELIPE': 'oil',\n u'SAN FELIPE CC': 'gas',\n u'SAN FELIPE VAP': 'oil',\n u'SAN LORENZO 1': 'gas',\n u'SAN PEDRO BIO-ENERGY': 'biomass',\n u'SAN PEDRO VAPOR': 'oil',\n u'SULTANA DEL ESTE': 'oil'\n }\n\n\ndef get_data(session=None):\n \"\"\"\n Makes a request to source url.\n Finds main table and creates a list of all table elements in unicode string format.\n Returns a list.\n \"\"\"\n\n data = []\n s = session or requests.Session()\n data_req = s.get(url)\n soup = BeautifulSoup(data_req.content, 'lxml')\n\n tbs = soup.find(\"table\", id=\"PostdespachoUnidadesTermicasGrid_DXMainTable\")\n rows = tbs.find_all(\"td\")\n\n for row in rows:\n num = row.getText().strip()\n data.append(unicode(num))\n\n return data\n\n\ndef floater(item):\n \"\"\"\n Attempts to convert any item given to a float. Returns item if it fails.\n \"\"\"\n\n try:\n return float(item)\n except ValueError:\n return item\n\n\ndef chunker(big_lst):\n \"\"\"\n Breaks a big list into a list of lists. 
Removes any list with no data then turns remaining\n lists into key: value pairs with first element from the list being the key.\n Returns a dictionary.\n \"\"\"\n\n chunks = [big_lst[x:x + 27] for x in xrange(0, len(big_lst), 27)]\n\n # Remove the list if it contains no data.\n for chunk in chunks:\n if any(chunk):\n continue\n else:\n chunks.remove(chunk)\n\n chunked_list = {words[0]: words[1:] for words in chunks}\n\n return chunked_list\n\n\ndef data_formatter(data):\n \"\"\"\n Takes data and finds relevant sections. Formats and breaks data into usable parts.\n Returns a nested dictionary.\n \"\"\"\n\n find_thermal_index = data.index(u'GRUPO: T\\xe9rmica')\n find_totals_index = data.index(u'Total T\\xe9rmico')\n find_totals_end = data.index(u'Total Programado')\n\n ufthermal = data[find_thermal_index + 3:find_totals_index - 59]\n total_data = data[find_totals_index:find_totals_end]\n\n # Remove all company names.\n for val in ufthermal:\n if ':' in val:\n i = ufthermal.index(val)\n del ufthermal[i:i + 3]\n\n formatted_thermal = chunker([floater(item) for item in ufthermal])\n mapped_totals = [total_mapping.get(x, x) for x in total_data]\n formatted_totals = chunker([floater(item) for item in mapped_totals])\n\n return {'totals': formatted_totals, 'thermal': formatted_thermal}\n\n\ndef data_parser(formatted_data):\n \"\"\"\n Converts formatted data into a pandas dataframe. Removes any empty rows.\n Returns a DataFrame.\n \"\"\"\n\n hours = list(range(1, 24)) + [0] + [25, 26]\n dft = pd.DataFrame(formatted_data, index=hours)\n\n dft = dft.drop(dft.index[[-1, -2]])\n dft = dft.replace(u'', np.nan)\n dft = dft.dropna(how='all')\n\n return dft\n\n\ndef thermal_production(df, logger):\n \"\"\"\n Takes DataFrame and finds thermal generation for each hour.\n Removes any non generating plants then maps plants to type.\n Sums type instances and returns a dictionary.\n \"\"\"\n\n therms = []\n unmapped = set()\n for hour in df.index.values:\n dt = hour\n currentt = df.loc[[hour]]\n\n # Create current plant output.\n tp = {}\n for item in list(df):\n v = currentt.iloc[0][item]\n tp[item] = v\n\n current_plants = {k: tp[k] for k in tp if not isnan(tp[k])}\n\n for plant in current_plants.keys():\n if plant not in thermal_plants.keys():\n unmapped.add(plant)\n\n mapped_plants = [(thermal_plants.get(plant, 'unknown'), val) for plant, val in current_plants.items()]\n\n thermalDict = defaultdict(lambda: 0.0)\n\n # Sum values for duplicate keys.\n for key, val in mapped_plants:\n thermalDict[key] += val\n\n thermalDict['datetime'] = dt\n thermalDict = dict(thermalDict)\n therms.append(thermalDict)\n\n for plant in unmapped:\n logger.warning(\n '{} is missing from the DO plant mapping!'.format(plant),\n extra={'key': 'DO'})\n\n return therms\n\n\ndef total_production(df):\n \"\"\"\n Takes DataFrame and finds generation totals for each hour.\n Returns a dictionary.\n \"\"\"\n\n vals = []\n # The Dominican Republic does not observe daylight savings time.\n for hour in df.index.values:\n dt = hour\n current = df.loc[[hour]]\n hydro = current.iloc[0]['Hydro']\n wind = current.iloc[0]['Wind']\n if wind > -10:\n wind = max(wind, 0)\n\n # Wind and hydro totals do not always update exactly on the new hour.\n # In this case we set them to None because they are unknown rather than zero.\n if isnan(wind):\n wind = None\n if isnan(hydro):\n hydro = None\n\n prod = {'wind': wind, 'hydro': hydro, 'datetime': dt}\n vals.append(prod)\n\n return vals\n\n\ndef merge_production(thermal, total):\n \"\"\"\n Takes thermal 
generation and total generation and merges them using 'datetime' key.\n Returns a defaultdict.\n \"\"\"\n\n d = defaultdict(dict)\n for each in (thermal, total):\n for elem in each:\n d[elem['datetime']].update(elem)\n\n final = sorted(d.values(), key=itemgetter(\"datetime\"))\n\n def get_datetime(hour):\n at = arrow.now('America/Dominica').floor('day')\n dt = (at.shift(hours=int(hour) - 1)).datetime\n return dt\n\n for item in final:\n i = item['datetime']\n j = get_datetime(i)\n item['datetime'] = j\n\n return final\n\n\ndef fetch_production(zone_key='DO', session=None, target_datetime=None, logger=None):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n dat = data_formatter(get_data(session=None))\n tot = data_parser(dat['totals'])\n th = data_parser(dat['thermal'])\n thermal = thermal_production(th, logger)\n total = total_production(tot)\n merge = merge_production(thermal, total)\n\n production_mix_by_hour = []\n for hour in merge:\n production_mix = {\n 'zoneKey': zone_key,\n 'datetime': hour['datetime'],\n 'production': {\n 'biomass': hour.get('biomass', 0.0),\n 'coal': hour.get('coal', 0.0),\n 'gas': hour.get('gas', 0.0),\n 'hydro': hour.get('hydro', 0.0),\n 'nuclear': 0.0,\n 'oil': hour.get('oil', 0.0),\n 'solar': None,\n 'wind': hour.get('wind', 0.0),\n 'geothermal': 0.0,\n 'unknown': hour.get('unknown', 0.0)\n },\n 'storage': {\n 'hydro': None,\n },\n 'source': 'oc.org.do'\n }\n production_mix_by_hour.append(production_mix)\n\n return production_mix_by_hour\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n", "path": "parsers/DO.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport arrow\nfrom bs4 import BeautifulSoup\nfrom collections import defaultdict\nfrom math import isnan\nimport numpy as np\nfrom operator import itemgetter\nimport pandas as pd\nimport requests\n\ntry:\n unicode # Python 2\nexcept NameError:\n unicode = str # Python 3\n\ntry:\n xrange # Python 2\nexcept NameError:\n xrange = range # Python 3\n\n\n# This parser gets hourly electricity generation data from oc.org.do for the Dominican Republic.\n# The data is in MWh but since it is updated hourly we can view it as MW.\n# Solar generation has no data available currently but multiple projects are planned/under construction.\n\nurl = 'http://184.168.74.190:81/ReportesGraficos/ReportePostdespacho.aspx'\n\ntotal_mapping = {\n u'Total T\\xe9rmico': 'Thermal',\n u'Total E\\xf3lico': 'Wind',\n u'Total Hidroel\\xe9ctrica': 'Hydro',\n 'Total Generado': 'Generated'\n }\n\n# Power plant types\n# http://www.sie.gob.do/images/Estadisticas/MEM/GeneracionDiariaEnero2017/\n# Reporte_diario_de_generacion_31_enero_2017_merged2.pdf\n\nthermal_plants = {\n u'AES ANDRES': 'gas',\n u'BARAHONA CARBON': 'coal',\n u'BERSAL': 'unknown',\n u'CEPP 1': 'oil',\n u'CEPP 2': 'oil',\n u'CESPM 1': 'oil',\n 
u'CESPM 2': 'oil',\n u'CESPM 3': 'oil',\n u'ESTRELLA DEL MAR 2 CFO': 'oil',\n u'ESTRELLA DEL MAR 2 CGN': 'gas',\n u'ESTRELLA DEL MAR 2 SFO': 'oil',\n u'ESTRELLA DEL MAR 2 SGN': 'gas',\n u'HAINA TG': 'oil',\n u'INCA KM22': 'oil',\n u'ITABO 1': 'coal',\n u'ITABO 2': 'coal',\n u'LA VEGA': 'oil',\n u'LOS MINA 5': 'gas',\n u'LOS MINA 6': 'gas',\n u'LOS MINA 7': 'gas',\n u'LOS OR\\xcdGENES POWER PLANT FUEL OIL': 'oil',\n u'LOS OR\\xcdGENES POWER PLANT GAS NATURAL': 'gas',\n u'METALDOM': 'oil',\n u'MONTE PLATA SOLAR': 'solar',\n u'MONTE RIO': 'oil',\n u'PALAMARA': 'oil',\n u'PALENQUE': 'oil',\n u'PARQUE ENERGETICO LOS MINA CC PARCIAL': 'gas',\n u'PARQUE ENERGETICO LOS MINA CC TOTAL': 'gas',\n u'PIMENTEL 1': 'oil',\n u'PIMENTEL 2': 'oil',\n u'PIMENTEL 3': 'oil',\n u'QUISQUEYA 1': 'gas',\n u'QUISQUEYA 2': 'gas',\n u'RIO SAN JUAN': 'oil',\n u'SAN FELIPE': 'oil',\n u'SAN FELIPE CC': 'gas',\n u'SAN FELIPE VAP': 'oil',\n u'SAN LORENZO 1': 'gas',\n u'SAN PEDRO BIO-ENERGY': 'biomass',\n u'SAN PEDRO VAPOR': 'oil',\n u'SULTANA DEL ESTE': 'oil'\n }\n\n\ndef get_data(session=None):\n \"\"\"\n Makes a request to source url.\n Finds main table and creates a list of all table elements in unicode string format.\n Returns a list.\n \"\"\"\n\n data = []\n s = session or requests.Session()\n data_req = s.get(url)\n soup = BeautifulSoup(data_req.content, 'lxml')\n\n tbs = soup.find(\"table\", id=\"PostdespachoUnidadesTermicasGrid_DXMainTable\")\n rows = tbs.find_all(\"td\")\n\n for row in rows:\n num = row.getText().strip()\n data.append(unicode(num))\n\n return data\n\n\ndef floater(item):\n \"\"\"\n Attempts to convert any item given to a float. Returns item if it fails.\n \"\"\"\n\n try:\n return float(item)\n except ValueError:\n return item\n\n\ndef chunker(big_lst):\n \"\"\"\n Breaks a big list into a list of lists. Removes any list with no data then turns remaining\n lists into key: value pairs with first element from the list being the key.\n Returns a dictionary.\n \"\"\"\n\n chunks = [big_lst[x:x + 27] for x in xrange(0, len(big_lst), 27)]\n\n # Remove the list if it contains no data.\n for chunk in chunks:\n if any(chunk):\n continue\n else:\n chunks.remove(chunk)\n\n chunked_list = {words[0]: words[1:] for words in chunks}\n\n return chunked_list\n\n\ndef data_formatter(data):\n \"\"\"\n Takes data and finds relevant sections. Formats and breaks data into usable parts.\n Returns a nested dictionary.\n \"\"\"\n\n find_thermal_index = data.index(u'GRUPO: T\\xe9rmica')\n find_totals_index = data.index(u'Total T\\xe9rmico')\n find_totals_end = data.index(u'Total Programado')\n\n ufthermal = data[find_thermal_index + 3:find_totals_index - 59]\n total_data = data[find_totals_index:find_totals_end]\n\n # Remove all company names.\n for val in ufthermal:\n if ':' in val:\n i = ufthermal.index(val)\n del ufthermal[i:i + 3]\n\n formatted_thermal = chunker([floater(item) for item in ufthermal])\n mapped_totals = [total_mapping.get(x, x) for x in total_data]\n formatted_totals = chunker([floater(item) for item in mapped_totals])\n\n return {'totals': formatted_totals, 'thermal': formatted_thermal}\n\n\ndef data_parser(formatted_data):\n \"\"\"\n Converts formatted data into a pandas dataframe. 
Removes any empty rows.\n Returns a DataFrame.\n \"\"\"\n\n hours = list(range(1, 24)) + [0] + [25, 26]\n dft = pd.DataFrame(formatted_data, index=hours)\n\n dft = dft.drop(dft.index[[-1, -2]])\n dft = dft.replace(u'', np.nan)\n dft = dft.dropna(how='all')\n\n return dft\n\n\ndef thermal_production(df, logger):\n \"\"\"\n Takes DataFrame and finds thermal generation for each hour.\n Removes any non generating plants then maps plants to type.\n Sums type instances and returns a dictionary.\n \"\"\"\n\n therms = []\n unmapped = set()\n for hour in df.index.values:\n dt = hour\n currentt = df.loc[[hour]]\n\n # Create current plant output.\n tp = {}\n for item in list(df):\n v = currentt.iloc[0][item]\n tp[item] = v\n\n current_plants = {k: tp[k] for k in tp if not isnan(tp[k])}\n\n for plant in current_plants.keys():\n if plant not in thermal_plants.keys():\n unmapped.add(plant)\n\n mapped_plants = [(thermal_plants.get(plant, 'unknown'), val) for plant, val in current_plants.items()]\n\n thermalDict = defaultdict(lambda: 0.0)\n\n # Sum values for duplicate keys.\n for key, val in mapped_plants:\n thermalDict[key] += val\n\n thermalDict['datetime'] = dt\n thermalDict = dict(thermalDict)\n therms.append(thermalDict)\n\n for plant in unmapped:\n logger.warning(\n '{} is missing from the DO plant mapping!'.format(plant),\n extra={'key': 'DO'})\n\n return therms\n\n\ndef total_production(df):\n \"\"\"\n Takes DataFrame and finds generation totals for each hour.\n Returns a dictionary.\n \"\"\"\n\n vals = []\n # The Dominican Republic does not observe daylight savings time.\n for hour in df.index.values:\n dt = hour\n current = df.loc[[hour]]\n hydro = current.iloc[0]['Hydro']\n wind = current.iloc[0]['Wind']\n if wind > -10:\n wind = max(wind, 0)\n\n # Wind and hydro totals do not always update exactly on the new hour.\n # In this case we set them to None because they are unknown rather than zero.\n if isnan(wind):\n wind = None\n if isnan(hydro):\n hydro = None\n\n prod = {'wind': wind, 'hydro': hydro, 'datetime': dt}\n vals.append(prod)\n\n return vals\n\n\ndef merge_production(thermal, total):\n \"\"\"\n Takes thermal generation and total generation and merges them using 'datetime' key.\n Returns a defaultdict.\n \"\"\"\n\n d = defaultdict(dict)\n for each in (thermal, total):\n for elem in each:\n d[elem['datetime']].update(elem)\n\n final = sorted(d.values(), key=itemgetter(\"datetime\"))\n\n def get_datetime(hour):\n at = arrow.now('America/Dominica').floor('day')\n dt = (at.shift(hours=int(hour) - 1)).datetime\n return dt\n\n for item in final:\n i = item['datetime']\n j = get_datetime(i)\n item['datetime'] = j\n\n return final\n\n\ndef fetch_production(zone_key='DO', session=None, target_datetime=None, logger=None):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n dat = data_formatter(get_data(session=None))\n tot = data_parser(dat['totals'])\n th = data_parser(dat['thermal'])\n thermal = 
thermal_production(th, logger)\n total = total_production(tot)\n merge = merge_production(thermal, total)\n\n production_mix_by_hour = []\n for hour in merge:\n production_mix = {\n 'zoneKey': zone_key,\n 'datetime': hour['datetime'],\n 'production': {\n 'biomass': hour.get('biomass', 0.0),\n 'coal': hour.get('coal', 0.0),\n 'gas': hour.get('gas', 0.0),\n 'hydro': hour.get('hydro', 0.0),\n 'nuclear': 0.0,\n 'oil': hour.get('oil', 0.0),\n 'solar': None,\n 'wind': hour.get('wind', 0.0),\n 'geothermal': 0.0,\n 'unknown': hour.get('unknown', 0.0)\n },\n 'storage': {\n 'hydro': None,\n },\n 'source': 'oc.org.do'\n }\n production_mix_by_hour.append(production_mix)\n\n return production_mix_by_hour\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n", "path": "parsers/DO.py"}]}
| 3,973 | 140 |
gh_patches_debug_27250
|
rasdani/github-patches
|
git_diff
|
nilearn__nilearn-3710
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Documentation builder failure on main
https://github.com/nilearn/nilearn/actions/workflows/build-docs.yml
started occurring after merging #3698 (doubt it is related given the content of the PR)
https://github.com/nilearn/nilearn/actions/runs/4741116007
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nilearn/datasets/__init__.py`
Content:
```
1 """Helper functions to download NeuroImaging datasets."""
2
3 from .atlas import (
4 fetch_atlas_aal,
5 fetch_atlas_allen_2011,
6 fetch_atlas_basc_multiscale_2015,
7 fetch_atlas_craddock_2012,
8 fetch_atlas_destrieux_2009,
9 fetch_atlas_difumo,
10 fetch_atlas_harvard_oxford,
11 fetch_atlas_juelich,
12 fetch_atlas_msdl,
13 fetch_atlas_schaefer_2018,
14 fetch_atlas_smith_2009,
15 fetch_atlas_surf_destrieux,
16 fetch_atlas_talairach,
17 fetch_atlas_yeo_2011,
18 fetch_coords_dosenbach_2010,
19 fetch_coords_power_2011,
20 fetch_coords_seitzman_2018,
21 )
22 from .func import (
23 fetch_abide_pcp,
24 fetch_adhd,
25 fetch_bids_langloc_dataset,
26 fetch_development_fmri,
27 fetch_fiac_first_level,
28 fetch_haxby,
29 fetch_language_localizer_demo_dataset,
30 fetch_localizer_button_task,
31 fetch_localizer_calculation_task,
32 fetch_localizer_contrasts,
33 fetch_localizer_first_level,
34 fetch_megatrawls_netmats,
35 fetch_mixed_gambles,
36 fetch_miyawaki2008,
37 fetch_openneuro_dataset,
38 fetch_openneuro_dataset_index,
39 fetch_spm_auditory,
40 fetch_spm_multimodal_fmri,
41 fetch_surf_nki_enhanced,
42 patch_openneuro_dataset,
43 select_from_index,
44 )
45 from .neurovault import (
46 fetch_neurovault,
47 fetch_neurovault_auditory_computation_task,
48 fetch_neurovault_ids,
49 fetch_neurovault_motor_task,
50 )
51 from .struct import (
52 GM_MNI152_FILE_PATH,
53 MNI152_FILE_PATH,
54 WM_MNI152_FILE_PATH,
55 fetch_icbm152_2009,
56 fetch_icbm152_brain_gm_mask,
57 fetch_oasis_vbm,
58 fetch_surf_fsaverage,
59 load_mni152_brain_mask,
60 load_mni152_gm_mask,
61 load_mni152_gm_template,
62 load_mni152_template,
63 load_mni152_wm_mask,
64 load_mni152_wm_template,
65 )
66 from .utils import get_data_dirs, load_sample_motor_activation_image
67
68 __all__ = [
69 "MNI152_FILE_PATH",
70 "GM_MNI152_FILE_PATH",
71 "WM_MNI152_FILE_PATH",
72 "fetch_icbm152_2009",
73 "load_mni152_template",
74 "load_mni152_gm_template",
75 "load_mni152_wm_template",
76 "fetch_oasis_vbm",
77 "fetch_haxby",
78 "fetch_adhd",
79 "fetch_miyawaki2008",
80 "fetch_localizer_contrasts",
81 "fetch_localizer_button_task",
82 "fetch_abide_pcp",
83 "fetch_localizer_calculation_task",
84 "fetch_atlas_craddock_2012",
85 "fetch_atlas_destrieux_2009",
86 "fetch_atlas_juelich",
87 "fetch_atlas_harvard_oxford",
88 "fetch_atlas_msdl",
89 "fetch_atlas_schaefer_2018",
90 "fetch_coords_power_2011",
91 "fetch_coords_seitzman_2018",
92 "fetch_atlas_smith_2009",
93 "fetch_atlas_allen_2011",
94 "fetch_atlas_yeo_2011",
95 "fetch_mixed_gambles",
96 "fetch_atlas_aal",
97 "fetch_atlas_difumo",
98 "fetch_megatrawls_netmats",
99 "fetch_surf_nki_enhanced",
100 "fetch_development_fmri",
101 "fetch_surf_fsaverage",
102 "fetch_atlas_basc_multiscale_2015",
103 "fetch_coords_dosenbach_2010",
104 "fetch_neurovault",
105 "fetch_neurovault_ids",
106 "fetch_neurovault_motor_task",
107 "fetch_neurovault_auditory_computation_task",
108 "load_mni152_brain_mask",
109 "load_mni152_gm_mask",
110 "load_mni152_wm_mask",
111 "fetch_icbm152_brain_gm_mask",
112 "fetch_atlas_surf_destrieux",
113 "fetch_atlas_talairach",
114 "get_data_dirs",
115 "load_sample_motor_activation_image",
116 "fetch_language_localizer_demo_dataset",
117 "fetch_bids_langloc_dataset",
118 "fetch_openneuro_dataset_index",
119 "select_from_index",
120 "patch_openneuro_dataset",
121 "fetch_openneuro_dataset",
122 "fetch_localizer_first_level",
123 "fetch_spm_auditory",
124 "fetch_spm_multimodal_fmri",
125 "fetch_fiac_first_level",
126 ]
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nilearn/datasets/__init__.py b/nilearn/datasets/__init__.py
--- a/nilearn/datasets/__init__.py
+++ b/nilearn/datasets/__init__.py
@@ -10,6 +10,7 @@
fetch_atlas_harvard_oxford,
fetch_atlas_juelich,
fetch_atlas_msdl,
+ fetch_atlas_pauli_2017,
fetch_atlas_schaefer_2018,
fetch_atlas_smith_2009,
fetch_atlas_surf_destrieux,
@@ -24,6 +25,7 @@
fetch_adhd,
fetch_bids_langloc_dataset,
fetch_development_fmri,
+ fetch_ds000030_urls,
fetch_fiac_first_level,
fetch_haxby,
fetch_language_localizer_demo_dataset,
@@ -86,6 +88,7 @@
"fetch_atlas_juelich",
"fetch_atlas_harvard_oxford",
"fetch_atlas_msdl",
+ "fetch_atlas_pauli_2017",
"fetch_atlas_schaefer_2018",
"fetch_coords_power_2011",
"fetch_coords_seitzman_2018",
@@ -98,6 +101,7 @@
"fetch_megatrawls_netmats",
"fetch_surf_nki_enhanced",
"fetch_development_fmri",
+ "fetch_ds000030_urls",
"fetch_surf_fsaverage",
"fetch_atlas_basc_multiscale_2015",
"fetch_coords_dosenbach_2010",
|
{"golden_diff": "diff --git a/nilearn/datasets/__init__.py b/nilearn/datasets/__init__.py\n--- a/nilearn/datasets/__init__.py\n+++ b/nilearn/datasets/__init__.py\n@@ -10,6 +10,7 @@\n fetch_atlas_harvard_oxford,\n fetch_atlas_juelich,\n fetch_atlas_msdl,\n+ fetch_atlas_pauli_2017,\n fetch_atlas_schaefer_2018,\n fetch_atlas_smith_2009,\n fetch_atlas_surf_destrieux,\n@@ -24,6 +25,7 @@\n fetch_adhd,\n fetch_bids_langloc_dataset,\n fetch_development_fmri,\n+ fetch_ds000030_urls,\n fetch_fiac_first_level,\n fetch_haxby,\n fetch_language_localizer_demo_dataset,\n@@ -86,6 +88,7 @@\n \"fetch_atlas_juelich\",\n \"fetch_atlas_harvard_oxford\",\n \"fetch_atlas_msdl\",\n+ \"fetch_atlas_pauli_2017\",\n \"fetch_atlas_schaefer_2018\",\n \"fetch_coords_power_2011\",\n \"fetch_coords_seitzman_2018\",\n@@ -98,6 +101,7 @@\n \"fetch_megatrawls_netmats\",\n \"fetch_surf_nki_enhanced\",\n \"fetch_development_fmri\",\n+ \"fetch_ds000030_urls\",\n \"fetch_surf_fsaverage\",\n \"fetch_atlas_basc_multiscale_2015\",\n \"fetch_coords_dosenbach_2010\",\n", "issue": "Documentation builder failure on main\nhttps://github.com/nilearn/nilearn/actions/workflows/build-docs.yml\r\n\r\nstarted occurring after merging #3698 (doubt it is related given the content of the PR)\r\nhttps://github.com/nilearn/nilearn/actions/runs/4741116007\r\n\r\n\n", "before_files": [{"content": "\"\"\"Helper functions to download NeuroImaging datasets.\"\"\"\n\nfrom .atlas import (\n fetch_atlas_aal,\n fetch_atlas_allen_2011,\n fetch_atlas_basc_multiscale_2015,\n fetch_atlas_craddock_2012,\n fetch_atlas_destrieux_2009,\n fetch_atlas_difumo,\n fetch_atlas_harvard_oxford,\n fetch_atlas_juelich,\n fetch_atlas_msdl,\n fetch_atlas_schaefer_2018,\n fetch_atlas_smith_2009,\n fetch_atlas_surf_destrieux,\n fetch_atlas_talairach,\n fetch_atlas_yeo_2011,\n fetch_coords_dosenbach_2010,\n fetch_coords_power_2011,\n fetch_coords_seitzman_2018,\n)\nfrom .func import (\n fetch_abide_pcp,\n fetch_adhd,\n fetch_bids_langloc_dataset,\n fetch_development_fmri,\n fetch_fiac_first_level,\n fetch_haxby,\n fetch_language_localizer_demo_dataset,\n fetch_localizer_button_task,\n fetch_localizer_calculation_task,\n fetch_localizer_contrasts,\n fetch_localizer_first_level,\n fetch_megatrawls_netmats,\n fetch_mixed_gambles,\n fetch_miyawaki2008,\n fetch_openneuro_dataset,\n fetch_openneuro_dataset_index,\n fetch_spm_auditory,\n fetch_spm_multimodal_fmri,\n fetch_surf_nki_enhanced,\n patch_openneuro_dataset,\n select_from_index,\n)\nfrom .neurovault import (\n fetch_neurovault,\n fetch_neurovault_auditory_computation_task,\n fetch_neurovault_ids,\n fetch_neurovault_motor_task,\n)\nfrom .struct import (\n GM_MNI152_FILE_PATH,\n MNI152_FILE_PATH,\n WM_MNI152_FILE_PATH,\n fetch_icbm152_2009,\n fetch_icbm152_brain_gm_mask,\n fetch_oasis_vbm,\n fetch_surf_fsaverage,\n load_mni152_brain_mask,\n load_mni152_gm_mask,\n load_mni152_gm_template,\n load_mni152_template,\n load_mni152_wm_mask,\n load_mni152_wm_template,\n)\nfrom .utils import get_data_dirs, load_sample_motor_activation_image\n\n__all__ = [\n \"MNI152_FILE_PATH\",\n \"GM_MNI152_FILE_PATH\",\n \"WM_MNI152_FILE_PATH\",\n \"fetch_icbm152_2009\",\n \"load_mni152_template\",\n \"load_mni152_gm_template\",\n \"load_mni152_wm_template\",\n \"fetch_oasis_vbm\",\n \"fetch_haxby\",\n \"fetch_adhd\",\n \"fetch_miyawaki2008\",\n \"fetch_localizer_contrasts\",\n \"fetch_localizer_button_task\",\n \"fetch_abide_pcp\",\n \"fetch_localizer_calculation_task\",\n \"fetch_atlas_craddock_2012\",\n \"fetch_atlas_destrieux_2009\",\n 
\"fetch_atlas_juelich\",\n \"fetch_atlas_harvard_oxford\",\n \"fetch_atlas_msdl\",\n \"fetch_atlas_schaefer_2018\",\n \"fetch_coords_power_2011\",\n \"fetch_coords_seitzman_2018\",\n \"fetch_atlas_smith_2009\",\n \"fetch_atlas_allen_2011\",\n \"fetch_atlas_yeo_2011\",\n \"fetch_mixed_gambles\",\n \"fetch_atlas_aal\",\n \"fetch_atlas_difumo\",\n \"fetch_megatrawls_netmats\",\n \"fetch_surf_nki_enhanced\",\n \"fetch_development_fmri\",\n \"fetch_surf_fsaverage\",\n \"fetch_atlas_basc_multiscale_2015\",\n \"fetch_coords_dosenbach_2010\",\n \"fetch_neurovault\",\n \"fetch_neurovault_ids\",\n \"fetch_neurovault_motor_task\",\n \"fetch_neurovault_auditory_computation_task\",\n \"load_mni152_brain_mask\",\n \"load_mni152_gm_mask\",\n \"load_mni152_wm_mask\",\n \"fetch_icbm152_brain_gm_mask\",\n \"fetch_atlas_surf_destrieux\",\n \"fetch_atlas_talairach\",\n \"get_data_dirs\",\n \"load_sample_motor_activation_image\",\n \"fetch_language_localizer_demo_dataset\",\n \"fetch_bids_langloc_dataset\",\n \"fetch_openneuro_dataset_index\",\n \"select_from_index\",\n \"patch_openneuro_dataset\",\n \"fetch_openneuro_dataset\",\n \"fetch_localizer_first_level\",\n \"fetch_spm_auditory\",\n \"fetch_spm_multimodal_fmri\",\n \"fetch_fiac_first_level\",\n]\n", "path": "nilearn/datasets/__init__.py"}], "after_files": [{"content": "\"\"\"Helper functions to download NeuroImaging datasets.\"\"\"\n\nfrom .atlas import (\n fetch_atlas_aal,\n fetch_atlas_allen_2011,\n fetch_atlas_basc_multiscale_2015,\n fetch_atlas_craddock_2012,\n fetch_atlas_destrieux_2009,\n fetch_atlas_difumo,\n fetch_atlas_harvard_oxford,\n fetch_atlas_juelich,\n fetch_atlas_msdl,\n fetch_atlas_pauli_2017,\n fetch_atlas_schaefer_2018,\n fetch_atlas_smith_2009,\n fetch_atlas_surf_destrieux,\n fetch_atlas_talairach,\n fetch_atlas_yeo_2011,\n fetch_coords_dosenbach_2010,\n fetch_coords_power_2011,\n fetch_coords_seitzman_2018,\n)\nfrom .func import (\n fetch_abide_pcp,\n fetch_adhd,\n fetch_bids_langloc_dataset,\n fetch_development_fmri,\n fetch_ds000030_urls,\n fetch_fiac_first_level,\n fetch_haxby,\n fetch_language_localizer_demo_dataset,\n fetch_localizer_button_task,\n fetch_localizer_calculation_task,\n fetch_localizer_contrasts,\n fetch_localizer_first_level,\n fetch_megatrawls_netmats,\n fetch_mixed_gambles,\n fetch_miyawaki2008,\n fetch_openneuro_dataset,\n fetch_openneuro_dataset_index,\n fetch_spm_auditory,\n fetch_spm_multimodal_fmri,\n fetch_surf_nki_enhanced,\n patch_openneuro_dataset,\n select_from_index,\n)\nfrom .neurovault import (\n fetch_neurovault,\n fetch_neurovault_auditory_computation_task,\n fetch_neurovault_ids,\n fetch_neurovault_motor_task,\n)\nfrom .struct import (\n GM_MNI152_FILE_PATH,\n MNI152_FILE_PATH,\n WM_MNI152_FILE_PATH,\n fetch_icbm152_2009,\n fetch_icbm152_brain_gm_mask,\n fetch_oasis_vbm,\n fetch_surf_fsaverage,\n load_mni152_brain_mask,\n load_mni152_gm_mask,\n load_mni152_gm_template,\n load_mni152_template,\n load_mni152_wm_mask,\n load_mni152_wm_template,\n)\nfrom .utils import get_data_dirs, load_sample_motor_activation_image\n\n__all__ = [\n \"MNI152_FILE_PATH\",\n \"GM_MNI152_FILE_PATH\",\n \"WM_MNI152_FILE_PATH\",\n \"fetch_icbm152_2009\",\n \"load_mni152_template\",\n \"load_mni152_gm_template\",\n \"load_mni152_wm_template\",\n \"fetch_oasis_vbm\",\n \"fetch_haxby\",\n \"fetch_adhd\",\n \"fetch_miyawaki2008\",\n \"fetch_localizer_contrasts\",\n \"fetch_localizer_button_task\",\n \"fetch_abide_pcp\",\n \"fetch_localizer_calculation_task\",\n \"fetch_atlas_craddock_2012\",\n 
\"fetch_atlas_destrieux_2009\",\n \"fetch_atlas_juelich\",\n \"fetch_atlas_harvard_oxford\",\n \"fetch_atlas_msdl\",\n \"fetch_atlas_pauli_2017\",\n \"fetch_atlas_schaefer_2018\",\n \"fetch_coords_power_2011\",\n \"fetch_coords_seitzman_2018\",\n \"fetch_atlas_smith_2009\",\n \"fetch_atlas_allen_2011\",\n \"fetch_atlas_yeo_2011\",\n \"fetch_mixed_gambles\",\n \"fetch_atlas_aal\",\n \"fetch_atlas_difumo\",\n \"fetch_megatrawls_netmats\",\n \"fetch_surf_nki_enhanced\",\n \"fetch_development_fmri\",\n \"fetch_ds000030_urls\",\n \"fetch_surf_fsaverage\",\n \"fetch_atlas_basc_multiscale_2015\",\n \"fetch_coords_dosenbach_2010\",\n \"fetch_neurovault\",\n \"fetch_neurovault_ids\",\n \"fetch_neurovault_motor_task\",\n \"fetch_neurovault_auditory_computation_task\",\n \"load_mni152_brain_mask\",\n \"load_mni152_gm_mask\",\n \"load_mni152_wm_mask\",\n \"fetch_icbm152_brain_gm_mask\",\n \"fetch_atlas_surf_destrieux\",\n \"fetch_atlas_talairach\",\n \"get_data_dirs\",\n \"load_sample_motor_activation_image\",\n \"fetch_language_localizer_demo_dataset\",\n \"fetch_bids_langloc_dataset\",\n \"fetch_openneuro_dataset_index\",\n \"select_from_index\",\n \"patch_openneuro_dataset\",\n \"fetch_openneuro_dataset\",\n \"fetch_localizer_first_level\",\n \"fetch_spm_auditory\",\n \"fetch_spm_multimodal_fmri\",\n \"fetch_fiac_first_level\",\n]\n", "path": "nilearn/datasets/__init__.py"}]}
| 1,756 | 387 |
gh_patches_debug_27595
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-14870
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Simple condition (without and/or) does not work in event rule
### Deployment Type
Self-hosted
### NetBox Version
v3.7.0
### Python Version
3.11
### Steps to Reproduce
1. Create webhook: Name = Test, URL = http://127.0.0.1:9000 (doesn't matter in this case, it won't be triggered but is required to configure event rule)
2. Go to **Event rules - Add**:
- Name = Test
- Content types = Circuit
- select Updates
- set Conditions:
```
{
"attr": "status.value",
"value": "active"
}
```
- Action type = Webhook
- Webhook = Test
- **Create**
### Expected Behavior
Event rule is created
### Observed Behavior
Error is shown about the condition:
**Ruleset must have exactly one logical operator (found 2)**
The examples in https://docs.netbox.dev/en/stable/reference/conditions/ look the same: simple JSON object with attributes `attr` and `value`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/extras/conditions.py`
Content:
```
1 import functools
2 import re
3 from django.utils.translation import gettext as _
4
5 __all__ = (
6 'Condition',
7 'ConditionSet',
8 )
9
10
11 AND = 'and'
12 OR = 'or'
13
14
15 def is_ruleset(data):
16 """
17 Determine whether the given dictionary looks like a rule set.
18 """
19 return type(data) is dict and len(data) == 1 and list(data.keys())[0] in (AND, OR)
20
21
22 class Condition:
23 """
24 An individual conditional rule that evaluates a single attribute and its value.
25
26 :param attr: The name of the attribute being evaluated
27 :param value: The value being compared
28 :param op: The logical operation to use when evaluating the value (default: 'eq')
29 """
30 EQ = 'eq'
31 GT = 'gt'
32 GTE = 'gte'
33 LT = 'lt'
34 LTE = 'lte'
35 IN = 'in'
36 CONTAINS = 'contains'
37 REGEX = 'regex'
38
39 OPERATORS = (
40 EQ, GT, GTE, LT, LTE, IN, CONTAINS, REGEX
41 )
42
43 TYPES = {
44 str: (EQ, CONTAINS, REGEX),
45 bool: (EQ, CONTAINS),
46 int: (EQ, GT, GTE, LT, LTE, CONTAINS),
47 float: (EQ, GT, GTE, LT, LTE, CONTAINS),
48 list: (EQ, IN, CONTAINS),
49 type(None): (EQ,)
50 }
51
52 def __init__(self, attr, value, op=EQ, negate=False):
53 if op not in self.OPERATORS:
54 raise ValueError(_("Unknown operator: {op}. Must be one of: {operators}").format(
55 op=op, operators=', '.join(self.OPERATORS)
56 ))
57 if type(value) not in self.TYPES:
58 raise ValueError(_("Unsupported value type: {value}").format(value=type(value)))
59 if op not in self.TYPES[type(value)]:
60 raise ValueError(_("Invalid type for {op} operation: {value}").format(op=op, value=type(value)))
61
62 self.attr = attr
63 self.value = value
64 self.eval_func = getattr(self, f'eval_{op}')
65 self.negate = negate
66
67 def eval(self, data):
68 """
69 Evaluate the provided data to determine whether it matches the condition.
70 """
71 def _get(obj, key):
72 if isinstance(obj, list):
73 return [dict.get(i, key) for i in obj]
74
75 return dict.get(obj, key)
76
77 try:
78 value = functools.reduce(_get, self.attr.split('.'), data)
79 except TypeError:
80 # Invalid key path
81 value = None
82 result = self.eval_func(value)
83
84 if self.negate:
85 return not result
86 return result
87
88 # Equivalency
89
90 def eval_eq(self, value):
91 return value == self.value
92
93 def eval_neq(self, value):
94 return value != self.value
95
96 # Numeric comparisons
97
98 def eval_gt(self, value):
99 return value > self.value
100
101 def eval_gte(self, value):
102 return value >= self.value
103
104 def eval_lt(self, value):
105 return value < self.value
106
107 def eval_lte(self, value):
108 return value <= self.value
109
110 # Membership
111
112 def eval_in(self, value):
113 return value in self.value
114
115 def eval_contains(self, value):
116 return self.value in value
117
118 # Regular expressions
119
120 def eval_regex(self, value):
121 return re.match(self.value, value) is not None
122
123
124 class ConditionSet:
125 """
126 A set of one or more Condition to be evaluated per the prescribed logic (AND or OR). Example:
127
128 {"and": [
129 {"attr": "foo", "op": "eq", "value": 1},
130 {"attr": "bar", "op": "eq", "value": 2, "negate": true}
131 ]}
132
133 :param ruleset: A dictionary mapping a logical operator to a list of conditional rules
134 """
135 def __init__(self, ruleset):
136 if type(ruleset) is not dict:
137 raise ValueError(_("Ruleset must be a dictionary, not {ruleset}.").format(ruleset=type(ruleset)))
138 if len(ruleset) != 1:
139 raise ValueError(_("Ruleset must have exactly one logical operator (found {ruleset})").format(
140 ruleset=len(ruleset)))
141
142 # Determine the logic type
143 logic = list(ruleset.keys())[0]
144 if type(logic) is not str or logic.lower() not in (AND, OR):
145 raise ValueError(_("Invalid logic type: {logic} (must be '{op_and}' or '{op_or}')").format(
146 logic=logic, op_and=AND, op_or=OR
147 ))
148 self.logic = logic.lower()
149
150 # Compile the set of Conditions
151 self.conditions = [
152 ConditionSet(rule) if is_ruleset(rule) else Condition(**rule)
153 for rule in ruleset[self.logic]
154 ]
155
156 def eval(self, data):
157 """
158 Evaluate the provided data to determine whether it matches this set of conditions.
159 """
160 func = any if self.logic == 'or' else all
161 return func(d.eval(data) for d in self.conditions)
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/netbox/extras/conditions.py b/netbox/extras/conditions.py
--- a/netbox/extras/conditions.py
+++ b/netbox/extras/conditions.py
@@ -135,23 +135,23 @@
def __init__(self, ruleset):
if type(ruleset) is not dict:
raise ValueError(_("Ruleset must be a dictionary, not {ruleset}.").format(ruleset=type(ruleset)))
- if len(ruleset) != 1:
- raise ValueError(_("Ruleset must have exactly one logical operator (found {ruleset})").format(
- ruleset=len(ruleset)))
-
- # Determine the logic type
- logic = list(ruleset.keys())[0]
- if type(logic) is not str or logic.lower() not in (AND, OR):
- raise ValueError(_("Invalid logic type: {logic} (must be '{op_and}' or '{op_or}')").format(
- logic=logic, op_and=AND, op_or=OR
- ))
- self.logic = logic.lower()
- # Compile the set of Conditions
- self.conditions = [
- ConditionSet(rule) if is_ruleset(rule) else Condition(**rule)
- for rule in ruleset[self.logic]
- ]
+ if len(ruleset) == 1:
+ self.logic = (list(ruleset.keys())[0]).lower()
+ if self.logic not in (AND, OR):
+ raise ValueError(_("Invalid logic type: must be 'AND' or 'OR'. Please check documentation."))
+
+ # Compile the set of Conditions
+ self.conditions = [
+ ConditionSet(rule) if is_ruleset(rule) else Condition(**rule)
+ for rule in ruleset[self.logic]
+ ]
+ else:
+ try:
+ self.logic = None
+ self.conditions = [Condition(**ruleset)]
+ except TypeError:
+ raise ValueError(_("Incorrect key(s) informed. Please check documentation."))
def eval(self, data):
"""
|
{"golden_diff": "diff --git a/netbox/extras/conditions.py b/netbox/extras/conditions.py\n--- a/netbox/extras/conditions.py\n+++ b/netbox/extras/conditions.py\n@@ -135,23 +135,23 @@\n def __init__(self, ruleset):\n if type(ruleset) is not dict:\n raise ValueError(_(\"Ruleset must be a dictionary, not {ruleset}.\").format(ruleset=type(ruleset)))\n- if len(ruleset) != 1:\n- raise ValueError(_(\"Ruleset must have exactly one logical operator (found {ruleset})\").format(\n- ruleset=len(ruleset)))\n-\n- # Determine the logic type\n- logic = list(ruleset.keys())[0]\n- if type(logic) is not str or logic.lower() not in (AND, OR):\n- raise ValueError(_(\"Invalid logic type: {logic} (must be '{op_and}' or '{op_or}')\").format(\n- logic=logic, op_and=AND, op_or=OR\n- ))\n- self.logic = logic.lower()\n \n- # Compile the set of Conditions\n- self.conditions = [\n- ConditionSet(rule) if is_ruleset(rule) else Condition(**rule)\n- for rule in ruleset[self.logic]\n- ]\n+ if len(ruleset) == 1:\n+ self.logic = (list(ruleset.keys())[0]).lower()\n+ if self.logic not in (AND, OR):\n+ raise ValueError(_(\"Invalid logic type: must be 'AND' or 'OR'. Please check documentation.\"))\n+\n+ # Compile the set of Conditions\n+ self.conditions = [\n+ ConditionSet(rule) if is_ruleset(rule) else Condition(**rule)\n+ for rule in ruleset[self.logic]\n+ ]\n+ else:\n+ try:\n+ self.logic = None\n+ self.conditions = [Condition(**ruleset)]\n+ except TypeError:\n+ raise ValueError(_(\"Incorrect key(s) informed. Please check documentation.\"))\n \n def eval(self, data):\n \"\"\"\n", "issue": "Simple condition (without and/or) does not work in event rule\n### Deployment Type\n\nSelf-hosted\n\n### NetBox Version\n\nv3.7.0\n\n### Python Version\n\n3.11\n\n### Steps to Reproduce\n\n1. Create webhook: Name = Test, URL = http://127.0.0.1:9000 (doesn't matter in this case, it won't be triggered but is required to configure event rule)\r\n2. 
Go to **Event rules - Add**:\r\n- Name = Test\r\n- Content types = Circuit\r\n- select Updates\r\n- set Conditions:\r\n```\r\n{\r\n \"attr\": \"status.value\",\r\n \"value\": \"active\"\r\n}\r\n```\r\n\r\n- Action type = Webhook\r\n- Webhook = Test\r\n- **Create**\r\n\n\n### Expected Behavior\n\nEvent rule is created\n\n### Observed Behavior\n\nError is shown about the condition:\r\n\r\n**Ruleset must have exactly one logical operator (found 2)** \r\n\r\nThe examples in https://docs.netbox.dev/en/stable/reference/conditions/ look the same: simple JSON object with attributes `attr` and `value`.\n", "before_files": [{"content": "import functools\nimport re\nfrom django.utils.translation import gettext as _\n\n__all__ = (\n 'Condition',\n 'ConditionSet',\n)\n\n\nAND = 'and'\nOR = 'or'\n\n\ndef is_ruleset(data):\n \"\"\"\n Determine whether the given dictionary looks like a rule set.\n \"\"\"\n return type(data) is dict and len(data) == 1 and list(data.keys())[0] in (AND, OR)\n\n\nclass Condition:\n \"\"\"\n An individual conditional rule that evaluates a single attribute and its value.\n\n :param attr: The name of the attribute being evaluated\n :param value: The value being compared\n :param op: The logical operation to use when evaluating the value (default: 'eq')\n \"\"\"\n EQ = 'eq'\n GT = 'gt'\n GTE = 'gte'\n LT = 'lt'\n LTE = 'lte'\n IN = 'in'\n CONTAINS = 'contains'\n REGEX = 'regex'\n\n OPERATORS = (\n EQ, GT, GTE, LT, LTE, IN, CONTAINS, REGEX\n )\n\n TYPES = {\n str: (EQ, CONTAINS, REGEX),\n bool: (EQ, CONTAINS),\n int: (EQ, GT, GTE, LT, LTE, CONTAINS),\n float: (EQ, GT, GTE, LT, LTE, CONTAINS),\n list: (EQ, IN, CONTAINS),\n type(None): (EQ,)\n }\n\n def __init__(self, attr, value, op=EQ, negate=False):\n if op not in self.OPERATORS:\n raise ValueError(_(\"Unknown operator: {op}. Must be one of: {operators}\").format(\n op=op, operators=', '.join(self.OPERATORS)\n ))\n if type(value) not in self.TYPES:\n raise ValueError(_(\"Unsupported value type: {value}\").format(value=type(value)))\n if op not in self.TYPES[type(value)]:\n raise ValueError(_(\"Invalid type for {op} operation: {value}\").format(op=op, value=type(value)))\n\n self.attr = attr\n self.value = value\n self.eval_func = getattr(self, f'eval_{op}')\n self.negate = negate\n\n def eval(self, data):\n \"\"\"\n Evaluate the provided data to determine whether it matches the condition.\n \"\"\"\n def _get(obj, key):\n if isinstance(obj, list):\n return [dict.get(i, key) for i in obj]\n\n return dict.get(obj, key)\n\n try:\n value = functools.reduce(_get, self.attr.split('.'), data)\n except TypeError:\n # Invalid key path\n value = None\n result = self.eval_func(value)\n\n if self.negate:\n return not result\n return result\n\n # Equivalency\n\n def eval_eq(self, value):\n return value == self.value\n\n def eval_neq(self, value):\n return value != self.value\n\n # Numeric comparisons\n\n def eval_gt(self, value):\n return value > self.value\n\n def eval_gte(self, value):\n return value >= self.value\n\n def eval_lt(self, value):\n return value < self.value\n\n def eval_lte(self, value):\n return value <= self.value\n\n # Membership\n\n def eval_in(self, value):\n return value in self.value\n\n def eval_contains(self, value):\n return self.value in value\n\n # Regular expressions\n\n def eval_regex(self, value):\n return re.match(self.value, value) is not None\n\n\nclass ConditionSet:\n \"\"\"\n A set of one or more Condition to be evaluated per the prescribed logic (AND or OR). 
Example:\n\n {\"and\": [\n {\"attr\": \"foo\", \"op\": \"eq\", \"value\": 1},\n {\"attr\": \"bar\", \"op\": \"eq\", \"value\": 2, \"negate\": true}\n ]}\n\n :param ruleset: A dictionary mapping a logical operator to a list of conditional rules\n \"\"\"\n def __init__(self, ruleset):\n if type(ruleset) is not dict:\n raise ValueError(_(\"Ruleset must be a dictionary, not {ruleset}.\").format(ruleset=type(ruleset)))\n if len(ruleset) != 1:\n raise ValueError(_(\"Ruleset must have exactly one logical operator (found {ruleset})\").format(\n ruleset=len(ruleset)))\n\n # Determine the logic type\n logic = list(ruleset.keys())[0]\n if type(logic) is not str or logic.lower() not in (AND, OR):\n raise ValueError(_(\"Invalid logic type: {logic} (must be '{op_and}' or '{op_or}')\").format(\n logic=logic, op_and=AND, op_or=OR\n ))\n self.logic = logic.lower()\n\n # Compile the set of Conditions\n self.conditions = [\n ConditionSet(rule) if is_ruleset(rule) else Condition(**rule)\n for rule in ruleset[self.logic]\n ]\n\n def eval(self, data):\n \"\"\"\n Evaluate the provided data to determine whether it matches this set of conditions.\n \"\"\"\n func = any if self.logic == 'or' else all\n return func(d.eval(data) for d in self.conditions)\n", "path": "netbox/extras/conditions.py"}], "after_files": [{"content": "import functools\nimport re\nfrom django.utils.translation import gettext as _\n\n__all__ = (\n 'Condition',\n 'ConditionSet',\n)\n\n\nAND = 'and'\nOR = 'or'\n\n\ndef is_ruleset(data):\n \"\"\"\n Determine whether the given dictionary looks like a rule set.\n \"\"\"\n return type(data) is dict and len(data) == 1 and list(data.keys())[0] in (AND, OR)\n\n\nclass Condition:\n \"\"\"\n An individual conditional rule that evaluates a single attribute and its value.\n\n :param attr: The name of the attribute being evaluated\n :param value: The value being compared\n :param op: The logical operation to use when evaluating the value (default: 'eq')\n \"\"\"\n EQ = 'eq'\n GT = 'gt'\n GTE = 'gte'\n LT = 'lt'\n LTE = 'lte'\n IN = 'in'\n CONTAINS = 'contains'\n REGEX = 'regex'\n\n OPERATORS = (\n EQ, GT, GTE, LT, LTE, IN, CONTAINS, REGEX\n )\n\n TYPES = {\n str: (EQ, CONTAINS, REGEX),\n bool: (EQ, CONTAINS),\n int: (EQ, GT, GTE, LT, LTE, CONTAINS),\n float: (EQ, GT, GTE, LT, LTE, CONTAINS),\n list: (EQ, IN, CONTAINS),\n type(None): (EQ,)\n }\n\n def __init__(self, attr, value, op=EQ, negate=False):\n if op not in self.OPERATORS:\n raise ValueError(_(\"Unknown operator: {op}. 
Must be one of: {operators}\").format(\n op=op, operators=', '.join(self.OPERATORS)\n ))\n if type(value) not in self.TYPES:\n raise ValueError(_(\"Unsupported value type: {value}\").format(value=type(value)))\n if op not in self.TYPES[type(value)]:\n raise ValueError(_(\"Invalid type for {op} operation: {value}\").format(op=op, value=type(value)))\n\n self.attr = attr\n self.value = value\n self.eval_func = getattr(self, f'eval_{op}')\n self.negate = negate\n\n def eval(self, data):\n \"\"\"\n Evaluate the provided data to determine whether it matches the condition.\n \"\"\"\n def _get(obj, key):\n if isinstance(obj, list):\n return [dict.get(i, key) for i in obj]\n\n return dict.get(obj, key)\n\n try:\n value = functools.reduce(_get, self.attr.split('.'), data)\n except TypeError:\n # Invalid key path\n value = None\n result = self.eval_func(value)\n\n if self.negate:\n return not result\n return result\n\n # Equivalency\n\n def eval_eq(self, value):\n return value == self.value\n\n def eval_neq(self, value):\n return value != self.value\n\n # Numeric comparisons\n\n def eval_gt(self, value):\n return value > self.value\n\n def eval_gte(self, value):\n return value >= self.value\n\n def eval_lt(self, value):\n return value < self.value\n\n def eval_lte(self, value):\n return value <= self.value\n\n # Membership\n\n def eval_in(self, value):\n return value in self.value\n\n def eval_contains(self, value):\n return self.value in value\n\n # Regular expressions\n\n def eval_regex(self, value):\n return re.match(self.value, value) is not None\n\n\nclass ConditionSet:\n \"\"\"\n A set of one or more Condition to be evaluated per the prescribed logic (AND or OR). Example:\n\n {\"and\": [\n {\"attr\": \"foo\", \"op\": \"eq\", \"value\": 1},\n {\"attr\": \"bar\", \"op\": \"eq\", \"value\": 2, \"negate\": true}\n ]}\n\n :param ruleset: A dictionary mapping a logical operator to a list of conditional rules\n \"\"\"\n def __init__(self, ruleset):\n if type(ruleset) is not dict:\n raise ValueError(_(\"Ruleset must be a dictionary, not {ruleset}.\").format(ruleset=type(ruleset)))\n\n if len(ruleset) == 1:\n self.logic = (list(ruleset.keys())[0]).lower()\n if self.logic not in (AND, OR):\n raise ValueError(_(\"Invalid logic type: must be 'AND' or 'OR'. Please check documentation.\"))\n\n # Compile the set of Conditions\n self.conditions = [\n ConditionSet(rule) if is_ruleset(rule) else Condition(**rule)\n for rule in ruleset[self.logic]\n ]\n else:\n try:\n self.logic = None\n self.conditions = [Condition(**ruleset)]\n except TypeError:\n raise ValueError(_(\"Incorrect key(s) informed. Please check documentation.\"))\n\n def eval(self, data):\n \"\"\"\n Evaluate the provided data to determine whether it matches this set of conditions.\n \"\"\"\n func = any if self.logic == 'or' else all\n return func(d.eval(data) for d in self.conditions)\n", "path": "netbox/extras/conditions.py"}]}
| 2,030 | 447 |
gh_patches_debug_51300
|
rasdani/github-patches
|
git_diff
|
translate__pootle-5619
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Priority column is missing
Since the column reordering, we've lost the priority column in the vfolders table.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/virtualfolder/views.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from django import forms
10 from django.http import Http404
11 from django.shortcuts import get_object_or_404
12 from django.urls import reverse
13 from django.utils.functional import cached_property
14
15 from pootle.core.browser import get_table_headings
16 from pootle.core.delegate import search_backend
17 from pootle.core.exceptions import Http400
18 from pootle.core.http import JsonResponse
19 from pootle.core.url_helpers import get_path_parts, split_pootle_path
20 from pootle.i18n.gettext import ugettext as _
21 from pootle_misc.util import ajax_required
22 from pootle_store.forms import UnitSearchForm
23 from pootle_store.unit.results import GroupedResults
24 from pootle_translationproject.views import TPTranslateView
25
26 from .delegate import vfolders_data_tool
27 from .models import VirtualFolder
28
29
30 def make_vfolder_dict(context, vf, stats):
31 lang_code, proj_code = split_pootle_path(context.pootle_path)[:2]
32 base_url = reverse(
33 "pootle-vfolder-tp-translate",
34 kwargs=dict(
35 vfolder_name=vf,
36 language_code=lang_code,
37 project_code=proj_code))
38 return {
39 'href_translate': base_url,
40 'title': stats["title"],
41 'code': vf,
42 'priority': stats.get("priority"),
43 'is_grayed': not stats["isVisible"],
44 'stats': stats,
45 'icon': 'vfolder'}
46
47
48 class VFolderTPTranslateView(TPTranslateView):
49 display_vfolder_priority = False
50
51 @cached_property
52 def check_data(self):
53 return self.vfolders_data_view.vfolder_data_tool.get_checks(
54 user=self.request.user).get(self.vfolder_pk, {})
55
56 @cached_property
57 def vfolder(self):
58 return VirtualFolder.objects.get(name=self.kwargs["vfolder_name"])
59
60 @property
61 def vfolder_pk(self):
62 return self.vfolder.pk
63
64 def get_context_data(self, *args, **kwargs):
65 ctx = super(
66 VFolderTPTranslateView,
67 self).get_context_data(*args, **kwargs)
68 ctx["unit_api_root"] = reverse(
69 "vfolder-pootle-xhr-units",
70 kwargs=dict(vfolder_name=self.vfolder.name))
71 ctx["resource_path"] = (
72 "/".join(
73 ["++vfolder",
74 self.vfolder.name,
75 self.object.pootle_path.replace(self.ctx_path, "")]))
76 ctx["resource_path_parts"] = get_path_parts(ctx["resource_path"])
77 return ctx
78
79
80 @ajax_required
81 def get_vfolder_units(request, **kwargs):
82 """Gets source and target texts and its metadata.
83
84 :return: A JSON-encoded string containing the source and target texts
85 grouped by the store they belong to.
86
87 The optional `count` GET parameter defines the chunk size to
88 consider. The user's preference will be used by default.
89
90 When the `initial` GET parameter is present, a sorted list of
91 the result set ids will be returned too.
92 """
93 search_form = UnitSearchForm(request.GET, user=request.user)
94
95 vfolder = get_object_or_404(
96 VirtualFolder,
97 name=kwargs.get("vfolder_name"))
98
99 if not search_form.is_valid():
100 errors = search_form.errors.as_data()
101 if "path" in errors:
102 for error in errors["path"]:
103 if error.code == "max_length":
104 raise Http400(_('Path too long.'))
105 elif error.code == "required":
106 raise Http400(_('Arguments missing.'))
107 raise Http404(forms.ValidationError(search_form.errors).messages)
108
109 search_form.cleaned_data["vfolder"] = vfolder
110 backend = search_backend.get(VirtualFolder)(
111 request.user, **search_form.cleaned_data)
112 total, start, end, units_qs = backend.search()
113 return JsonResponse(
114 {'start': start,
115 'end': end,
116 'total': total,
117 'unitGroups': GroupedResults(units_qs).data})
118
119
120 class VFoldersDataView(object):
121
122 _table_fields = (
123 'name', 'progress', 'activity',
124 'total', 'need-translation',
125 'suggestions', 'critical')
126
127 def __init__(self, context, user, has_admin_access=False):
128 self.context = context
129 self.user = user
130 self.has_admin_access = has_admin_access
131
132 @property
133 def vfolder_data_tool(self):
134 return vfolders_data_tool.get(self.context.__class__)(self.context)
135
136 @property
137 def table_fields(self):
138 fields = self._table_fields
139 if self.has_admin_access:
140 fields += ('last-updated', )
141 return fields
142
143 @cached_property
144 def table_data(self):
145 ctx = {}
146 if len(self.all_stats) > 0:
147 ctx.update({
148 'children': {
149 'id': 'vfolders',
150 'fields': self.table_fields,
151 'headings': get_table_headings(self.table_fields),
152 'rows': self.table_items}})
153 return ctx
154
155 @cached_property
156 def all_stats(self):
157 return self.vfolder_data_tool.get_stats(user=self.user)
158
159 @cached_property
160 def stats(self):
161 return dict(children=self.all_stats)
162
163 @property
164 def table_items(self):
165 return [
166 make_vfolder_dict(self.context, *vf)
167 for vf
168 in self.all_stats.items()]
169
170 @cached_property
171 def has_data(self):
172 return (
173 self.vfolder_data_tool.all_stat_data.exists()
174 if self.vfolder_data_tool.show_all_to(self.user)
175 else self.vfolder_data_tool.stat_data.exists())
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pootle/apps/virtualfolder/views.py b/pootle/apps/virtualfolder/views.py
--- a/pootle/apps/virtualfolder/views.py
+++ b/pootle/apps/virtualfolder/views.py
@@ -122,7 +122,7 @@
_table_fields = (
'name', 'progress', 'activity',
'total', 'need-translation',
- 'suggestions', 'critical')
+ 'suggestions', 'critical', 'priority')
def __init__(self, context, user, has_admin_access=False):
self.context = context
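
A note on why this one-line change restores the column, with an illustrative sketch below (the loop is an assumption about how the template consumes the data, not Pootle's actual rendering code): `make_vfolder_dict` already puts a `priority` key into every row dict, but only keys listed in `_table_fields` become columns, so a key missing from that tuple is silently dropped.

```python
# Illustrative only: field names mirror the view above, the row is a stub.
fields = ('name', 'progress', 'activity', 'total', 'need-translation',
          'suggestions', 'critical', 'priority')
row = {'code': 'checks', 'priority': 2.0, 'is_grayed': False}
# Columns are produced per entry in `fields`; without 'priority' in the
# tuple, row['priority'] never reaches the table even though it is computed.
cells = [(field, row.get(field)) for field in fields]
```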
|
{"golden_diff": "diff --git a/pootle/apps/virtualfolder/views.py b/pootle/apps/virtualfolder/views.py\n--- a/pootle/apps/virtualfolder/views.py\n+++ b/pootle/apps/virtualfolder/views.py\n@@ -122,7 +122,7 @@\n _table_fields = (\n 'name', 'progress', 'activity',\n 'total', 'need-translation',\n- 'suggestions', 'critical')\n+ 'suggestions', 'critical', 'priority')\n \n def __init__(self, context, user, has_admin_access=False):\n self.context = context\n", "issue": "Priority column is missing\nSince the column reordering we've lost the priority column in the vfolders table\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django import forms\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\n\nfrom pootle.core.browser import get_table_headings\nfrom pootle.core.delegate import search_backend\nfrom pootle.core.exceptions import Http400\nfrom pootle.core.http import JsonResponse\nfrom pootle.core.url_helpers import get_path_parts, split_pootle_path\nfrom pootle.i18n.gettext import ugettext as _\nfrom pootle_misc.util import ajax_required\nfrom pootle_store.forms import UnitSearchForm\nfrom pootle_store.unit.results import GroupedResults\nfrom pootle_translationproject.views import TPTranslateView\n\nfrom .delegate import vfolders_data_tool\nfrom .models import VirtualFolder\n\n\ndef make_vfolder_dict(context, vf, stats):\n lang_code, proj_code = split_pootle_path(context.pootle_path)[:2]\n base_url = reverse(\n \"pootle-vfolder-tp-translate\",\n kwargs=dict(\n vfolder_name=vf,\n language_code=lang_code,\n project_code=proj_code))\n return {\n 'href_translate': base_url,\n 'title': stats[\"title\"],\n 'code': vf,\n 'priority': stats.get(\"priority\"),\n 'is_grayed': not stats[\"isVisible\"],\n 'stats': stats,\n 'icon': 'vfolder'}\n\n\nclass VFolderTPTranslateView(TPTranslateView):\n display_vfolder_priority = False\n\n @cached_property\n def check_data(self):\n return self.vfolders_data_view.vfolder_data_tool.get_checks(\n user=self.request.user).get(self.vfolder_pk, {})\n\n @cached_property\n def vfolder(self):\n return VirtualFolder.objects.get(name=self.kwargs[\"vfolder_name\"])\n\n @property\n def vfolder_pk(self):\n return self.vfolder.pk\n\n def get_context_data(self, *args, **kwargs):\n ctx = super(\n VFolderTPTranslateView,\n self).get_context_data(*args, **kwargs)\n ctx[\"unit_api_root\"] = reverse(\n \"vfolder-pootle-xhr-units\",\n kwargs=dict(vfolder_name=self.vfolder.name))\n ctx[\"resource_path\"] = (\n \"/\".join(\n [\"++vfolder\",\n self.vfolder.name,\n self.object.pootle_path.replace(self.ctx_path, \"\")]))\n ctx[\"resource_path_parts\"] = get_path_parts(ctx[\"resource_path\"])\n return ctx\n\n\n@ajax_required\ndef get_vfolder_units(request, **kwargs):\n \"\"\"Gets source and target texts and its metadata.\n\n :return: A JSON-encoded string containing the source and target texts\n grouped by the store they belong to.\n\n The optional `count` GET parameter defines the chunk size to\n consider. 
The user's preference will be used by default.\n\n When the `initial` GET parameter is present, a sorted list of\n the result set ids will be returned too.\n \"\"\"\n search_form = UnitSearchForm(request.GET, user=request.user)\n\n vfolder = get_object_or_404(\n VirtualFolder,\n name=kwargs.get(\"vfolder_name\"))\n\n if not search_form.is_valid():\n errors = search_form.errors.as_data()\n if \"path\" in errors:\n for error in errors[\"path\"]:\n if error.code == \"max_length\":\n raise Http400(_('Path too long.'))\n elif error.code == \"required\":\n raise Http400(_('Arguments missing.'))\n raise Http404(forms.ValidationError(search_form.errors).messages)\n\n search_form.cleaned_data[\"vfolder\"] = vfolder\n backend = search_backend.get(VirtualFolder)(\n request.user, **search_form.cleaned_data)\n total, start, end, units_qs = backend.search()\n return JsonResponse(\n {'start': start,\n 'end': end,\n 'total': total,\n 'unitGroups': GroupedResults(units_qs).data})\n\n\nclass VFoldersDataView(object):\n\n _table_fields = (\n 'name', 'progress', 'activity',\n 'total', 'need-translation',\n 'suggestions', 'critical')\n\n def __init__(self, context, user, has_admin_access=False):\n self.context = context\n self.user = user\n self.has_admin_access = has_admin_access\n\n @property\n def vfolder_data_tool(self):\n return vfolders_data_tool.get(self.context.__class__)(self.context)\n\n @property\n def table_fields(self):\n fields = self._table_fields\n if self.has_admin_access:\n fields += ('last-updated', )\n return fields\n\n @cached_property\n def table_data(self):\n ctx = {}\n if len(self.all_stats) > 0:\n ctx.update({\n 'children': {\n 'id': 'vfolders',\n 'fields': self.table_fields,\n 'headings': get_table_headings(self.table_fields),\n 'rows': self.table_items}})\n return ctx\n\n @cached_property\n def all_stats(self):\n return self.vfolder_data_tool.get_stats(user=self.user)\n\n @cached_property\n def stats(self):\n return dict(children=self.all_stats)\n\n @property\n def table_items(self):\n return [\n make_vfolder_dict(self.context, *vf)\n for vf\n in self.all_stats.items()]\n\n @cached_property\n def has_data(self):\n return (\n self.vfolder_data_tool.all_stat_data.exists()\n if self.vfolder_data_tool.show_all_to(self.user)\n else self.vfolder_data_tool.stat_data.exists())\n", "path": "pootle/apps/virtualfolder/views.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django import forms\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\n\nfrom pootle.core.browser import get_table_headings\nfrom pootle.core.delegate import search_backend\nfrom pootle.core.exceptions import Http400\nfrom pootle.core.http import JsonResponse\nfrom pootle.core.url_helpers import get_path_parts, split_pootle_path\nfrom pootle.i18n.gettext import ugettext as _\nfrom pootle_misc.util import ajax_required\nfrom pootle_store.forms import UnitSearchForm\nfrom pootle_store.unit.results import GroupedResults\nfrom pootle_translationproject.views import TPTranslateView\n\nfrom .delegate import vfolders_data_tool\nfrom .models import VirtualFolder\n\n\ndef make_vfolder_dict(context, vf, stats):\n lang_code, proj_code = split_pootle_path(context.pootle_path)[:2]\n base_url = reverse(\n \"pootle-vfolder-tp-translate\",\n kwargs=dict(\n vfolder_name=vf,\n language_code=lang_code,\n project_code=proj_code))\n return {\n 'href_translate': base_url,\n 'title': stats[\"title\"],\n 'code': vf,\n 'priority': stats.get(\"priority\"),\n 'is_grayed': not stats[\"isVisible\"],\n 'stats': stats,\n 'icon': 'vfolder'}\n\n\nclass VFolderTPTranslateView(TPTranslateView):\n display_vfolder_priority = False\n\n @cached_property\n def check_data(self):\n return self.vfolders_data_view.vfolder_data_tool.get_checks(\n user=self.request.user).get(self.vfolder_pk, {})\n\n @cached_property\n def vfolder(self):\n return VirtualFolder.objects.get(name=self.kwargs[\"vfolder_name\"])\n\n @property\n def vfolder_pk(self):\n return self.vfolder.pk\n\n def get_context_data(self, *args, **kwargs):\n ctx = super(\n VFolderTPTranslateView,\n self).get_context_data(*args, **kwargs)\n ctx[\"unit_api_root\"] = reverse(\n \"vfolder-pootle-xhr-units\",\n kwargs=dict(vfolder_name=self.vfolder.name))\n ctx[\"resource_path\"] = (\n \"/\".join(\n [\"++vfolder\",\n self.vfolder.name,\n self.object.pootle_path.replace(self.ctx_path, \"\")]))\n ctx[\"resource_path_parts\"] = get_path_parts(ctx[\"resource_path\"])\n return ctx\n\n\n@ajax_required\ndef get_vfolder_units(request, **kwargs):\n \"\"\"Gets source and target texts and its metadata.\n\n :return: A JSON-encoded string containing the source and target texts\n grouped by the store they belong to.\n\n The optional `count` GET parameter defines the chunk size to\n consider. 
The user's preference will be used by default.\n\n When the `initial` GET parameter is present, a sorted list of\n the result set ids will be returned too.\n \"\"\"\n search_form = UnitSearchForm(request.GET, user=request.user)\n\n vfolder = get_object_or_404(\n VirtualFolder,\n name=kwargs.get(\"vfolder_name\"))\n\n if not search_form.is_valid():\n errors = search_form.errors.as_data()\n if \"path\" in errors:\n for error in errors[\"path\"]:\n if error.code == \"max_length\":\n raise Http400(_('Path too long.'))\n elif error.code == \"required\":\n raise Http400(_('Arguments missing.'))\n raise Http404(forms.ValidationError(search_form.errors).messages)\n\n search_form.cleaned_data[\"vfolder\"] = vfolder\n backend = search_backend.get(VirtualFolder)(\n request.user, **search_form.cleaned_data)\n total, start, end, units_qs = backend.search()\n return JsonResponse(\n {'start': start,\n 'end': end,\n 'total': total,\n 'unitGroups': GroupedResults(units_qs).data})\n\n\nclass VFoldersDataView(object):\n\n _table_fields = (\n 'name', 'progress', 'activity',\n 'total', 'need-translation',\n 'suggestions', 'critical', 'priority')\n\n def __init__(self, context, user, has_admin_access=False):\n self.context = context\n self.user = user\n self.has_admin_access = has_admin_access\n\n @property\n def vfolder_data_tool(self):\n return vfolders_data_tool.get(self.context.__class__)(self.context)\n\n @property\n def table_fields(self):\n fields = self._table_fields\n if self.has_admin_access:\n fields += ('last-updated', )\n return fields\n\n @cached_property\n def table_data(self):\n ctx = {}\n if len(self.all_stats) > 0:\n ctx.update({\n 'children': {\n 'id': 'vfolders',\n 'fields': self.table_fields,\n 'headings': get_table_headings(self.table_fields),\n 'rows': self.table_items}})\n return ctx\n\n @cached_property\n def all_stats(self):\n return self.vfolder_data_tool.get_stats(user=self.user)\n\n @cached_property\n def stats(self):\n return dict(children=self.all_stats)\n\n @property\n def table_items(self):\n return [\n make_vfolder_dict(self.context, *vf)\n for vf\n in self.all_stats.items()]\n\n @cached_property\n def has_data(self):\n return (\n self.vfolder_data_tool.all_stat_data.exists()\n if self.vfolder_data_tool.show_all_to(self.user)\n else self.vfolder_data_tool.stat_data.exists())\n", "path": "pootle/apps/virtualfolder/views.py"}]}
| 2,000 | 131 |
gh_patches_debug_1413
|
rasdani/github-patches
|
git_diff
|
gratipay__gratipay.com-1314
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
reset.css doesn't load sometimes
@clone1018 saw this when we first started caching static assets. It's why I turned off static caching initially. Now static caching is back with #1245 and indeed we're seeing this again. :(

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gittip/cache_static.py`
Content:
```
1 """
2 Handles caching of static resources.
3 """
4 import os
5 from calendar import timegm
6 from email.utils import parsedate
7 from wsgiref.handlers import format_date_time
8
9 from aspen import Response
10
11
12 def version_is_available(request):
13 """Return a boolean, whether we have the version they asked for.
14 """
15 path = request.line.uri.path
16 version = request.website.version
17 return path['version'] == version if 'version' in path else True
18
19
20 def version_is_dash(request):
21 """Return a boolean, whether the version they asked for is -.
22 """
23 return request.line.uri.path.get('version') == '-'
24
25
26 def get_last_modified(fs_path):
27 """Get the last modified time, as int, of the file pointed to by fs_path.
28 """
29 return int(os.path.getctime(fs_path))
30
31
32 def inbound(request):
33 """Try to serve a 304 for resources under assets/.
34 """
35 uri = request.line.uri
36
37 if not uri.startswith('/assets/'):
38
39 # Only apply to the assets/ directory.
40
41 return request
42
43 if version_is_dash(request):
44
45 # Special-case a version of '-' to never 304/404 here.
46
47 return request
48
49 if not version_is_available(request):
50
51 # Don't serve one version of a file as if it were another.
52
53 raise Response(404)
54
55 ims = request.headers.get('If-Modified-Since')
56 if not ims:
57
58 # This client doesn't care about when the file was modified.
59
60 return request
61
62 if request.fs.endswith('.spt'):
63
64 # This is a requests for a dynamic resource. Perhaps in the future
65 # we'll delegate to such resources to compute a sensible Last-Modified
66 # or E-Tag, but for now we punt. This is okay, because we expect to
67 # put our dynamic assets behind a CDN in production.
68
69 return request
70
71
72 try:
73 ims = timegm(parsedate(ims))
74 except:
75
76 # Malformed If-Modified-Since header. Proceed with the request.
77
78 return request
79
80 last_modified = get_last_modified(request.fs)
81 if ims < last_modified:
82
83 # The file has been modified since. Serve the whole thing.
84
85 return request
86
87
88 # Huzzah!
89 # =======
90 # We can serve a 304! :D
91
92 response = Response(304)
93 response.headers['Last-Modified'] = format_date_time(last_modified)
94 response.headers['Cache-Control'] = 'no-cache'
95 raise response
96
97
98 def outbound(response):
99 """Set caching headers for resources under assets/.
100 """
101 request = response.request
102 website = request.website
103 uri = request.line.uri
104
105 version = website.version
106 response.headers['X-Gittip-Version'] = version
107
108 if not uri.startswith('/assets/'):
109 return response
110
111 response.headers.cookie.clear()
112
113 if response.code == 304:
114 return response
115
116 if website.cache_static:
117
118 # https://developers.google.com/speed/docs/best-practices/caching
119 response.headers['Cache-Control'] = 'public'
120 response.headers['Vary'] = 'accept-encoding'
121
122 if 'version' in uri.path:
123 # This specific asset is versioned, so it's fine to cache it.
124 response.headers['Expires'] = 'Sun, 17 Jan 2038 19:14:07 GMT'
125 else:
126 # Asset is not versioned. Don't cache it, but set Last-Modified.
127 last_modified = get_last_modified(request.fs)
128 response.headers['Last-Modified'] = format_date_time(last_modified)
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gittip/cache_static.py b/gittip/cache_static.py
--- a/gittip/cache_static.py
+++ b/gittip/cache_static.py
@@ -111,6 +111,10 @@
response.headers.cookie.clear()
if response.code == 304:
+
+ # https://github.com/gittip/www.gittip.com/issues/1308
+ del response.headers['Content-Type']
+
return response
if website.cache_static:
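
A plausible reading of the fix (hedged, since the linked issue #1308 is not reproduced here): the bare `Response(304)` raised in `inbound` still carries Aspen's default `Content-Type`, and browsers may refresh the cached entry's metadata from a 304's headers, so the cached `reset.css` can stop being treated as `text/css` and gets ignored. Deleting `Content-Type` from 304 responses in `outbound` leaves the cached metadata alone. A minimal conditional-GET check, with a hypothetical URL:

```python
# Sketch of a revalidation round trip; the URL is hypothetical.
import requests

first = requests.get("https://example.invalid/assets/1.0/reset.css")
revalidated = requests.get(
    "https://example.invalid/assets/1.0/reset.css",
    headers={"If-Modified-Since": first.headers["Last-Modified"]},
)
assert revalidated.status_code == 304
assert "Content-Type" not in revalidated.headers  # the 304 stays type-less
```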
|
{"golden_diff": "diff --git a/gittip/cache_static.py b/gittip/cache_static.py\n--- a/gittip/cache_static.py\n+++ b/gittip/cache_static.py\n@@ -111,6 +111,10 @@\n response.headers.cookie.clear()\n \n if response.code == 304:\n+\n+ # https://github.com/gittip/www.gittip.com/issues/1308\n+ del response.headers['Content-Type']\n+\n return response\n \n if website.cache_static:\n", "issue": "reset.css doesn't load sometimes\n@clone1018 saw this when we first started caching static assets. It's why I turned off static caching initially. Now static caching is back with #1245 and indeed we're seeing this again. :(\n\n\n\n", "before_files": [{"content": "\"\"\"\nHandles caching of static resources.\n\"\"\"\nimport os\nfrom calendar import timegm\nfrom email.utils import parsedate\nfrom wsgiref.handlers import format_date_time\n\nfrom aspen import Response\n\n\ndef version_is_available(request):\n \"\"\"Return a boolean, whether we have the version they asked for.\n \"\"\"\n path = request.line.uri.path\n version = request.website.version\n return path['version'] == version if 'version' in path else True\n\n\ndef version_is_dash(request):\n \"\"\"Return a boolean, whether the version they asked for is -.\n \"\"\"\n return request.line.uri.path.get('version') == '-'\n\n\ndef get_last_modified(fs_path):\n \"\"\"Get the last modified time, as int, of the file pointed to by fs_path.\n \"\"\"\n return int(os.path.getctime(fs_path))\n\n\ndef inbound(request):\n \"\"\"Try to serve a 304 for resources under assets/.\n \"\"\"\n uri = request.line.uri\n\n if not uri.startswith('/assets/'):\n\n # Only apply to the assets/ directory.\n\n return request\n\n if version_is_dash(request):\n\n # Special-case a version of '-' to never 304/404 here.\n\n return request\n\n if not version_is_available(request):\n\n # Don't serve one version of a file as if it were another.\n\n raise Response(404)\n\n ims = request.headers.get('If-Modified-Since')\n if not ims:\n\n # This client doesn't care about when the file was modified.\n\n return request\n\n if request.fs.endswith('.spt'):\n\n # This is a requests for a dynamic resource. Perhaps in the future\n # we'll delegate to such resources to compute a sensible Last-Modified\n # or E-Tag, but for now we punt. This is okay, because we expect to\n # put our dynamic assets behind a CDN in production.\n\n return request\n\n\n try:\n ims = timegm(parsedate(ims))\n except:\n\n # Malformed If-Modified-Since header. Proceed with the request.\n\n return request\n\n last_modified = get_last_modified(request.fs)\n if ims < last_modified:\n\n # The file has been modified since. Serve the whole thing.\n\n return request\n\n\n # Huzzah!\n # =======\n # We can serve a 304! 
:D\n\n response = Response(304)\n response.headers['Last-Modified'] = format_date_time(last_modified)\n response.headers['Cache-Control'] = 'no-cache'\n raise response\n\n\ndef outbound(response):\n \"\"\"Set caching headers for resources under assets/.\n \"\"\"\n request = response.request\n website = request.website\n uri = request.line.uri\n\n version = website.version\n response.headers['X-Gittip-Version'] = version\n\n if not uri.startswith('/assets/'):\n return response\n\n response.headers.cookie.clear()\n\n if response.code == 304:\n return response\n\n if website.cache_static:\n\n # https://developers.google.com/speed/docs/best-practices/caching\n response.headers['Cache-Control'] = 'public'\n response.headers['Vary'] = 'accept-encoding'\n\n if 'version' in uri.path:\n # This specific asset is versioned, so it's fine to cache it.\n response.headers['Expires'] = 'Sun, 17 Jan 2038 19:14:07 GMT'\n else:\n # Asset is not versioned. Don't cache it, but set Last-Modified.\n last_modified = get_last_modified(request.fs)\n response.headers['Last-Modified'] = format_date_time(last_modified)\n", "path": "gittip/cache_static.py"}], "after_files": [{"content": "\"\"\"\nHandles caching of static resources.\n\"\"\"\nimport os\nfrom calendar import timegm\nfrom email.utils import parsedate\nfrom wsgiref.handlers import format_date_time\n\nfrom aspen import Response\n\n\ndef version_is_available(request):\n \"\"\"Return a boolean, whether we have the version they asked for.\n \"\"\"\n path = request.line.uri.path\n version = request.website.version\n return path['version'] == version if 'version' in path else True\n\n\ndef version_is_dash(request):\n \"\"\"Return a boolean, whether the version they asked for is -.\n \"\"\"\n return request.line.uri.path.get('version') == '-'\n\n\ndef get_last_modified(fs_path):\n \"\"\"Get the last modified time, as int, of the file pointed to by fs_path.\n \"\"\"\n return int(os.path.getctime(fs_path))\n\n\ndef inbound(request):\n \"\"\"Try to serve a 304 for resources under assets/.\n \"\"\"\n uri = request.line.uri\n\n if not uri.startswith('/assets/'):\n\n # Only apply to the assets/ directory.\n\n return request\n\n if version_is_dash(request):\n\n # Special-case a version of '-' to never 304/404 here.\n\n return request\n\n if not version_is_available(request):\n\n # Don't serve one version of a file as if it were another.\n\n raise Response(404)\n\n ims = request.headers.get('If-Modified-Since')\n if not ims:\n\n # This client doesn't care about when the file was modified.\n\n return request\n\n if request.fs.endswith('.spt'):\n\n # This is a requests for a dynamic resource. Perhaps in the future\n # we'll delegate to such resources to compute a sensible Last-Modified\n # or E-Tag, but for now we punt. This is okay, because we expect to\n # put our dynamic assets behind a CDN in production.\n\n return request\n\n\n try:\n ims = timegm(parsedate(ims))\n except:\n\n # Malformed If-Modified-Since header. Proceed with the request.\n\n return request\n\n last_modified = get_last_modified(request.fs)\n if ims < last_modified:\n\n # The file has been modified since. Serve the whole thing.\n\n return request\n\n\n # Huzzah!\n # =======\n # We can serve a 304! 
:D\n\n response = Response(304)\n response.headers['Last-Modified'] = format_date_time(last_modified)\n response.headers['Cache-Control'] = 'no-cache'\n raise response\n\n\ndef outbound(response):\n \"\"\"Set caching headers for resources under assets/.\n \"\"\"\n request = response.request\n website = request.website\n uri = request.line.uri\n\n version = website.version\n response.headers['X-Gittip-Version'] = version\n\n if not uri.startswith('/assets/'):\n return response\n\n response.headers.cookie.clear()\n\n if response.code == 304:\n\n # https://github.com/gittip/www.gittip.com/issues/1308\n del response.headers['Content-Type']\n\n return response\n\n if website.cache_static:\n\n # https://developers.google.com/speed/docs/best-practices/caching\n response.headers['Cache-Control'] = 'public'\n response.headers['Vary'] = 'accept-encoding'\n\n if 'version' in uri.path:\n # This specific asset is versioned, so it's fine to cache it.\n response.headers['Expires'] = 'Sun, 17 Jan 2038 19:14:07 GMT'\n else:\n # Asset is not versioned. Don't cache it, but set Last-Modified.\n last_modified = get_last_modified(request.fs)\n response.headers['Last-Modified'] = format_date_time(last_modified)\n", "path": "gittip/cache_static.py"}]}
| 1,486 | 111 |
gh_patches_debug_28903
|
rasdani/github-patches
|
git_diff
|
crytic__slither-252
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect source mappings because of certain (Unicode?) characters in comments
Certain characters (or scripts) in Solidity comments appear to cause incorrect source mappings.
For example, in `0x06012c8cf97bead5deae237070f9587f8e7a266d_KittyCore.sol`, the symbol that looks like an underscore in "email_protected":
```
/// @author Dieter Shirley <<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="6004051405200118090f0d1a050e4e030f">[email_protected]</a>> (https://github.com/dete)
```
Similarly, the Asian characters in the comments below, from `0x5d0d76787d9d564061dd23f8209f804a3b8ad2f2_FoMo3Dlong.sol`, also cause source mapping problems:
```
struct Round {
uint256 plyr; // pID of player in lead, lead领导吗?
uint256 team; // tID of team in lead
uint256 end; // time ends/ended
bool ended; // has round end function been ran 这个开关值得研究下
uint256 strt; // time round started
uint256 keys; // keys
uint256 eth; // total eth in
uint256 pot; // eth to pot (during round) / final amount paid to winner (after round ends)
uint256 mask; // global mask
uint256 ico; // total eth sent in during ICO phase
uint256 icoGen; // total eth for gen during ICO phase
uint256 icoAvg; // average key price for ICO phase
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/core/source_mapping/source_mapping.py`
Content:
```
1 import re
2 import os
3 from slither.core.context.context import Context
4
5 class SourceMapping(Context):
6
7 def __init__(self):
8 super(SourceMapping, self).__init__()
9 self._source_mapping = None
10
11 @property
12 def source_mapping(self):
13 return self._source_mapping
14
15 @staticmethod
16 def _compute_line(source_code, start, length):
17 """
18 Compute line(s) numbers and starting/ending columns
19 from a start/end offset. All numbers start from 1.
20
21 Not done in an efficient way
22 """
23 total_length = len(source_code)
24 source_code = source_code.splitlines(True)
25 counter = 0
26 i = 0
27 lines = []
28 starting_column = None
29 ending_column = None
30 while counter < total_length:
31 # Determine the length of the line, and advance the line number
32 lineLength = len(source_code[i])
33 i = i + 1
34
35 # Determine our column numbers.
36 if starting_column is None and counter + lineLength > start:
37 starting_column = (start - counter) + 1
38 if starting_column is not None and ending_column is None and counter + lineLength > start + length:
39 ending_column = ((start + length) - counter) + 1
40
41 # Advance the current position counter, and determine line numbers.
42 counter += lineLength
43 if counter > start:
44 lines.append(i)
45
46 # If our advanced position for the next line is out of range, stop.
47 if counter > start + length:
48 break
49
50 return (lines, starting_column, ending_column)
51
52 @staticmethod
53 def _convert_source_mapping(offset, slither):
54 '''
55 Convert a text offset to a real offset
56 see https://solidity.readthedocs.io/en/develop/miscellaneous.html#source-mappings
57 Returns:
58 (dict): {'start':0, 'length':0, 'filename': 'file.sol'}
59 '''
60 sourceUnits = slither.source_units
61
62 position = re.findall('([0-9]*):([0-9]*):([-]?[0-9]*)', offset)
63 if len(position) != 1:
64 return {}
65
66 s, l, f = position[0]
67 s = int(s)
68 l = int(l)
69 f = int(f)
70
71 if f not in sourceUnits:
72 return {'start':s, 'length':l}
73 filename_used = sourceUnits[f]
74 filename_absolute = None
75 filename_relative = None
76 filename_short = None
77
78 lines = []
79
80 # If possible, convert the filename to its absolute/relative version
81 if slither.crytic_compile:
82 filenames = slither.crytic_compile.filename_lookup(filename_used)
83 filename_absolute = filenames.absolute
84 filename_relative = filenames.relative
85 filename_short = filenames.short
86
87 if filename_absolute in slither.source_code:
88 filename = filename_absolute
89 elif filename_relative in slither.source_code:
90 filename = filename_relative
91 elif filename_short in slither.source_code:
92 filename = filename_short
93 else:#
94 filename = filename_used.used
95 else:
96 filename = filename_used
97
98 if filename in slither.source_code:
99 source_code = slither.source_code[filename]
100 (lines, starting_column, ending_column) = SourceMapping._compute_line(source_code,
101 s,
102 l)
103 else:
104 (lines, starting_column, ending_column) = ([], None, None)
105
106
107 return {'start':s,
108 'length':l,
109 'filename_used': filename_used,
110 'filename_relative': filename_relative,
111 'filename_absolute': filename_absolute,
112 'filename_short': filename_short,
113 'lines' : lines,
114 'starting_column': starting_column,
115 'ending_column': ending_column
116 }
117
118 def set_offset(self, offset, slither):
119 if isinstance(offset, dict):
120 self._source_mapping = offset
121 else:
122 self._source_mapping = self._convert_source_mapping(offset, slither)
123
124
125 @property
126 def source_mapping_str(self):
127
128 lines = self.source_mapping.get('lines', None)
129 if not lines:
130 lines = ''
131 elif len(lines) == 1:
132 lines = '#{}'.format(lines[0])
133 else:
134 lines = '#{}-{}'.format(lines[0], lines[-1])
135 return '{}{}'.format(self.source_mapping['filename_short'], lines)
136
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/slither/core/source_mapping/source_mapping.py b/slither/core/source_mapping/source_mapping.py
--- a/slither/core/source_mapping/source_mapping.py
+++ b/slither/core/source_mapping/source_mapping.py
@@ -20,6 +20,7 @@
Not done in an efficient way
"""
+ source_code = source_code.encode('utf-8')
total_length = len(source_code)
source_code = source_code.splitlines(True)
counter = 0
@@ -29,17 +30,18 @@
ending_column = None
while counter < total_length:
# Determine the length of the line, and advance the line number
- lineLength = len(source_code[i])
+ line_content = source_code[i]
+ line_length = len(line_content)
i = i + 1
# Determine our column numbers.
- if starting_column is None and counter + lineLength > start:
+ if starting_column is None and counter + line_length > start:
starting_column = (start - counter) + 1
- if starting_column is not None and ending_column is None and counter + lineLength > start + length:
+ if starting_column is not None and ending_column is None and counter + line_length > start + length:
ending_column = ((start + length) - counter) + 1
# Advance the current position counter, and determine line numbers.
- counter += lineLength
+ counter += line_length
if counter > start:
lines.append(i)
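
The root cause the patch addresses: solc emits source mappings as byte offsets into the UTF-8 encoded file, while the original `_compute_line` measured a Python `str`, i.e. code points. Any multi-byte character in a comment (the CJK text or the obfuscated e-mail glyph from the issue) makes the two units drift apart; encoding to UTF-8 first keeps the arithmetic in bytes throughout. A quick demonstration:

```python
# Code points vs. UTF-8 bytes for a comment like the one in the issue.
line = "uint256 plyr; // pID of player in lead, lead领导吗?"
print(len(line))                  # counts characters (code points)
print(len(line.encode("utf-8")))  # counts bytes; larger, which shifts offsets
```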
|
{"golden_diff": "diff --git a/slither/core/source_mapping/source_mapping.py b/slither/core/source_mapping/source_mapping.py\n--- a/slither/core/source_mapping/source_mapping.py\n+++ b/slither/core/source_mapping/source_mapping.py\n@@ -20,6 +20,7 @@\n \n Not done in an efficient way\n \"\"\"\n+ source_code = source_code.encode('utf-8')\n total_length = len(source_code)\n source_code = source_code.splitlines(True)\n counter = 0\n@@ -29,17 +30,18 @@\n ending_column = None\n while counter < total_length:\n # Determine the length of the line, and advance the line number\n- lineLength = len(source_code[i])\n+ line_content = source_code[i]\n+ line_length = len(line_content)\n i = i + 1\n \n # Determine our column numbers.\n- if starting_column is None and counter + lineLength > start:\n+ if starting_column is None and counter + line_length > start:\n starting_column = (start - counter) + 1\n- if starting_column is not None and ending_column is None and counter + lineLength > start + length:\n+ if starting_column is not None and ending_column is None and counter + line_length > start + length:\n ending_column = ((start + length) - counter) + 1\n \n # Advance the current position counter, and determine line numbers.\n- counter += lineLength\n+ counter += line_length\n if counter > start:\n lines.append(i)\n", "issue": "Incorrect source mappings because of certain (Unicode?) characters in comments\nCertain characters (or scripts) in Solidity comments appear to cause incorrect source mappings.\r\n\r\nFor example, in `0x06012c8cf97bead5deae237070f9587f8e7a266d_KittyCore.sol`, the symbol that looks like underscore in \"email_protected\":\r\n```\r\n/// @author Dieter Shirley <<a href=\"/cdn-cgi/l/email-protection\" class=\"__cf_email__\" data-cfemail=\"6004051405200118090f0d1a050e4e030f\">[email_protected]</a>> (https://github.com/dete) \r\n```\r\nSimilarly, the Asian characters in below comments from `0x5d0d76787d9d564061dd23f8209f804a3b8ad2f2_FoMo3Dlong.sol` also cause source mapping problems:\r\n\r\n```\r\nstruct Round {\r\n uint256 plyr; // pID of player in lead\uff0c lead\u9886\u5bfc\u5417\uff1f \r\n uint256 team; // tID of team in lead \r\n uint256 end; // time ends/ended \r\n bool ended; // has round end function been ran \u8fd9\u4e2a\u5f00\u5173\u503c\u5f97\u7814\u7a76\u4e0b \r\n\tuint256 strt; // time round started \r\n\tuint256 keys; // keys \r\n\tuint256 eth; // total eth in \r\n\tuint256 pot; // eth to pot (during round) / final amount paid to winner (after round ends) \r\n uint256 mask; // global mask \r\n uint256 ico; // total eth sent in during ICO phase \r\n uint256 icoGen; // total eth for gen during ICO phase \r\n uint256 icoAvg; // average key price for ICO phase \r\n }\r\n```\n", "before_files": [{"content": "import re\nimport os\nfrom slither.core.context.context import Context\n\nclass SourceMapping(Context):\n\n def __init__(self):\n super(SourceMapping, self).__init__()\n self._source_mapping = None\n\n @property\n def source_mapping(self):\n return self._source_mapping\n\n @staticmethod\n def _compute_line(source_code, start, length):\n \"\"\"\n Compute line(s) numbers and starting/ending columns\n from a start/end offset. 
All numbers start from 1.\n\n Not done in an efficient way\n \"\"\"\n total_length = len(source_code)\n source_code = source_code.splitlines(True)\n counter = 0\n i = 0\n lines = []\n starting_column = None\n ending_column = None\n while counter < total_length:\n # Determine the length of the line, and advance the line number\n lineLength = len(source_code[i])\n i = i + 1\n\n # Determine our column numbers.\n if starting_column is None and counter + lineLength > start:\n starting_column = (start - counter) + 1\n if starting_column is not None and ending_column is None and counter + lineLength > start + length:\n ending_column = ((start + length) - counter) + 1\n\n # Advance the current position counter, and determine line numbers.\n counter += lineLength\n if counter > start:\n lines.append(i)\n\n # If our advanced position for the next line is out of range, stop.\n if counter > start + length:\n break\n\n return (lines, starting_column, ending_column)\n\n @staticmethod\n def _convert_source_mapping(offset, slither):\n '''\n Convert a text offset to a real offset\n see https://solidity.readthedocs.io/en/develop/miscellaneous.html#source-mappings\n Returns:\n (dict): {'start':0, 'length':0, 'filename': 'file.sol'}\n '''\n sourceUnits = slither.source_units\n\n position = re.findall('([0-9]*):([0-9]*):([-]?[0-9]*)', offset)\n if len(position) != 1:\n return {}\n\n s, l, f = position[0]\n s = int(s)\n l = int(l)\n f = int(f)\n\n if f not in sourceUnits:\n return {'start':s, 'length':l}\n filename_used = sourceUnits[f]\n filename_absolute = None\n filename_relative = None\n filename_short = None\n\n lines = []\n\n # If possible, convert the filename to its absolute/relative version\n if slither.crytic_compile:\n filenames = slither.crytic_compile.filename_lookup(filename_used)\n filename_absolute = filenames.absolute\n filename_relative = filenames.relative\n filename_short = filenames.short\n\n if filename_absolute in slither.source_code:\n filename = filename_absolute\n elif filename_relative in slither.source_code:\n filename = filename_relative\n elif filename_short in slither.source_code:\n filename = filename_short\n else:#\n filename = filename_used.used\n else:\n filename = filename_used\n\n if filename in slither.source_code:\n source_code = slither.source_code[filename]\n (lines, starting_column, ending_column) = SourceMapping._compute_line(source_code,\n s,\n l)\n else:\n (lines, starting_column, ending_column) = ([], None, None)\n\n\n return {'start':s,\n 'length':l,\n 'filename_used': filename_used,\n 'filename_relative': filename_relative,\n 'filename_absolute': filename_absolute,\n 'filename_short': filename_short,\n 'lines' : lines,\n 'starting_column': starting_column,\n 'ending_column': ending_column\n }\n\n def set_offset(self, offset, slither):\n if isinstance(offset, dict):\n self._source_mapping = offset\n else:\n self._source_mapping = self._convert_source_mapping(offset, slither)\n\n\n @property\n def source_mapping_str(self):\n\n lines = self.source_mapping.get('lines', None)\n if not lines:\n lines = ''\n elif len(lines) == 1:\n lines = '#{}'.format(lines[0])\n else:\n lines = '#{}-{}'.format(lines[0], lines[-1])\n return '{}{}'.format(self.source_mapping['filename_short'], lines)\n\n", "path": "slither/core/source_mapping/source_mapping.py"}], "after_files": [{"content": "import re\nimport os\nfrom slither.core.context.context import Context\n\nclass SourceMapping(Context):\n\n def __init__(self):\n super(SourceMapping, self).__init__()\n self._source_mapping = 
None\n\n @property\n def source_mapping(self):\n return self._source_mapping\n\n @staticmethod\n def _compute_line(source_code, start, length):\n \"\"\"\n Compute line(s) numbers and starting/ending columns\n from a start/end offset. All numbers start from 1.\n\n Not done in an efficient way\n \"\"\"\n source_code = source_code.encode('utf-8')\n total_length = len(source_code)\n source_code = source_code.splitlines(True)\n counter = 0\n i = 0\n lines = []\n starting_column = None\n ending_column = None\n while counter < total_length:\n # Determine the length of the line, and advance the line number\n line_content = source_code[i]\n line_length = len(line_content)\n i = i + 1\n\n # Determine our column numbers.\n if starting_column is None and counter + line_length > start:\n starting_column = (start - counter) + 1\n if starting_column is not None and ending_column is None and counter + line_length > start + length:\n ending_column = ((start + length) - counter) + 1\n\n # Advance the current position counter, and determine line numbers.\n counter += line_length\n if counter > start:\n lines.append(i)\n\n # If our advanced position for the next line is out of range, stop.\n if counter > start + length:\n break\n\n return (lines, starting_column, ending_column)\n\n @staticmethod\n def _convert_source_mapping(offset, slither):\n '''\n Convert a text offset to a real offset\n see https://solidity.readthedocs.io/en/develop/miscellaneous.html#source-mappings\n Returns:\n (dict): {'start':0, 'length':0, 'filename': 'file.sol'}\n '''\n sourceUnits = slither.source_units\n\n position = re.findall('([0-9]*):([0-9]*):([-]?[0-9]*)', offset)\n if len(position) != 1:\n return {}\n\n s, l, f = position[0]\n s = int(s)\n l = int(l)\n f = int(f)\n\n if f not in sourceUnits:\n return {'start':s, 'length':l}\n filename_used = sourceUnits[f]\n filename_absolute = None\n filename_relative = None\n filename_short = None\n\n lines = []\n\n # If possible, convert the filename to its absolute/relative version\n if slither.crytic_compile:\n filenames = slither.crytic_compile.filename_lookup(filename_used)\n filename_absolute = filenames.absolute\n filename_relative = filenames.relative\n filename_short = filenames.short\n\n if filename_absolute in slither.source_code:\n filename = filename_absolute\n elif filename_relative in slither.source_code:\n filename = filename_relative\n elif filename_short in slither.source_code:\n filename = filename_short\n else:#\n filename = filename_used.used\n else:\n filename = filename_used\n\n if filename in slither.source_code:\n source_code = slither.source_code[filename]\n (lines, starting_column, ending_column) = SourceMapping._compute_line(source_code,\n s,\n l)\n else:\n (lines, starting_column, ending_column) = ([], None, None)\n\n\n return {'start':s,\n 'length':l,\n 'filename_used': filename_used,\n 'filename_relative': filename_relative,\n 'filename_absolute': filename_absolute,\n 'filename_short': filename_short,\n 'lines' : lines,\n 'starting_column': starting_column,\n 'ending_column': ending_column\n }\n\n def set_offset(self, offset, slither):\n if isinstance(offset, dict):\n self._source_mapping = offset\n else:\n self._source_mapping = self._convert_source_mapping(offset, slither)\n\n\n @property\n def source_mapping_str(self):\n\n lines = self.source_mapping.get('lines', None)\n if not lines:\n lines = ''\n elif len(lines) == 1:\n lines = '#{}'.format(lines[0])\n else:\n lines = '#{}-{}'.format(lines[0], lines[-1])\n return 
'{}{}'.format(self.source_mapping['filename_short'], lines)\n\n", "path": "slither/core/source_mapping/source_mapping.py"}]}
| 1,986 | 332 |
gh_patches_debug_30152
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-9973
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Setting WAGTAILIMAGES_RENDITION_STORAGE generates a migration in wagtailimages
### Issue Summary
Running `./manage.py makemigrations` while WAGTAILIMAGES_RENDITION_STORAGE is set to something other than the default storage causes a migration to be generated within the wagtailimages app
### Steps to Reproduce
1. (for example) Start a new project with `wagtail start myproject`
2. Run `./manage.py migrate` and `./manage.py makemigrations`; this outputs "No changes detected"
3. `pip install django-storages`
4. Add the line `WAGTAILIMAGES_RENDITION_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"` to myproject/settings/base.py
5. Run `./manage.py makemigrations`; this generates a migration `wagtail/images/migrations/0026_alter_rendition_file.py` that adds a `storage` argument to the Rendition.file field.
- I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes
### Technical details
- Python version: 3.8.0
- Django version: 4.1.3
- Wagtail version: main (4.2a0, 4b770784ca68f22d5ea58ecbd01e5c8c13882a3d)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py`
Content:
```
1 # Generated by Django 4.0.7 on 2022-08-10 16:26
2
3 from django.db import migrations
4 import wagtail.images.models
5
6
7 class Migration(migrations.Migration):
8
9 dependencies = [
10 ("wagtailimages", "0024_index_image_file_hash"),
11 ]
12
13 operations = [
14 migrations.AlterField(
15 model_name="image",
16 name="file",
17 field=wagtail.images.models.WagtailImageField(
18 height_field="height",
19 upload_to=wagtail.images.models.get_upload_to,
20 verbose_name="file",
21 width_field="width",
22 ),
23 ),
24 migrations.AlterField(
25 model_name="rendition",
26 name="file",
27 field=wagtail.images.models.WagtailImageField(
28 height_field="height",
29 upload_to=wagtail.images.models.get_rendition_upload_to,
30 width_field="width",
31 ),
32 ),
33 ]
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py b/wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py
--- a/wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py
+++ b/wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py
@@ -1,5 +1,6 @@
# Generated by Django 4.0.7 on 2022-08-10 16:26
+from django import VERSION as DJANGO_VERSION
from django.db import migrations
import wagtail.images.models
@@ -10,6 +11,19 @@
("wagtailimages", "0024_index_image_file_hash"),
]
+ rendition_file_options = {
+ "height_field": "height",
+ "upload_to": wagtail.images.models.get_rendition_upload_to,
+ "width_field": "width",
+ }
+ # See https://code.djangoproject.com/ticket/34192 - prior to Django 4.2, a callable storage
+ # argument that returns default_storage would be incorrectly omitted from the deconstructed
+ # field. We need to match that behaviour and include/omit it accordingly to prevent
+ # makemigrations from seeing a difference and generating a spurious migration in
+ # wagtail.images.
+ if DJANGO_VERSION >= (4, 2):
+ rendition_file_options["storage"] = wagtail.images.models.get_rendition_storage
+
operations = [
migrations.AlterField(
model_name="image",
@@ -24,10 +38,6 @@
migrations.AlterField(
model_name="rendition",
name="file",
- field=wagtail.images.models.WagtailImageField(
- height_field="height",
- upload_to=wagtail.images.models.get_rendition_upload_to,
- width_field="width",
- ),
+ field=wagtail.images.models.WagtailImageField(**rendition_file_options),
),
]
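
What the version gate compensates for: before Django 4.2, a callable `storage` argument was deconstructed based on what it returned at that moment (Django ticket 34192), so with the default storage it vanished from the migration state, and with a custom `WAGTAILIMAGES_RENDITION_STORAGE` it reappeared, which is exactly what made `makemigrations` see a change. From 4.2 the callable itself is kept, so the historical migration must include the kwarg only there. A hedged way to observe this in a project:

```python
# Inspect how the rendition file field deconstructs on the running Django.
# Model and field names are taken from the issue; the result depends on the
# Django version and on WAGTAILIMAGES_RENDITION_STORAGE, as the patch
# comment explains.
from django import VERSION as DJANGO_VERSION
from wagtail.images.models import Rendition

name, path, args, kwargs = Rendition._meta.get_field("file").deconstruct()
print(DJANGO_VERSION, "storage" in kwargs)
```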
|
{"golden_diff": "diff --git a/wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py b/wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py\n--- a/wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py\n+++ b/wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py\n@@ -1,5 +1,6 @@\n # Generated by Django 4.0.7 on 2022-08-10 16:26\r\n \r\n+from django import VERSION as DJANGO_VERSION\r\n from django.db import migrations\r\n import wagtail.images.models\r\n \r\n@@ -10,6 +11,19 @@\n (\"wagtailimages\", \"0024_index_image_file_hash\"),\r\n ]\r\n \r\n+ rendition_file_options = {\r\n+ \"height_field\": \"height\",\r\n+ \"upload_to\": wagtail.images.models.get_rendition_upload_to,\r\n+ \"width_field\": \"width\",\r\n+ }\r\n+ # See https://code.djangoproject.com/ticket/34192 - prior to Django 4.2, a callable storage\r\n+ # argument that returns default_storage would be incorrectly omitted from the deconstructed\r\n+ # field. We need to match that behaviour and include/omit it accordingly to prevent\r\n+ # makemigrations from seeing a difference and generating a spurious migration in\r\n+ # wagtail.images.\r\n+ if DJANGO_VERSION >= (4, 2):\r\n+ rendition_file_options[\"storage\"] = wagtail.images.models.get_rendition_storage\r\n+\r\n operations = [\r\n migrations.AlterField(\r\n model_name=\"image\",\r\n@@ -24,10 +38,6 @@\n migrations.AlterField(\r\n model_name=\"rendition\",\r\n name=\"file\",\r\n- field=wagtail.images.models.WagtailImageField(\r\n- height_field=\"height\",\r\n- upload_to=wagtail.images.models.get_rendition_upload_to,\r\n- width_field=\"width\",\r\n- ),\r\n+ field=wagtail.images.models.WagtailImageField(**rendition_file_options),\r\n ),\r\n ]\n", "issue": "Setting WAGTAILIMAGES_RENDITION_STORAGE generates a migration in wagtailimages\n### Issue Summary\r\n\r\nRunning `./manage.py makemigrations` while WAGTAILIMAGES_RENDITION_STORAGE is set to something other than the default storage causes a migration to be generated within the wagtailimages app\r\n\r\n### Steps to Reproduce\r\n\r\n1. (for example) Start a new project with `wagtail start myproject`\r\n2. Run `./manage.py migrate` and `./manage.py makemigrations`; this outputs \"No changes detected\"\r\n3. `pip install django-storages`\r\n4. Add the line `WAGTAILIMAGES_RENDITION_STORAGE = \"storages.backends.s3boto3.S3Boto3Storage\"` to myproject/settings/base.py\r\n5. 
Run `./manage.py makemigrations`; this generates a migration `wagtail/images/migrations/0026_alter_rendition_file.py` that adds a `storage` argument to the Rendition.file field.\r\n\r\n- I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes\r\n\r\n### Technical details\r\n\r\n- Python version: 3.8.0\r\n- Django version: 4.1.3\r\n- Wagtail version: main (4.2a0, 4b770784ca68f22d5ea58ecbd01e5c8c13882a3d)\r\n\n", "before_files": [{"content": "# Generated by Django 4.0.7 on 2022-08-10 16:26\r\n\r\nfrom django.db import migrations\r\nimport wagtail.images.models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n (\"wagtailimages\", \"0024_index_image_file_hash\"),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name=\"image\",\r\n name=\"file\",\r\n field=wagtail.images.models.WagtailImageField(\r\n height_field=\"height\",\r\n upload_to=wagtail.images.models.get_upload_to,\r\n verbose_name=\"file\",\r\n width_field=\"width\",\r\n ),\r\n ),\r\n migrations.AlterField(\r\n model_name=\"rendition\",\r\n name=\"file\",\r\n field=wagtail.images.models.WagtailImageField(\r\n height_field=\"height\",\r\n upload_to=wagtail.images.models.get_rendition_upload_to,\r\n width_field=\"width\",\r\n ),\r\n ),\r\n ]\r\n", "path": "wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py"}], "after_files": [{"content": "# Generated by Django 4.0.7 on 2022-08-10 16:26\r\n\r\nfrom django import VERSION as DJANGO_VERSION\r\nfrom django.db import migrations\r\nimport wagtail.images.models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n (\"wagtailimages\", \"0024_index_image_file_hash\"),\r\n ]\r\n\r\n rendition_file_options = {\r\n \"height_field\": \"height\",\r\n \"upload_to\": wagtail.images.models.get_rendition_upload_to,\r\n \"width_field\": \"width\",\r\n }\r\n # See https://code.djangoproject.com/ticket/34192 - prior to Django 4.2, a callable storage\r\n # argument that returns default_storage would be incorrectly omitted from the deconstructed\r\n # field. We need to match that behaviour and include/omit it accordingly to prevent\r\n # makemigrations from seeing a difference and generating a spurious migration in\r\n # wagtail.images.\r\n if DJANGO_VERSION >= (4, 2):\r\n rendition_file_options[\"storage\"] = wagtail.images.models.get_rendition_storage\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name=\"image\",\r\n name=\"file\",\r\n field=wagtail.images.models.WagtailImageField(\r\n height_field=\"height\",\r\n upload_to=wagtail.images.models.get_upload_to,\r\n verbose_name=\"file\",\r\n width_field=\"width\",\r\n ),\r\n ),\r\n migrations.AlterField(\r\n model_name=\"rendition\",\r\n name=\"file\",\r\n field=wagtail.images.models.WagtailImageField(**rendition_file_options),\r\n ),\r\n ]\r\n", "path": "wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py"}]}
| 857 | 482 |
gh_patches_debug_30208
|
rasdani/github-patches
|
git_diff
|
microsoft__DeepSpeed-3348
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Size of saved model checkpoint becomes much larger after deepspeed.initialize when using ZeRO-2
**Describe the bug**
Originally reported [here](https://github.com/huggingface/transformers/issues/22822). @stas00 @tjruwase
For some models, the size of model checkpoints saved by `model.save_pretrained()` becomes much larger after calling `deepspeed.initialize`. See examples below.
**To Reproduce**
```python
from transformers import AutoModelForCausalLM
import deepspeed
ds_config = {
"optimizer": {
"type": "AdamW",
},
"zero_optimization": {
"stage": 2,
"offload_optimizer": {
"device": "cpu",
"pin_memory": True
},
"allgather_partitions": True,
"allgather_bucket_size": 2e8,
"overlap_comm": True,
"reduce_scatter": True,
"reduce_bucket_size": 2e8,
"contiguous_gradients": True
},
"offload_optimizer": {
"device": "cpu",
"pin_memory": True
},
"train_batch_size": 1,
"train_micro_batch_size_per_gpu": 1
}
model = AutoModelForCausalLM.from_pretrained("decapoda-research/llama-7b-hf")
model.save_pretrained("before")
deepspeed_engine, _, _, _ = deepspeed.initialize(model=model, config_params=ds_config)
deepspeed_engine.module.save_pretrained("after")
```
File sizes:
```bash
du -a -h --max-depth=1 before/
512 before/config.json
32K before/pytorch_model.bin.index.json
9.2G before/pytorch_model-00001-of-00003.bin
9.3G before/pytorch_model-00002-of-00003.bin
6.7G before/pytorch_model-00003-of-00003.bin
512 before/generation_config.json
26G before/
du -a -h --max-depth=1 after/
512 after/config.json
32K after/pytorch_model.bin.index.json
26G after/pytorch_model-00001-of-00003.bin
26G after/pytorch_model-00002-of-00003.bin
26G after/pytorch_model-00003-of-00003.bin
512 after/generation_config.json
76G after/
```
This issue does not always occur; for example, `gpt2` does not have this problem. But in my tests, both `decapoda-research/llama-7b-hf` and `decapoda-research/llama-13b-hf` have this issue.
This can be fixed by re-cloning the state dict before saving:
```python
state_dict = deepspeed_engine.module.state_dict()
state_dict = type(state_dict)({k: v.clone() for k, v in state_dict.items()})
deepspeed_engine.module.save_pretrained("after_fixed", state_dict=state_dict)
```
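A minimal, self-contained sketch (not part of the original report) of why the cloning helps: `torch.save()` serializes a tensor's entire underlying storage, so a small view into a large flattened buffer drags the whole buffer into the checkpoint unless it is cloned first.
```python
import io
import torch

big = torch.zeros(10_000_000)   # large flattened buffer (~40 MB of float32)
view = big[:10]                 # tiny view that still shares big's storage

def saved_bytes(t):
    buf = io.BytesIO()
    torch.save(t, buf)
    return buf.getbuffer().nbytes

print(saved_bytes(view))          # ~40 MB: the whole shared storage is written
print(saved_bytes(view.clone()))  # tiny: only the 10 cloned elements plus format overhead
```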
**Expected behavior**
The saved model size should be unchanged after `deepspeed.initialize`
**System info (please complete the following information):**
- deepspeed: 0.8.3
- transformers version: 4.28.0.dev0
- Platform: Linux-4.18.0-372.32.1.el8_6.x86_64-x86_64-with-glibc2.17
- Python version: 3.8.16
- Huggingface_hub version: 0.13.3
- Safetensors version: not installed
- PyTorch version (GPU?): 1.12.1+cu116 (True)
- Tensorflow version (GPU?): not installed (NA)
- Flax version (CPU?/GPU?/TPU?): not installed (NA)
- Jax version: not installed
- JaxLib version: not installed
- Using GPU in script?: yes
- Using distributed or parallel set-up in script?: yes
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deepspeed/checkpoint/utils.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 # SPDX-License-Identifier: Apache-2.0
3
4 # DeepSpeed Team
5
6 import os
7 from .constants import (MODEL_FILE_PREFIX, MODEL_FILE_SUFFIX, OPTIM_FILE_SUFFIX, ZERO_FILE_PREFIX)
8
9
10 def get_model_ckpt_name_for_rank(base_folder, mp_rank_str):
11 ckpt_name = os.path.join(
12 base_folder,
13 MODEL_FILE_PREFIX + mp_rank_str + MODEL_FILE_SUFFIX,
14 )
15 return ckpt_name
16
17
18 def get_zero_ckpt_name_for_rank(base_folder, dp_rank, mp_rank):
19 zero_prefix = f'{ZERO_FILE_PREFIX}{dp_rank}'
20 mp_rank_string = f'_{MODEL_FILE_PREFIX}{mp_rank:02d}'
21 zero_ckpt_name = os.path.join(
22 base_folder,
23 zero_prefix + mp_rank_string + OPTIM_FILE_SUFFIX,
24 )
25 return zero_ckpt_name
26
27
28 def get_layer_ckpt_name_for_rank(base_folder, layer_id, tp_rank):
29 ckpt_file = f'{layer_id}-model_{tp_rank:02d}{MODEL_FILE_SUFFIX}'
30 ckpt_path = os.path.join(base_folder, ckpt_file)
31 return ckpt_path
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/deepspeed/checkpoint/utils.py b/deepspeed/checkpoint/utils.py
--- a/deepspeed/checkpoint/utils.py
+++ b/deepspeed/checkpoint/utils.py
@@ -4,6 +4,7 @@
# DeepSpeed Team
import os
+import torch
from .constants import (MODEL_FILE_PREFIX, MODEL_FILE_SUFFIX, OPTIM_FILE_SUFFIX, ZERO_FILE_PREFIX)
@@ -29,3 +30,33 @@
ckpt_file = f'{layer_id}-model_{tp_rank:02d}{MODEL_FILE_SUFFIX}'
ckpt_path = os.path.join(base_folder, ckpt_file)
return ckpt_path
+
+
+# We pass cloned tensors to torch.save() to avoid checkpoint bloat that occurs when torch.save()
+# saves the underlying storage rather than the slice of the storage corresponding to individual tensors.
+# This is a problem in DeepSpeed because we often allocate tensors using slices of large flattened buffers.
+# Tensor cloning helps to avoid this problem because the storage of cloned tensors are closer to the true size.
+# It is expected that the garbage collector will reclaim the cloned tensor storage to avoid memory bloat.
+# See https://pytorch.org/docs/stable/notes/serialization.html#preserve-storage-sharing
+def clone_tensors_for_torch_save(item, device=torch.device('cpu')):
+ """
+ Returns a copy of ``item`` with all enclosed tensors replaced by clones on a specified device.
+ Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts.
+
+ Parameters:
+ - ``item``: tensor to clone or (possibly nested) container of tensors to clone.
+ - ``device``: target device (defaults to 'cpu')
+
+ Returns:
+ - copy of ``item`` with cloned tensors on target device
+ """
+ if torch.is_tensor(item):
+ return item.detach().clone().to(device)
+ elif isinstance(item, list):
+ return [clone_tensors_for_torch_save(v, device) for v in item]
+ elif isinstance(item, tuple):
+ return tuple([clone_tensors_for_torch_save(v, device) for v in item])
+ elif isinstance(item, dict):
+ return type(item)({k: clone_tensors_for_torch_save(v, device) for k, v in item.items()})
+ else:
+ return item
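A hypothetical usage sketch of the helper added above, assuming it is importable from `deepspeed.checkpoint.utils` after this patch:
```python
from deepspeed.checkpoint.utils import clone_tensors_for_torch_save

# Clone the flat-buffer-backed tensors so torch.save() only serializes
# the data each tensor actually owns, keeping the checkpoint size normal.
state_dict = clone_tensors_for_torch_save(deepspeed_engine.module.state_dict())
deepspeed_engine.module.save_pretrained("after_fixed", state_dict=state_dict)
```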
|
{"golden_diff": "diff --git a/deepspeed/checkpoint/utils.py b/deepspeed/checkpoint/utils.py\n--- a/deepspeed/checkpoint/utils.py\n+++ b/deepspeed/checkpoint/utils.py\n@@ -4,6 +4,7 @@\n # DeepSpeed Team\n \n import os\n+import torch\n from .constants import (MODEL_FILE_PREFIX, MODEL_FILE_SUFFIX, OPTIM_FILE_SUFFIX, ZERO_FILE_PREFIX)\n \n \n@@ -29,3 +30,33 @@\n ckpt_file = f'{layer_id}-model_{tp_rank:02d}{MODEL_FILE_SUFFIX}'\n ckpt_path = os.path.join(base_folder, ckpt_file)\n return ckpt_path\n+\n+\n+# We pass cloned tensors to torch.save() to avoid checkpoint bloat that occurs when torch.save()\n+# saves the underlying storage rather than the slice of the storage corresponding to individual tensors.\n+# This is a problem in DeepSpeed because we often allocate tensors using slices of large flattened buffers.\n+# Tensor cloning helps to avoid this problem because the storage of cloned tensors are closer to the true size.\n+# It is expected that the garbage collector will reclaim the cloned tensor storage to avoid memory bloat.\n+# See https://pytorch.org/docs/stable/notes/serialization.html#preserve-storage-sharing\n+def clone_tensors_for_torch_save(item, device=torch.device('cpu')):\n+ \"\"\"\n+ Returns a copy of ``item`` with all enclosed tensors replaced by clones on a specified device.\n+ Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts.\n+\n+ Parameters:\n+ - ``item``: tensor to clone or (possibly nested) container of tensors to clone.\n+ - ``device``: target device (defaults to 'cpu')\n+\n+ Returns:\n+ - copy of ``item`` with cloned tensors on target device\n+ \"\"\"\n+ if torch.is_tensor(item):\n+ return item.detach().clone().to(device)\n+ elif isinstance(item, list):\n+ return [clone_tensors_for_torch_save(v, device) for v in item]\n+ elif isinstance(item, tuple):\n+ return tuple([clone_tensors_for_torch_save(v, device) for v in item])\n+ elif isinstance(item, dict):\n+ return type(item)({k: clone_tensors_for_torch_save(v, device) for k, v in item.items()})\n+ else:\n+ return item\n", "issue": "[BUG] Size of saved model checkpoint becomes much larger after deepspeed.initialize when using ZeRO-2\n**Describe the bug**\r\nOriginally reported [here](https://github.com/huggingface/transformers/issues/22822). @stas00 @tjruwase\r\n\r\nFor some models, the size of model checkpoints saved by `model.save_prtrained()` becomes much larger after calling `deepspeed.initialize`. 
See examples below.\r\n\r\n\r\n**To Reproduce**\r\n```python\r\nfrom transformers import AutoModelForCausalLM\r\nimport deepspeed\r\n\r\nds_config = {\r\n \"optimizer\": {\r\n \"type\": \"AdamW\",\r\n },\r\n \"zero_optimization\": {\r\n \"stage\": 2,\r\n \"offload_optimizer\": {\r\n \"device\": \"cpu\",\r\n \"pin_memory\": True\r\n },\r\n \"allgather_partitions\": True,\r\n \"allgather_bucket_size\": 2e8,\r\n \"overlap_comm\": True,\r\n \"reduce_scatter\": True,\r\n \"reduce_bucket_size\": 2e8,\r\n \"contiguous_gradients\": True\r\n },\r\n \"offload_optimizer\": {\r\n \"device\": \"cpu\",\r\n \"pin_memory\": True\r\n },\r\n \"train_batch_size\": 1,\r\n \"train_micro_batch_size_per_gpu\": 1\r\n}\r\n\r\nmodel = AutoModelForCausalLM.from_pretrained(\"decapoda-research/llama-7b-hf\")\r\nmodel.save_pretrained(\"before\")\r\ndeepspeed_engine, _, _, _ = deepspeed.initialize(model=model, config_params=ds_config)\r\ndeepspeed_engine.module.save_pretrained(\"after\")\r\n```\r\n\r\nFile sizes:\r\n\r\n```bash\r\ndu -a -h --max-depth=1 before/\r\n512 before/config.json\r\n32K before/pytorch_model.bin.index.json\r\n9.2G before/pytorch_model-00001-of-00003.bin\r\n9.3G before/pytorch_model-00002-of-00003.bin\r\n6.7G before/pytorch_model-00003-of-00003.bin\r\n512 before/generation_config.json\r\n26G before/\r\n\r\ndu -a -h --max-depth=1 after/\r\n512 after/config.json\r\n32K after/pytorch_model.bin.index.json\r\n26G after/pytorch_model-00001-of-00003.bin\r\n26G after/pytorch_model-00002-of-00003.bin\r\n26G after/pytorch_model-00003-of-00003.bin\r\n512 after/generation_config.json\r\n76G after/\r\n```\r\n\r\nThis issue is not always occurred, for example, `gpt2` does not have this problem. But I tested `decapoda-research/llama-7b-hf`, and `decapoda-research/llama-13b-hf` have this issue.\r\n\r\nThis can be fixed by re-clone states before the saving:\r\n```python\r\nstate_dict = deepspeed_engine.module.state_dict()\r\nstate_dict = type(state_dict)(\r\n {k: v.clone()\r\n for k,\r\n v in state_dict.items()})\r\ndeepspeed_engine.module.save_pretrained(\"after_fixed\", state_dict=state_dict)\r\n```\r\n\r\n**Expected behavior**\r\nThe saved model size should be unchanged after `deepspeed.initialize`\r\n\r\n**System info (please complete the following information):**\r\n- deepspeed: 0.8.3\r\n- transformers version: 4.28.0.dev0\r\n- Platform: Linux-4.18.0-372.32.1.el8_6.x86_64-x86_64-with-glibc2.17\r\n- Python version: 3.8.16\r\n- Huggingface_hub version: 0.13.3\r\n- Safetensors version: not installed\r\n- PyTorch version (GPU?): 1.12.1+cu116 (True)\r\n- Tensorflow version (GPU?): not installed (NA)\r\n- Flax version (CPU?/GPU?/TPU?): not installed (NA)\r\n- Jax version: not installed\r\n- JaxLib version: not installed\r\n- Using GPU in script?: yes\r\n- Using distributed or parallel set-up in script?: yes\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nimport os\nfrom .constants import (MODEL_FILE_PREFIX, MODEL_FILE_SUFFIX, OPTIM_FILE_SUFFIX, ZERO_FILE_PREFIX)\n\n\ndef get_model_ckpt_name_for_rank(base_folder, mp_rank_str):\n ckpt_name = os.path.join(\n base_folder,\n MODEL_FILE_PREFIX + mp_rank_str + MODEL_FILE_SUFFIX,\n )\n return ckpt_name\n\n\ndef get_zero_ckpt_name_for_rank(base_folder, dp_rank, mp_rank):\n zero_prefix = f'{ZERO_FILE_PREFIX}{dp_rank}'\n mp_rank_string = f'_{MODEL_FILE_PREFIX}{mp_rank:02d}'\n zero_ckpt_name = os.path.join(\n base_folder,\n zero_prefix + mp_rank_string + OPTIM_FILE_SUFFIX,\n )\n 
return zero_ckpt_name\n\n\ndef get_layer_ckpt_name_for_rank(base_folder, layer_id, tp_rank):\n ckpt_file = f'{layer_id}-model_{tp_rank:02d}{MODEL_FILE_SUFFIX}'\n ckpt_path = os.path.join(base_folder, ckpt_file)\n return ckpt_path\n", "path": "deepspeed/checkpoint/utils.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nimport os\nimport torch\nfrom .constants import (MODEL_FILE_PREFIX, MODEL_FILE_SUFFIX, OPTIM_FILE_SUFFIX, ZERO_FILE_PREFIX)\n\n\ndef get_model_ckpt_name_for_rank(base_folder, mp_rank_str):\n ckpt_name = os.path.join(\n base_folder,\n MODEL_FILE_PREFIX + mp_rank_str + MODEL_FILE_SUFFIX,\n )\n return ckpt_name\n\n\ndef get_zero_ckpt_name_for_rank(base_folder, dp_rank, mp_rank):\n zero_prefix = f'{ZERO_FILE_PREFIX}{dp_rank}'\n mp_rank_string = f'_{MODEL_FILE_PREFIX}{mp_rank:02d}'\n zero_ckpt_name = os.path.join(\n base_folder,\n zero_prefix + mp_rank_string + OPTIM_FILE_SUFFIX,\n )\n return zero_ckpt_name\n\n\ndef get_layer_ckpt_name_for_rank(base_folder, layer_id, tp_rank):\n ckpt_file = f'{layer_id}-model_{tp_rank:02d}{MODEL_FILE_SUFFIX}'\n ckpt_path = os.path.join(base_folder, ckpt_file)\n return ckpt_path\n\n\n# We pass cloned tensors to torch.save() to avoid checkpoint bloat that occurs when torch.save()\n# saves the underlying storage rather than the slice of the storage corresponding to individual tensors.\n# This is a problem in DeepSpeed because we often allocate tensors using slices of large flattened buffers.\n# Tensor cloning helps to avoid this problem because the storage of cloned tensors are closer to the true size.\n# It is expected that the garbage collector will reclaim the cloned tensor storage to avoid memory bloat.\n# See https://pytorch.org/docs/stable/notes/serialization.html#preserve-storage-sharing\ndef clone_tensors_for_torch_save(item, device=torch.device('cpu')):\n \"\"\"\n Returns a copy of ``item`` with all enclosed tensors replaced by clones on a specified device.\n Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts.\n\n Parameters:\n - ``item``: tensor to clone or (possibly nested) container of tensors to clone.\n - ``device``: target device (defaults to 'cpu')\n\n Returns:\n - copy of ``item`` with cloned tensors on target device\n \"\"\"\n if torch.is_tensor(item):\n return item.detach().clone().to(device)\n elif isinstance(item, list):\n return [clone_tensors_for_torch_save(v, device) for v in item]\n elif isinstance(item, tuple):\n return tuple([clone_tensors_for_torch_save(v, device) for v in item])\n elif isinstance(item, dict):\n return type(item)({k: clone_tensors_for_torch_save(v, device) for k, v in item.items()})\n else:\n return item\n", "path": "deepspeed/checkpoint/utils.py"}]}
| 1,516 | 504 |
gh_patches_debug_7862
|
rasdani/github-patches
|
git_diff
|
coala__coala-bears-2136
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Set setup.py url = http://coala.io/
difficulty/newcomer
Opened by @jayvdb at [Gitter](https://gitter.im/coala/coala?at=5a1181aff257ad9109b396a0)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import locale
4 import sys
5 from subprocess import call
6
7 import setuptools.command.build_py
8 from bears import Constants
9 from setuptools import find_packages, setup
10 from setuptools.command.test import test as TestCommand
11
12 try:
13 locale.getlocale()
14 except (ValueError, UnicodeError):
15 locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
16
17
18 class PyTestCommand(TestCommand):
19
20 def run_tests(self):
21 # import here, cause outside the eggs aren't loaded
22 import pytest
23 errno = pytest.main([])
24 sys.exit(errno)
25
26
27 class BuildDocsCommand(setuptools.command.build_py.build_py):
28 apidoc_command = ('sphinx-apidoc', '-f', '-o', 'docs/API',
29 'bears')
30 make_command = ('make', '-C', 'docs', 'html', 'SPHINXOPTS=-W')
31
32 def run(self):
33 err_no = call(self.apidoc_command)
34 if not err_no:
35 err_no = call(self.make_command)
36 sys.exit(err_no)
37
38
39 with open('requirements.txt') as requirements:
40 required = requirements.read().splitlines()
41 required.remove('-r bear-requirements.txt')
42
43 with open('bear-requirements.txt') as requirements:
44 bear_required = requirements.read().splitlines()
45
46 with open('test-requirements.txt') as requirements:
47 test_required = requirements.read().splitlines()
48
49 with open('ignore.txt') as ignore:
50 ignore_requirements = ignore.read().splitlines()
51
52 with open('README.rst') as readme:
53 long_description = readme.read()
54
55 extras_require = {
56 'alldeps': bear_required,
57 }
58
59 # For the average user we leave out some of the more complicated requirements,
60 # e.g. language-check (needs java).
61 required += [req for req in bear_required
62 if not any(req.startswith(ignore)
63 for ignore in ignore_requirements)]
64
65
66 if __name__ == '__main__':
67 setup(name='coala-bears',
68 version=Constants.VERSION,
69 description='Bears for coala (Code Analysis Application)',
70 author='The coala developers',
71 maintainer='Lasse Schuirmann, Fabian Neuschmidt, Mischa Kr\xfcger',
72 maintainer_email=('[email protected], '
73 '[email protected], '
74 '[email protected]'),
75 url='http://coala.rtfd.org/',
76 platforms='any',
77 packages=find_packages(exclude=('build.*', 'tests', 'tests.*')),
78 install_requires=required,
79 extras_require=extras_require,
80 tests_require=test_required,
81 package_data={'bears': ['VERSION'],
82 'bears.java': ['checkstyle.jar', 'google_checks.xml'],
83 'bears.scala': ['scalastyle.jar',
84 'scalastyle_config.xml']},
85 license='AGPL-3.0',
86 long_description=long_description,
87 entry_points={'coalabears': ['coala_official_bears = bears']},
88 # from http://pypi.python.org/pypi?%3Aaction=list_classifiers
89 classifiers=[
90 'Development Status :: 4 - Beta',
91
92 'Environment :: Plugins',
93 'Environment :: MacOS X',
94 'Environment :: Win32 (MS Windows)',
95 'Environment :: X11 Applications :: Gnome',
96
97 'Intended Audience :: Science/Research',
98 'Intended Audience :: Developers',
99
100 'License :: OSI Approved :: GNU Affero General Public License '
101 'v3 or later (AGPLv3+)',
102
103 'Operating System :: OS Independent',
104
105 'Programming Language :: Python :: Implementation :: CPython',
106 'Programming Language :: Python :: 3.4',
107 'Programming Language :: Python :: 3.5',
108 'Programming Language :: Python :: 3 :: Only',
109
110 'Topic :: Scientific/Engineering :: Information Analysis',
111 'Topic :: Software Development :: Quality Assurance',
112 'Topic :: Text Processing :: Linguistic'],
113 cmdclass={'docs': BuildDocsCommand,
114 'test': PyTestCommand})
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -72,7 +72,7 @@
maintainer_email=('[email protected], '
'[email protected], '
'[email protected]'),
- url='http://coala.rtfd.org/',
+ url='http://coala.io/',
platforms='any',
packages=find_packages(exclude=('build.*', 'tests', 'tests.*')),
install_requires=required,
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -72,7 +72,7 @@\n maintainer_email=('[email protected], '\n '[email protected], '\n '[email protected]'),\n- url='http://coala.rtfd.org/',\n+ url='http://coala.io/',\n platforms='any',\n packages=find_packages(exclude=('build.*', 'tests', 'tests.*')),\n install_requires=required,\n", "issue": "Set setup.py url = http://coala.io/\ndifficulty/newcomer\nOpened by @jayvdb at [Gitter](https://gitter.im/coala/coala?at=5a1181aff257ad9109b396a0)\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport locale\nimport sys\nfrom subprocess import call\n\nimport setuptools.command.build_py\nfrom bears import Constants\nfrom setuptools import find_packages, setup\nfrom setuptools.command.test import test as TestCommand\n\ntry:\n locale.getlocale()\nexcept (ValueError, UnicodeError):\n locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')\n\n\nclass PyTestCommand(TestCommand):\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main([])\n sys.exit(errno)\n\n\nclass BuildDocsCommand(setuptools.command.build_py.build_py):\n apidoc_command = ('sphinx-apidoc', '-f', '-o', 'docs/API',\n 'bears')\n make_command = ('make', '-C', 'docs', 'html', 'SPHINXOPTS=-W')\n\n def run(self):\n err_no = call(self.apidoc_command)\n if not err_no:\n err_no = call(self.make_command)\n sys.exit(err_no)\n\n\nwith open('requirements.txt') as requirements:\n required = requirements.read().splitlines()\n required.remove('-r bear-requirements.txt')\n\nwith open('bear-requirements.txt') as requirements:\n bear_required = requirements.read().splitlines()\n\nwith open('test-requirements.txt') as requirements:\n test_required = requirements.read().splitlines()\n\nwith open('ignore.txt') as ignore:\n ignore_requirements = ignore.read().splitlines()\n\nwith open('README.rst') as readme:\n long_description = readme.read()\n\nextras_require = {\n 'alldeps': bear_required,\n}\n\n# For the average user we leave out some of the more complicated requirements,\n# e.g. 
language-check (needs java).\nrequired += [req for req in bear_required\n if not any(req.startswith(ignore)\n for ignore in ignore_requirements)]\n\n\nif __name__ == '__main__':\n setup(name='coala-bears',\n version=Constants.VERSION,\n description='Bears for coala (Code Analysis Application)',\n author='The coala developers',\n maintainer='Lasse Schuirmann, Fabian Neuschmidt, Mischa Kr\\xfcger',\n maintainer_email=('[email protected], '\n '[email protected], '\n '[email protected]'),\n url='http://coala.rtfd.org/',\n platforms='any',\n packages=find_packages(exclude=('build.*', 'tests', 'tests.*')),\n install_requires=required,\n extras_require=extras_require,\n tests_require=test_required,\n package_data={'bears': ['VERSION'],\n 'bears.java': ['checkstyle.jar', 'google_checks.xml'],\n 'bears.scala': ['scalastyle.jar',\n 'scalastyle_config.xml']},\n license='AGPL-3.0',\n long_description=long_description,\n entry_points={'coalabears': ['coala_official_bears = bears']},\n # from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n 'Development Status :: 4 - Beta',\n\n 'Environment :: Plugins',\n 'Environment :: MacOS X',\n 'Environment :: Win32 (MS Windows)',\n 'Environment :: X11 Applications :: Gnome',\n\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n\n 'License :: OSI Approved :: GNU Affero General Public License '\n 'v3 or later (AGPLv3+)',\n\n 'Operating System :: OS Independent',\n\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3 :: Only',\n\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Text Processing :: Linguistic'],\n cmdclass={'docs': BuildDocsCommand,\n 'test': PyTestCommand})\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport locale\nimport sys\nfrom subprocess import call\n\nimport setuptools.command.build_py\nfrom bears import Constants\nfrom setuptools import find_packages, setup\nfrom setuptools.command.test import test as TestCommand\n\ntry:\n locale.getlocale()\nexcept (ValueError, UnicodeError):\n locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')\n\n\nclass PyTestCommand(TestCommand):\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main([])\n sys.exit(errno)\n\n\nclass BuildDocsCommand(setuptools.command.build_py.build_py):\n apidoc_command = ('sphinx-apidoc', '-f', '-o', 'docs/API',\n 'bears')\n make_command = ('make', '-C', 'docs', 'html', 'SPHINXOPTS=-W')\n\n def run(self):\n err_no = call(self.apidoc_command)\n if not err_no:\n err_no = call(self.make_command)\n sys.exit(err_no)\n\n\nwith open('requirements.txt') as requirements:\n required = requirements.read().splitlines()\n required.remove('-r bear-requirements.txt')\n\nwith open('bear-requirements.txt') as requirements:\n bear_required = requirements.read().splitlines()\n\nwith open('test-requirements.txt') as requirements:\n test_required = requirements.read().splitlines()\n\nwith open('ignore.txt') as ignore:\n ignore_requirements = ignore.read().splitlines()\n\nwith open('README.rst') as readme:\n long_description = readme.read()\n\nextras_require = {\n 'alldeps': bear_required,\n}\n\n# For the average user we leave out some of the more complicated requirements,\n# e.g. 
language-check (needs java).\nrequired += [req for req in bear_required\n if not any(req.startswith(ignore)\n for ignore in ignore_requirements)]\n\n\nif __name__ == '__main__':\n setup(name='coala-bears',\n version=Constants.VERSION,\n description='Bears for coala (Code Analysis Application)',\n author='The coala developers',\n maintainer='Lasse Schuirmann, Fabian Neuschmidt, Mischa Kr\\xfcger',\n maintainer_email=('[email protected], '\n '[email protected], '\n '[email protected]'),\n url='http://coala.io/',\n platforms='any',\n packages=find_packages(exclude=('build.*', 'tests', 'tests.*')),\n install_requires=required,\n extras_require=extras_require,\n tests_require=test_required,\n package_data={'bears': ['VERSION'],\n 'bears.java': ['checkstyle.jar', 'google_checks.xml'],\n 'bears.scala': ['scalastyle.jar',\n 'scalastyle_config.xml']},\n license='AGPL-3.0',\n long_description=long_description,\n entry_points={'coalabears': ['coala_official_bears = bears']},\n # from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n 'Development Status :: 4 - Beta',\n\n 'Environment :: Plugins',\n 'Environment :: MacOS X',\n 'Environment :: Win32 (MS Windows)',\n 'Environment :: X11 Applications :: Gnome',\n\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n\n 'License :: OSI Approved :: GNU Affero General Public License '\n 'v3 or later (AGPLv3+)',\n\n 'Operating System :: OS Independent',\n\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3 :: Only',\n\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Text Processing :: Linguistic'],\n cmdclass={'docs': BuildDocsCommand,\n 'test': PyTestCommand})\n", "path": "setup.py"}]}
| 1,424 | 116 |
gh_patches_debug_34413
|
rasdani/github-patches
|
git_diff
|
bentoml__BentoML-4212
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: OpenAPI Schema components from mounted ASGI apps are not being included
### Describe the bug
If a mounted ASGI app has an OpenAPI spec that defines schema components, these are not included in the bento's generated OpenAPI spec.
The following service file reproduces the issue:
```python
import bentoml
import pydantic
from fastapi import FastAPI
svc = bentoml.Service(name="test", runners=[])
fastapi_app = FastAPI()
svc.mount_asgi_app(fastapi_app)
class TestSchema(pydantic.BaseModel):
text_field: str
@fastapi_app.get("/metadata")
def metadata() -> TestSchema:
return TestSchema(text_field="Hello world")
```
If I serve this bento and navigate to the OpenAPI docs, the following error is raised:
```
Could not resolve reference: Could not resolve pointer: /components/schemas/TestSchema does not exist in document
```
This is happening because the OpenAPI path components are being pulled in from the mounted app, but the schema components (the `TestSchema` class in this case) are not being pulled in. I've got a fix ready for this and will open a PR shortly.
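For reference (not part of the original report), a quick way to see the missing piece: FastAPI's `get_openapi()` returns a `"components"` section alongside `"paths"`, and only the latter was being merged into the bento spec. A minimal check, assuming the `fastapi_app` from the snippet above:
```python
from fastapi.openapi.utils import get_openapi

openapi = get_openapi(
    title=fastapi_app.title,
    version=fastapi_app.version,
    routes=fastapi_app.routes,
)
print(sorted(openapi))                   # e.g. ['components', 'info', 'openapi', 'paths']
print(openapi["components"]["schemas"])  # includes the 'TestSchema' definition
```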
### To reproduce
Requires fastapi and pydantic:
`pip install fastapi pydantic`
This service file reproduces the issue:
```python
import bentoml
import pydantic
from fastapi import FastAPI
svc = bentoml.Service(name="test", runners=[])
fastapi_app = FastAPI()
svc.mount_asgi_app(fastapi_app)
class TestSchema(pydantic.BaseModel):
text_field: str
@fastapi_app.get("/metadata")
def metadata() -> TestSchema:
return TestSchema(text_field="Hello world")
```
`bentoml serve service.py:svc`
### Expected behavior
The FastAPI app's schema definitions should be included in the generated OpenAPI spec.
### Environment
#### Environment variable
```bash
BENTOML_DEBUG=''
BENTOML_QUIET=''
BENTOML_BUNDLE_LOCAL_BUILD=''
BENTOML_DO_NOT_TRACK=''
BENTOML_CONFIG=''
BENTOML_CONFIG_OPTIONS=''
BENTOML_PORT=''
BENTOML_HOST=''
BENTOML_API_WORKERS=''
```
#### System information
`bentoml`: 1.1.6
`python`: 3.10.12
`platform`: Linux-6.2.0-33-generic-x86_64-with-glibc2.35
`uid_gid`: 1000:1000
<details><summary><code>pip_packages</code></summary>
<br>
```
aiohttp==3.8.5
aiosignal==1.3.1
annotated-types==0.5.0
anyio==3.7.1
appdirs==1.4.4
asgiref==3.7.2
async-timeout==4.0.3
attrs==23.1.0
bentoml==1.1.6
build==1.0.3
cattrs==23.1.2
certifi==2023.7.22
charset-normalizer==3.2.0
circus==0.18.0
click==8.1.7
click-option-group==0.5.6
cloudpickle==2.2.1
contextlib2==21.6.0
deepmerge==1.1.0
Deprecated==1.2.14
exceptiongroup==1.1.3
fastapi==0.103.1
frozenlist==1.4.0
fs==2.4.16
h11==0.14.0
idna==3.4
importlib-metadata==6.0.1
inflection==0.5.1
Jinja2==3.1.2
jsonschema==4.19.1
jsonschema-specifications==2023.7.1
markdown-it-py==3.0.0
MarkupSafe==2.1.3
mdurl==0.1.2
multidict==6.0.4
numpy==1.26.0
openapi==1.1.0
opentelemetry-api==1.18.0
opentelemetry-instrumentation==0.39b0
opentelemetry-instrumentation-aiohttp-client==0.39b0
opentelemetry-instrumentation-asgi==0.39b0
opentelemetry-sdk==1.18.0
opentelemetry-semantic-conventions==0.39b0
opentelemetry-util-http==0.39b0
packaging==23.1
pathspec==0.11.2
pip-requirements-parser==32.0.1
pip-tools==7.3.0
prometheus-client==0.17.1
psutil==5.9.5
pydantic==2.4.1
pydantic_core==2.10.1
Pygments==2.16.1
pynvml==11.5.0
pyparsing==3.1.1
pyproject_hooks==1.0.0
python-dateutil==2.8.2
python-json-logger==2.0.7
python-multipart==0.0.6
PyYAML==6.0.1
pyzmq==25.1.1
referencing==0.30.2
requests==2.31.0
rich==13.5.3
rpds-py==0.10.3
schema==0.7.5
simple-di==0.1.5
six==1.16.0
sniffio==1.3.0
starlette==0.27.0
tomli==2.0.1
tornado==6.3.3
typing_extensions==4.8.0
urllib3==2.0.5
uvicorn==0.23.2
watchfiles==0.20.0
wrapt==1.15.0
yarl==1.9.2
zipp==3.17.0
```
</details>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/bentoml/_internal/service/openapi/__init__.py`
Content:
```
1 from __future__ import annotations
2
3 import typing as t
4 from functools import lru_cache
5 from http import HTTPStatus
6 from typing import TYPE_CHECKING
7
8 from deepmerge.merger import Merger
9
10 from bentoml.exceptions import InternalServerError
11 from bentoml.exceptions import InvalidArgument
12 from bentoml.exceptions import NotFound
13
14 from ...types import LazyType
15 from ...utils import bentoml_cattr
16 from .specification import Components
17 from .specification import Contact
18 from .specification import Info
19 from .specification import MediaType
20 from .specification import OpenAPISpecification
21 from .specification import Operation
22 from .specification import PathItem
23 from .specification import Reference
24 from .specification import Response
25 from .specification import Tag
26 from .utils import REF_PREFIX
27 from .utils import exception_components_schema
28 from .utils import exception_schema
29
30 if TYPE_CHECKING:
31 from .. import Service
32 from ..inference_api import InferenceAPI
33
34 SUCCESS_DESCRIPTION = "Successful Response"
35
36 INFRA_DECRIPTION = {
37 "/healthz": "Health check endpoint. Expecting an empty response with status code <code>200</code> when the service is in health state. The <code>/healthz</code> endpoint is <b>deprecated</b>. (since Kubernetes v1.16)",
38 "/livez": "Health check endpoint for Kubernetes. Healthy endpoint responses with a <code>200</code> OK status.",
39 "/readyz": "A <code>200</code> OK status from <code>/readyz</code> endpoint indicated the service is ready to accept traffic. From that point and onward, Kubernetes will use <code>/livez</code> endpoint to perform periodic health checks.",
40 "/metrics": "Prometheus metrics endpoint. The <code>/metrics</code> responses with a <code>200</code>. The output can then be used by a Prometheus sidecar to scrape the metrics of the service.",
41 }
42
43 __all__ = ["generate_spec"]
44
45 INFRA_TAG = Tag(
46 name="Infrastructure",
47 description="Common infrastructure endpoints for observability.",
48 )
49 APP_TAG = Tag(
50 name="Service APIs", description="BentoML Service API endpoints for inference."
51 )
52
53 merger = Merger(
54 # merge dicts
55 [(dict, "merge")],
56 # override all other types
57 ["override"],
58 # override conflicting types
59 ["override"],
60 )
61
62
63 def make_api_path(api: InferenceAPI[t.Any]) -> str:
64 return api.route if api.route.startswith("/") else f"/{api.route}"
65
66
67 @lru_cache(maxsize=1)
68 def make_infra_endpoints() -> dict[str, PathItem]:
69 return {
70 endpoint: PathItem(
71 get=Operation(
72 responses={"200": Response(description=SUCCESS_DESCRIPTION)},
73 tags=[INFRA_TAG.name],
74 description=INFRA_DECRIPTION[endpoint],
75 )
76 )
77 for endpoint in INFRA_DECRIPTION
78 }
79
80
81 def generate_service_components(svc: Service) -> Components:
82 components: dict[str, t.Any] = {}
83 for api in svc.apis.values():
84 api_components = {}
85 input_components = api.input.openapi_components()
86 if input_components:
87 merger.merge(api_components, input_components)
88 output_components = api.output.openapi_components()
89 if output_components:
90 merger.merge(api_components, output_components)
91
92 merger.merge(components, api_components)
93
94 # merge exception at last
95 merger.merge(components, {"schemas": exception_components_schema()})
96
97 return Components(**components)
98
99
100 def generate_spec(svc: Service, *, openapi_version: str = "3.0.2"):
101 """Generate a OpenAPI specification for a service."""
102 mounted_app_paths = {}
103
104 for app, _, _ in svc.mount_apps:
105 if LazyType["fastapi.FastAPI"]("fastapi.FastAPI").isinstance(app):
106 from fastapi.openapi.utils import get_openapi
107
108 openapi = get_openapi(
109 title=app.title,
110 version=app.version,
111 routes=app.routes,
112 )
113
114 mounted_app_paths.update(
115 {
116 k: bentoml_cattr.structure(v, PathItem)
117 for k, v in openapi["paths"].items()
118 }
119 )
120
121 return OpenAPISpecification(
122 openapi=openapi_version,
123 tags=[APP_TAG, INFRA_TAG],
124 components=generate_service_components(svc),
125 info=Info(
126 title=svc.name,
127 description=svc.doc,
128 version=svc.tag.version if svc.tag and svc.tag.version else "None",
129 contact=Contact(name="BentoML Team", email="[email protected]"),
130 ),
131 servers=[{"url": "."}],
132 paths={
133 # setup infra endpoints
134 **make_infra_endpoints(),
135 # setup inference endpoints
136 **{
137 make_api_path(api): PathItem(
138 post={
139 "responses": {
140 HTTPStatus.OK.value: api.output.openapi_responses(),
141 **{
142 ex.error_code.value: Response(
143 description=filled.description,
144 content={
145 "application/json": MediaType(
146 schema=Reference(
147 f"{REF_PREFIX}{filled.title}"
148 )
149 )
150 },
151 )
152 for ex in [
153 InvalidArgument,
154 NotFound,
155 InternalServerError,
156 ]
157 for filled in exception_schema(ex)
158 },
159 },
160 "tags": [APP_TAG.name],
161 "consumes": [api.input.mime_type],
162 "produces": [api.output.mime_type],
163 "x-bentoml-name": api.name,
164 "summary": str(api),
165 "description": api.doc or "",
166 "requestBody": api.input.openapi_request_body(),
167 "operationId": f"{svc.name}__{api.name}",
168 },
169 )
170 for api in svc.apis.values()
171 },
172 **mounted_app_paths,
173 },
174 )
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/bentoml/_internal/service/openapi/__init__.py b/src/bentoml/_internal/service/openapi/__init__.py
--- a/src/bentoml/_internal/service/openapi/__init__.py
+++ b/src/bentoml/_internal/service/openapi/__init__.py
@@ -13,7 +13,6 @@
from ...types import LazyType
from ...utils import bentoml_cattr
-from .specification import Components
from .specification import Contact
from .specification import Info
from .specification import MediaType
@@ -78,7 +77,7 @@
}
-def generate_service_components(svc: Service) -> Components:
+def generate_service_components(svc: Service) -> dict[str, t.Any]:
components: dict[str, t.Any] = {}
for api in svc.apis.values():
api_components = {}
@@ -92,14 +91,13 @@
merger.merge(components, api_components)
# merge exception at last
- merger.merge(components, {"schemas": exception_components_schema()})
-
- return Components(**components)
+ return merger.merge(components, {"schemas": exception_components_schema()})
def generate_spec(svc: Service, *, openapi_version: str = "3.0.2"):
"""Generate a OpenAPI specification for a service."""
mounted_app_paths = {}
+ schema_components = {}
for app, _, _ in svc.mount_apps:
if LazyType["fastapi.FastAPI"]("fastapi.FastAPI").isinstance(app):
@@ -118,10 +116,15 @@
}
)
+ if "components" in openapi:
+ merger.merge(schema_components, openapi["components"])
+
+ merger.merge(schema_components, generate_service_components(svc))
+
return OpenAPISpecification(
openapi=openapi_version,
tags=[APP_TAG, INFRA_TAG],
- components=generate_service_components(svc),
+ components=schema_components,
info=Info(
title=svc.name,
description=svc.doc,
|
{"golden_diff": "diff --git a/src/bentoml/_internal/service/openapi/__init__.py b/src/bentoml/_internal/service/openapi/__init__.py\n--- a/src/bentoml/_internal/service/openapi/__init__.py\n+++ b/src/bentoml/_internal/service/openapi/__init__.py\n@@ -13,7 +13,6 @@\n \n from ...types import LazyType\n from ...utils import bentoml_cattr\n-from .specification import Components\n from .specification import Contact\n from .specification import Info\n from .specification import MediaType\n@@ -78,7 +77,7 @@\n }\n \n \n-def generate_service_components(svc: Service) -> Components:\n+def generate_service_components(svc: Service) -> dict[str, t.Any]:\n components: dict[str, t.Any] = {}\n for api in svc.apis.values():\n api_components = {}\n@@ -92,14 +91,13 @@\n merger.merge(components, api_components)\n \n # merge exception at last\n- merger.merge(components, {\"schemas\": exception_components_schema()})\n-\n- return Components(**components)\n+ return merger.merge(components, {\"schemas\": exception_components_schema()})\n \n \n def generate_spec(svc: Service, *, openapi_version: str = \"3.0.2\"):\n \"\"\"Generate a OpenAPI specification for a service.\"\"\"\n mounted_app_paths = {}\n+ schema_components = {}\n \n for app, _, _ in svc.mount_apps:\n if LazyType[\"fastapi.FastAPI\"](\"fastapi.FastAPI\").isinstance(app):\n@@ -118,10 +116,15 @@\n }\n )\n \n+ if \"components\" in openapi:\n+ merger.merge(schema_components, openapi[\"components\"])\n+\n+ merger.merge(schema_components, generate_service_components(svc))\n+\n return OpenAPISpecification(\n openapi=openapi_version,\n tags=[APP_TAG, INFRA_TAG],\n- components=generate_service_components(svc),\n+ components=schema_components,\n info=Info(\n title=svc.name,\n description=svc.doc,\n", "issue": "bug: OpenAPI Schema components from mounted ASGI apps are not being included \n### Describe the bug\n\nIf a mounted ASGI app has an OpenAPI spec that defines schema components, these are not included in the bento's generated OpenAPI spec.\r\n\r\nThe following service file reproduces the issue:\r\n\r\n```python\r\nimport bentoml\r\nimport pydantic\r\nfrom fastapi import FastAPI\r\n\r\nsvc = bentoml.Service(name=\"test\", runners=[])\r\n\r\nfastapi_app = FastAPI()\r\nsvc.mount_asgi_app(fastapi_app)\r\n\r\n\r\nclass TestSchema(pydantic.BaseModel):\r\n text_field: str\r\n\r\n\r\n@fastapi_app.get(\"/metadata\")\r\ndef metadata() -> TestSchema:\r\n return TestSchema(text_field=\"Hello world\")\r\n\r\n```\r\n\r\nIf I serve this bento and navigate to the OpenAPI docs, the following error is raised: \r\n```\r\nCould not resolve reference: Could not resolve pointer: /components/schemas/TestSchema does not exist in document\r\n```\r\n\r\nThis is happening because the OpenAPI path components are being pulled in from the mounted app, but the schema component (the TestSchema class in this case) are not being pulled in. 
I've got a fix ready for this and will open a PR shortly\n\n### To reproduce\n\nRequires fastapi and pydantic:\r\n`pip install fastapi pydantic`\r\n\r\nThis service file reproduces the issue:\r\n```python\r\nimport bentoml\r\nimport pydantic\r\nfrom fastapi import FastAPI\r\n\r\nsvc = bentoml.Service(name=\"test\", runners=[])\r\n\r\nfastapi_app = FastAPI()\r\nsvc.mount_asgi_app(fastapi_app)\r\n\r\n\r\nclass TestSchema(pydantic.BaseModel):\r\n text_field: str\r\n\r\n\r\n@fastapi_app.get(\"/metadata\")\r\ndef metadata() -> TestSchema:\r\n return TestSchema(text_field=\"Hello world\")\r\n\r\n```\r\n\r\n`bentoml serve service.py:svc`\n\n### Expected behavior\n\nThe FastAPI app's schema definitions should be included in the generated OpenAPI spec.\n\n### Environment\n\n#### Environment variable\r\n\r\n```bash\r\nBENTOML_DEBUG=''\r\nBENTOML_QUIET=''\r\nBENTOML_BUNDLE_LOCAL_BUILD=''\r\nBENTOML_DO_NOT_TRACK=''\r\nBENTOML_CONFIG=''\r\nBENTOML_CONFIG_OPTIONS=''\r\nBENTOML_PORT=''\r\nBENTOML_HOST=''\r\nBENTOML_API_WORKERS=''\r\n```\r\n\r\n#### System information\r\n\r\n`bentoml`: 1.1.6\r\n`python`: 3.10.12\r\n`platform`: Linux-6.2.0-33-generic-x86_64-with-glibc2.35\r\n`uid_gid`: 1000:1000\r\n<details><summary><code>pip_packages</code></summary>\r\n\r\n<br>\r\n\r\n```\r\naiohttp==3.8.5\r\naiosignal==1.3.1\r\nannotated-types==0.5.0\r\nanyio==3.7.1\r\nappdirs==1.4.4\r\nasgiref==3.7.2\r\nasync-timeout==4.0.3\r\nattrs==23.1.0\r\nbentoml==1.1.6\r\nbuild==1.0.3\r\ncattrs==23.1.2\r\ncertifi==2023.7.22\r\ncharset-normalizer==3.2.0\r\ncircus==0.18.0\r\nclick==8.1.7\r\nclick-option-group==0.5.6\r\ncloudpickle==2.2.1\r\ncontextlib2==21.6.0\r\ndeepmerge==1.1.0\r\nDeprecated==1.2.14\r\nexceptiongroup==1.1.3\r\nfastapi==0.103.1\r\nfrozenlist==1.4.0\r\nfs==2.4.16\r\nh11==0.14.0\r\nidna==3.4\r\nimportlib-metadata==6.0.1\r\ninflection==0.5.1\r\nJinja2==3.1.2\r\njsonschema==4.19.1\r\njsonschema-specifications==2023.7.1\r\nmarkdown-it-py==3.0.0\r\nMarkupSafe==2.1.3\r\nmdurl==0.1.2\r\nmultidict==6.0.4\r\nnumpy==1.26.0\r\nopenapi==1.1.0\r\nopentelemetry-api==1.18.0\r\nopentelemetry-instrumentation==0.39b0\r\nopentelemetry-instrumentation-aiohttp-client==0.39b0\r\nopentelemetry-instrumentation-asgi==0.39b0\r\nopentelemetry-sdk==1.18.0\r\nopentelemetry-semantic-conventions==0.39b0\r\nopentelemetry-util-http==0.39b0\r\npackaging==23.1\r\npathspec==0.11.2\r\npip-requirements-parser==32.0.1\r\npip-tools==7.3.0\r\nprometheus-client==0.17.1\r\npsutil==5.9.5\r\npydantic==2.4.1\r\npydantic_core==2.10.1\r\nPygments==2.16.1\r\npynvml==11.5.0\r\npyparsing==3.1.1\r\npyproject_hooks==1.0.0\r\npython-dateutil==2.8.2\r\npython-json-logger==2.0.7\r\npython-multipart==0.0.6\r\nPyYAML==6.0.1\r\npyzmq==25.1.1\r\nreferencing==0.30.2\r\nrequests==2.31.0\r\nrich==13.5.3\r\nrpds-py==0.10.3\r\nschema==0.7.5\r\nsimple-di==0.1.5\r\nsix==1.16.0\r\nsniffio==1.3.0\r\nstarlette==0.27.0\r\ntomli==2.0.1\r\ntornado==6.3.3\r\ntyping_extensions==4.8.0\r\nurllib3==2.0.5\r\nuvicorn==0.23.2\r\nwatchfiles==0.20.0\r\nwrapt==1.15.0\r\nyarl==1.9.2\r\nzipp==3.17.0\r\n```\r\n\r\n</details>\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport typing as t\nfrom functools import lru_cache\nfrom http import HTTPStatus\nfrom typing import TYPE_CHECKING\n\nfrom deepmerge.merger import Merger\n\nfrom bentoml.exceptions import InternalServerError\nfrom bentoml.exceptions import InvalidArgument\nfrom bentoml.exceptions import NotFound\n\nfrom ...types import LazyType\nfrom ...utils import bentoml_cattr\nfrom .specification import 
Components\nfrom .specification import Contact\nfrom .specification import Info\nfrom .specification import MediaType\nfrom .specification import OpenAPISpecification\nfrom .specification import Operation\nfrom .specification import PathItem\nfrom .specification import Reference\nfrom .specification import Response\nfrom .specification import Tag\nfrom .utils import REF_PREFIX\nfrom .utils import exception_components_schema\nfrom .utils import exception_schema\n\nif TYPE_CHECKING:\n from .. import Service\n from ..inference_api import InferenceAPI\n\nSUCCESS_DESCRIPTION = \"Successful Response\"\n\nINFRA_DECRIPTION = {\n \"/healthz\": \"Health check endpoint. Expecting an empty response with status code <code>200</code> when the service is in health state. The <code>/healthz</code> endpoint is <b>deprecated</b>. (since Kubernetes v1.16)\",\n \"/livez\": \"Health check endpoint for Kubernetes. Healthy endpoint responses with a <code>200</code> OK status.\",\n \"/readyz\": \"A <code>200</code> OK status from <code>/readyz</code> endpoint indicated the service is ready to accept traffic. From that point and onward, Kubernetes will use <code>/livez</code> endpoint to perform periodic health checks.\",\n \"/metrics\": \"Prometheus metrics endpoint. The <code>/metrics</code> responses with a <code>200</code>. The output can then be used by a Prometheus sidecar to scrape the metrics of the service.\",\n}\n\n__all__ = [\"generate_spec\"]\n\nINFRA_TAG = Tag(\n name=\"Infrastructure\",\n description=\"Common infrastructure endpoints for observability.\",\n)\nAPP_TAG = Tag(\n name=\"Service APIs\", description=\"BentoML Service API endpoints for inference.\"\n)\n\nmerger = Merger(\n # merge dicts\n [(dict, \"merge\")],\n # override all other types\n [\"override\"],\n # override conflicting types\n [\"override\"],\n)\n\n\ndef make_api_path(api: InferenceAPI[t.Any]) -> str:\n return api.route if api.route.startswith(\"/\") else f\"/{api.route}\"\n\n\n@lru_cache(maxsize=1)\ndef make_infra_endpoints() -> dict[str, PathItem]:\n return {\n endpoint: PathItem(\n get=Operation(\n responses={\"200\": Response(description=SUCCESS_DESCRIPTION)},\n tags=[INFRA_TAG.name],\n description=INFRA_DECRIPTION[endpoint],\n )\n )\n for endpoint in INFRA_DECRIPTION\n }\n\n\ndef generate_service_components(svc: Service) -> Components:\n components: dict[str, t.Any] = {}\n for api in svc.apis.values():\n api_components = {}\n input_components = api.input.openapi_components()\n if input_components:\n merger.merge(api_components, input_components)\n output_components = api.output.openapi_components()\n if output_components:\n merger.merge(api_components, output_components)\n\n merger.merge(components, api_components)\n\n # merge exception at last\n merger.merge(components, {\"schemas\": exception_components_schema()})\n\n return Components(**components)\n\n\ndef generate_spec(svc: Service, *, openapi_version: str = \"3.0.2\"):\n \"\"\"Generate a OpenAPI specification for a service.\"\"\"\n mounted_app_paths = {}\n\n for app, _, _ in svc.mount_apps:\n if LazyType[\"fastapi.FastAPI\"](\"fastapi.FastAPI\").isinstance(app):\n from fastapi.openapi.utils import get_openapi\n\n openapi = get_openapi(\n title=app.title,\n version=app.version,\n routes=app.routes,\n )\n\n mounted_app_paths.update(\n {\n k: bentoml_cattr.structure(v, PathItem)\n for k, v in openapi[\"paths\"].items()\n }\n )\n\n return OpenAPISpecification(\n openapi=openapi_version,\n tags=[APP_TAG, INFRA_TAG],\n components=generate_service_components(svc),\n 
info=Info(\n title=svc.name,\n description=svc.doc,\n version=svc.tag.version if svc.tag and svc.tag.version else \"None\",\n contact=Contact(name=\"BentoML Team\", email=\"[email protected]\"),\n ),\n servers=[{\"url\": \".\"}],\n paths={\n # setup infra endpoints\n **make_infra_endpoints(),\n # setup inference endpoints\n **{\n make_api_path(api): PathItem(\n post={\n \"responses\": {\n HTTPStatus.OK.value: api.output.openapi_responses(),\n **{\n ex.error_code.value: Response(\n description=filled.description,\n content={\n \"application/json\": MediaType(\n schema=Reference(\n f\"{REF_PREFIX}{filled.title}\"\n )\n )\n },\n )\n for ex in [\n InvalidArgument,\n NotFound,\n InternalServerError,\n ]\n for filled in exception_schema(ex)\n },\n },\n \"tags\": [APP_TAG.name],\n \"consumes\": [api.input.mime_type],\n \"produces\": [api.output.mime_type],\n \"x-bentoml-name\": api.name,\n \"summary\": str(api),\n \"description\": api.doc or \"\",\n \"requestBody\": api.input.openapi_request_body(),\n \"operationId\": f\"{svc.name}__{api.name}\",\n },\n )\n for api in svc.apis.values()\n },\n **mounted_app_paths,\n },\n )\n", "path": "src/bentoml/_internal/service/openapi/__init__.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport typing as t\nfrom functools import lru_cache\nfrom http import HTTPStatus\nfrom typing import TYPE_CHECKING\n\nfrom deepmerge.merger import Merger\n\nfrom bentoml.exceptions import InternalServerError\nfrom bentoml.exceptions import InvalidArgument\nfrom bentoml.exceptions import NotFound\n\nfrom ...types import LazyType\nfrom ...utils import bentoml_cattr\nfrom .specification import Contact\nfrom .specification import Info\nfrom .specification import MediaType\nfrom .specification import OpenAPISpecification\nfrom .specification import Operation\nfrom .specification import PathItem\nfrom .specification import Reference\nfrom .specification import Response\nfrom .specification import Tag\nfrom .utils import REF_PREFIX\nfrom .utils import exception_components_schema\nfrom .utils import exception_schema\n\nif TYPE_CHECKING:\n from .. import Service\n from ..inference_api import InferenceAPI\n\nSUCCESS_DESCRIPTION = \"Successful Response\"\n\nINFRA_DECRIPTION = {\n \"/healthz\": \"Health check endpoint. Expecting an empty response with status code <code>200</code> when the service is in health state. The <code>/healthz</code> endpoint is <b>deprecated</b>. (since Kubernetes v1.16)\",\n \"/livez\": \"Health check endpoint for Kubernetes. Healthy endpoint responses with a <code>200</code> OK status.\",\n \"/readyz\": \"A <code>200</code> OK status from <code>/readyz</code> endpoint indicated the service is ready to accept traffic. From that point and onward, Kubernetes will use <code>/livez</code> endpoint to perform periodic health checks.\",\n \"/metrics\": \"Prometheus metrics endpoint. The <code>/metrics</code> responses with a <code>200</code>. 
The output can then be used by a Prometheus sidecar to scrape the metrics of the service.\",\n}\n\n__all__ = [\"generate_spec\"]\n\nINFRA_TAG = Tag(\n name=\"Infrastructure\",\n description=\"Common infrastructure endpoints for observability.\",\n)\nAPP_TAG = Tag(\n name=\"Service APIs\", description=\"BentoML Service API endpoints for inference.\"\n)\n\nmerger = Merger(\n # merge dicts\n [(dict, \"merge\")],\n # override all other types\n [\"override\"],\n # override conflicting types\n [\"override\"],\n)\n\n\ndef make_api_path(api: InferenceAPI[t.Any]) -> str:\n return api.route if api.route.startswith(\"/\") else f\"/{api.route}\"\n\n\n@lru_cache(maxsize=1)\ndef make_infra_endpoints() -> dict[str, PathItem]:\n return {\n endpoint: PathItem(\n get=Operation(\n responses={\"200\": Response(description=SUCCESS_DESCRIPTION)},\n tags=[INFRA_TAG.name],\n description=INFRA_DECRIPTION[endpoint],\n )\n )\n for endpoint in INFRA_DECRIPTION\n }\n\n\ndef generate_service_components(svc: Service) -> dict[str, t.Any]:\n components: dict[str, t.Any] = {}\n for api in svc.apis.values():\n api_components = {}\n input_components = api.input.openapi_components()\n if input_components:\n merger.merge(api_components, input_components)\n output_components = api.output.openapi_components()\n if output_components:\n merger.merge(api_components, output_components)\n\n merger.merge(components, api_components)\n\n # merge exception at last\n return merger.merge(components, {\"schemas\": exception_components_schema()})\n\n\ndef generate_spec(svc: Service, *, openapi_version: str = \"3.0.2\"):\n \"\"\"Generate a OpenAPI specification for a service.\"\"\"\n mounted_app_paths = {}\n schema_components = {}\n\n for app, _, _ in svc.mount_apps:\n if LazyType[\"fastapi.FastAPI\"](\"fastapi.FastAPI\").isinstance(app):\n from fastapi.openapi.utils import get_openapi\n\n openapi = get_openapi(\n title=app.title,\n version=app.version,\n routes=app.routes,\n )\n\n mounted_app_paths.update(\n {\n k: bentoml_cattr.structure(v, PathItem)\n for k, v in openapi[\"paths\"].items()\n }\n )\n\n if \"components\" in openapi:\n merger.merge(schema_components, openapi[\"components\"])\n\n merger.merge(schema_components, generate_service_components(svc))\n\n return OpenAPISpecification(\n openapi=openapi_version,\n tags=[APP_TAG, INFRA_TAG],\n components=schema_components,\n info=Info(\n title=svc.name,\n description=svc.doc,\n version=svc.tag.version if svc.tag and svc.tag.version else \"None\",\n contact=Contact(name=\"BentoML Team\", email=\"[email protected]\"),\n ),\n servers=[{\"url\": \".\"}],\n paths={\n # setup infra endpoints\n **make_infra_endpoints(),\n # setup inference endpoints\n **{\n make_api_path(api): PathItem(\n post={\n \"responses\": {\n HTTPStatus.OK.value: api.output.openapi_responses(),\n **{\n ex.error_code.value: Response(\n description=filled.description,\n content={\n \"application/json\": MediaType(\n schema=Reference(\n f\"{REF_PREFIX}{filled.title}\"\n )\n )\n },\n )\n for ex in [\n InvalidArgument,\n NotFound,\n InternalServerError,\n ]\n for filled in exception_schema(ex)\n },\n },\n \"tags\": [APP_TAG.name],\n \"consumes\": [api.input.mime_type],\n \"produces\": [api.output.mime_type],\n \"x-bentoml-name\": api.name,\n \"summary\": str(api),\n \"description\": api.doc or \"\",\n \"requestBody\": api.input.openapi_request_body(),\n \"operationId\": f\"{svc.name}__{api.name}\",\n },\n )\n for api in svc.apis.values()\n },\n **mounted_app_paths,\n },\n )\n", "path": 
"src/bentoml/_internal/service/openapi/__init__.py"}]}
| 3,340 | 457 |
gh_patches_debug_5637
|
rasdani/github-patches
|
git_diff
|
marshmallow-code__webargs-537
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Errors while validating arguments in headers result in a flask crash
If you make a view with header arguments `@bp.arguments(someschema, location='headers')`
and then feed it headers that are not defined in the schema, it will (rightfully) cause a schema validation error; however, the error created includes the entire header tuple as a dictionary key instead of just the key (tuple position 0). This causes Flask to error out while trying to convert the response to a valid JSON response.
This is the response returned (grabbed this with a pydb)
```python
{
"code": 422,
"status": "Unprocessable Entity",
"errors": {"headers": {("Someheader", "someval"): ["Unknown field."]}},
}
```
This is the stack trace flask produces, which I have included so people searching it will hopefully find their way here.
```python
../../../../miniconda3/lib/python3.7/site-packages/werkzeug/test.py:1006: in get
return self.open(*args, **kw)
nomitall/api/tests/_client.py:37: in open
return super().open(*args, **kwargs)
../../../../miniconda3/lib/python3.7/site-packages/flask/testing.py:227: in open
follow_redirects=follow_redirects,
../../../../miniconda3/lib/python3.7/site-packages/werkzeug/test.py:970: in open
response = self.run_wsgi_app(environ.copy(), buffered=buffered)
../../../../miniconda3/lib/python3.7/site-packages/werkzeug/test.py:861: in run_wsgi_app
rv = run_wsgi_app(self.application, environ, buffered=buffered)
../../../../miniconda3/lib/python3.7/site-packages/werkzeug/test.py:1096: in run_wsgi_app
app_rv = app(environ, start_response)
../../../../miniconda3/lib/python3.7/site-packages/flask/app.py:2463: in __call__
return self.wsgi_app(environ, start_response)
../../../../miniconda3/lib/python3.7/site-packages/flask/app.py:2449: in wsgi_app
response = self.handle_exception(e)
../../../../miniconda3/lib/python3.7/site-packages/flask/app.py:1866: in handle_exception
reraise(exc_type, exc_value, tb)
../../../../miniconda3/lib/python3.7/site-packages/flask/_compat.py:39: in reraise
raise value
../../../../miniconda3/lib/python3.7/site-packages/flask/app.py:2446: in wsgi_app
response = self.full_dispatch_request()
../../../../miniconda3/lib/python3.7/site-packages/flask/app.py:1952: in full_dispatch_request
return self.finalize_request(rv)
../../../../miniconda3/lib/python3.7/site-packages/flask/app.py:1967: in finalize_request
response = self.make_response(rv)
../../../../miniconda3/lib/python3.7/site-packages/flask/app.py:2111: in make_response
rv = jsonify(rv)
../../../../miniconda3/lib/python3.7/site-packages/flask/json/__init__.py:370: in jsonify
dumps(data, indent=indent, separators=separators) + "\n",
../../../../miniconda3/lib/python3.7/site-packages/flask/json/__init__.py:211: in dumps
rv = _json.dumps(obj, **kwargs)
../../../../miniconda3/lib/python3.7/site-packages/simplejson/__init__.py:412: in dumps
**kw).encode(obj)
../../../../miniconda3/lib/python3.7/site-packages/simplejson/encoder.py:298: in encode
chunks = list(chunks)
../../../../miniconda3/lib/python3.7/site-packages/simplejson/encoder.py:696: in _iterencode
for chunk in _iterencode_dict(o, _current_indent_level):
../../../../miniconda3/lib/python3.7/site-packages/simplejson/encoder.py:652: in _iterencode_dict
for chunk in chunks:
../../../../miniconda3/lib/python3.7/site-packages/simplejson/encoder.py:652: in _iterencode_dict
for chunk in chunks:
../../../../miniconda3/lib/python3.7/site-packages/simplejson/encoder.py:598: in _iterencode_dict
k = _stringify_key(k)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
key = ("Someheader", "someval")
def _stringify_key(key):
if isinstance(key, string_types): # pragma: no cover
pass
elif _PY3 and isinstance(key, bytes) and _encoding is not None:
key = str(key, _encoding)
elif isinstance(key, float):
key = _floatstr(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif isinstance(key, integer_types):
if type(key) not in integer_types:
# See marshmallow-code/flask-smorest#118, do not trust custom str/repr
key = int(key)
key = str(key)
elif _use_decimal and isinstance(key, Decimal):
key = str(key)
elif _skipkeys:
key = None
else:
raise TypeError('keys must be str, int, float, bool or None, '
> 'not %s' % key.__class__.__name__)
E TypeError: keys must be str, int, float, bool or None, not tuple
../../../../miniconda3/lib/python3.7/site-packages/simplejson/encoder.py:568: TypeError
```
I'm not sure there is a good workaround apart from disabling schema validation.
If this issue is unclear, please ask for further explanation and I'll sink some time into creating some self-contained reproduction code.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/webargs/multidictproxy.py`
Content:
```
1 from collections.abc import Mapping
2
3 from webargs.compat import MARSHMALLOW_VERSION_INFO
4 from webargs.core import missing, is_multiple
5
6
7 class MultiDictProxy(Mapping):
8 """
9 A proxy object which wraps multidict types along with a matching schema
10 Whenever a value is looked up, it is checked against the schema to see if
11 there is a matching field where `is_multiple` is True. If there is, then
12 the data should be loaded as a list or tuple.
13
14 In all other cases, __getitem__ proxies directly to the input multidict.
15 """
16
17 def __init__(self, multidict, schema):
18 self.data = multidict
19 self.multiple_keys = self._collect_multiple_keys(schema)
20
21 @staticmethod
22 def _collect_multiple_keys(schema):
23 result = set()
24 for name, field in schema.fields.items():
25 if not is_multiple(field):
26 continue
27 if MARSHMALLOW_VERSION_INFO[0] < 3:
28 result.add(field.load_from if field.load_from is not None else name)
29 else:
30 result.add(field.data_key if field.data_key is not None else name)
31 return result
32
33 def __getitem__(self, key):
34 val = self.data.get(key, missing)
35 if val is missing or key not in self.multiple_keys:
36 return val
37 if hasattr(self.data, "getlist"):
38 return self.data.getlist(key)
39 if hasattr(self.data, "getall"):
40 return self.data.getall(key)
41 if isinstance(val, (list, tuple)):
42 return val
43 if val is None:
44 return None
45 return [val]
46
47 def __str__(self): # str(proxy) proxies to str(proxy.data)
48 return str(self.data)
49
50 def __repr__(self):
51 return "MultiDictProxy(data={!r}, multiple_keys={!r})".format(
52 self.data, self.multiple_keys
53 )
54
55 def __delitem__(self, key):
56 del self.data[key]
57
58 def __setitem__(self, key, value):
59 self.data[key] = value
60
61 def __getattr__(self, name):
62 return getattr(self.data, name)
63
64 def __iter__(self):
65 return iter(self.data)
66
67 def __contains__(self, x):
68 return x in self.data
69
70 def __len__(self):
71 return len(self.data)
72
73 def __eq__(self, other):
74 return self.data == other
75
76 def __ne__(self, other):
77 return self.data != other
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/webargs/multidictproxy.py b/src/webargs/multidictproxy.py
--- a/src/webargs/multidictproxy.py
+++ b/src/webargs/multidictproxy.py
@@ -62,7 +62,13 @@
return getattr(self.data, name)
def __iter__(self):
- return iter(self.data)
+ for x in iter(self.data):
+ # special case for header dicts which produce an iterator of tuples
+ # instead of an iterator of strings
+ if isinstance(x, tuple):
+ yield x[0]
+ else:
+ yield x
def __contains__(self, x):
return x in self.data
|
{"golden_diff": "diff --git a/src/webargs/multidictproxy.py b/src/webargs/multidictproxy.py\n--- a/src/webargs/multidictproxy.py\n+++ b/src/webargs/multidictproxy.py\n@@ -62,7 +62,13 @@\n return getattr(self.data, name)\n \n def __iter__(self):\n- return iter(self.data)\n+ for x in iter(self.data):\n+ # special case for header dicts which produce an iterator of tuples\n+ # instead of an iterator of strings\n+ if isinstance(x, tuple):\n+ yield x[0]\n+ else:\n+ yield x\n \n def __contains__(self, x):\n return x in self.data\n", "issue": "Errors while validating arguments in headers result in a flask crash\nIf you make a view with header arguments `@bp.arguments(someschema, location='headers')`\r\nThen feed it headers that are not defined in the schema, it will (rightfully) cause a schema validation error, however the error created includes the entire header tuple as a dictionary key, instead of just the 'key' (tuple position 0). This causes flask to error out while trying to convert the response to a valid JSON response.\r\n\r\nThis is the response returned (grabbed this with a pydb)\r\n```python\r\n{\r\n \"code\": 422,\r\n \"status\": \"Unprocessable Entity\",\r\n \"errors\": {\"headers\": {(\"Someheader\", \"someval\"): [\"Unknown field.\"]}},\r\n}\r\n```\r\nThis is the stack trace flask produces, which I have included so people searching it will hopefully find their way here.\r\n\r\n```python\r\n../../../../miniconda3/lib/python3.7/site-packages/werkzeug/test.py:1006: in get\r\n return self.open(*args, **kw)\r\nnomitall/api/tests/_client.py:37: in open\r\n return super().open(*args, **kwargs)\r\n../../../../miniconda3/lib/python3.7/site-packages/flask/testing.py:227: in open\r\n follow_redirects=follow_redirects,\r\n../../../../miniconda3/lib/python3.7/site-packages/werkzeug/test.py:970: in open\r\n response = self.run_wsgi_app(environ.copy(), buffered=buffered)\r\n../../../../miniconda3/lib/python3.7/site-packages/werkzeug/test.py:861: in run_wsgi_app\r\n rv = run_wsgi_app(self.application, environ, buffered=buffered)\r\n../../../../miniconda3/lib/python3.7/site-packages/werkzeug/test.py:1096: in run_wsgi_app\r\n app_rv = app(environ, start_response)\r\n../../../../miniconda3/lib/python3.7/site-packages/flask/app.py:2463: in __call__\r\n return self.wsgi_app(environ, start_response)\r\n../../../../miniconda3/lib/python3.7/site-packages/flask/app.py:2449: in wsgi_app\r\n response = self.handle_exception(e)\r\n../../../../miniconda3/lib/python3.7/site-packages/flask/app.py:1866: in handle_exception\r\n reraise(exc_type, exc_value, tb)\r\n../../../../miniconda3/lib/python3.7/site-packages/flask/_compat.py:39: in reraise\r\n raise value\r\n../../../../miniconda3/lib/python3.7/site-packages/flask/app.py:2446: in wsgi_app\r\n response = self.full_dispatch_request()\r\n../../../../miniconda3/lib/python3.7/site-packages/flask/app.py:1952: in full_dispatch_request\r\n return self.finalize_request(rv)\r\n../../../../miniconda3/lib/python3.7/site-packages/flask/app.py:1967: in finalize_request\r\n response = self.make_response(rv)\r\n../../../../miniconda3/lib/python3.7/site-packages/flask/app.py:2111: in make_response\r\n rv = jsonify(rv)\r\n../../../../miniconda3/lib/python3.7/site-packages/flask/json/__init__.py:370: in jsonify\r\n dumps(data, indent=indent, separators=separators) + \"\\n\",\r\n../../../../miniconda3/lib/python3.7/site-packages/flask/json/__init__.py:211: in dumps\r\n rv = _json.dumps(obj, 
**kwargs)\r\n../../../../miniconda3/lib/python3.7/site-packages/simplejson/__init__.py:412: in dumps\r\n **kw).encode(obj)\r\n../../../../miniconda3/lib/python3.7/site-packages/simplejson/encoder.py:298: in encode\r\n chunks = list(chunks)\r\n../../../../miniconda3/lib/python3.7/site-packages/simplejson/encoder.py:696: in _iterencode\r\n for chunk in _iterencode_dict(o, _current_indent_level):\r\n../../../../miniconda3/lib/python3.7/site-packages/simplejson/encoder.py:652: in _iterencode_dict\r\n for chunk in chunks:\r\n../../../../miniconda3/lib/python3.7/site-packages/simplejson/encoder.py:652: in _iterencode_dict\r\n for chunk in chunks:\r\n../../../../miniconda3/lib/python3.7/site-packages/simplejson/encoder.py:598: in _iterencode_dict\r\n k = _stringify_key(k)\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n\r\nkey = (\"Someheader\", \"someval\")\r\n\r\n def _stringify_key(key):\r\n if isinstance(key, string_types): # pragma: no cover\r\n pass\r\n elif _PY3 and isinstance(key, bytes) and _encoding is not None:\r\n key = str(key, _encoding)\r\n elif isinstance(key, float):\r\n key = _floatstr(key)\r\n elif key is True:\r\n key = 'true'\r\n elif key is False:\r\n key = 'false'\r\n elif key is None:\r\n key = 'null'\r\n elif isinstance(key, integer_types):\r\n if type(key) not in integer_types:\r\n # See marshmallow-code/flask-smorest#118, do not trust custom str/repr\r\n key = int(key)\r\n key = str(key)\r\n elif _use_decimal and isinstance(key, Decimal):\r\n key = str(key)\r\n elif _skipkeys:\r\n key = None\r\n else:\r\n raise TypeError('keys must be str, int, float, bool or None, '\r\n> 'not %s' % key.__class__.__name__)\r\nE TypeError: keys must be str, int, float, bool or None, not tuple\r\n\r\n../../../../miniconda3/lib/python3.7/site-packages/simplejson/encoder.py:568: TypeError\r\n```\r\n\r\nI'm not sure there is a good workaround apart from disabling schema validation.\r\n\r\nIf this issue is unclear please ask for further explanation and i'll sink some time into creating some self contained reproduction code.\n", "before_files": [{"content": "from collections.abc import Mapping\n\nfrom webargs.compat import MARSHMALLOW_VERSION_INFO\nfrom webargs.core import missing, is_multiple\n\n\nclass MultiDictProxy(Mapping):\n \"\"\"\n A proxy object which wraps multidict types along with a matching schema\n Whenever a value is looked up, it is checked against the schema to see if\n there is a matching field where `is_multiple` is True. 
If there is, then\n the data should be loaded as a list or tuple.\n\n In all other cases, __getitem__ proxies directly to the input multidict.\n \"\"\"\n\n def __init__(self, multidict, schema):\n self.data = multidict\n self.multiple_keys = self._collect_multiple_keys(schema)\n\n @staticmethod\n def _collect_multiple_keys(schema):\n result = set()\n for name, field in schema.fields.items():\n if not is_multiple(field):\n continue\n if MARSHMALLOW_VERSION_INFO[0] < 3:\n result.add(field.load_from if field.load_from is not None else name)\n else:\n result.add(field.data_key if field.data_key is not None else name)\n return result\n\n def __getitem__(self, key):\n val = self.data.get(key, missing)\n if val is missing or key not in self.multiple_keys:\n return val\n if hasattr(self.data, \"getlist\"):\n return self.data.getlist(key)\n if hasattr(self.data, \"getall\"):\n return self.data.getall(key)\n if isinstance(val, (list, tuple)):\n return val\n if val is None:\n return None\n return [val]\n\n def __str__(self): # str(proxy) proxies to str(proxy.data)\n return str(self.data)\n\n def __repr__(self):\n return \"MultiDictProxy(data={!r}, multiple_keys={!r})\".format(\n self.data, self.multiple_keys\n )\n\n def __delitem__(self, key):\n del self.data[key]\n\n def __setitem__(self, key, value):\n self.data[key] = value\n\n def __getattr__(self, name):\n return getattr(self.data, name)\n\n def __iter__(self):\n return iter(self.data)\n\n def __contains__(self, x):\n return x in self.data\n\n def __len__(self):\n return len(self.data)\n\n def __eq__(self, other):\n return self.data == other\n\n def __ne__(self, other):\n return self.data != other\n", "path": "src/webargs/multidictproxy.py"}], "after_files": [{"content": "from collections.abc import Mapping\n\nfrom webargs.compat import MARSHMALLOW_VERSION_INFO\nfrom webargs.core import missing, is_multiple\n\n\nclass MultiDictProxy(Mapping):\n \"\"\"\n A proxy object which wraps multidict types along with a matching schema\n Whenever a value is looked up, it is checked against the schema to see if\n there is a matching field where `is_multiple` is True. 
If there is, then\n the data should be loaded as a list or tuple.\n\n In all other cases, __getitem__ proxies directly to the input multidict.\n \"\"\"\n\n def __init__(self, multidict, schema):\n self.data = multidict\n self.multiple_keys = self._collect_multiple_keys(schema)\n\n @staticmethod\n def _collect_multiple_keys(schema):\n result = set()\n for name, field in schema.fields.items():\n if not is_multiple(field):\n continue\n if MARSHMALLOW_VERSION_INFO[0] < 3:\n result.add(field.load_from if field.load_from is not None else name)\n else:\n result.add(field.data_key if field.data_key is not None else name)\n return result\n\n def __getitem__(self, key):\n val = self.data.get(key, missing)\n if val is missing or key not in self.multiple_keys:\n return val\n if hasattr(self.data, \"getlist\"):\n return self.data.getlist(key)\n if hasattr(self.data, \"getall\"):\n return self.data.getall(key)\n if isinstance(val, (list, tuple)):\n return val\n if val is None:\n return None\n return [val]\n\n def __str__(self): # str(proxy) proxies to str(proxy.data)\n return str(self.data)\n\n def __repr__(self):\n return \"MultiDictProxy(data={!r}, multiple_keys={!r})\".format(\n self.data, self.multiple_keys\n )\n\n def __delitem__(self, key):\n del self.data[key]\n\n def __setitem__(self, key, value):\n self.data[key] = value\n\n def __getattr__(self, name):\n return getattr(self.data, name)\n\n def __iter__(self):\n for x in iter(self.data):\n # special case for header dicts which produce an iterator of tuples\n # instead of an iterator of strings\n if isinstance(x, tuple):\n yield x[0]\n else:\n yield x\n\n def __contains__(self, x):\n return x in self.data\n\n def __len__(self):\n return len(self.data)\n\n def __eq__(self, other):\n return self.data == other\n\n def __ne__(self, other):\n return self.data != other\n", "path": "src/webargs/multidictproxy.py"}]}
| 2,359 | 158 |
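A quick illustration of the failure mode and the fix in the record above. This sketch is not part of the dataset record; `iter_keys` and `header_items` are hypothetical stand-ins for the patched `MultiDictProxy.__iter__` and a werkzeug-style header multidict.

```python
import json

# A validation-error dict keyed by a raw header item, i.e. a (name, value) tuple,
# is exactly what the issue reports, and json.dumps cannot serialize tuple keys.
errors = {("Someheader", "someval"): ["Unknown field."]}
try:
    json.dumps({"errors": {"headers": errors}})
except TypeError as exc:
    print(exc)  # keys must be str, int, float, bool or None, not tuple

def iter_keys(data):
    """Yield only header names, mirroring the patched __iter__."""
    for x in iter(data):
        # header dicts iterate as (name, value) tuples instead of plain strings
        if isinstance(x, tuple):
            yield x[0]
        else:
            yield x

header_items = [("Someheader", "someval"), ("Accept", "application/json")]
print(list(iter_keys(header_items)))  # ['Someheader', 'Accept']
```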
gh_patches_debug_15199 | rasdani/github-patches | git_diff | qtile__qtile-3205 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CheckUpdates widget swallows crashes and shows as no updates
As per the title, it's not clear whether the check-update command is working, as any error in the command results in the widget treating it as no updates.
This makes debugging impossible.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libqtile/widget/check_updates.py`
Content:
```
1 # Copyright (c) 2015 Ali Mousavi
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 # SOFTWARE.
20
21 import os
22 from subprocess import CalledProcessError, Popen
23
24 from libqtile.log_utils import logger
25 from libqtile.widget import base
26
27
28 class CheckUpdates(base.ThreadPoolText):
29 """Shows number of pending updates in different unix systems"""
30
31 defaults = [
32 ("distro", "Arch", "Name of your distribution"),
33 (
34 "custom_command",
35 None,
36 "Custom shell command for checking updates (counts the lines of the output)",
37 ),
38 (
39 "custom_command_modify",
40 (lambda x: x),
41 "Lambda function to modify line count from custom_command",
42 ),
43 ("update_interval", 60, "Update interval in seconds."),
44 ("execute", None, "Command to execute on click"),
45 ("display_format", "Updates: {updates}", "Display format if updates available"),
46 ("colour_no_updates", "ffffff", "Colour when there's no updates."),
47 ("colour_have_updates", "ffffff", "Colour when there are updates."),
48 ("restart_indicator", "", "Indicator to represent reboot is required. (Ubuntu only)"),
49 ("no_update_string", "", "String to display if no updates available"),
50 ]
51
52 def __init__(self, **config):
53 base.ThreadPoolText.__init__(self, "", **config)
54 self.add_defaults(CheckUpdates.defaults)
55
56 # Helpful to have this as a variable as we can shorten it for testing
57 self.execute_polling_interval = 1
58
59 # format: "Distro": ("cmd", "number of lines to subtract from output")
60 self.cmd_dict = {
61 "Arch": ("pacman -Qu", 0),
62 "Arch_checkupdates": ("checkupdates", 0),
63 "Arch_Sup": ("pacman -Sup", 0),
64 "Arch_paru": ("paru -Qu", 0),
65 "Arch_paru_Sup": ("paru -Sup", 0),
66 "Arch_yay": ("yay -Qu", 0),
67 "Debian": ("apt-show-versions -u -b", 0),
68 "Gentoo_eix": ("EIX_LIMIT=0 eix -u# --world", 0),
69 "Ubuntu": ("aptitude search ~U", 0),
70 "Fedora": ("dnf list updates -q", 1),
71 "FreeBSD": ("pkg_version -I -l '<'", 0),
72 "Mandriva": ("urpmq --auto-select", 0),
73 }
74
75 if self.custom_command:
76 # Use custom_command
77 self.cmd = self.custom_command
78
79 else:
80 # Check if distro name is valid.
81 try:
82 self.cmd = self.cmd_dict[self.distro][0]
83 self.custom_command_modify = lambda x: x - self.cmd_dict[self.distro][1]
84 except KeyError:
85 distros = sorted(self.cmd_dict.keys())
86 logger.error(
87 self.distro
88 + " is not a valid distro name. "
89 + "Use one of the list: "
90 + str(distros)
91 + "."
92 )
93 self.cmd = None
94
95 if self.execute:
96 self.add_callbacks({"Button1": self.do_execute})
97
98 def _check_updates(self):
99 # type: () -> str
100 try:
101 updates = self.call_process(self.cmd, shell=True)
102 except CalledProcessError:
103 updates = ""
104 num_updates = self.custom_command_modify(len(updates.splitlines()))
105
106 if num_updates < 0:
107 num_updates = 0
108 if num_updates == 0:
109 self.layout.colour = self.colour_no_updates
110 return self.no_update_string
111 num_updates = str(num_updates)
112
113 if self.restart_indicator and os.path.exists("/var/run/reboot-required"):
114 num_updates += self.restart_indicator
115
116 self.layout.colour = self.colour_have_updates
117 return self.display_format.format(**{"updates": num_updates})
118
119 def poll(self):
120 # type: () -> str
121 if not self.cmd:
122 return "N/A"
123 return self._check_updates()
124
125 def do_execute(self):
126 self._process = Popen(self.execute, shell=True)
127 self.timeout_add(self.execute_polling_interval, self._refresh_count)
128
129 def _refresh_count(self):
130 if self._process.poll() is None:
131 self.timeout_add(self.execute_polling_interval, self._refresh_count)
132
133 else:
134 self.timer_setup()
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/libqtile/widget/check_updates.py b/libqtile/widget/check_updates.py
--- a/libqtile/widget/check_updates.py
+++ b/libqtile/widget/check_updates.py
@@ -26,7 +26,21 @@
class CheckUpdates(base.ThreadPoolText):
- """Shows number of pending updates in different unix systems"""
+ """
+ Shows number of pending updates in different unix systems.
+
+ .. note::
+
+ It is common for package managers to return a non-zero code when there are no
+ updates. As a result, the widget will treat *any* error as if there are no updates.
+ If you are using a custom commmand/script, you should therefore ensure that it
+ returns zero when it completes if you wish to see the output of your command.
+
+ In addition, as no errors are recorded to the log, if the widget is showing no
+ updates and you believe that to be incorrect, you should run the appropriate
+ command in a terminal to view any error messages.
+
+ """
defaults = [
("distro", "Arch", "Name of your distribution"),
|
{"golden_diff": "diff --git a/libqtile/widget/check_updates.py b/libqtile/widget/check_updates.py\n--- a/libqtile/widget/check_updates.py\n+++ b/libqtile/widget/check_updates.py\n@@ -26,7 +26,21 @@\n \n \n class CheckUpdates(base.ThreadPoolText):\n- \"\"\"Shows number of pending updates in different unix systems\"\"\"\n+ \"\"\"\n+ Shows number of pending updates in different unix systems.\n+\n+ .. note::\n+\n+ It is common for package managers to return a non-zero code when there are no\n+ updates. As a result, the widget will treat *any* error as if there are no updates.\n+ If you are using a custom commmand/script, you should therefore ensure that it\n+ returns zero when it completes if you wish to see the output of your command.\n+\n+ In addition, as no errors are recorded to the log, if the widget is showing no\n+ updates and you believe that to be incorrect, you should run the appropriate\n+ command in a terminal to view any error messages.\n+\n+ \"\"\"\n \n defaults = [\n (\"distro\", \"Arch\", \"Name of your distribution\"),\n", "issue": "CheckUpdates widget swallows crashes and shows as no updates\nAs per title, it's not clear if the check update command is working as any error in the command results in the widget treating it as no updates. \r\n\r\nThis makes debugging impossible.\n", "before_files": [{"content": "# Copyright (c) 2015 Ali Mousavi\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport os\nfrom subprocess import CalledProcessError, Popen\n\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n\nclass CheckUpdates(base.ThreadPoolText):\n \"\"\"Shows number of pending updates in different unix systems\"\"\"\n\n defaults = [\n (\"distro\", \"Arch\", \"Name of your distribution\"),\n (\n \"custom_command\",\n None,\n \"Custom shell command for checking updates (counts the lines of the output)\",\n ),\n (\n \"custom_command_modify\",\n (lambda x: x),\n \"Lambda function to modify line count from custom_command\",\n ),\n (\"update_interval\", 60, \"Update interval in seconds.\"),\n (\"execute\", None, \"Command to execute on click\"),\n (\"display_format\", \"Updates: {updates}\", \"Display format if updates available\"),\n (\"colour_no_updates\", \"ffffff\", \"Colour when there's no updates.\"),\n (\"colour_have_updates\", \"ffffff\", \"Colour when there are updates.\"),\n (\"restart_indicator\", \"\", \"Indicator to represent reboot is required. 
(Ubuntu only)\"),\n (\"no_update_string\", \"\", \"String to display if no updates available\"),\n ]\n\n def __init__(self, **config):\n base.ThreadPoolText.__init__(self, \"\", **config)\n self.add_defaults(CheckUpdates.defaults)\n\n # Helpful to have this as a variable as we can shorten it for testing\n self.execute_polling_interval = 1\n\n # format: \"Distro\": (\"cmd\", \"number of lines to subtract from output\")\n self.cmd_dict = {\n \"Arch\": (\"pacman -Qu\", 0),\n \"Arch_checkupdates\": (\"checkupdates\", 0),\n \"Arch_Sup\": (\"pacman -Sup\", 0),\n \"Arch_paru\": (\"paru -Qu\", 0),\n \"Arch_paru_Sup\": (\"paru -Sup\", 0),\n \"Arch_yay\": (\"yay -Qu\", 0),\n \"Debian\": (\"apt-show-versions -u -b\", 0),\n \"Gentoo_eix\": (\"EIX_LIMIT=0 eix -u# --world\", 0),\n \"Ubuntu\": (\"aptitude search ~U\", 0),\n \"Fedora\": (\"dnf list updates -q\", 1),\n \"FreeBSD\": (\"pkg_version -I -l '<'\", 0),\n \"Mandriva\": (\"urpmq --auto-select\", 0),\n }\n\n if self.custom_command:\n # Use custom_command\n self.cmd = self.custom_command\n\n else:\n # Check if distro name is valid.\n try:\n self.cmd = self.cmd_dict[self.distro][0]\n self.custom_command_modify = lambda x: x - self.cmd_dict[self.distro][1]\n except KeyError:\n distros = sorted(self.cmd_dict.keys())\n logger.error(\n self.distro\n + \" is not a valid distro name. \"\n + \"Use one of the list: \"\n + str(distros)\n + \".\"\n )\n self.cmd = None\n\n if self.execute:\n self.add_callbacks({\"Button1\": self.do_execute})\n\n def _check_updates(self):\n # type: () -> str\n try:\n updates = self.call_process(self.cmd, shell=True)\n except CalledProcessError:\n updates = \"\"\n num_updates = self.custom_command_modify(len(updates.splitlines()))\n\n if num_updates < 0:\n num_updates = 0\n if num_updates == 0:\n self.layout.colour = self.colour_no_updates\n return self.no_update_string\n num_updates = str(num_updates)\n\n if self.restart_indicator and os.path.exists(\"/var/run/reboot-required\"):\n num_updates += self.restart_indicator\n\n self.layout.colour = self.colour_have_updates\n return self.display_format.format(**{\"updates\": num_updates})\n\n def poll(self):\n # type: () -> str\n if not self.cmd:\n return \"N/A\"\n return self._check_updates()\n\n def do_execute(self):\n self._process = Popen(self.execute, shell=True)\n self.timeout_add(self.execute_polling_interval, self._refresh_count)\n\n def _refresh_count(self):\n if self._process.poll() is None:\n self.timeout_add(self.execute_polling_interval, self._refresh_count)\n\n else:\n self.timer_setup()\n", "path": "libqtile/widget/check_updates.py"}], "after_files": [{"content": "# Copyright (c) 2015 Ali Mousavi\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport os\nfrom subprocess import CalledProcessError, Popen\n\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n\nclass CheckUpdates(base.ThreadPoolText):\n \"\"\"\n Shows number of pending updates in different unix systems.\n\n .. note::\n\n It is common for package managers to return a non-zero code when there are no\n updates. As a result, the widget will treat *any* error as if there are no updates.\n If you are using a custom commmand/script, you should therefore ensure that it\n returns zero when it completes if you wish to see the output of your command.\n\n In addition, as no errors are recorded to the log, if the widget is showing no\n updates and you believe that to be incorrect, you should run the appropriate\n command in a terminal to view any error messages.\n\n \"\"\"\n\n defaults = [\n (\"distro\", \"Arch\", \"Name of your distribution\"),\n (\n \"custom_command\",\n None,\n \"Custom shell command for checking updates (counts the lines of the output)\",\n ),\n (\n \"custom_command_modify\",\n (lambda x: x),\n \"Lambda function to modify line count from custom_command\",\n ),\n (\"update_interval\", 60, \"Update interval in seconds.\"),\n (\"execute\", None, \"Command to execute on click\"),\n (\"display_format\", \"Updates: {updates}\", \"Display format if updates available\"),\n (\"colour_no_updates\", \"ffffff\", \"Colour when there's no updates.\"),\n (\"colour_have_updates\", \"ffffff\", \"Colour when there are updates.\"),\n (\"restart_indicator\", \"\", \"Indicator to represent reboot is required. (Ubuntu only)\"),\n (\"no_update_string\", \"\", \"String to display if no updates available\"),\n ]\n\n def __init__(self, **config):\n base.ThreadPoolText.__init__(self, \"\", **config)\n self.add_defaults(CheckUpdates.defaults)\n\n # Helpful to have this as a variable as we can shorten it for testing\n self.execute_polling_interval = 1\n\n # format: \"Distro\": (\"cmd\", \"number of lines to subtract from output\")\n self.cmd_dict = {\n \"Arch\": (\"pacman -Qu\", 0),\n \"Arch_checkupdates\": (\"checkupdates\", 0),\n \"Arch_Sup\": (\"pacman -Sup\", 0),\n \"Arch_paru\": (\"paru -Qu\", 0),\n \"Arch_paru_Sup\": (\"paru -Sup\", 0),\n \"Arch_yay\": (\"yay -Qu\", 0),\n \"Debian\": (\"apt-show-versions -u -b\", 0),\n \"Gentoo_eix\": (\"EIX_LIMIT=0 eix -u# --world\", 0),\n \"Ubuntu\": (\"aptitude search ~U\", 0),\n \"Fedora\": (\"dnf list updates -q\", 1),\n \"FreeBSD\": (\"pkg_version -I -l '<'\", 0),\n \"Mandriva\": (\"urpmq --auto-select\", 0),\n }\n\n if self.custom_command:\n # Use custom_command\n self.cmd = self.custom_command\n\n else:\n # Check if distro name is valid.\n try:\n self.cmd = self.cmd_dict[self.distro][0]\n self.custom_command_modify = lambda x: x - self.cmd_dict[self.distro][1]\n except KeyError:\n distros = sorted(self.cmd_dict.keys())\n logger.error(\n self.distro\n + \" is not a valid distro name. 
\"\n + \"Use one of the list: \"\n + str(distros)\n + \".\"\n )\n self.cmd = None\n\n if self.execute:\n self.add_callbacks({\"Button1\": self.do_execute})\n\n def _check_updates(self):\n # type: () -> str\n try:\n updates = self.call_process(self.cmd, shell=True)\n except CalledProcessError:\n updates = \"\"\n num_updates = self.custom_command_modify(len(updates.splitlines()))\n\n if num_updates < 0:\n num_updates = 0\n if num_updates == 0:\n self.layout.colour = self.colour_no_updates\n return self.no_update_string\n num_updates = str(num_updates)\n\n if self.restart_indicator and os.path.exists(\"/var/run/reboot-required\"):\n num_updates += self.restart_indicator\n\n self.layout.colour = self.colour_have_updates\n return self.display_format.format(**{\"updates\": num_updates})\n\n def poll(self):\n # type: () -> str\n if not self.cmd:\n return \"N/A\"\n return self._check_updates()\n\n def do_execute(self):\n self._process = Popen(self.execute, shell=True)\n self.timeout_add(self.execute_polling_interval, self._refresh_count)\n\n def _refresh_count(self):\n if self._process.poll() is None:\n self.timeout_add(self.execute_polling_interval, self._refresh_count)\n\n else:\n self.timer_setup()\n", "path": "libqtile/widget/check_updates.py"}]}
| 1,793 | 250 |
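To see the swallowed-error behaviour that the added docstring note documents, a minimal stand-alone sketch can reproduce it outside qtile. This is not qtile code: `count_updates` is a hypothetical reduction of `_check_updates` (which uses `self.call_process` and catches `CalledProcessError`), and the shell commands assume a POSIX shell.

```python
import subprocess

def count_updates(cmd: str) -> int:
    """Mimic the widget: a failing command is indistinguishable from 0 updates."""
    try:
        out = subprocess.check_output(cmd, shell=True, text=True)
    except subprocess.CalledProcessError:
        out = ""  # the error is swallowed, as in CheckUpdates._check_updates
    return len(out.splitlines())

print(count_updates("printf 'pkg-a\\npkg-b\\n'"))  # 2 pending updates
print(count_updates("exit 1"))                     # 0, a failing checker looks like no updates
```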
gh_patches_debug_15946 | rasdani/github-patches | git_diff | microsoft__Qcodes-485 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Keithley 2600 "resolution"
@MerlinSmiles right now we are limiting the set to 8 digits (https://github.com/QCoDeS/Qcodes/blob/master/qcodes/instrument_drivers/tektronix/Keithley_2600.py#L23)
Afaik it can go up to 12 digits. Do you confirm?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qcodes/instrument_drivers/tektronix/Keithley_2600.py`
Content:
```
1 from qcodes import VisaInstrument
2
3
4 class Keithley_2600(VisaInstrument):
5 """
6 channel: use channel 'a' or 'b'
7
8 This is the qcodes driver for the Keithley_2600 Source-Meter series,
9 tested with Keithley_2614B
10
11 Status: beta-version.
12 TODO:
13 - Add all parameters that are in the manual
14 - range and limit should be set according to mode
15 - add ramping and such stuff
16
17 """
18 def __init__(self, name, address, channel, **kwargs):
19 super().__init__(name, address, terminator='\n', **kwargs)
20 self._channel = channel
21
22 self.add_parameter('volt', get_cmd='measure.v()',
23 get_parser=float, set_cmd='source.levelv={:.8f}',
24 label='Voltage',
25 unit='V')
26 self.add_parameter('curr', get_cmd='measure.i()',
27 get_parser=float, set_cmd='source.leveli={:.8f}',
28 label='Current',
29 unit='A')
30 self.add_parameter('mode',
31 get_cmd='source.func',
32 set_cmd='source.func={:d}',
33 val_mapping={'current': 0, 'voltage': 1})
34 self.add_parameter('output',
35 get_cmd='source.output',
36 set_cmd='source.output={:d}',
37 val_mapping={'on': 1, 'off': 0})
38 # Source range
39 # needs get after set
40 self.add_parameter('rangev',
41 get_cmd='source.rangev',
42 get_parser=float,
43 set_cmd='source.rangev={:.4f}',
44 unit='V')
45 # Measure range
46 # needs get after set
47 self.add_parameter('rangei',
48 get_cmd='source.rangei',
49 get_parser=float,
50 set_cmd='source.rangei={:.4f}',
51 unit='A')
52 # Compliance limit
53 self.add_parameter('limitv',
54 get_cmd='source.limitv',
55 get_parser=float,
56 set_cmd='source.limitv={:.4f}',
57 unit='V')
58 # Compliance limit
59 self.add_parameter('limiti',
60 get_cmd='source.limiti',
61 get_parser=float,
62 set_cmd='source.limiti={:.4f}',
63 unit='A')
64
65 self.connect_message()
66
67 def get_idn(self):
68 IDN = self.ask_raw('*IDN?')
69 vendor, model, serial, firmware = map(str.strip, IDN.split(','))
70 model = model[6:]
71
72 IDN = {'vendor': vendor, 'model': model,
73 'serial': serial, 'firmware': firmware}
74 return IDN
75
76 def reset(self):
77 self.write('reset()')
78
79 def ask(self, cmd):
80 return super().ask('print(smu{:s}.{:s})'.format(self._channel, cmd))
81
82 def write(self, cmd):
83 super().write('smu{:s}.{:s}'.format(self._channel, cmd))
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qcodes/instrument_drivers/tektronix/Keithley_2600.py b/qcodes/instrument_drivers/tektronix/Keithley_2600.py
--- a/qcodes/instrument_drivers/tektronix/Keithley_2600.py
+++ b/qcodes/instrument_drivers/tektronix/Keithley_2600.py
@@ -20,11 +20,11 @@
self._channel = channel
self.add_parameter('volt', get_cmd='measure.v()',
- get_parser=float, set_cmd='source.levelv={:.8f}',
+ get_parser=float, set_cmd='source.levelv={:.12f}',
label='Voltage',
unit='V')
self.add_parameter('curr', get_cmd='measure.i()',
- get_parser=float, set_cmd='source.leveli={:.8f}',
+ get_parser=float, set_cmd='source.leveli={:.12f}',
label='Current',
unit='A')
self.add_parameter('mode',
|
{"golden_diff": "diff --git a/qcodes/instrument_drivers/tektronix/Keithley_2600.py b/qcodes/instrument_drivers/tektronix/Keithley_2600.py\n--- a/qcodes/instrument_drivers/tektronix/Keithley_2600.py\n+++ b/qcodes/instrument_drivers/tektronix/Keithley_2600.py\n@@ -20,11 +20,11 @@\n self._channel = channel\n \n self.add_parameter('volt', get_cmd='measure.v()',\n- get_parser=float, set_cmd='source.levelv={:.8f}',\n+ get_parser=float, set_cmd='source.levelv={:.12f}',\n label='Voltage',\n unit='V')\n self.add_parameter('curr', get_cmd='measure.i()',\n- get_parser=float, set_cmd='source.leveli={:.8f}',\n+ get_parser=float, set_cmd='source.leveli={:.12f}',\n label='Current',\n unit='A')\n self.add_parameter('mode',\n", "issue": "Keithely 2600 \"resolution\"\n@MerlinSmiles right now we are limiting the set to 8 digits (https://github.com/QCoDeS/Qcodes/blob/master/qcodes/instrument_drivers/tektronix/Keithley_2600.py#L23)\r\nAfaik it can go to to 12 digits. Do you confirm ? \r\n\n", "before_files": [{"content": "from qcodes import VisaInstrument\n\n\nclass Keithley_2600(VisaInstrument):\n \"\"\"\n channel: use channel 'a' or 'b'\n\n This is the qcodes driver for the Keithley_2600 Source-Meter series,\n tested with Keithley_2614B\n\n Status: beta-version.\n TODO:\n - Add all parameters that are in the manual\n - range and limit should be set according to mode\n - add ramping and such stuff\n\n \"\"\"\n def __init__(self, name, address, channel, **kwargs):\n super().__init__(name, address, terminator='\\n', **kwargs)\n self._channel = channel\n\n self.add_parameter('volt', get_cmd='measure.v()',\n get_parser=float, set_cmd='source.levelv={:.8f}',\n label='Voltage',\n unit='V')\n self.add_parameter('curr', get_cmd='measure.i()',\n get_parser=float, set_cmd='source.leveli={:.8f}',\n label='Current',\n unit='A')\n self.add_parameter('mode',\n get_cmd='source.func',\n set_cmd='source.func={:d}',\n val_mapping={'current': 0, 'voltage': 1})\n self.add_parameter('output',\n get_cmd='source.output',\n set_cmd='source.output={:d}',\n val_mapping={'on': 1, 'off': 0})\n # Source range\n # needs get after set\n self.add_parameter('rangev',\n get_cmd='source.rangev',\n get_parser=float,\n set_cmd='source.rangev={:.4f}',\n unit='V')\n # Measure range\n # needs get after set\n self.add_parameter('rangei',\n get_cmd='source.rangei',\n get_parser=float,\n set_cmd='source.rangei={:.4f}',\n unit='A')\n # Compliance limit\n self.add_parameter('limitv',\n get_cmd='source.limitv',\n get_parser=float,\n set_cmd='source.limitv={:.4f}',\n unit='V')\n # Compliance limit\n self.add_parameter('limiti',\n get_cmd='source.limiti',\n get_parser=float,\n set_cmd='source.limiti={:.4f}',\n unit='A')\n\n self.connect_message()\n\n def get_idn(self):\n IDN = self.ask_raw('*IDN?')\n vendor, model, serial, firmware = map(str.strip, IDN.split(','))\n model = model[6:]\n\n IDN = {'vendor': vendor, 'model': model,\n 'serial': serial, 'firmware': firmware}\n return IDN\n\n def reset(self):\n self.write('reset()')\n\n def ask(self, cmd):\n return super().ask('print(smu{:s}.{:s})'.format(self._channel, cmd))\n\n def write(self, cmd):\n super().write('smu{:s}.{:s}'.format(self._channel, cmd))\n", "path": "qcodes/instrument_drivers/tektronix/Keithley_2600.py"}], "after_files": [{"content": "from qcodes import VisaInstrument\n\n\nclass Keithley_2600(VisaInstrument):\n \"\"\"\n channel: use channel 'a' or 'b'\n\n This is the qcodes driver for the Keithley_2600 Source-Meter series,\n tested with Keithley_2614B\n\n Status: beta-version.\n TODO:\n - 
Add all parameters that are in the manual\n - range and limit should be set according to mode\n - add ramping and such stuff\n\n \"\"\"\n def __init__(self, name, address, channel, **kwargs):\n super().__init__(name, address, terminator='\\n', **kwargs)\n self._channel = channel\n\n self.add_parameter('volt', get_cmd='measure.v()',\n get_parser=float, set_cmd='source.levelv={:.12f}',\n label='Voltage',\n unit='V')\n self.add_parameter('curr', get_cmd='measure.i()',\n get_parser=float, set_cmd='source.leveli={:.12f}',\n label='Current',\n unit='A')\n self.add_parameter('mode',\n get_cmd='source.func',\n set_cmd='source.func={:d}',\n val_mapping={'current': 0, 'voltage': 1})\n self.add_parameter('output',\n get_cmd='source.output',\n set_cmd='source.output={:d}',\n val_mapping={'on': 1, 'off': 0})\n # Source range\n # needs get after set\n self.add_parameter('rangev',\n get_cmd='source.rangev',\n get_parser=float,\n set_cmd='source.rangev={:.4f}',\n unit='V')\n # Measure range\n # needs get after set\n self.add_parameter('rangei',\n get_cmd='source.rangei',\n get_parser=float,\n set_cmd='source.rangei={:.4f}',\n unit='A')\n # Compliance limit\n self.add_parameter('limitv',\n get_cmd='source.limitv',\n get_parser=float,\n set_cmd='source.limitv={:.4f}',\n unit='V')\n # Compliance limit\n self.add_parameter('limiti',\n get_cmd='source.limiti',\n get_parser=float,\n set_cmd='source.limiti={:.4f}',\n unit='A')\n\n self.connect_message()\n\n def get_idn(self):\n IDN = self.ask_raw('*IDN?')\n vendor, model, serial, firmware = map(str.strip, IDN.split(','))\n model = model[6:]\n\n IDN = {'vendor': vendor, 'model': model,\n 'serial': serial, 'firmware': firmware}\n return IDN\n\n def reset(self):\n self.write('reset()')\n\n def ask(self, cmd):\n return super().ask('print(smu{:s}.{:s})'.format(self._channel, cmd))\n\n def write(self, cmd):\n super().write('smu{:s}.{:s}'.format(self._channel, cmd))\n", "path": "qcodes/instrument_drivers/tektronix/Keithley_2600.py"}]}
| 1,173 | 234 |
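The practical effect of widening the format precision in the diff above can be seen with plain string formatting; the level value below is an arbitrary illustration, not taken from the driver.

```python
level = 1e-10  # an arbitrary small setpoint, e.g. 100 pV

# With 8 decimal places the setpoint collapses to zero in the command string;
# 12 decimal places keeps it representable.
print('source.levelv={:.8f}'.format(level))   # source.levelv=0.00000000
print('source.levelv={:.12f}'.format(level))  # source.levelv=0.000000000100
```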
gh_patches_debug_5064 | rasdani/github-patches | git_diff | pytorch__pytorch-255 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
batch_first broken in AutogradRNN
The last line here fails on CPU or when CUDNN is otherwise unavailable:
```python
l, b, t, x, h = 2, 3, 5, 10, 20
rnn = nn.LSTM(x, h, l, batch_first=True)
inpt = Variable(torch.randn(b, t, x))
h0 = Variable(torch.randn(l, b, h))
c0 = Variable(torch.randn(l, b, h))
output, hn = rnn(inpt, (h0, c0))
```
This is because `AutogradRNN.forward` accidentally assumes `Tensor`'s in-place `transpose` semantics rather than the functional semantics of `Variable` (`cudnn.rnn.forward` gets it right):
```python
def forward(input, weight, hidden):
if batch_first:
input.transpose(0, 1)
nexth, output = func(input, hidden, weight)
if batch_first:
output.transpose(0, 1)
```
I can push a PR that fixes this, or one of the devs can put it in the next bugfix PR:
```python
def forward(input, weight, hidden):
if batch_first:
input = input.transpose(0, 1)
nexth, output = func(input, hidden, weight)
if batch_first:
output = output.transpose(0, 1)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch/nn/functions/rnn.py`
Content:
```
1 from torch.autograd import Function, NestedIOFunction, Variable
2 from torch._thnn import type2backend
3 import torch.backends.cudnn as cudnn
4 try:
5 import torch.backends.cudnn.rnn
6 except ImportError:
7 pass
8
9
10 # FIXME: write a proper function library
11 from .thnn import Tanh, Sigmoid, Threshold
12 from .linear import Linear
13 from .dropout import Dropout
14
15
16 def _wrap(fn, *args):
17 def inner(*inner_args):
18 return fn(*args)(*inner_args)
19 return inner
20 tanh = _wrap(Tanh)
21 sigmoid = _wrap(Sigmoid)
22 ReLU = _wrap(Threshold, 0, 0, False)
23
24
25 # get around autograd's lack of None-handling
26 def linear(input, w, b):
27 if b is not None:
28 return Linear()(input, w, b)
29 else:
30 return Linear()(input, w)
31
32
33 def RNNReLUCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
34 hy = ReLU(linear(input, w_ih, b_ih) + linear(hidden, w_hh, b_hh))
35 return hy
36
37
38 def RNNTanhCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
39 hy = tanh(linear(input, w_ih, b_ih) + linear(hidden, w_hh, b_hh))
40 return hy
41
42
43 def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
44 hx, cx = hidden
45 gates = linear(input, w_ih, b_ih) + linear(hx, w_hh, b_hh)
46 ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
47
48 ingate = sigmoid(ingate)
49 forgetgate = sigmoid(forgetgate)
50 cellgate = tanh(cellgate)
51 outgate = sigmoid(outgate)
52
53 cy = (forgetgate * cx) + (ingate * cellgate)
54 hy = outgate * tanh(cy)
55
56 return hy, cy
57
58
59 def GRUCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
60 gi = linear(input, w_ih, b_ih)
61 gh = linear(hidden, w_hh, b_hh)
62 i_r, i_i, i_n = gi.chunk(3, 1)
63 h_r, h_i, h_n = gh.chunk(3, 1)
64
65 resetgate = sigmoid(i_r + h_r)
66 inputgate = sigmoid(i_i + h_i)
67 newgate = tanh(i_n + resetgate * h_n)
68 hy = newgate + inputgate * (hidden - newgate)
69
70 return hy
71
72
73 def StackedRNN(inners, num_layers, lstm=False, dropout=0, train=True):
74
75 num_directions = len(inners)
76 total_layers = num_layers * num_directions
77
78 def forward(input, hidden, weight):
79 assert(len(weight) == total_layers)
80 next_hidden = []
81
82 if lstm:
83 hidden = list(zip(*hidden))
84
85 for i in range(num_layers):
86 all_output = []
87 for j, inner in enumerate(inners):
88 l = i * num_directions + j
89
90 hy, output = inner(input, hidden[l], weight[l])
91 next_hidden.append(hy)
92 all_output.append(output)
93
94 input = torch.cat(all_output, 2)
95
96 if dropout != 0 and i < num_layers - 1:
97 input = Dropout(p=dropout, train=train, inplace=False)(input)
98
99 if lstm:
100 next_h, next_c = zip(*next_hidden)
101 next_hidden = (
102 torch.cat(next_h, 0).view(total_layers, *next_h[0].size()),
103 torch.cat(next_c, 0).view(total_layers, *next_c[0].size())
104 )
105 else:
106 next_hidden = torch.cat(next_hidden, 0).view(
107 total_layers, *next_hidden[0].size())
108
109 return next_hidden, input
110
111 return forward
112
113 def Recurrent(inner, reverse=False):
114 def forward(input, hidden, weight):
115 output = []
116 steps = range(input.size(0) - 1, -1, -1) if reverse else range(input.size(0))
117 for i in steps:
118 hidden = inner(input[i], hidden, *weight)
119 # hack to handle LSTM
120 output.append(isinstance(hidden, tuple) and hidden[0] or hidden)
121
122 if reverse:
123 output.reverse()
124 output = torch.cat(output, 0).view(input.size(0), *output[0].size())
125
126 return hidden, output
127
128 return forward
129
130
131 def AutogradRNN(mode, input_size, hidden_size, num_layers=1, batch_first=False, dropout=0, train=True, bidirectional=False):
132
133 if mode == 'RNN_RELU':
134 cell = RNNReLUCell
135 elif mode == 'RNN_TANH':
136 cell = RNNTanhCell
137 elif mode == 'LSTM':
138 cell = LSTMCell
139 elif mode == 'GRU':
140 cell = GRUCell
141 else:
142 raise Exception('Unknown mode: {}'.format(mode))
143
144 if bidirectional:
145 layer = (Recurrent(cell), Recurrent(cell, reverse=True))
146 else:
147 layer = (Recurrent(cell),)
148
149 func = StackedRNN(layer,
150 num_layers,
151 (mode == 'LSTM'),
152 dropout=dropout,
153 train=train)
154
155 def forward(input, weight, hidden):
156 if batch_first:
157 input.transpose(0, 1)
158
159 nexth, output = func(input, hidden, weight)
160
161 if batch_first:
162 output.transpose(0, 1)
163
164 return output, nexth
165
166 return forward
167
168
169 class CudnnRNN(NestedIOFunction):
170 def __init__(self, mode, input_size, hidden_size, num_layers=1, batch_first=False, dropout=0, train=True, bidirectional=False):
171 super(CudnnRNN, self).__init__()
172 self.mode = cudnn.rnn.get_cudnn_mode(mode)
173 self.input_mode = cudnn.CUDNN_LINEAR_INPUT
174 self.input_size = input_size
175 self.hidden_size = hidden_size
176 self.num_layers = num_layers
177 self.batch_first = batch_first
178 self.dropout = dropout
179 self.train = train
180 self.bidirectional = 1 if bidirectional else 0
181 self.num_directions = 2 if bidirectional else 1
182 self.seed = torch.IntTensor(1).random_()[0]
183
184 def forward_extended(self, input, weight, hx):
185
186 assert(cudnn.is_acceptable(input))
187
188 output = input.new()
189
190 if torch.is_tensor(hx):
191 hy = hx.new()
192 else:
193 hy = tuple(h.new() for h in hx)
194
195 cudnn.rnn.forward(self, input, hx, weight, output, hy)
196
197 self.save_for_backward(input, hx, weight, output)
198 return output, hy
199
200
201 def backward_extended(self, grad_output, grad_hy):
202 input, hx, weight, output = self.saved_tensors
203
204 grad_input, grad_weight, grad_hx = None, None, None
205
206 assert(cudnn.is_acceptable(input))
207
208 grad_input = input.new()
209 grad_weight = input.new()
210 grad_hx = input.new()
211 if torch.is_tensor(hx):
212 grad_hx = input.new()
213 else:
214 grad_hx = tuple(h.new() for h in hx)
215
216 cudnn.rnn.backward_grad(
217 self,
218 input,
219 hx,
220 weight,
221 output,
222 grad_output,
223 grad_hy,
224 grad_input,
225 grad_hx)
226
227 if self.needs_input_grad[1]:
228 grad_weight = [tuple(w.new().resize_as_(w).zero_() for w in layer_weight) for layer_weight in weight]
229 cudnn.rnn.backward_weight(
230 self,
231 input,
232 hx,
233 output,
234 weight,
235 grad_weight)
236
237 return grad_input, grad_weight, grad_hx
238
239
240 def RNN(*args, **kwargs):
241 def forward(input, *fargs, **fkwargs):
242 if cudnn.is_acceptable(input.data):
243 func = CudnnRNN(*args, **kwargs)
244 else:
245 func = AutogradRNN(*args, **kwargs)
246 return func(input, *fargs, **fkwargs)
247
248 return forward
249
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torch/nn/functions/rnn.py b/torch/nn/functions/rnn.py
--- a/torch/nn/functions/rnn.py
+++ b/torch/nn/functions/rnn.py
@@ -154,12 +154,12 @@
def forward(input, weight, hidden):
if batch_first:
- input.transpose(0, 1)
+ input = input.transpose(0, 1)
nexth, output = func(input, hidden, weight)
if batch_first:
- output.transpose(0, 1)
+ output = output.transpose(0, 1)
return output, nexth
|
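As a side note on the diff above (an editorial illustration, not part of the record): `transpose` returns a new tensor rather than modifying its input, so the result has to be reassigned. With a recent PyTorch build, for example:

```python
import torch

x = torch.randn(3, 5, 10)

x.transpose(0, 1)      # return value discarded; x itself is unchanged
print(x.shape)         # torch.Size([3, 5, 10])

x = x.transpose(0, 1)  # functional style, as in the patched forward()
print(x.shape)         # torch.Size([5, 3, 10])
```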
{"golden_diff": "diff --git a/torch/nn/functions/rnn.py b/torch/nn/functions/rnn.py\n--- a/torch/nn/functions/rnn.py\n+++ b/torch/nn/functions/rnn.py\n@@ -154,12 +154,12 @@\n \n def forward(input, weight, hidden):\n if batch_first:\n- input.transpose(0, 1)\n+ input = input.transpose(0, 1)\n \n nexth, output = func(input, hidden, weight)\n \n if batch_first:\n- output.transpose(0, 1)\n+ output = output.transpose(0, 1)\n \n return output, nexth\n", "issue": "batch_first broken in AutogradRNN\nThe last line here fails on CPU or when CUDNN is otherwise unavailable:\r\n\r\n```python\r\nl, b, t, x, h = 2, 3, 5, 10, 20\r\n\r\nrnn = nn.LSTM(x, h, l, batch_first=True)\r\ninpt = Variable(torch.randn(b, t, x))\r\nh0 = Variable(torch.randn(l, b, h))\r\nc0 = Variable(torch.randn(l, b, h))\r\noutput, hn = rnn(inpt, (h0, c0))\r\n```\r\n\r\nThis is because `AutogradRNN.forward` accidentally assumes `Tensor`'s in-place `transpose` semantics rather than the functional semantics of `Variable` (`cudnn.rnn.forward` gets it right):\r\n\r\n```python\r\ndef forward(input, weight, hidden):\r\n if batch_first:\r\n input.transpose(0, 1)\r\n nexth, output = func(input, hidden, weight)\r\n if batch_first:\r\n output.transpose(0, 1)\r\n```\r\n\r\nI can push a PR that fixes this, or one of the devs can put it in the next bugfix PR:\r\n```python\r\ndef forward(input, weight, hidden):\r\n if batch_first:\r\n input = input.transpose(0, 1)\r\n nexth, output = func(input, hidden, weight)\r\n if batch_first:\r\n output = output.transpose(0, 1)\r\n```\n", "before_files": [{"content": "from torch.autograd import Function, NestedIOFunction, Variable\nfrom torch._thnn import type2backend\nimport torch.backends.cudnn as cudnn\ntry:\n import torch.backends.cudnn.rnn\nexcept ImportError:\n pass\n\n\n# FIXME: write a proper function library\nfrom .thnn import Tanh, Sigmoid, Threshold\nfrom .linear import Linear\nfrom .dropout import Dropout\n\n\ndef _wrap(fn, *args):\n def inner(*inner_args):\n return fn(*args)(*inner_args)\n return inner\ntanh = _wrap(Tanh)\nsigmoid = _wrap(Sigmoid)\nReLU = _wrap(Threshold, 0, 0, False)\n\n\n# get around autograd's lack of None-handling\ndef linear(input, w, b):\n if b is not None:\n return Linear()(input, w, b)\n else:\n return Linear()(input, w)\n\n\ndef RNNReLUCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):\n hy = ReLU(linear(input, w_ih, b_ih) + linear(hidden, w_hh, b_hh))\n return hy\n\n\ndef RNNTanhCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):\n hy = tanh(linear(input, w_ih, b_ih) + linear(hidden, w_hh, b_hh))\n return hy\n\n\ndef LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):\n hx, cx = hidden\n gates = linear(input, w_ih, b_ih) + linear(hx, w_hh, b_hh)\n ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)\n\n ingate = sigmoid(ingate)\n forgetgate = sigmoid(forgetgate)\n cellgate = tanh(cellgate)\n outgate = sigmoid(outgate)\n\n cy = (forgetgate * cx) + (ingate * cellgate)\n hy = outgate * tanh(cy)\n\n return hy, cy\n\n\ndef GRUCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):\n gi = linear(input, w_ih, b_ih)\n gh = linear(hidden, w_hh, b_hh)\n i_r, i_i, i_n = gi.chunk(3, 1)\n h_r, h_i, h_n = gh.chunk(3, 1)\n\n resetgate = sigmoid(i_r + h_r)\n inputgate = sigmoid(i_i + h_i)\n newgate = tanh(i_n + resetgate * h_n)\n hy = newgate + inputgate * (hidden - newgate)\n\n return hy\n\n\ndef StackedRNN(inners, num_layers, lstm=False, dropout=0, train=True):\n\n num_directions = len(inners)\n total_layers = num_layers * num_directions\n\n def forward(input, 
hidden, weight):\n assert(len(weight) == total_layers)\n next_hidden = []\n\n if lstm:\n hidden = list(zip(*hidden))\n\n for i in range(num_layers):\n all_output = []\n for j, inner in enumerate(inners):\n l = i * num_directions + j\n\n hy, output = inner(input, hidden[l], weight[l])\n next_hidden.append(hy)\n all_output.append(output)\n\n input = torch.cat(all_output, 2)\n\n if dropout != 0 and i < num_layers - 1:\n input = Dropout(p=dropout, train=train, inplace=False)(input)\n\n if lstm:\n next_h, next_c = zip(*next_hidden)\n next_hidden = (\n torch.cat(next_h, 0).view(total_layers, *next_h[0].size()),\n torch.cat(next_c, 0).view(total_layers, *next_c[0].size())\n )\n else:\n next_hidden = torch.cat(next_hidden, 0).view(\n total_layers, *next_hidden[0].size())\n\n return next_hidden, input\n\n return forward\n\ndef Recurrent(inner, reverse=False):\n def forward(input, hidden, weight):\n output = []\n steps = range(input.size(0) - 1, -1, -1) if reverse else range(input.size(0))\n for i in steps:\n hidden = inner(input[i], hidden, *weight)\n # hack to handle LSTM\n output.append(isinstance(hidden, tuple) and hidden[0] or hidden)\n\n if reverse:\n output.reverse()\n output = torch.cat(output, 0).view(input.size(0), *output[0].size())\n\n return hidden, output\n\n return forward\n\n\ndef AutogradRNN(mode, input_size, hidden_size, num_layers=1, batch_first=False, dropout=0, train=True, bidirectional=False):\n\n if mode == 'RNN_RELU':\n cell = RNNReLUCell\n elif mode == 'RNN_TANH':\n cell = RNNTanhCell\n elif mode == 'LSTM':\n cell = LSTMCell\n elif mode == 'GRU':\n cell = GRUCell\n else:\n raise Exception('Unknown mode: {}'.format(mode))\n\n if bidirectional:\n layer = (Recurrent(cell), Recurrent(cell, reverse=True))\n else:\n layer = (Recurrent(cell),)\n\n func = StackedRNN(layer,\n num_layers,\n (mode == 'LSTM'),\n dropout=dropout,\n train=train)\n\n def forward(input, weight, hidden):\n if batch_first:\n input.transpose(0, 1)\n\n nexth, output = func(input, hidden, weight)\n\n if batch_first:\n output.transpose(0, 1)\n\n return output, nexth\n\n return forward\n\n\nclass CudnnRNN(NestedIOFunction):\n def __init__(self, mode, input_size, hidden_size, num_layers=1, batch_first=False, dropout=0, train=True, bidirectional=False):\n super(CudnnRNN, self).__init__()\n self.mode = cudnn.rnn.get_cudnn_mode(mode)\n self.input_mode = cudnn.CUDNN_LINEAR_INPUT\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.batch_first = batch_first\n self.dropout = dropout\n self.train = train\n self.bidirectional = 1 if bidirectional else 0\n self.num_directions = 2 if bidirectional else 1\n self.seed = torch.IntTensor(1).random_()[0]\n\n def forward_extended(self, input, weight, hx):\n\n assert(cudnn.is_acceptable(input))\n\n output = input.new()\n\n if torch.is_tensor(hx):\n hy = hx.new()\n else:\n hy = tuple(h.new() for h in hx)\n\n cudnn.rnn.forward(self, input, hx, weight, output, hy)\n\n self.save_for_backward(input, hx, weight, output)\n return output, hy\n\n\n def backward_extended(self, grad_output, grad_hy):\n input, hx, weight, output = self.saved_tensors\n\n grad_input, grad_weight, grad_hx = None, None, None\n\n assert(cudnn.is_acceptable(input))\n\n grad_input = input.new()\n grad_weight = input.new()\n grad_hx = input.new()\n if torch.is_tensor(hx):\n grad_hx = input.new()\n else:\n grad_hx = tuple(h.new() for h in hx)\n\n cudnn.rnn.backward_grad(\n self,\n input,\n hx,\n weight,\n output,\n grad_output,\n grad_hy,\n grad_input,\n grad_hx)\n\n if 
self.needs_input_grad[1]:\n grad_weight = [tuple(w.new().resize_as_(w).zero_() for w in layer_weight) for layer_weight in weight]\n cudnn.rnn.backward_weight(\n self,\n input,\n hx,\n output,\n weight,\n grad_weight)\n\n return grad_input, grad_weight, grad_hx\n\n\ndef RNN(*args, **kwargs):\n def forward(input, *fargs, **fkwargs):\n if cudnn.is_acceptable(input.data):\n func = CudnnRNN(*args, **kwargs)\n else:\n func = AutogradRNN(*args, **kwargs)\n return func(input, *fargs, **fkwargs)\n\n return forward\n", "path": "torch/nn/functions/rnn.py"}], "after_files": [{"content": "from torch.autograd import Function, NestedIOFunction, Variable\nfrom torch._thnn import type2backend\nimport torch.backends.cudnn as cudnn\ntry:\n import torch.backends.cudnn.rnn\nexcept ImportError:\n pass\n\n\n# FIXME: write a proper function library\nfrom .thnn import Tanh, Sigmoid, Threshold\nfrom .linear import Linear\nfrom .dropout import Dropout\n\n\ndef _wrap(fn, *args):\n def inner(*inner_args):\n return fn(*args)(*inner_args)\n return inner\ntanh = _wrap(Tanh)\nsigmoid = _wrap(Sigmoid)\nReLU = _wrap(Threshold, 0, 0, False)\n\n\n# get around autograd's lack of None-handling\ndef linear(input, w, b):\n if b is not None:\n return Linear()(input, w, b)\n else:\n return Linear()(input, w)\n\n\ndef RNNReLUCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):\n hy = ReLU(linear(input, w_ih, b_ih) + linear(hidden, w_hh, b_hh))\n return hy\n\n\ndef RNNTanhCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):\n hy = tanh(linear(input, w_ih, b_ih) + linear(hidden, w_hh, b_hh))\n return hy\n\n\ndef LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):\n hx, cx = hidden\n gates = linear(input, w_ih, b_ih) + linear(hx, w_hh, b_hh)\n ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)\n\n ingate = sigmoid(ingate)\n forgetgate = sigmoid(forgetgate)\n cellgate = tanh(cellgate)\n outgate = sigmoid(outgate)\n\n cy = (forgetgate * cx) + (ingate * cellgate)\n hy = outgate * tanh(cy)\n\n return hy, cy\n\n\ndef GRUCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):\n gi = linear(input, w_ih, b_ih)\n gh = linear(hidden, w_hh, b_hh)\n i_r, i_i, i_n = gi.chunk(3, 1)\n h_r, h_i, h_n = gh.chunk(3, 1)\n\n resetgate = sigmoid(i_r + h_r)\n inputgate = sigmoid(i_i + h_i)\n newgate = tanh(i_n + resetgate * h_n)\n hy = newgate + inputgate * (hidden - newgate)\n\n return hy\n\n\ndef StackedRNN(inners, num_layers, lstm=False, dropout=0, train=True):\n\n num_directions = len(inners)\n total_layers = num_layers * num_directions\n\n def forward(input, hidden, weight):\n assert(len(weight) == total_layers)\n next_hidden = []\n\n if lstm:\n hidden = list(zip(*hidden))\n\n for i in range(num_layers):\n all_output = []\n for j, inner in enumerate(inners):\n l = i * num_directions + j\n\n hy, output = inner(input, hidden[l], weight[l])\n next_hidden.append(hy)\n all_output.append(output)\n\n input = torch.cat(all_output, 2)\n\n if dropout != 0 and i < num_layers - 1:\n input = Dropout(p=dropout, train=train, inplace=False)(input)\n\n if lstm:\n next_h, next_c = zip(*next_hidden)\n next_hidden = (\n torch.cat(next_h, 0).view(total_layers, *next_h[0].size()),\n torch.cat(next_c, 0).view(total_layers, *next_c[0].size())\n )\n else:\n next_hidden = torch.cat(next_hidden, 0).view(\n total_layers, *next_hidden[0].size())\n\n return next_hidden, input\n\n return forward\n\ndef Recurrent(inner, reverse=False):\n def forward(input, hidden, weight):\n output = []\n steps = range(input.size(0) - 1, -1, -1) if reverse else range(input.size(0))\n 
for i in steps:\n hidden = inner(input[i], hidden, *weight)\n # hack to handle LSTM\n output.append(isinstance(hidden, tuple) and hidden[0] or hidden)\n\n if reverse:\n output.reverse()\n output = torch.cat(output, 0).view(input.size(0), *output[0].size())\n\n return hidden, output\n\n return forward\n\n\ndef AutogradRNN(mode, input_size, hidden_size, num_layers=1, batch_first=False, dropout=0, train=True, bidirectional=False):\n\n if mode == 'RNN_RELU':\n cell = RNNReLUCell\n elif mode == 'RNN_TANH':\n cell = RNNTanhCell\n elif mode == 'LSTM':\n cell = LSTMCell\n elif mode == 'GRU':\n cell = GRUCell\n else:\n raise Exception('Unknown mode: {}'.format(mode))\n\n if bidirectional:\n layer = (Recurrent(cell), Recurrent(cell, reverse=True))\n else:\n layer = (Recurrent(cell),)\n\n func = StackedRNN(layer,\n num_layers,\n (mode == 'LSTM'),\n dropout=dropout,\n train=train)\n\n def forward(input, weight, hidden):\n if batch_first:\n input = input.transpose(0, 1)\n\n nexth, output = func(input, hidden, weight)\n\n if batch_first:\n output = output.transpose(0, 1)\n\n return output, nexth\n\n return forward\n\n\nclass CudnnRNN(NestedIOFunction):\n def __init__(self, mode, input_size, hidden_size, num_layers=1, batch_first=False, dropout=0, train=True, bidirectional=False):\n super(CudnnRNN, self).__init__()\n self.mode = cudnn.rnn.get_cudnn_mode(mode)\n self.input_mode = cudnn.CUDNN_LINEAR_INPUT\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.batch_first = batch_first\n self.dropout = dropout\n self.train = train\n self.bidirectional = 1 if bidirectional else 0\n self.num_directions = 2 if bidirectional else 1\n self.seed = torch.IntTensor(1).random_()[0]\n\n def forward_extended(self, input, weight, hx):\n\n assert(cudnn.is_acceptable(input))\n\n output = input.new()\n\n if torch.is_tensor(hx):\n hy = hx.new()\n else:\n hy = tuple(h.new() for h in hx)\n\n cudnn.rnn.forward(self, input, hx, weight, output, hy)\n\n self.save_for_backward(input, hx, weight, output)\n return output, hy\n\n\n def backward_extended(self, grad_output, grad_hy):\n input, hx, weight, output = self.saved_tensors\n\n grad_input, grad_weight, grad_hx = None, None, None\n\n assert(cudnn.is_acceptable(input))\n\n grad_input = input.new()\n grad_weight = input.new()\n grad_hx = input.new()\n if torch.is_tensor(hx):\n grad_hx = input.new()\n else:\n grad_hx = tuple(h.new() for h in hx)\n\n cudnn.rnn.backward_grad(\n self,\n input,\n hx,\n weight,\n output,\n grad_output,\n grad_hy,\n grad_input,\n grad_hx)\n\n if self.needs_input_grad[1]:\n grad_weight = [tuple(w.new().resize_as_(w).zero_() for w in layer_weight) for layer_weight in weight]\n cudnn.rnn.backward_weight(\n self,\n input,\n hx,\n output,\n weight,\n grad_weight)\n\n return grad_input, grad_weight, grad_hx\n\n\ndef RNN(*args, **kwargs):\n def forward(input, *fargs, **fkwargs):\n if cudnn.is_acceptable(input.data):\n func = CudnnRNN(*args, **kwargs)\n else:\n func = AutogradRNN(*args, **kwargs)\n return func(input, *fargs, **fkwargs)\n\n return forward\n", "path": "torch/nn/functions/rnn.py"}]}
| 3,094 | 147 |
gh_patches_debug_4901
|
rasdani/github-patches
|
git_diff
|
certbot__certbot-6349
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
KeyError handle_modules with 0.27.0 on openSUSE
## My operating system is (include version):
openSUSE Leap 42.1
## I installed Certbot with (certbot-auto, OS package manager, pip, etc):
certbot-auto
## I ran this command and it produced this output:
````
kevdev36:~ # certbot-auto --version
Upgrading certbot-auto 0.26.1 to 0.27.0...
Replacing certbot-auto...
Creating virtual environment...
Installing Python packages...
Installation succeeded.
An unexpected error occurred:
KeyError: 'handle_modules'
Please see the logfile '/tmp/tmpMAZJox' for more details.
````
## Certbot's behavior differed from what I expected because:
It did not print the version.
## Here is a Certbot log showing the issue (if available):
/tmp/tmpMAZJox
````
2018-09-06 09:59:58,652:DEBUG:certbot.main:certbot version: 0.27.0
2018-09-06 09:59:58,652:DEBUG:certbot.main:Arguments: ['--version']
2018-09-06 09:59:58,653:DEBUG:certbot.main:Discovered plugins: PluginsRegistry(PluginEntryPoint#apache,PluginEntryPoint#manual,PluginEntryPoint#nginx,PluginEntryPoint#null,PluginEntryPoint#standalone,PluginEntryPoint#webroot)
2018-09-06 09:59:58,660:DEBUG:certbot.log:Exiting abnormally:
Traceback (most recent call last):
File "/opt/eff.org/certbot/venv/bin/letsencrypt", line 11, in <module>
sys.exit(main())
File "/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot/main.py", line 1345, in main
args = cli.prepare_and_parse_args(plugins, cli_args)
File "/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot/cli.py", line 1243, in prepare_and_parse_args
_plugins_parsing(helpful, plugins)
File "/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot/cli.py", line 1458, in _plugins_parsing
helpful.add_plugin_args(plugins)
File "/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot/cli.py", line 840, in add_plugin_args
plugin_ep.plugin_cls.inject_parser_options(parser_or_group, name)
File "/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot/plugins/common.py", line 81, in inject_parser_options
return cls.add_parser_arguments(add)
File "/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot_apache/configurator.py", line 159, in add_parser_arguments
add("handle-modules", default=cls.OS_DEFAULTS["handle_modules"],
KeyError: 'handle_modules'
2018-09-06 09:59:58,660:ERROR:certbot.log:An unexpected error occurred:
````
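The traceback comes down to a plain dictionary lookup with a mismatched key: `configurator.py` asks `OS_DEFAULTS` for `handle_modules`, while the openSUSE override (shown under the FILES section further down) still defines `handle_mods`. A minimal sketch of that failing lookup, with only the two key names taken from the trace and the override file:

```python
# Sketch only: reproduces the failing lookup; the surrounding certbot
# machinery (add(), the configurator class, zope interfaces) is omitted.
OS_DEFAULTS = dict(
    handle_mods=False,     # key name still used by the openSUSE override
    handle_sites=False,
)

default = OS_DEFAULTS["handle_modules"]   # what configurator.py asks for -> KeyError
```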
## Workaround
Downgrade to 0.26.1 and use `certbot-auto` with `--no-self-upgrade`.
````
kevdev36:~ # wget https://raw.githubusercontent.com/certbot/certbot/v0.26.1/certbot-auto
kevdev36:~ # chmod +x certbot-auto
kevdev36:~ # /opt/eff.org/certbot/venv/bin/pip install certbot==0.26.1 certbot-apache==0.26.1 certbot-nginx==0.26.1
kevdev36:~ # ./certbot-auto --no-self-upgrade --version
certbot 0.26.1
````
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `certbot-apache/certbot_apache/override_suse.py`
Content:
```
1 """ Distribution specific override class for OpenSUSE """
2 import pkg_resources
3
4 import zope.interface
5
6 from certbot import interfaces
7
8 from certbot_apache import configurator
9
10 @zope.interface.provider(interfaces.IPluginFactory)
11 class OpenSUSEConfigurator(configurator.ApacheConfigurator):
12 """OpenSUSE specific ApacheConfigurator override class"""
13
14 OS_DEFAULTS = dict(
15 server_root="/etc/apache2",
16 vhost_root="/etc/apache2/vhosts.d",
17 vhost_files="*.conf",
18 logs_root="/var/log/apache2",
19 ctl="apache2ctl",
20 version_cmd=['apache2ctl', '-v'],
21 restart_cmd=['apache2ctl', 'graceful'],
22 conftest_cmd=['apache2ctl', 'configtest'],
23 enmod="a2enmod",
24 dismod="a2dismod",
25 le_vhost_ext="-le-ssl.conf",
26 handle_mods=False,
27 handle_sites=False,
28 challenge_location="/etc/apache2/vhosts.d",
29 MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
30 "certbot_apache", "options-ssl-apache.conf")
31 )
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/certbot-apache/certbot_apache/override_suse.py b/certbot-apache/certbot_apache/override_suse.py
--- a/certbot-apache/certbot_apache/override_suse.py
+++ b/certbot-apache/certbot_apache/override_suse.py
@@ -23,7 +23,7 @@
enmod="a2enmod",
dismod="a2dismod",
le_vhost_ext="-le-ssl.conf",
- handle_mods=False,
+ handle_modules=False,
handle_sites=False,
challenge_location="/etc/apache2/vhosts.d",
MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
|
{"golden_diff": "diff --git a/certbot-apache/certbot_apache/override_suse.py b/certbot-apache/certbot_apache/override_suse.py\n--- a/certbot-apache/certbot_apache/override_suse.py\n+++ b/certbot-apache/certbot_apache/override_suse.py\n@@ -23,7 +23,7 @@\n enmod=\"a2enmod\",\n dismod=\"a2dismod\",\n le_vhost_ext=\"-le-ssl.conf\",\n- handle_mods=False,\n+ handle_modules=False,\n handle_sites=False,\n challenge_location=\"/etc/apache2/vhosts.d\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n", "issue": "KeyError handle_modules with 0.27.0 on openSUSE\n## My operating system is (include version):\r\n\r\nopenSUSE Leap 42.1\r\n\r\n## I installed Certbot with (certbot-auto, OS package manager, pip, etc):\r\n\r\ncertbot-auto\r\n\r\n## I ran this command and it produced this output:\r\n\r\n````\r\nkevdev36:~ # certbot-auto --version\r\nUpgrading certbot-auto 0.26.1 to 0.27.0...\r\nReplacing certbot-auto...\r\nCreating virtual environment...\r\nInstalling Python packages...\r\nInstallation succeeded.\r\nAn unexpected error occurred:\r\nKeyError: 'handle_modules'\r\nPlease see the logfile '/tmp/tmpMAZJox' for more details.\r\n````\r\n\r\n## Certbot's behavior differed from what I expected because:\r\n\r\nIt did not print the version.\r\n\r\n## Here is a Certbot log showing the issue (if available):\r\n\r\n/tmp/tmpMAZJox\r\n\r\n````\r\n2018-09-06 09:59:58,652:DEBUG:certbot.main:certbot version: 0.27.0\r\n2018-09-06 09:59:58,652:DEBUG:certbot.main:Arguments: ['--version']\r\n2018-09-06 09:59:58,653:DEBUG:certbot.main:Discovered plugins: PluginsRegistry(PluginEntryPoint#apache,PluginEntryPoint#manual,PluginEntryPoint#nginx,PluginEntryPoint#null,PluginEntryPoint#standalone,PluginEntryPoint#webroot)\r\n2018-09-06 09:59:58,660:DEBUG:certbot.log:Exiting abnormally:\r\nTraceback (most recent call last):\r\n File \"/opt/eff.org/certbot/venv/bin/letsencrypt\", line 11, in <module>\r\n sys.exit(main())\r\n File \"/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot/main.py\", line 1345, in main\r\n args = cli.prepare_and_parse_args(plugins, cli_args)\r\n File \"/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot/cli.py\", line 1243, in prepare_and_parse_args\r\n _plugins_parsing(helpful, plugins)\r\n File \"/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot/cli.py\", line 1458, in _plugins_parsing\r\n helpful.add_plugin_args(plugins)\r\n File \"/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot/cli.py\", line 840, in add_plugin_args\r\n plugin_ep.plugin_cls.inject_parser_options(parser_or_group, name)\r\n File \"/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot/plugins/common.py\", line 81, in inject_parser_options\r\n return cls.add_parser_arguments(add)\r\n File \"/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot_apache/configurator.py\", line 159, in add_parser_arguments\r\n add(\"handle-modules\", default=cls.OS_DEFAULTS[\"handle_modules\"],\r\nKeyError: 'handle_modules'\r\n2018-09-06 09:59:58,660:ERROR:certbot.log:An unexpected error occurred:\r\n````\r\n\r\n## Workaround\r\n\r\nDowngrade to 0.26.1 and use `certbot-auto` with `--no-self-upgrade`.\r\n\r\n````\r\nkevdev36:~ # wget https://raw.githubusercontent.com/certbot/certbot/v0.26.1/certbot-auto\r\nkevdev36:~ # chmod +x certbot-auto\r\nkevdev36:~ # /opt/eff.org/certbot/venv/bin/pip install certbot==0.26.1 certbot-apache==0.26.1 certbot-nginx==0.26.1\r\nkevdev36:~ # ./certbot-auto --no-self-upgrade --version\r\ncertbot 0.26.1\r\n````\n", "before_files": [{"content": "\"\"\" 
Distribution specific override class for OpenSUSE \"\"\"\nimport pkg_resources\n\nimport zope.interface\n\nfrom certbot import interfaces\n\nfrom certbot_apache import configurator\n\[email protected](interfaces.IPluginFactory)\nclass OpenSUSEConfigurator(configurator.ApacheConfigurator):\n \"\"\"OpenSUSE specific ApacheConfigurator override class\"\"\"\n\n OS_DEFAULTS = dict(\n server_root=\"/etc/apache2\",\n vhost_root=\"/etc/apache2/vhosts.d\",\n vhost_files=\"*.conf\",\n logs_root=\"/var/log/apache2\",\n ctl=\"apache2ctl\",\n version_cmd=['apache2ctl', '-v'],\n restart_cmd=['apache2ctl', 'graceful'],\n conftest_cmd=['apache2ctl', 'configtest'],\n enmod=\"a2enmod\",\n dismod=\"a2dismod\",\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=False,\n handle_sites=False,\n challenge_location=\"/etc/apache2/vhosts.d\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"options-ssl-apache.conf\")\n )\n", "path": "certbot-apache/certbot_apache/override_suse.py"}], "after_files": [{"content": "\"\"\" Distribution specific override class for OpenSUSE \"\"\"\nimport pkg_resources\n\nimport zope.interface\n\nfrom certbot import interfaces\n\nfrom certbot_apache import configurator\n\[email protected](interfaces.IPluginFactory)\nclass OpenSUSEConfigurator(configurator.ApacheConfigurator):\n \"\"\"OpenSUSE specific ApacheConfigurator override class\"\"\"\n\n OS_DEFAULTS = dict(\n server_root=\"/etc/apache2\",\n vhost_root=\"/etc/apache2/vhosts.d\",\n vhost_files=\"*.conf\",\n logs_root=\"/var/log/apache2\",\n ctl=\"apache2ctl\",\n version_cmd=['apache2ctl', '-v'],\n restart_cmd=['apache2ctl', 'graceful'],\n conftest_cmd=['apache2ctl', 'configtest'],\n enmod=\"a2enmod\",\n dismod=\"a2dismod\",\n le_vhost_ext=\"-le-ssl.conf\",\n handle_modules=False,\n handle_sites=False,\n challenge_location=\"/etc/apache2/vhosts.d\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"options-ssl-apache.conf\")\n )\n", "path": "certbot-apache/certbot_apache/override_suse.py"}]}
| 1,497 | 154 |
gh_patches_debug_21031
|
rasdani/github-patches
|
git_diff
|
spack__spack-15252
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
py-pyfftw import issue with scipy.fftpack
Hi,
Sorry to bother you all.
After loading the spack modules via:
```console
spack load -r [email protected]
spack load py-h5py
spack load py-scipy
spack load py-pyfftw
spack load py-mpi4py
```
When I try to do `import scipy_fftpack` in the Python code I am using, I get an error message that ends with:
### Error Message
```python
from scipy.fftpack import (dct, idct, dst, idst, diff, tilbert, itilbert,
ImportError: cannot import name '_fftpack' from 'scipy.fftpack'
```
The full error output is in [error.txt](https://github.com/spack/spack/files/4252499/error.txt).
I think that error is solved in the recent version of pyFFTW (https://github.com/pyFFTW/pyFFTW/pull/265 and https://github.com/pyFFTW/pyFFTW/issues/279).
But on my machine I still get that error.
I am not sure if I am installing py-pyfftw or py-scipy incorrectly, or making another mistake.
Or if I would just need to add an equivalent line to:
```vim
version('0.11.1', sha256='05ea28dede4c3aaaf5c66f56eb0f71849d0d50f5bc0f53ca0ffa69534af14926')
```
but for version `0.12.0`, to the package.py of py-pyfftw in Spack.
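For reference, such an addition would be a single extra `version()` directive next to the existing one. This is only a sketch; the checksum is a placeholder that would have to be replaced with the real sha256 of the released `pyFFTW-0.12.0.tar.gz`:
```python
# Hypothetical line for var/spack/repos/builtin/packages/py-pyfftw/package.py,
# added alongside the existing 0.11.1 and 0.10.4 entries.
version('0.12.0', sha256='<sha256 of pyFFTW-0.12.0.tar.gz>')
```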
Do you have any suggestion on how I can fix this issue and correctly import the library?
Thank you,
Diana
### System
1. macOS Catalina - %[email protected] (but with [email protected] fortran compilers - see compilers.yaml below)
2. spack installed python (@3.7.6)
3. spack installed py-scipy (@1.4.1)
 4. spack installed py-pyfftw (@0.11.1)
-----
**compilers.yaml**
```vim
compilers:
- compiler:
spec: [email protected]
paths:
cc: /usr/bin/clang
cxx: /usr/bin/clang++
f77: /Users/LDianaAmorim/Documents/opt/spack/opt/spack/darwin-catalina-x86_64/clang-11.0.0-apple/gcc-9.2.0-exw25ccpcwqlkcvuwn266kvwqzxbyelp/bin/gfortran
fc: /Users/LDianaAmorim/Documents/opt/spack/opt/spack/darwin-catalina-x86_64/clang-11.0.0-apple/gcc-9.2.0-exw25ccpcwqlkcvuwn266kvwqzxbyelp/bin/gfortran
flags: {}
operating_system: catalina
target: x86_64
modules: []
environment: {}
extra_rpaths: []
- compiler:
spec: [email protected]
paths:
cc: /Users/LDianaAmorim/Documents/opt/spack/opt/spack/darwin-catalina-x86_64/clang-11.0.0-apple/gcc-9.2.0-exw25ccpcwqlkcvuwn266kvwqzxbyelp/bin/gcc
cxx: /Users/LDianaAmorim/Documents/opt/spack/opt/spack/darwin-catalina-x86_64/clang-11.0.0-apple/gcc-9.2.0-exw25ccpcwqlkcvuwn266kvwqzxbyelp/bin/g++
f77: /Users/LDianaAmorim/Documents/opt/spack/opt/spack/darwin-catalina-x86_64/clang-11.0.0-apple/gcc-9.2.0-exw25ccpcwqlkcvuwn266kvwqzxbyelp/bin/gfortran
fc: /Users/LDianaAmorim/Documents/opt/spack/opt/spack/darwin-catalina-x86_64/clang-11.0.0-apple/gcc-9.2.0-exw25ccpcwqlkcvuwn266kvwqzxbyelp/bin/gfortran
flags: {}
operating_system: catalina
target: x86_64
modules: []
environment: {}
extra_rpaths: []
```
-----
**packages.yaml**
```vim
packages:
all:
providers:
mpi: [mpich, openmpi]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `var/spack/repos/builtin/packages/py-pyfftw/package.py`
Content:
```
1 # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 from spack import *
7
8
9 class PyPyfftw(PythonPackage):
10 """A pythonic wrapper around FFTW, the FFT library,
11 presenting a unified interface for all the supported transforms."""
12
13 homepage = "http://hgomersall.github.com/pyFFTW"
14 url = "https://pypi.io/packages/source/p/pyFFTW/pyFFTW-0.10.4.tar.gz"
15
16 version('0.11.1', sha256='05ea28dede4c3aaaf5c66f56eb0f71849d0d50f5bc0f53ca0ffa69534af14926')
17 version('0.10.4', sha256='739b436b7c0aeddf99a48749380260364d2dc027cf1d5f63dafb5f50068ede1a')
18
19 depends_on('fftw')
20 depends_on('py-setuptools', type='build')
21 depends_on('py-cython', type='build')
22 depends_on('[email protected]:', type=('build', 'run'))
23 depends_on('[email protected]:', type=('build', 'run'))
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/var/spack/repos/builtin/packages/py-pyfftw/package.py b/var/spack/repos/builtin/packages/py-pyfftw/package.py
--- a/var/spack/repos/builtin/packages/py-pyfftw/package.py
+++ b/var/spack/repos/builtin/packages/py-pyfftw/package.py
@@ -13,11 +13,12 @@
homepage = "http://hgomersall.github.com/pyFFTW"
url = "https://pypi.io/packages/source/p/pyFFTW/pyFFTW-0.10.4.tar.gz"
+ version('0.12.0', sha256='60988e823ca75808a26fd79d88dbae1de3699e72a293f812aa4534f8a0a58cb0')
version('0.11.1', sha256='05ea28dede4c3aaaf5c66f56eb0f71849d0d50f5bc0f53ca0ffa69534af14926')
version('0.10.4', sha256='739b436b7c0aeddf99a48749380260364d2dc027cf1d5f63dafb5f50068ede1a')
depends_on('fftw')
- depends_on('py-setuptools', type='build')
- depends_on('py-cython', type='build')
- depends_on('[email protected]:', type=('build', 'run'))
- depends_on('[email protected]:', type=('build', 'run'))
+ depends_on('py-setuptools', type='build')
+ depends_on('[email protected]:0.999', type='build')
+ depends_on('[email protected]:', type=('build', 'run'), when='@:0.10.4')
+ depends_on('[email protected]:1.999', type=('build', 'run'), when='@0.11.0:')
|
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/py-pyfftw/package.py b/var/spack/repos/builtin/packages/py-pyfftw/package.py\n--- a/var/spack/repos/builtin/packages/py-pyfftw/package.py\n+++ b/var/spack/repos/builtin/packages/py-pyfftw/package.py\n@@ -13,11 +13,12 @@\n homepage = \"http://hgomersall.github.com/pyFFTW\"\n url = \"https://pypi.io/packages/source/p/pyFFTW/pyFFTW-0.10.4.tar.gz\"\n \n+ version('0.12.0', sha256='60988e823ca75808a26fd79d88dbae1de3699e72a293f812aa4534f8a0a58cb0')\n version('0.11.1', sha256='05ea28dede4c3aaaf5c66f56eb0f71849d0d50f5bc0f53ca0ffa69534af14926')\n version('0.10.4', sha256='739b436b7c0aeddf99a48749380260364d2dc027cf1d5f63dafb5f50068ede1a')\n \n depends_on('fftw')\n- depends_on('py-setuptools', type='build')\n- depends_on('py-cython', type='build')\n- depends_on('[email protected]:', type=('build', 'run'))\n- depends_on('[email protected]:', type=('build', 'run'))\n+ depends_on('py-setuptools', type='build')\n+ depends_on('[email protected]:0.999', type='build')\n+ depends_on('[email protected]:', type=('build', 'run'), when='@:0.10.4')\n+ depends_on('[email protected]:1.999', type=('build', 'run'), when='@0.11.0:')\n", "issue": "py-pyfftw import issue with scipy.fftpack\nHi,\r\nSorry to bother you all.\r\nAfter loading the spack modules via:\r\n```console\r\n spack load -r [email protected]\r\n spack load py-h5py\r\n spack load py-scipy\r\n spack load py-pyfftw\r\n spack load py-mpi4py\r\n```\r\nWhen in the python code I am using I try to do `import spicy_fftpack`, I have been getting an error message that ends with:\r\n\r\n### Error Message\r\n```python\r\nfrom scipy.fftpack import (dct, idct, dst, idst, diff, tilbert, itilbert,\r\nImportError: cannot import name '_fftpack' from 'scipy.fftpack'\r\n```\r\nThe full error output is in [error.txt](https://github.com/spack/spack/files/4252499/error.txt).\r\n\r\nI think that that error is solved in the recent version of pfftw (https://github.com/pyFFTW/pyFFTW/pull/265 and https://github.com/pyFFTW/pyFFTW/issues/279).\r\n\r\nBut in my machine I still get that error.\r\nI am not sure if I am installing py-pyfftw or py-scipy incorrectly, or making another mistake.\r\nOr if I would just need to add an equivalent line to:\r\n```vim\r\nversion('0.11.1', sha256='05ea28dede4c3aaaf5c66f56eb0f71849d0d50f5bc0f53ca0ffa69534af14926')\r\n```\r\nbut for version `0.12.0`, to the package.py of py-pyfftw of spack.\r\n\r\nDo you have any suggestion on how I can fix this issue and correctly import the library?\r\n\r\nThank you,\r\nDiana\r\n\r\n### System\r\n\r\n 1. macOS Catalina - %[email protected] (but with [email protected] fortran compilers - see compilers.yaml below)\r\n 2. spack installed python (@3.7.6)\r\n 3. spack installed py-scipy (@1.4.1)\r\n 4. 
spack installed py-pfftw (@0.11.1)\r\n\r\n-----\r\n\r\n**compilers.yaml**\r\n```vim\r\ncompilers:\r\n- compiler:\r\n spec: [email protected]\r\n paths:\r\n cc: /usr/bin/clang\r\n cxx: /usr/bin/clang++\r\n f77: /Users/LDianaAmorim/Documents/opt/spack/opt/spack/darwin-catalina-x86_64/clang-11.0.0-apple/gcc-9.2.0-exw25ccpcwqlkcvuwn266kvwqzxbyelp/bin/gfortran\r\n fc: /Users/LDianaAmorim/Documents/opt/spack/opt/spack/darwin-catalina-x86_64/clang-11.0.0-apple/gcc-9.2.0-exw25ccpcwqlkcvuwn266kvwqzxbyelp/bin/gfortran\r\n flags: {}\r\n operating_system: catalina\r\n target: x86_64\r\n modules: []\r\n environment: {}\r\n extra_rpaths: []\r\n- compiler:\r\n spec: [email protected]\r\n paths:\r\n cc: /Users/LDianaAmorim/Documents/opt/spack/opt/spack/darwin-catalina-x86_64/clang-11.0.0-apple/gcc-9.2.0-exw25ccpcwqlkcvuwn266kvwqzxbyelp/bin/gcc\r\n cxx: /Users/LDianaAmorim/Documents/opt/spack/opt/spack/darwin-catalina-x86_64/clang-11.0.0-apple/gcc-9.2.0-exw25ccpcwqlkcvuwn266kvwqzxbyelp/bin/g++\r\n f77: /Users/LDianaAmorim/Documents/opt/spack/opt/spack/darwin-catalina-x86_64/clang-11.0.0-apple/gcc-9.2.0-exw25ccpcwqlkcvuwn266kvwqzxbyelp/bin/gfortran\r\n fc: /Users/LDianaAmorim/Documents/opt/spack/opt/spack/darwin-catalina-x86_64/clang-11.0.0-apple/gcc-9.2.0-exw25ccpcwqlkcvuwn266kvwqzxbyelp/bin/gfortran\r\n flags: {}\r\n operating_system: catalina\r\n target: x86_64\r\n modules: []\r\n environment: {}\r\n extra_rpaths: []\r\n```\r\n-----\r\n\r\n**packages.yaml**\r\n```vim\r\npackages:\r\n all:\r\n providers:\r\n mpi: [mpich, openmpi]\r\n```\n", "before_files": [{"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass PyPyfftw(PythonPackage):\n \"\"\"A pythonic wrapper around FFTW, the FFT library,\n presenting a unified interface for all the supported transforms.\"\"\"\n\n homepage = \"http://hgomersall.github.com/pyFFTW\"\n url = \"https://pypi.io/packages/source/p/pyFFTW/pyFFTW-0.10.4.tar.gz\"\n\n version('0.11.1', sha256='05ea28dede4c3aaaf5c66f56eb0f71849d0d50f5bc0f53ca0ffa69534af14926')\n version('0.10.4', sha256='739b436b7c0aeddf99a48749380260364d2dc027cf1d5f63dafb5f50068ede1a')\n\n depends_on('fftw')\n depends_on('py-setuptools', type='build')\n depends_on('py-cython', type='build')\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'))\n", "path": "var/spack/repos/builtin/packages/py-pyfftw/package.py"}], "after_files": [{"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass PyPyfftw(PythonPackage):\n \"\"\"A pythonic wrapper around FFTW, the FFT library,\n presenting a unified interface for all the supported transforms.\"\"\"\n\n homepage = \"http://hgomersall.github.com/pyFFTW\"\n url = \"https://pypi.io/packages/source/p/pyFFTW/pyFFTW-0.10.4.tar.gz\"\n\n version('0.12.0', sha256='60988e823ca75808a26fd79d88dbae1de3699e72a293f812aa4534f8a0a58cb0')\n version('0.11.1', sha256='05ea28dede4c3aaaf5c66f56eb0f71849d0d50f5bc0f53ca0ffa69534af14926')\n version('0.10.4', sha256='739b436b7c0aeddf99a48749380260364d2dc027cf1d5f63dafb5f50068ede1a')\n\n depends_on('fftw')\n depends_on('py-setuptools', type='build')\n depends_on('[email protected]:0.999', type='build')\n depends_on('[email protected]:', type=('build', 'run'), when='@:0.10.4')\n depends_on('[email protected]:1.999', type=('build', 'run'), when='@0.11.0:')\n", "path": "var/spack/repos/builtin/packages/py-pyfftw/package.py"}]}
| 1,790 | 530 |
gh_patches_debug_11159
|
rasdani/github-patches
|
git_diff
|
mozilla__kitsune-3192
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve performance of _get_creator_counts util function
`kitsune.community.utils._get_creator_counts` util function is DB heavy and takes a lot of time to execute. Evaluate its usefulness and provide a way to optimize the query and/or cache the results.
This issue is related to the degraded performance SUMO experienced on Fri March 30th ([NR Error](https://rpm.newrelic.com/accounts/1299394/applications/45097089/downtime/34422892))
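One possible mitigation, while the query itself is being evaluated, is to cache `_get_creator_counts` the same way its callers (`top_contributors_questions`, `top_contributors_l10n`) already cache their own results. The helper below is only a sketch: the key layout and the 15-minute timeout are assumptions borrowed from those callers, not existing code.
```python
# Sketch of a cached wrapper; _get_creator_counts is the existing helper in
# kitsune/community/utils.py, everything else here is illustrative.
import hashlib

from django.core.cache import cache

from kitsune.community.utils import _get_creator_counts


def cached_creator_counts(query, count, page, timeout=60 * 15):
    raw_key = u'{}_{}_{}'.format(str(query.query), count, page)
    cache_key = 'creator_counts_' + hashlib.sha1(raw_key.encode('utf-8')).hexdigest()
    result = cache.get(cache_key)
    if result is None:
        result = _get_creator_counts(query, count, page)
        cache.set(cache_key, result, timeout)
    return result
```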
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kitsune/community/utils.py`
Content:
```
1 import hashlib
2
3 from datetime import datetime, date, timedelta
4 from django.conf import settings
5 from django.core.cache import cache
6 from django.db.models import Count, F
7
8 from kitsune.products.models import Product
9 from kitsune.questions.models import Answer
10 from kitsune.users.models import User, UserMappingType
11 from kitsune.wiki.models import Revision
12
13
14 def top_contributors_questions(start=None, end=None, locale=None, product=None,
15 count=10, page=1, use_cache=True):
16 """Get the top Support Forum contributors."""
17 if use_cache:
18 cache_key = u'{}_{}_{}_{}_{}_{}'.format(start, end, locale, product, count, page)
19 cache_key = hashlib.sha1(cache_key.encode('utf-8')).hexdigest()
20 cache_key = 'top_contributors_questions_{}'.format(cache_key)
21 cached = cache.get(cache_key, None)
22 if cached:
23 return cached
24
25 answers = (Answer.objects
26 .exclude(is_spam=True)
27 .exclude(question__is_spam=True)
28 # Adding answer to your own question, isn't a contribution.
29 .exclude(creator_id=F('question__creator_id')))
30
31 if start is None:
32 # By default we go back 90 days.
33 start = date.today() - timedelta(days=90)
34 answers = answers.filter(created__gte=start)
35 if end:
36 # If no end is specified, we don't need to filter by it.
37 answers = answers.filter(created__lt=end)
38 if locale:
39 answers = answers.filter(question__locale=locale)
40 if product:
41 if isinstance(product, Product):
42 product = product.slug
43 answers = answers.filter(question__product__slug=product)
44
45 users = (User.objects
46 .filter(answers__in=answers)
47 .annotate(query_count=Count('answers'))
48 .order_by('-query_count'))
49 counts = _get_creator_counts(users, count, page)
50
51 if use_cache:
52 cache.set(cache_key, counts, 60*15) # 15 minutes
53 return counts
54
55
56 def top_contributors_kb(start=None, end=None, product=None, count=10, page=1, use_cache=True):
57 """Get the top KB editors (locale='en-US')."""
58 return top_contributors_l10n(
59 start, end, settings.WIKI_DEFAULT_LANGUAGE, product, count, use_cache)
60
61
62 def top_contributors_l10n(start=None, end=None, locale=None, product=None,
63 count=10, page=1, use_cache=True):
64 """Get the top l10n contributors for the KB."""
65 if use_cache:
66 cache_key = u'{}_{}_{}_{}_{}_{}'.format(start, end, locale, product, count, page)
67 cache_key = hashlib.sha1(cache_key.encode('utf-8')).hexdigest()
68 cache_key = u'top_contributors_l10n_{}'.format(cache_key)
69 cached = cache.get(cache_key, None)
70 if cached:
71 return cached
72
73 # Get the user ids and contribution count of the top contributors.
74 revisions = Revision.objects.all()
75 if locale is None:
76 # If there is no locale specified, exclude en-US only. The rest are
77 # l10n.
78 revisions = revisions.exclude(document__locale=settings.WIKI_DEFAULT_LANGUAGE)
79 if start is None:
80 # By default we go back 90 days.
81 start = date.today() - timedelta(days=90)
82 revisions = revisions.filter(created__gte=start)
83 if end:
84 # If no end is specified, we don't need to filter by it.
85 revisions = revisions.filter(created__lt=end)
86 if locale:
87 revisions = revisions.filter(document__locale=locale)
88 if product:
89 if isinstance(product, Product):
90 product = product.slug
91 revisions = revisions.filter(document__products__slug=product)
92
93 users = (User.objects
94 .filter(created_revisions__in=revisions)
95 .annotate(query_count=Count('created_revisions'))
96 .order_by('-query_count'))
97 counts = _get_creator_counts(users, count, page)
98
99 if use_cache:
100 cache.set(cache_key, counts, 60*15) # 15 minutes
101 return counts
102
103
104 def top_contributors_aoa(start=None, end=None, locale=None, count=10, page=1, use_cache=True):
105 """Get the top Army of Awesome contributors."""
106 # AoA is deprecated, return 0 until we remove all related code.
107 return ([], 0)
108
109
110 def _get_creator_counts(query, count, page):
111 total = query.count()
112
113 start = (page - 1) * count
114 end = page * count
115 query_data = query.values('id', 'query_count')[start:end]
116
117 query_data = {obj['id']: obj['query_count'] for obj in query_data}
118
119 users_data = (UserMappingType.search().filter(id__in=query_data.keys())
120 .values_dict('id', 'username', 'display_name',
121 'avatar', 'twitter_usernames',
122 'last_contribution_date')[:count])
123
124 users_data = UserMappingType.reshape(users_data)
125
126 results = []
127 now = datetime.now()
128
129 for u_data in users_data:
130 user_id = u_data.get('id')
131 last_contribution_date = u_data.get('last_contribution_date', None)
132
133 u_data['days_since_last_activity'] = ((now - last_contribution_date).days
134 if last_contribution_date else None)
135
136 data = {
137 'count': query_data.get(user_id),
138 'term': user_id,
139 'user': u_data
140 }
141
142 results.append(data)
143
144 return results, total
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kitsune/community/utils.py b/kitsune/community/utils.py
--- a/kitsune/community/utils.py
+++ b/kitsune/community/utils.py
@@ -1,6 +1,8 @@
import hashlib
from datetime import datetime, date, timedelta
+from operator import itemgetter
+
from django.conf import settings
from django.core.cache import cache
from django.db.models import Count, F
@@ -141,4 +143,8 @@
results.append(data)
+ # Descending Order the list according to count.
+ # As the top number of contributor should be at first
+ results = sorted(results, key=itemgetter('count'), reverse=True)
+
return results, total
|
{"golden_diff": "diff --git a/kitsune/community/utils.py b/kitsune/community/utils.py\n--- a/kitsune/community/utils.py\n+++ b/kitsune/community/utils.py\n@@ -1,6 +1,8 @@\n import hashlib\n \n from datetime import datetime, date, timedelta\n+from operator import itemgetter\n+\n from django.conf import settings\n from django.core.cache import cache\n from django.db.models import Count, F\n@@ -141,4 +143,8 @@\n \n results.append(data)\n \n+ # Descending Order the list according to count.\n+ # As the top number of contributor should be at first\n+ results = sorted(results, key=itemgetter('count'), reverse=True)\n+\n return results, total\n", "issue": "Improve performance of _get_creator_counts util function\n`kitsune.community.utils._get_creator_counts` until function is DB heavy and takes a lot of time to execute. Evaluate its usefulness and provide a way to optimize the query and/or cache the results. \r\n\r\nThis issue is related to the degraded performance SUMO experienced on Fri March 30th ([NR Error](https://rpm.newrelic.com/accounts/1299394/applications/45097089/downtime/34422892))\n", "before_files": [{"content": "import hashlib\n\nfrom datetime import datetime, date, timedelta\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.db.models import Count, F\n\nfrom kitsune.products.models import Product\nfrom kitsune.questions.models import Answer\nfrom kitsune.users.models import User, UserMappingType\nfrom kitsune.wiki.models import Revision\n\n\ndef top_contributors_questions(start=None, end=None, locale=None, product=None,\n count=10, page=1, use_cache=True):\n \"\"\"Get the top Support Forum contributors.\"\"\"\n if use_cache:\n cache_key = u'{}_{}_{}_{}_{}_{}'.format(start, end, locale, product, count, page)\n cache_key = hashlib.sha1(cache_key.encode('utf-8')).hexdigest()\n cache_key = 'top_contributors_questions_{}'.format(cache_key)\n cached = cache.get(cache_key, None)\n if cached:\n return cached\n\n answers = (Answer.objects\n .exclude(is_spam=True)\n .exclude(question__is_spam=True)\n # Adding answer to your own question, isn't a contribution.\n .exclude(creator_id=F('question__creator_id')))\n\n if start is None:\n # By default we go back 90 days.\n start = date.today() - timedelta(days=90)\n answers = answers.filter(created__gte=start)\n if end:\n # If no end is specified, we don't need to filter by it.\n answers = answers.filter(created__lt=end)\n if locale:\n answers = answers.filter(question__locale=locale)\n if product:\n if isinstance(product, Product):\n product = product.slug\n answers = answers.filter(question__product__slug=product)\n\n users = (User.objects\n .filter(answers__in=answers)\n .annotate(query_count=Count('answers'))\n .order_by('-query_count'))\n counts = _get_creator_counts(users, count, page)\n\n if use_cache:\n cache.set(cache_key, counts, 60*15) # 15 minutes\n return counts\n\n\ndef top_contributors_kb(start=None, end=None, product=None, count=10, page=1, use_cache=True):\n \"\"\"Get the top KB editors (locale='en-US').\"\"\"\n return top_contributors_l10n(\n start, end, settings.WIKI_DEFAULT_LANGUAGE, product, count, use_cache)\n\n\ndef top_contributors_l10n(start=None, end=None, locale=None, product=None,\n count=10, page=1, use_cache=True):\n \"\"\"Get the top l10n contributors for the KB.\"\"\"\n if use_cache:\n cache_key = u'{}_{}_{}_{}_{}_{}'.format(start, end, locale, product, count, page)\n cache_key = hashlib.sha1(cache_key.encode('utf-8')).hexdigest()\n cache_key = 
u'top_contributors_l10n_{}'.format(cache_key)\n cached = cache.get(cache_key, None)\n if cached:\n return cached\n\n # Get the user ids and contribution count of the top contributors.\n revisions = Revision.objects.all()\n if locale is None:\n # If there is no locale specified, exclude en-US only. The rest are\n # l10n.\n revisions = revisions.exclude(document__locale=settings.WIKI_DEFAULT_LANGUAGE)\n if start is None:\n # By default we go back 90 days.\n start = date.today() - timedelta(days=90)\n revisions = revisions.filter(created__gte=start)\n if end:\n # If no end is specified, we don't need to filter by it.\n revisions = revisions.filter(created__lt=end)\n if locale:\n revisions = revisions.filter(document__locale=locale)\n if product:\n if isinstance(product, Product):\n product = product.slug\n revisions = revisions.filter(document__products__slug=product)\n\n users = (User.objects\n .filter(created_revisions__in=revisions)\n .annotate(query_count=Count('created_revisions'))\n .order_by('-query_count'))\n counts = _get_creator_counts(users, count, page)\n\n if use_cache:\n cache.set(cache_key, counts, 60*15) # 15 minutes\n return counts\n\n\ndef top_contributors_aoa(start=None, end=None, locale=None, count=10, page=1, use_cache=True):\n \"\"\"Get the top Army of Awesome contributors.\"\"\"\n # AoA is deprecated, return 0 until we remove all related code.\n return ([], 0)\n\n\ndef _get_creator_counts(query, count, page):\n total = query.count()\n\n start = (page - 1) * count\n end = page * count\n query_data = query.values('id', 'query_count')[start:end]\n\n query_data = {obj['id']: obj['query_count'] for obj in query_data}\n\n users_data = (UserMappingType.search().filter(id__in=query_data.keys())\n .values_dict('id', 'username', 'display_name',\n 'avatar', 'twitter_usernames',\n 'last_contribution_date')[:count])\n\n users_data = UserMappingType.reshape(users_data)\n\n results = []\n now = datetime.now()\n\n for u_data in users_data:\n user_id = u_data.get('id')\n last_contribution_date = u_data.get('last_contribution_date', None)\n\n u_data['days_since_last_activity'] = ((now - last_contribution_date).days\n if last_contribution_date else None)\n\n data = {\n 'count': query_data.get(user_id),\n 'term': user_id,\n 'user': u_data\n }\n\n results.append(data)\n\n return results, total\n", "path": "kitsune/community/utils.py"}], "after_files": [{"content": "import hashlib\n\nfrom datetime import datetime, date, timedelta\nfrom operator import itemgetter\n\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.db.models import Count, F\n\nfrom kitsune.products.models import Product\nfrom kitsune.questions.models import Answer\nfrom kitsune.users.models import User, UserMappingType\nfrom kitsune.wiki.models import Revision\n\n\ndef top_contributors_questions(start=None, end=None, locale=None, product=None,\n count=10, page=1, use_cache=True):\n \"\"\"Get the top Support Forum contributors.\"\"\"\n if use_cache:\n cache_key = u'{}_{}_{}_{}_{}_{}'.format(start, end, locale, product, count, page)\n cache_key = hashlib.sha1(cache_key.encode('utf-8')).hexdigest()\n cache_key = 'top_contributors_questions_{}'.format(cache_key)\n cached = cache.get(cache_key, None)\n if cached:\n return cached\n\n answers = (Answer.objects\n .exclude(is_spam=True)\n .exclude(question__is_spam=True)\n # Adding answer to your own question, isn't a contribution.\n .exclude(creator_id=F('question__creator_id')))\n\n if start is None:\n # By default we go back 90 days.\n start = 
date.today() - timedelta(days=90)\n answers = answers.filter(created__gte=start)\n if end:\n # If no end is specified, we don't need to filter by it.\n answers = answers.filter(created__lt=end)\n if locale:\n answers = answers.filter(question__locale=locale)\n if product:\n if isinstance(product, Product):\n product = product.slug\n answers = answers.filter(question__product__slug=product)\n\n users = (User.objects\n .filter(answers__in=answers)\n .annotate(query_count=Count('answers'))\n .order_by('-query_count'))\n counts = _get_creator_counts(users, count, page)\n\n if use_cache:\n cache.set(cache_key, counts, 60*15) # 15 minutes\n return counts\n\n\ndef top_contributors_kb(start=None, end=None, product=None, count=10, page=1, use_cache=True):\n \"\"\"Get the top KB editors (locale='en-US').\"\"\"\n return top_contributors_l10n(\n start, end, settings.WIKI_DEFAULT_LANGUAGE, product, count, use_cache)\n\n\ndef top_contributors_l10n(start=None, end=None, locale=None, product=None,\n count=10, page=1, use_cache=True):\n \"\"\"Get the top l10n contributors for the KB.\"\"\"\n if use_cache:\n cache_key = u'{}_{}_{}_{}_{}_{}'.format(start, end, locale, product, count, page)\n cache_key = hashlib.sha1(cache_key.encode('utf-8')).hexdigest()\n cache_key = u'top_contributors_l10n_{}'.format(cache_key)\n cached = cache.get(cache_key, None)\n if cached:\n return cached\n\n # Get the user ids and contribution count of the top contributors.\n revisions = Revision.objects.all()\n if locale is None:\n # If there is no locale specified, exclude en-US only. The rest are\n # l10n.\n revisions = revisions.exclude(document__locale=settings.WIKI_DEFAULT_LANGUAGE)\n if start is None:\n # By default we go back 90 days.\n start = date.today() - timedelta(days=90)\n revisions = revisions.filter(created__gte=start)\n if end:\n # If no end is specified, we don't need to filter by it.\n revisions = revisions.filter(created__lt=end)\n if locale:\n revisions = revisions.filter(document__locale=locale)\n if product:\n if isinstance(product, Product):\n product = product.slug\n revisions = revisions.filter(document__products__slug=product)\n\n users = (User.objects\n .filter(created_revisions__in=revisions)\n .annotate(query_count=Count('created_revisions'))\n .order_by('-query_count'))\n counts = _get_creator_counts(users, count, page)\n\n if use_cache:\n cache.set(cache_key, counts, 60*15) # 15 minutes\n return counts\n\n\ndef top_contributors_aoa(start=None, end=None, locale=None, count=10, page=1, use_cache=True):\n \"\"\"Get the top Army of Awesome contributors.\"\"\"\n # AoA is deprecated, return 0 until we remove all related code.\n return ([], 0)\n\n\ndef _get_creator_counts(query, count, page):\n total = query.count()\n\n start = (page - 1) * count\n end = page * count\n query_data = query.values('id', 'query_count')[start:end]\n\n query_data = {obj['id']: obj['query_count'] for obj in query_data}\n\n users_data = (UserMappingType.search().filter(id__in=query_data.keys())\n .values_dict('id', 'username', 'display_name',\n 'avatar', 'twitter_usernames',\n 'last_contribution_date')[:count])\n\n users_data = UserMappingType.reshape(users_data)\n\n results = []\n now = datetime.now()\n\n for u_data in users_data:\n user_id = u_data.get('id')\n last_contribution_date = u_data.get('last_contribution_date', None)\n\n u_data['days_since_last_activity'] = ((now - last_contribution_date).days\n if last_contribution_date else None)\n\n data = {\n 'count': query_data.get(user_id),\n 'term': user_id,\n 'user': u_data\n 
}\n\n results.append(data)\n\n # Descending Order the list according to count.\n # As the top number of contributor should be at first\n results = sorted(results, key=itemgetter('count'), reverse=True)\n\n return results, total\n", "path": "kitsune/community/utils.py"}]}
| 1,949 | 158 |
gh_patches_debug_25001
|
rasdani/github-patches
|
git_diff
|
awslabs__gluonts-1652
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
_tsf_reader.convert_base doesn't handle "10 minutes" frequency correctly
## Description
For Monash datasets with the "10 minutes" frequency, the frequency converter will convert it to a frequency of 10 MonthEnd (10M) instead of the expected 10 Minutes (10T) frequency.
There is already code to properly handle the "minutely" frequency, but it checks for that string explicitly, so it doesn't catch the "10 minutes" case.
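The mismatch is easy to see by calling the current `convert_base` (copied below from the file listing further down) directly; only the spelled-out "minutely" string hits the special case, while "minutes" falls through to the first-letter rule:
```python
def convert_base(text: str) -> str:   # current implementation from _tsf_reader.py
    if text.lower() == "minutely":
        return "T"
    return text[0].upper()

print(convert_base("minutely"))   # "T" -- minutes, as intended
print(convert_base("minutes"))    # "M" -- pandas parses this as MonthEnd, hence "10M"
```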
## To Reproduce
One dataset which has this frequency is the 10 minutes observation Solar dataset: https://zenodo.org/record/4656144
filename: `"solar_10_minutes_dataset.zip"`
record: `"4656132"`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/gluonts/dataset/repository/_tsf_reader.py`
Content:
```
1 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License").
4 # You may not use this file except in compliance with the License.
5 # A copy of the License is located at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # or in the "license" file accompanying this file. This file is distributed
10 # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
11 # express or implied. See the License for the specific language governing
12 # permissions and limitations under the License.
13
14 from datetime import datetime
15 from distutils.util import strtobool
16 from multiprocessing import cpu_count
17 from types import SimpleNamespace
18
19 import numpy as np
20 from toolz import compose_left
21
22 from gluonts import json
23 from gluonts.nursery import glide
24
25 parse_bool = compose_left(strtobool, bool)
26
27
28 def parse_attribute(ty, value: str):
29 if ty == "numeric":
30 return int(value)
31
32 if ty == "string":
33 return value
34
35 if ty == "date":
36 return datetime.strptime(value, "%Y-%m-%d %H-%M-%S")
37
38 raise AttributeError(ty)
39
40
41 def frequency_converter(freq: str):
42 parts = freq.split("_")
43 if len(parts) == 1:
44 return convert_base(parts[0])
45 if len(parts) == 2:
46 return convert_multiple(parts[0]) + convert_base(parts[1])
47 raise ValueError(f"Invalid frequency string {freq}.")
48
49
50 def convert_base(text: str) -> str:
51 if text.lower() == "minutely":
52 return "T"
53 return text[0].upper()
54
55
56 def convert_multiple(text: str) -> str:
57 if text.isnumeric():
58 return text
59 if text == "half":
60 return "0.5"
61 raise ValueError(f"Unknown frequency multiple {text}.")
62
63
64 class TSFReader:
65 def __init__(
66 self,
67 path,
68 target_name="target",
69 ):
70 self.path = path
71 self.target_name = target_name
72
73 self.meta = SimpleNamespace(columns={})
74
75 def read(self):
76 with open(self.path, encoding="latin1") as in_file:
77 # strip whitespace
78 lines = map(str.strip, in_file)
79
80 # ignore all lines starting with #
81 lines = filter(lambda line: not line.startswith("#"), lines)
82
83 data_tag_found = self._read_header(lines)
84 assert data_tag_found, "Missing @data tag."
85 assert (
86 self.meta.columns
87 ), "Missing attribute section. Attribute section must come before data."
88
89 assert self.target_name not in self.meta.columns
90 self.meta.columns[self.target_name] = None
91
92 data = self._read_data_section(lines)
93
94 return self.meta, data
95
96 def _read_header(self, lines):
97 for line in lines:
98 assert line.startswith("@")
99 stop = self._tag(line[1:])
100
101 if stop:
102 return True
103
104 return False
105
106 def _read_data_section(self, lines):
107 lines = list(lines)
108
109 lines = glide.imap_unordered(
110 self._read_data, lines, num_workers=cpu_count(), batch_size=8092
111 )
112
113 return list(lines)
114
115 def _read_data(self, line):
116 parts = line.split(":")
117
118 assert len(parts) == len(
119 self.meta.columns
120 ), "Missing attributes/values in series."
121
122 *attributes, target = parts
123
124 record = {}
125
126 record[self.target_name] = self._data_target(target)
127
128 for (column, ty), attr in zip(self.meta.columns.items(), attributes):
129 record[column] = parse_attribute(ty, attr)
130
131 return record
132
133 def _data_target(self, s):
134 s = s.replace("?", '"nan"')
135
136 values = json.loads(f"[{s}]")
137 assert (
138 values
139 ), "A given series should contains a set of comma separated numeric values. At least one numeric value should be there in a series. Missing values should be indicated with ? symbol"
140
141 return np.array(values, dtype=float)
142
143 def _tag(self, line):
144 fn_by_tag = {
145 "attribute": self._tag_attribute,
146 "frequency": self._tag_frequency,
147 "horizon": self._tag_horizon,
148 "missing": self._tag_missing,
149 "equallength": self._tag_equallength,
150 "data": self._tag_data,
151 }
152 tag, *args = line.split(" ")
153
154 if tag not in fn_by_tag:
155 return
156
157 return fn_by_tag[tag](*args)
158
159 def _tag_attribute(self, name, ty):
160 self.meta.columns[name] = ty
161
162 def _tag_frequency(self, frequency):
163 self.meta.frequency = frequency
164
165 def _tag_horizon(self, horizon):
166 self.meta.forecast_horizon = horizon
167
168 def _tag_missing(self, missing):
169 self.meta.has_missing_values = parse_bool(missing)
170
171 def _tag_equallength(self, equallength):
172 self.meta.has_equal_length = parse_bool(equallength)
173
174 def _tag_data(self):
175 return True
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/gluonts/dataset/repository/_tsf_reader.py b/src/gluonts/dataset/repository/_tsf_reader.py
--- a/src/gluonts/dataset/repository/_tsf_reader.py
+++ b/src/gluonts/dataset/repository/_tsf_reader.py
@@ -15,11 +15,13 @@
from distutils.util import strtobool
from multiprocessing import cpu_count
from types import SimpleNamespace
+from typing import Dict
import numpy as np
from toolz import compose_left
from gluonts import json
+from gluonts.exceptions import GluonTSDataError
from gluonts.nursery import glide
parse_bool = compose_left(strtobool, bool)
@@ -47,10 +49,32 @@
raise ValueError(f"Invalid frequency string {freq}.")
+BASE_FREQ_TO_PANDAS_OFFSET: Dict[str, str] = {
+ "seconds": "S",
+ "minutely": "T",
+ "minutes": "T",
+ "hourly": "H",
+ "hours": "H",
+ "daily": "D",
+ "days": "D",
+ "weekly": "W",
+ "weeks": "W",
+ "monthly": "M",
+ "months": "M",
+ "quarterly": "Q",
+ "quarters": "Q",
+ "yearly": "Y",
+ "years": "Y",
+}
+
+
def convert_base(text: str) -> str:
- if text.lower() == "minutely":
- return "T"
- return text[0].upper()
+ try:
+ return BASE_FREQ_TO_PANDAS_OFFSET[text]
+ except KeyError:
+ raise GluonTSDataError(
+ f'"{text}" is not recognized as a frequency string'
+ )
def convert_multiple(text: str) -> str:
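
The patch above replaces the old first-letter heuristic in `convert_base` with an explicit mapping to pandas offset aliases, so a frequency string such as `10_minutes` now resolves to `10T` (ten minutes) rather than `10M` (ten month-ends). A quick illustrative check of the two aliases with pandas (not part of the patch itself):

```python
import pandas as pd

# "T" is the pandas alias for minute frequency, "M" for month-end; the old
# convert_base() returned "M" for the "minutes" base, which the new mapping fixes.
print(pd.date_range("2021-01-01", periods=3, freq="10T"))  # 10-minute steps
print(pd.date_range("2021-01-01", periods=3, freq="10M"))  # 10-month-end steps
```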
|
{"golden_diff": "diff --git a/src/gluonts/dataset/repository/_tsf_reader.py b/src/gluonts/dataset/repository/_tsf_reader.py\n--- a/src/gluonts/dataset/repository/_tsf_reader.py\n+++ b/src/gluonts/dataset/repository/_tsf_reader.py\n@@ -15,11 +15,13 @@\n from distutils.util import strtobool\n from multiprocessing import cpu_count\n from types import SimpleNamespace\n+from typing import Dict\n \n import numpy as np\n from toolz import compose_left\n \n from gluonts import json\n+from gluonts.exceptions import GluonTSDataError\n from gluonts.nursery import glide\n \n parse_bool = compose_left(strtobool, bool)\n@@ -47,10 +49,32 @@\n raise ValueError(f\"Invalid frequency string {freq}.\")\n \n \n+BASE_FREQ_TO_PANDAS_OFFSET: Dict[str, str] = {\n+ \"seconds\": \"S\",\n+ \"minutely\": \"T\",\n+ \"minutes\": \"T\",\n+ \"hourly\": \"H\",\n+ \"hours\": \"H\",\n+ \"daily\": \"D\",\n+ \"days\": \"D\",\n+ \"weekly\": \"W\",\n+ \"weeks\": \"W\",\n+ \"monthly\": \"M\",\n+ \"months\": \"M\",\n+ \"quarterly\": \"Q\",\n+ \"quarters\": \"Q\",\n+ \"yearly\": \"Y\",\n+ \"years\": \"Y\",\n+}\n+\n+\n def convert_base(text: str) -> str:\n- if text.lower() == \"minutely\":\n- return \"T\"\n- return text[0].upper()\n+ try:\n+ return BASE_FREQ_TO_PANDAS_OFFSET[text]\n+ except KeyError:\n+ raise GluonTSDataError(\n+ f'\"{text}\" is not recognized as a frequency string'\n+ )\n \n \n def convert_multiple(text: str) -> str:\n", "issue": "_tsf_reader.convert_base doesn't handle \"10 minutes\" frequency correctly\n## Description\r\nFor Monash datasets with the \"10 minutes\" frequency, the frequency converter will convert it to a frequency 10 MonthEnd (10M), instead of the expect 10 Minutes (10T) frequency.\r\n\r\nThere is already code to properly handle the \"minutely\" frequency, but it checks for that string explicitly, so it doesn't catch the \"10 minutes\" case.\r\n\r\n## To Reproduce\r\nOne dataset which has this frequency is the 10 minutes observation Solar dataset: https://zenodo.org/record/4656144\r\nfilename: `\"solar_10_minutes_dataset.zip\"`\r\nrecord: `\"4656132\"`\n", "before_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom datetime import datetime\nfrom distutils.util import strtobool\nfrom multiprocessing import cpu_count\nfrom types import SimpleNamespace\n\nimport numpy as np\nfrom toolz import compose_left\n\nfrom gluonts import json\nfrom gluonts.nursery import glide\n\nparse_bool = compose_left(strtobool, bool)\n\n\ndef parse_attribute(ty, value: str):\n if ty == \"numeric\":\n return int(value)\n\n if ty == \"string\":\n return value\n\n if ty == \"date\":\n return datetime.strptime(value, \"%Y-%m-%d %H-%M-%S\")\n\n raise AttributeError(ty)\n\n\ndef frequency_converter(freq: str):\n parts = freq.split(\"_\")\n if len(parts) == 1:\n return convert_base(parts[0])\n if len(parts) == 2:\n return convert_multiple(parts[0]) + convert_base(parts[1])\n raise ValueError(f\"Invalid frequency string {freq}.\")\n\n\ndef convert_base(text: str) -> str:\n if text.lower() == \"minutely\":\n return \"T\"\n return text[0].upper()\n\n\ndef convert_multiple(text: str) -> str:\n if text.isnumeric():\n return text\n if text == \"half\":\n return \"0.5\"\n raise ValueError(f\"Unknown frequency multiple {text}.\")\n\n\nclass TSFReader:\n def __init__(\n self,\n path,\n target_name=\"target\",\n ):\n self.path = path\n self.target_name = target_name\n\n self.meta = SimpleNamespace(columns={})\n\n def read(self):\n with open(self.path, encoding=\"latin1\") as in_file:\n # strip whitespace\n lines = map(str.strip, in_file)\n\n # ignore all lines starting with #\n lines = filter(lambda line: not line.startswith(\"#\"), lines)\n\n data_tag_found = self._read_header(lines)\n assert data_tag_found, \"Missing @data tag.\"\n assert (\n self.meta.columns\n ), \"Missing attribute section. Attribute section must come before data.\"\n\n assert self.target_name not in self.meta.columns\n self.meta.columns[self.target_name] = None\n\n data = self._read_data_section(lines)\n\n return self.meta, data\n\n def _read_header(self, lines):\n for line in lines:\n assert line.startswith(\"@\")\n stop = self._tag(line[1:])\n\n if stop:\n return True\n\n return False\n\n def _read_data_section(self, lines):\n lines = list(lines)\n\n lines = glide.imap_unordered(\n self._read_data, lines, num_workers=cpu_count(), batch_size=8092\n )\n\n return list(lines)\n\n def _read_data(self, line):\n parts = line.split(\":\")\n\n assert len(parts) == len(\n self.meta.columns\n ), \"Missing attributes/values in series.\"\n\n *attributes, target = parts\n\n record = {}\n\n record[self.target_name] = self._data_target(target)\n\n for (column, ty), attr in zip(self.meta.columns.items(), attributes):\n record[column] = parse_attribute(ty, attr)\n\n return record\n\n def _data_target(self, s):\n s = s.replace(\"?\", '\"nan\"')\n\n values = json.loads(f\"[{s}]\")\n assert (\n values\n ), \"A given series should contains a set of comma separated numeric values. At least one numeric value should be there in a series. Missing values should be indicated with ? 
symbol\"\n\n return np.array(values, dtype=float)\n\n def _tag(self, line):\n fn_by_tag = {\n \"attribute\": self._tag_attribute,\n \"frequency\": self._tag_frequency,\n \"horizon\": self._tag_horizon,\n \"missing\": self._tag_missing,\n \"equallength\": self._tag_equallength,\n \"data\": self._tag_data,\n }\n tag, *args = line.split(\" \")\n\n if tag not in fn_by_tag:\n return\n\n return fn_by_tag[tag](*args)\n\n def _tag_attribute(self, name, ty):\n self.meta.columns[name] = ty\n\n def _tag_frequency(self, frequency):\n self.meta.frequency = frequency\n\n def _tag_horizon(self, horizon):\n self.meta.forecast_horizon = horizon\n\n def _tag_missing(self, missing):\n self.meta.has_missing_values = parse_bool(missing)\n\n def _tag_equallength(self, equallength):\n self.meta.has_equal_length = parse_bool(equallength)\n\n def _tag_data(self):\n return True\n", "path": "src/gluonts/dataset/repository/_tsf_reader.py"}], "after_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom datetime import datetime\nfrom distutils.util import strtobool\nfrom multiprocessing import cpu_count\nfrom types import SimpleNamespace\nfrom typing import Dict\n\nimport numpy as np\nfrom toolz import compose_left\n\nfrom gluonts import json\nfrom gluonts.exceptions import GluonTSDataError\nfrom gluonts.nursery import glide\n\nparse_bool = compose_left(strtobool, bool)\n\n\ndef parse_attribute(ty, value: str):\n if ty == \"numeric\":\n return int(value)\n\n if ty == \"string\":\n return value\n\n if ty == \"date\":\n return datetime.strptime(value, \"%Y-%m-%d %H-%M-%S\")\n\n raise AttributeError(ty)\n\n\ndef frequency_converter(freq: str):\n parts = freq.split(\"_\")\n if len(parts) == 1:\n return convert_base(parts[0])\n if len(parts) == 2:\n return convert_multiple(parts[0]) + convert_base(parts[1])\n raise ValueError(f\"Invalid frequency string {freq}.\")\n\n\nBASE_FREQ_TO_PANDAS_OFFSET: Dict[str, str] = {\n \"seconds\": \"S\",\n \"minutely\": \"T\",\n \"minutes\": \"T\",\n \"hourly\": \"H\",\n \"hours\": \"H\",\n \"daily\": \"D\",\n \"days\": \"D\",\n \"weekly\": \"W\",\n \"weeks\": \"W\",\n \"monthly\": \"M\",\n \"months\": \"M\",\n \"quarterly\": \"Q\",\n \"quarters\": \"Q\",\n \"yearly\": \"Y\",\n \"years\": \"Y\",\n}\n\n\ndef convert_base(text: str) -> str:\n try:\n return BASE_FREQ_TO_PANDAS_OFFSET[text]\n except KeyError:\n raise GluonTSDataError(\n f'\"{text}\" is not recognized as a frequency string'\n )\n\n\ndef convert_multiple(text: str) -> str:\n if text.isnumeric():\n return text\n if text == \"half\":\n return \"0.5\"\n raise ValueError(f\"Unknown frequency multiple {text}.\")\n\n\nclass TSFReader:\n def __init__(\n self,\n path,\n target_name=\"target\",\n ):\n self.path = path\n self.target_name = target_name\n\n self.meta = SimpleNamespace(columns={})\n\n def read(self):\n with open(self.path, encoding=\"latin1\") as in_file:\n # strip whitespace\n lines = map(str.strip, in_file)\n\n # ignore all lines starting with #\n lines = filter(lambda 
line: not line.startswith(\"#\"), lines)\n\n data_tag_found = self._read_header(lines)\n assert data_tag_found, \"Missing @data tag.\"\n assert (\n self.meta.columns\n ), \"Missing attribute section. Attribute section must come before data.\"\n\n assert self.target_name not in self.meta.columns\n self.meta.columns[self.target_name] = None\n\n data = self._read_data_section(lines)\n\n return self.meta, data\n\n def _read_header(self, lines):\n for line in lines:\n assert line.startswith(\"@\")\n stop = self._tag(line[1:])\n\n if stop:\n return True\n\n return False\n\n def _read_data_section(self, lines):\n lines = list(lines)\n\n lines = glide.imap_unordered(\n self._read_data, lines, num_workers=cpu_count(), batch_size=8092\n )\n\n return list(lines)\n\n def _read_data(self, line):\n parts = line.split(\":\")\n\n assert len(parts) == len(\n self.meta.columns\n ), \"Missing attributes/values in series.\"\n\n *attributes, target = parts\n\n record = {}\n\n record[self.target_name] = self._data_target(target)\n\n for (column, ty), attr in zip(self.meta.columns.items(), attributes):\n record[column] = parse_attribute(ty, attr)\n\n return record\n\n def _data_target(self, s):\n s = s.replace(\"?\", '\"nan\"')\n\n values = json.loads(f\"[{s}]\")\n assert (\n values\n ), \"A given series should contains a set of comma separated numeric values. At least one numeric value should be there in a series. Missing values should be indicated with ? symbol\"\n\n return np.array(values, dtype=float)\n\n def _tag(self, line):\n fn_by_tag = {\n \"attribute\": self._tag_attribute,\n \"frequency\": self._tag_frequency,\n \"horizon\": self._tag_horizon,\n \"missing\": self._tag_missing,\n \"equallength\": self._tag_equallength,\n \"data\": self._tag_data,\n }\n tag, *args = line.split(\" \")\n\n if tag not in fn_by_tag:\n return\n\n return fn_by_tag[tag](*args)\n\n def _tag_attribute(self, name, ty):\n self.meta.columns[name] = ty\n\n def _tag_frequency(self, frequency):\n self.meta.frequency = frequency\n\n def _tag_horizon(self, horizon):\n self.meta.forecast_horizon = horizon\n\n def _tag_missing(self, missing):\n self.meta.has_missing_values = parse_bool(missing)\n\n def _tag_equallength(self, equallength):\n self.meta.has_equal_length = parse_bool(equallength)\n\n def _tag_data(self):\n return True\n", "path": "src/gluonts/dataset/repository/_tsf_reader.py"}]}
| 1,999 | 427 |
gh_patches_debug_15953
|
rasdani/github-patches
|
git_diff
|
pytorch__audio-1465
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove unused module
[`torchaudio._internal.misc_ops`](https://github.com/pytorch/audio/blob/b059f08742e70700ce4c92296a1131118f67a588/torchaudio/_internal/misc_ops.py) is a residue from refactoring of I/O features in the past releases. We can get rid of the whole module.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchaudio/_internal/misc_ops.py`
Content:
```
1 from typing import Union, Callable
2
3 import torch
4 from torch import Tensor
5
6
7 def normalize_audio(signal: Tensor, normalization: Union[bool, float, Callable]) -> None:
8 """Audio normalization of a tensor in-place. The normalization can be a bool,
9 a number, or a callable that takes the audio tensor as an input. SoX uses
10 32-bit signed integers internally, thus bool normalizes based on that assumption.
11 """
12
13 if not normalization:
14 return
15
16 if isinstance(normalization, bool):
17 normalization = 1 << 31
18
19 if isinstance(normalization, (float, int)):
20 # normalize with custom value
21 signal /= normalization
22 elif callable(normalization):
23 signal /= normalization(signal)
24
25
26 def check_input(src: Tensor) -> None:
27 if not torch.is_tensor(src):
28 raise TypeError('Expected a tensor, got %s' % type(src))
29 if src.is_cuda:
30 raise TypeError('Expected a CPU based tensor, got %s' % type(src))
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchaudio/_internal/misc_ops.py b/torchaudio/_internal/misc_ops.py
deleted file mode 100644
--- a/torchaudio/_internal/misc_ops.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from typing import Union, Callable
-
-import torch
-from torch import Tensor
-
-
-def normalize_audio(signal: Tensor, normalization: Union[bool, float, Callable]) -> None:
- """Audio normalization of a tensor in-place. The normalization can be a bool,
- a number, or a callable that takes the audio tensor as an input. SoX uses
- 32-bit signed integers internally, thus bool normalizes based on that assumption.
- """
-
- if not normalization:
- return
-
- if isinstance(normalization, bool):
- normalization = 1 << 31
-
- if isinstance(normalization, (float, int)):
- # normalize with custom value
- signal /= normalization
- elif callable(normalization):
- signal /= normalization(signal)
-
-
-def check_input(src: Tensor) -> None:
- if not torch.is_tensor(src):
- raise TypeError('Expected a tensor, got %s' % type(src))
- if src.is_cuda:
- raise TypeError('Expected a CPU based tensor, got %s' % type(src))
|
{"golden_diff": "diff --git a/torchaudio/_internal/misc_ops.py b/torchaudio/_internal/misc_ops.py\ndeleted file mode 100644\n--- a/torchaudio/_internal/misc_ops.py\n+++ /dev/null\n@@ -1,30 +0,0 @@\n-from typing import Union, Callable\n-\n-import torch\n-from torch import Tensor\n-\n-\n-def normalize_audio(signal: Tensor, normalization: Union[bool, float, Callable]) -> None:\n- \"\"\"Audio normalization of a tensor in-place. The normalization can be a bool,\n- a number, or a callable that takes the audio tensor as an input. SoX uses\n- 32-bit signed integers internally, thus bool normalizes based on that assumption.\n- \"\"\"\n-\n- if not normalization:\n- return\n-\n- if isinstance(normalization, bool):\n- normalization = 1 << 31\n-\n- if isinstance(normalization, (float, int)):\n- # normalize with custom value\n- signal /= normalization\n- elif callable(normalization):\n- signal /= normalization(signal)\n-\n-\n-def check_input(src: Tensor) -> None:\n- if not torch.is_tensor(src):\n- raise TypeError('Expected a tensor, got %s' % type(src))\n- if src.is_cuda:\n- raise TypeError('Expected a CPU based tensor, got %s' % type(src))\n", "issue": "Remove unused module\n[`torchaudio._internal.misc_ops`](https://github.com/pytorch/audio/blob/b059f08742e70700ce4c92296a1131118f67a588/torchaudio/_internal/misc_ops.py) is a residue from refactoring of I/O features in the past releases. We can get rid of the whole module.\r\n\r\n\n", "before_files": [{"content": "from typing import Union, Callable\n\nimport torch\nfrom torch import Tensor\n\n\ndef normalize_audio(signal: Tensor, normalization: Union[bool, float, Callable]) -> None:\n \"\"\"Audio normalization of a tensor in-place. The normalization can be a bool,\n a number, or a callable that takes the audio tensor as an input. SoX uses\n 32-bit signed integers internally, thus bool normalizes based on that assumption.\n \"\"\"\n\n if not normalization:\n return\n\n if isinstance(normalization, bool):\n normalization = 1 << 31\n\n if isinstance(normalization, (float, int)):\n # normalize with custom value\n signal /= normalization\n elif callable(normalization):\n signal /= normalization(signal)\n\n\ndef check_input(src: Tensor) -> None:\n if not torch.is_tensor(src):\n raise TypeError('Expected a tensor, got %s' % type(src))\n if src.is_cuda:\n raise TypeError('Expected a CPU based tensor, got %s' % type(src))\n", "path": "torchaudio/_internal/misc_ops.py"}], "after_files": [{"content": null, "path": "torchaudio/_internal/misc_ops.py"}]}
| 631 | 300 |
gh_patches_debug_691
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-15263
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
eigh
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py`
Content:
```
1 # local
2 import ivy
3 from ivy.functional.frontends.numpy.func_wrapper import (
4 to_ivy_arrays_and_back,
5 from_zero_dim_arrays_to_scalar,
6 )
7
8
9 @to_ivy_arrays_and_back
10 @from_zero_dim_arrays_to_scalar
11 def eigvalsh(a, /, UPLO="L"):
12 return ivy.eigvalsh(a, UPLO=UPLO)
13
14
15 @to_ivy_arrays_and_back
16 def eig(a):
17 return ivy.eig(a)
18
19
20 @from_zero_dim_arrays_to_scalar
21 def eigh(a, /, UPLO="L"):
22 return ivy.eigh(a, UPLO=UPLO)
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py
--- a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py
+++ b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py
@@ -17,6 +17,7 @@
return ivy.eig(a)
+@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def eigh(a, /, UPLO="L"):
return ivy.eigh(a, UPLO=UPLO)
|
{"golden_diff": "diff --git a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py\n--- a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py\n+++ b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py\n@@ -17,6 +17,7 @@\n return ivy.eig(a)\n \n \n+@to_ivy_arrays_and_back\n @from_zero_dim_arrays_to_scalar\n def eigh(a, /, UPLO=\"L\"):\n return ivy.eigh(a, UPLO=UPLO)\n", "issue": "eigh\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n from_zero_dim_arrays_to_scalar,\n)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef eigvalsh(a, /, UPLO=\"L\"):\n return ivy.eigvalsh(a, UPLO=UPLO)\n\n\n@to_ivy_arrays_and_back\ndef eig(a):\n return ivy.eig(a)\n\n\n@from_zero_dim_arrays_to_scalar\ndef eigh(a, /, UPLO=\"L\"):\n return ivy.eigh(a, UPLO=UPLO)\n", "path": "ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py"}], "after_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n from_zero_dim_arrays_to_scalar,\n)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef eigvalsh(a, /, UPLO=\"L\"):\n return ivy.eigvalsh(a, UPLO=UPLO)\n\n\n@to_ivy_arrays_and_back\ndef eig(a):\n return ivy.eig(a)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef eigh(a, /, UPLO=\"L\"):\n return ivy.eigh(a, UPLO=UPLO)\n", "path": "ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py"}]}
| 449 | 140 |
gh_patches_debug_11763
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-11223
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] crash on Windows when trying to obtain USERNAME from environment
<!--
Please don't forget to update the issue title.
Include all applicable information to help us reproduce your problem.
To help us debug your issue please explain:
-->
### Environment Details (include every applicable attribute)
* Operating System+version: Windows 11
* Compiler+version: N/A
* Conan version: 1.48
* Python version: 3.10.4
### Steps to reproduce (Include if Applicable)
Powershell in which `USERNAME` is not set as a variable
```powershell
git clone https://github.com/ultimaker/libarcus.git
cd libarcus
git checkout CURA-9177_fix_CI_CT
conan create . arcus/5.0.0-PullRequest0137.67@ultimaker/testing -pr:b cura_build.jinja -pr:h cura_release.jinja --build=missing
```
### Logs (Executed commands with output) (Include/Attach if Applicable)
Traceback
```shell
Traceback (most recent call last):
File "C:\Users\********\AppData\Roaming\Python\Python310\site-packages\conans\client\command.py", line 2238, in run
method(args[0][1:])
File "C:\Users\********\AppData\Roaming\Python\Python310\site-packages\conans\client\command.py", line 382, in create
info = self._conan.create(args.path, name=name, version=version, user=user,
File "C:\Users\********\AppData\Roaming\Python\Python310\site-packages\conans\client\conan_api.py", line 93, in wrapper
return f(api, *args, **kwargs)
File "C:\Users\********\AppData\Roaming\Python\Python310\site-packages\conans\client\conan_api.py", line 388, in create
deps_install(app=app,
File "C:\Users\********\AppData\Roaming\Python\Python310\site-packages\conans\client\manager.py", line 58, in deps_install
deps_graph = graph_manager.load_graph(ref_or_path, create_reference, graph_info, build_modes,
File "C:\Users\********\AppData\Roaming\Python\Python310\site-packages\conans\client\graph\graph_manager.py", line 127, in load_graph
deps_graph = self._resolve_graph(root_node, profile_host, profile_build, graph_lock,
File "C:\Users\********\AppData\Roaming\Python\Python310\site-packages\conans\client\graph\graph_manager.py", line 289, in _resolve_graph
deps_graph = self._load_graph(root_node, check_updates, update,
File "C:\Users\********\AppData\Roaming\Python\Python310\site-packages\conans\client\graph\graph_manager.py", line 410, in _load_graph
self._recurse_build_requires(graph, builder, check_updates, update, build_mode,
File "C:\Users\********\AppData\Roaming\Python\Python310\site-packages\conans\client\graph\graph_manager.py", line 336, in _recurse_build_requires
File "C:\Users\********\AppData\Roaming\Python\Python310\site-packages\conans\client\graph\graph_manager.py", line 410, in _load_graph
self._recurse_build_requires(graph, builder, check_updates, update, build_mode,
File "C:\Users\********\AppData\Roaming\Python\Python310\site-packages\conans\client\graph\graph_manager.py", line 336, in _recurse_build_requires
self._binary_analyzer.evaluate_graph(graph, build_mode, update, remotes, nodes_subset, root)
File "C:\Users\********\AppData\Roaming\Python\Python310\site-packages\conans\client\graph\graph_binaries.py", line 431, in evaluate_graph
self._evaluate_node(node, build_mode, update, remotes)
File "C:\Users\********\AppData\Roaming\Python\Python310\site-packages\conans\client\graph\graph_binaries.py", line 202, in _evaluate_node
self._process_node(node, pref, build_mode, update, remotes)
File "C:\Users\********\AppData\Roaming\Python\Python310\site-packages\conans\client\graph\graph_binaries.py", line 275, in _process_node
if package_layout.package_id_exists(pref.id) and pref.id in metadata.packages:
File "C:\Users\********\AppData\Roaming\Python\Python310\site-packages\conans\paths\package_layouts\package_cache_layout.py", line 133, in package_id_exists
pkg_folder = self.package(PackageReference(self._ref, package_id))
File "C:\Users\********\AppData\Roaming\Python\Python310\site-packages\conans\paths\package_layouts\package_cache_layout.py", line 32, in wrap
return path_shortener(p, self._short_paths)
File "C:\Users\********\AppData\Roaming\Python\Python310\site-packages\conans\util\windows.py", line 88, in path_shortener
userdomain, username = os.getenv("USERDOMAIN"), os.environ["USERNAME"]
File "C:\Program Files\Python310\lib\os.py", line 679, in __getitem__
raise KeyError(key) from None
KeyError: 'USERNAME'
ERROR: 'USERNAME'
```
Environmental variables
```powershell
PS C:\dev\libarcus> gci env:* | sort-object name
Name Value
---- -----
__INTELLIJ_COMMAND_HISTFILE__ C:\Users\********\AppData\Local\JetBrains\CLion2022.1\terminal\history\libarcus-history
ALLUSERSPROFILE C:\ProgramData
APPDATA C:\Users\********\AppData\Roaming
CommonProgramFiles C:\Program Files\Common Files
CommonProgramFiles(x86) C:\Program Files (x86)\Common Files
CommonProgramW6432 C:\Program Files\Common Files
COMPUTERNAME ********
ComSpec C:\windows\system32\cmd.exe
DriverData C:\Windows\System32\Drivers\DriverData
FPS_BROWSER_APP_PROFILE_STRING Internet Explorer
FPS_BROWSER_USER_PROFILE_ST... Default
HOMEDRIVE C:
HOMEPATH \Users\********
IDEA_INITIAL_DIRECTORY C:\
JAVA_HOME C:\Program Files\Amazon Corretto\jre8
LOCALAPPDATA C:\Users\********\AppData\Local
LOGONSERVER \\********
NUMBER_OF_PROCESSORS 12
OneDrive C:\Users\********\OneDrive - ***********
OneDriveCommercial C:\Users\********\OneDrive - ***********
OS Windows_NT
Path C:\Program Files\Python310\Scripts\;C:\Program Files\Python310\;C:\windows\system32;C:\windows;C:\windows\System32\Wbem;C:\windows\System32\WindowsPowerShell\v1.0\;C:\windows\System32\OpenSSH\;C:\windows\system32\config\systemprofile\AppData\Local\Microsoft\WindowsApps;C:\Program Files\Amazon Corretto\jre8\bin;C:\Program Files\Git\cmd;C:\Program Files\dotnet\;C:\Users\********\AppDat...
PATHEXT .COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC;.PY;.PYW;.CPL
PROCESSOR_ARCHITECTURE AMD64
PROCESSOR_IDENTIFIER Intel64 Family 6 Model 165 Stepping 2, GenuineIntel
PROCESSOR_LEVEL 6
PROCESSOR_REVISION a502
ProgramData C:\ProgramData
ProgramFiles C:\Program Files
ProgramFiles(x86) C:\Program Files (x86)
ProgramW6432 C:\Program Files
PSModulePath C:\Users\********\OneDrive - ***********\Documents\WindowsPowerShell\Modules;C:\Program Files\WindowsPowerShell\Modules;C:\windows\system32\WindowsPow
SESSIONNAME Console
SystemDrive C:
SystemRoot C:\windows
TEMP C:\Users\JC9C9~1.SPI\AppData\Local\Temp
TERM_SESSION_ID d1e664b1-b85c-49eb-85d8-be45055fdd9c
TERMINAL_EMULATOR JetBrains-JediTerm
TMP C:\Users\JC9C9~1.SPI\AppData\Local\Temp
USERDOMAIN_ROAMINGPROFILE ********
USERPROFILE C:\Users\********
windir C:\windows
```
--- END ISSUE ---
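The failing statement is the strict `os.environ["USERNAME"]` lookup in `conans/util/windows.py` (line 88, per the traceback), which raises `KeyError` whenever the variable is missing, as in the PowerShell session above where `USERPROFILE` and `USERDOMAIN_ROAMINGPROFILE` are set but `USERNAME` is not. A minimal stand-alone illustration of the failure mode and of a tolerant lookup (a sketch only, not the project's actual patch):

```python
import os

env = dict(os.environ)
env.pop("USERNAME", None)                        # simulate the PowerShell session above

try:
    username = env["USERNAME"]                   # strict lookup: raises KeyError, as in the traceback
except KeyError:
    username = env.get("USERNAME") or "unknown"  # tolerant lookup degrades gracefully instead
print(username)
```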
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/util/windows.py`
Content:
```
1 import os
2 import subprocess
3 import tempfile
4
5 from conans.client.tools.oss import OSInfo
6 from conans.errors import ConanException
7 from conans.util.env_reader import get_env
8 from conans.util.files import decode_text
9 from conans.util.files import load, mkdir, rmdir, save
10 from conans.util.log import logger
11 from conans.util.sha import sha256
12
13 CONAN_LINK = ".conan_link"
14 CONAN_REAL_PATH = "real_path.txt"
15
16
17 def conan_expand_user(path):
18 """ wrapper to the original expanduser function, to workaround python returning
19 verbatim %USERPROFILE% when some other app (git for windows) sets HOME envvar
20 """
21 if path[:1] != '~':
22 return path
23 # In win these variables should exist and point to user directory, which
24 # must exist. Using context to avoid permanent modification of os.environ
25 old_env = dict(os.environ)
26 try:
27 home = os.environ.get("HOME")
28 # Problematic cases of wrong HOME variable
29 # - HOME = %USERPROFILE% verbatim, as messed by some other tools
30 # - MSYS console, that defines a different user home in /c/mingw/msys/users/xxx
31 # In these cases, it is safe to remove it and rely on USERPROFILE directly
32 if home and (not os.path.exists(home) or
33 (os.getenv("MSYSTEM") and os.getenv("USERPROFILE"))):
34 del os.environ["HOME"]
35 result = os.path.expanduser(path)
36 finally:
37 os.environ.clear()
38 os.environ.update(old_env)
39 return result
40
41
42 def path_shortener(path, short_paths):
43 """ short_paths is 4-state:
44 False: Never shorten the path
45 True: Always shorten the path, create link if not existing
46 None: Use shorten path only if already exists, not create
47 """
48 use_always_short_paths = get_env("CONAN_USE_ALWAYS_SHORT_PATHS", False)
49 short_paths = use_always_short_paths or short_paths
50
51 if short_paths is False or os.getenv("CONAN_USER_HOME_SHORT") == "None":
52 return path
53 link = os.path.join(path, CONAN_LINK)
54 if os.path.exists(link):
55 return load(link)
56 elif short_paths is None:
57 return path
58
59 if os.path.exists(path):
60 rmdir(path)
61
62 short_home = os.getenv("CONAN_USER_HOME_SHORT")
63 if not short_home:
64 if OSInfo().is_cygwin:
65 try:
66 cmd = ['cygpath', path, '--unix']
67 out, _ = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=False).communicate()
68 out = decode_text(out)
69 if out.startswith('/cygdrive/'): # It was a Windows 'path'
70 _, _, drive, _ = out.split('/', 3)
71 short_home = os.path.join('/cygdrive', drive, '.conan')
72 else: # It was a cygwin path, use a path inside the user home
73 short_home = os.path.join(os.path.expanduser("~"), '.conan_short')
74 except Exception:
75 raise ConanException("Conan failed to create the short_paths home for path '{}'"
76 " in Cygwin. Please report this issue. You can use environment"
77 " variable 'CONAN_USER_HOME_SHORT' to set the short_paths"
78 " home.".format(path))
79 else:
80 drive = os.path.splitdrive(path)[0]
81 short_home = os.path.join(drive, os.sep, ".conan")
82 mkdir(short_home)
83
84 # Workaround for short_home living in NTFS file systems. Give full control permission
85 # to current user to avoid
86 # access problems in cygwin/msys2 windows subsystems when using short_home folder
87 try:
88 userdomain, username = os.getenv("USERDOMAIN"), os.environ["USERNAME"]
89 domainname = "%s\%s" % (userdomain, username) if userdomain else username
90 cmd = r'cacls %s /E /G "%s":F' % (short_home, domainname)
91 subprocess.check_output(cmd, stderr=subprocess.STDOUT) # Ignoring any returned output, quiet
92 except (subprocess.CalledProcessError, EnvironmentError):
93 # cmd can fail if trying to set ACL in non NTFS drives, ignoring it.
94 pass
95
96 redirect = hashed_redirect(short_home, path)
97 if not redirect:
98 logger.warning("Failed to create a deterministic short path in %s", short_home)
99 redirect = tempfile.mkdtemp(dir=short_home, prefix="")
100
101 # Save the full path of the local cache directory where the redirect is from.
102 # This file is for debugging purposes and not used by Conan.
103 save(os.path.join(redirect, CONAN_REAL_PATH), path)
104
105 # This "1" is the way to have a non-existing directory, so commands like
106 # shutil.copytree() to it, works. It can be removed without compromising the
107 # temp folder generator and conan-links consistency
108 redirect = os.path.join(redirect, "1")
109 save(link, redirect)
110 return redirect
111
112
113 def rm_conandir(path):
114 """removal of a directory that might contain a link to a short path"""
115 link = os.path.join(path, CONAN_LINK)
116 if os.path.exists(link):
117 short_path = load(link)
118 rmdir(os.path.dirname(short_path))
119 rmdir(path)
120
121
122 def hashed_redirect(base, path, min_length=6, attempts=10):
123 max_length = min_length + attempts
124
125 full_hash = sha256(path.encode())
126 assert len(full_hash) > max_length
127
128 for length in range(min_length, max_length):
129 redirect = os.path.join(base, full_hash[:length])
130 if not os.path.exists(redirect):
131 return redirect
132 else:
133 return None
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conans/util/windows.py b/conans/util/windows.py
--- a/conans/util/windows.py
+++ b/conans/util/windows.py
@@ -89,7 +89,7 @@
domainname = "%s\%s" % (userdomain, username) if userdomain else username
cmd = r'cacls %s /E /G "%s":F' % (short_home, domainname)
subprocess.check_output(cmd, stderr=subprocess.STDOUT) # Ignoring any returned output, quiet
- except (subprocess.CalledProcessError, EnvironmentError):
+ except (subprocess.CalledProcessError, EnvironmentError, KeyError):
# cmd can fail if trying to set ACL in non NTFS drives, ignoring it.
pass
|
{"golden_diff": "diff --git a/conans/util/windows.py b/conans/util/windows.py\n--- a/conans/util/windows.py\n+++ b/conans/util/windows.py\n@@ -89,7 +89,7 @@\n domainname = \"%s\\%s\" % (userdomain, username) if userdomain else username\n cmd = r'cacls %s /E /G \"%s\":F' % (short_home, domainname)\n subprocess.check_output(cmd, stderr=subprocess.STDOUT) # Ignoring any returned output, quiet\n- except (subprocess.CalledProcessError, EnvironmentError):\n+ except (subprocess.CalledProcessError, EnvironmentError, KeyError):\n # cmd can fail if trying to set ACL in non NTFS drives, ignoring it.\n pass\n", "issue": "[bug] crash on Windows when trying to obtain USERNAME from environment\n<!--\r\n Please don't forget to update the issue title.\r\n Include all applicable information to help us reproduce your problem.\r\n\r\n To help us debug your issue please explain:\r\n-->\r\n\r\n### Environment Details (include every applicable attribute)\r\n * Operating System+version: Windows 11\r\n * Compiler+version: N/A\r\n * Conan version: 1.48\r\n * Python version: 3.10.4\r\n\r\n### Steps to reproduce (Include if Applicable)\r\nPowershell in which `USERNAME` is not set as a variable\r\n\r\n```powershell\r\ngit clone https://github.com/ultimaker/libarcus.git\r\ncd libarcus\r\ngit checkout CURA-9177_fix_CI_CT\r\nconan create . arcus/5.0.0-PullRequest0137.67@ultimaker/testing -pr:b cura_build.jinja -pr:h cura_release.jinja --build=missing\r\n```\r\n\r\n### Logs (Executed commands with output) (Include/Attach if Applicable)\r\n\r\nTraceback\r\n```shell\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\********\\AppData\\Roaming\\Python\\Python310\\site-packages\\conans\\client\\command.py\", line 2238, in run\r\n method(args[0][1:])\r\n File \"C:\\Users\\********\\AppData\\Roaming\\Python\\Python310\\site-packages\\conans\\client\\command.py\", line 382, in create\r\n info = self._conan.create(args.path, name=name, version=version, user=user,\r\n File \"C:\\Users\\********\\AppData\\Roaming\\Python\\Python310\\site-packages\\conans\\client\\conan_api.py\", line 93, in wrapper\r\n return f(api, *args, **kwargs)\r\n File \"C:\\Users\\********\\AppData\\Roaming\\Python\\Python310\\site-packages\\conans\\client\\conan_api.py\", line 388, in create\r\n deps_install(app=app,\r\n File \"C:\\Users\\********\\AppData\\Roaming\\Python\\Python310\\site-packages\\conans\\client\\manager.py\", line 58, in deps_install\r\n deps_graph = graph_manager.load_graph(ref_or_path, create_reference, graph_info, build_modes,\r\n File \"C:\\Users\\********\\AppData\\Roaming\\Python\\Python310\\site-packages\\conans\\client\\graph\\graph_manager.py\", line 127, in load_graph\r\n deps_graph = self._resolve_graph(root_node, profile_host, profile_build, graph_lock,\r\n File \"C:\\Users\\********\\AppData\\Roaming\\Python\\Python310\\site-packages\\conans\\client\\graph\\graph_manager.py\", line 289, in _resolve_graph\r\n deps_graph = self._load_graph(root_node, check_updates, update,\r\n File \"C:\\Users\\********\\AppData\\Roaming\\Python\\Python310\\site-packages\\conans\\client\\graph\\graph_manager.py\", line 410, in _load_graph\r\n self._recurse_build_requires(graph, builder, check_updates, update, build_mode,\r\n File \"C:\\Users\\********\\AppData\\Roaming\\Python\\Python310\\site-packages\\conans\\client\\graph\\graph_manager.py\", line 336, in _recurse_build_requires\r\n File \"C:\\Users\\********\\AppData\\Roaming\\Python\\Python310\\site-packages\\conans\\client\\graph\\graph_manager.py\", line 410, in _load_graph\r\n 
self._recurse_build_requires(graph, builder, check_updates, update, build_mode,\r\n File \"C:\\Users\\********\\AppData\\Roaming\\Python\\Python310\\site-packages\\conans\\client\\graph\\graph_manager.py\", line 336, in _recurse_build_requires\r\n self._binary_analyzer.evaluate_graph(graph, build_mode, update, remotes, nodes_subset, root)\r\n File \"C:\\Users\\********\\AppData\\Roaming\\Python\\Python310\\site-packages\\conans\\client\\graph\\graph_binaries.py\", line 431, in evaluate_graph\r\n self._evaluate_node(node, build_mode, update, remotes)\r\n File \"C:\\Users\\********\\AppData\\Roaming\\Python\\Python310\\site-packages\\conans\\client\\graph\\graph_binaries.py\", line 202, in _evaluate_node\r\n self._process_node(node, pref, build_mode, update, remotes)\r\n File \"C:\\Users\\********\\AppData\\Roaming\\Python\\Python310\\site-packages\\conans\\client\\graph\\graph_binaries.py\", line 275, in _process_node\r\n if package_layout.package_id_exists(pref.id) and pref.id in metadata.packages:\r\n File \"C:\\Users\\********\\AppData\\Roaming\\Python\\Python310\\site-packages\\conans\\paths\\package_layouts\\package_cache_layout.py\", line 133, in package_id_exists\r\n pkg_folder = self.package(PackageReference(self._ref, package_id))\r\n File \"C:\\Users\\********\\AppData\\Roaming\\Python\\Python310\\site-packages\\conans\\paths\\package_layouts\\package_cache_layout.py\", line 32, in wrap\r\n return path_shortener(p, self._short_paths)\r\n File \"C:\\Users\\********\\AppData\\Roaming\\Python\\Python310\\site-packages\\conans\\util\\windows.py\", line 88, in path_shortener\r\n userdomain, username = os.getenv(\"USERDOMAIN\"), os.environ[\"USERNAME\"]\r\n File \"C:\\Program Files\\Python310\\lib\\os.py\", line 679, in __getitem__\r\n raise KeyError(key) from None\r\nKeyError: 'USERNAME'\r\n\r\nERROR: 'USERNAME'\r\n```\r\n\r\nEnvironmental variables\r\n```powershell\r\nPS C:\\dev\\libarcus> gci env:* | sort-object name\r\n\r\nName Value\r\n---- ----- \r\n__INTELLIJ_COMMAND_HISTFILE__ C:\\Users\\********\\AppData\\Local\\JetBrains\\CLion2022.1\\terminal\\history\\libarcus-history \r\nALLUSERSPROFILE C:\\ProgramData \r\nAPPDATA C:\\Users\\********\\AppData\\Roaming \r\nCommonProgramFiles C:\\Program Files\\Common Files \r\nCommonProgramFiles(x86) C:\\Program Files (x86)\\Common Files \r\nCommonProgramW6432 C:\\Program Files\\Common Files \r\nCOMPUTERNAME ******** \r\nComSpec C:\\windows\\system32\\cmd.exe \r\nDriverData C:\\Windows\\System32\\Drivers\\DriverData \r\nFPS_BROWSER_APP_PROFILE_STRING Internet Explorer \r\nFPS_BROWSER_USER_PROFILE_ST... Default \r\nHOMEDRIVE C: \r\nHOMEPATH \\Users\\********\r\nIDEA_INITIAL_DIRECTORY C:\\\r\nJAVA_HOME C:\\Program Files\\Amazon Corretto\\jre8\r\nLOCALAPPDATA C:\\Users\\********\\AppData\\Local\r\nLOGONSERVER \\\\********\r\nNUMBER_OF_PROCESSORS 12\r\nOneDrive C:\\Users\\********\\OneDrive - ***********\r\nOneDriveCommercial C:\\Users\\********\\OneDrive - ***********\r\nOS Windows_NT\r\nPath C:\\Program Files\\Python310\\Scripts\\;C:\\Program Files\\Python310\\;C:\\windows\\system32;C:\\windows;C:\\windows\\System32\\Wbem;C:\\windows\\System32\\WindowsPowerShell\\v1.0\\;C:\\windows\\System32\\OpenSSH\\;C:\\windows\\system32\\config\\systemprofile\\AppData\\Local\\Microsoft\\WindowsApps;C:\\Program Files\\Amazon Corretto\\jre8\\bin;C:\\Program Files\\Git\\cmd;C:\\Program Files\\dotnet\\;C:\\Users\\********\\AppDat... 
\r\nPATHEXT .COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC;.PY;.PYW;.CPL\r\nPROCESSOR_ARCHITECTURE AMD64\r\nPROCESSOR_IDENTIFIER Intel64 Family 6 Model 165 Stepping 2, GenuineIntel\r\nPROCESSOR_LEVEL 6\r\nPROCESSOR_REVISION a502\r\nProgramData C:\\ProgramData\r\nProgramFiles C:\\Program Files\r\nProgramFiles(x86) C:\\Program Files (x86)\r\nProgramW6432 C:\\Program Files\r\nPSModulePath C:\\Users\\********\\OneDrive - ***********\\Documents\\WindowsPowerShell\\Modules;C:\\Program Files\\WindowsPowerShell\\Modules;C:\\windows\\system32\\WindowsPow\r\nSESSIONNAME Console\r\nSystemDrive C:\r\nSystemRoot C:\\windows\r\nTEMP C:\\Users\\JC9C9~1.SPI\\AppData\\Local\\Temp\r\nTERM_SESSION_ID d1e664b1-b85c-49eb-85d8-be45055fdd9c\r\nTERMINAL_EMULATOR JetBrains-JediTerm\r\nTMP C:\\Users\\JC9C9~1.SPI\\AppData\\Local\\Temp\r\nUSERDOMAIN_ROAMINGPROFILE ********\r\nUSERPROFILE C:\\Users\\********\r\nwindir C:\\windows\r\n\r\n```\n", "before_files": [{"content": "import os\nimport subprocess\nimport tempfile\n\nfrom conans.client.tools.oss import OSInfo\nfrom conans.errors import ConanException\nfrom conans.util.env_reader import get_env\nfrom conans.util.files import decode_text\nfrom conans.util.files import load, mkdir, rmdir, save\nfrom conans.util.log import logger\nfrom conans.util.sha import sha256\n\nCONAN_LINK = \".conan_link\"\nCONAN_REAL_PATH = \"real_path.txt\"\n\n\ndef conan_expand_user(path):\n \"\"\" wrapper to the original expanduser function, to workaround python returning\n verbatim %USERPROFILE% when some other app (git for windows) sets HOME envvar\n \"\"\"\n if path[:1] != '~':\n return path\n # In win these variables should exist and point to user directory, which\n # must exist. Using context to avoid permanent modification of os.environ\n old_env = dict(os.environ)\n try:\n home = os.environ.get(\"HOME\")\n # Problematic cases of wrong HOME variable\n # - HOME = %USERPROFILE% verbatim, as messed by some other tools\n # - MSYS console, that defines a different user home in /c/mingw/msys/users/xxx\n # In these cases, it is safe to remove it and rely on USERPROFILE directly\n if home and (not os.path.exists(home) or\n (os.getenv(\"MSYSTEM\") and os.getenv(\"USERPROFILE\"))):\n del os.environ[\"HOME\"]\n result = os.path.expanduser(path)\n finally:\n os.environ.clear()\n os.environ.update(old_env)\n return result\n\n\ndef path_shortener(path, short_paths):\n \"\"\" short_paths is 4-state:\n False: Never shorten the path\n True: Always shorten the path, create link if not existing\n None: Use shorten path only if already exists, not create\n \"\"\"\n use_always_short_paths = get_env(\"CONAN_USE_ALWAYS_SHORT_PATHS\", False)\n short_paths = use_always_short_paths or short_paths\n\n if short_paths is False or os.getenv(\"CONAN_USER_HOME_SHORT\") == \"None\":\n return path\n link = os.path.join(path, CONAN_LINK)\n if os.path.exists(link):\n return load(link)\n elif short_paths is None:\n return path\n\n if os.path.exists(path):\n rmdir(path)\n\n short_home = os.getenv(\"CONAN_USER_HOME_SHORT\")\n if not short_home:\n if OSInfo().is_cygwin:\n try:\n cmd = ['cygpath', path, '--unix']\n out, _ = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=False).communicate()\n out = decode_text(out)\n if out.startswith('/cygdrive/'): # It was a Windows 'path'\n _, _, drive, _ = out.split('/', 3)\n short_home = os.path.join('/cygdrive', drive, '.conan')\n else: # It was a cygwin path, use a path inside the user home\n short_home = os.path.join(os.path.expanduser(\"~\"), '.conan_short')\n except 
Exception:\n raise ConanException(\"Conan failed to create the short_paths home for path '{}'\"\n \" in Cygwin. Please report this issue. You can use environment\"\n \" variable 'CONAN_USER_HOME_SHORT' to set the short_paths\"\n \" home.\".format(path))\n else:\n drive = os.path.splitdrive(path)[0]\n short_home = os.path.join(drive, os.sep, \".conan\")\n mkdir(short_home)\n\n # Workaround for short_home living in NTFS file systems. Give full control permission\n # to current user to avoid\n # access problems in cygwin/msys2 windows subsystems when using short_home folder\n try:\n userdomain, username = os.getenv(\"USERDOMAIN\"), os.environ[\"USERNAME\"]\n domainname = \"%s\\%s\" % (userdomain, username) if userdomain else username\n cmd = r'cacls %s /E /G \"%s\":F' % (short_home, domainname)\n subprocess.check_output(cmd, stderr=subprocess.STDOUT) # Ignoring any returned output, quiet\n except (subprocess.CalledProcessError, EnvironmentError):\n # cmd can fail if trying to set ACL in non NTFS drives, ignoring it.\n pass\n\n redirect = hashed_redirect(short_home, path)\n if not redirect:\n logger.warning(\"Failed to create a deterministic short path in %s\", short_home)\n redirect = tempfile.mkdtemp(dir=short_home, prefix=\"\")\n\n # Save the full path of the local cache directory where the redirect is from.\n # This file is for debugging purposes and not used by Conan.\n save(os.path.join(redirect, CONAN_REAL_PATH), path)\n\n # This \"1\" is the way to have a non-existing directory, so commands like\n # shutil.copytree() to it, works. It can be removed without compromising the\n # temp folder generator and conan-links consistency\n redirect = os.path.join(redirect, \"1\")\n save(link, redirect)\n return redirect\n\n\ndef rm_conandir(path):\n \"\"\"removal of a directory that might contain a link to a short path\"\"\"\n link = os.path.join(path, CONAN_LINK)\n if os.path.exists(link):\n short_path = load(link)\n rmdir(os.path.dirname(short_path))\n rmdir(path)\n\n\ndef hashed_redirect(base, path, min_length=6, attempts=10):\n max_length = min_length + attempts\n\n full_hash = sha256(path.encode())\n assert len(full_hash) > max_length\n\n for length in range(min_length, max_length):\n redirect = os.path.join(base, full_hash[:length])\n if not os.path.exists(redirect):\n return redirect\n else:\n return None\n", "path": "conans/util/windows.py"}], "after_files": [{"content": "import os\nimport subprocess\nimport tempfile\n\nfrom conans.client.tools.oss import OSInfo\nfrom conans.errors import ConanException\nfrom conans.util.env_reader import get_env\nfrom conans.util.files import decode_text\nfrom conans.util.files import load, mkdir, rmdir, save\nfrom conans.util.log import logger\nfrom conans.util.sha import sha256\n\nCONAN_LINK = \".conan_link\"\nCONAN_REAL_PATH = \"real_path.txt\"\n\n\ndef conan_expand_user(path):\n \"\"\" wrapper to the original expanduser function, to workaround python returning\n verbatim %USERPROFILE% when some other app (git for windows) sets HOME envvar\n \"\"\"\n if path[:1] != '~':\n return path\n # In win these variables should exist and point to user directory, which\n # must exist. 
Using context to avoid permanent modification of os.environ\n old_env = dict(os.environ)\n try:\n home = os.environ.get(\"HOME\")\n # Problematic cases of wrong HOME variable\n # - HOME = %USERPROFILE% verbatim, as messed by some other tools\n # - MSYS console, that defines a different user home in /c/mingw/msys/users/xxx\n # In these cases, it is safe to remove it and rely on USERPROFILE directly\n if home and (not os.path.exists(home) or\n (os.getenv(\"MSYSTEM\") and os.getenv(\"USERPROFILE\"))):\n del os.environ[\"HOME\"]\n result = os.path.expanduser(path)\n finally:\n os.environ.clear()\n os.environ.update(old_env)\n return result\n\n\ndef path_shortener(path, short_paths):\n \"\"\" short_paths is 4-state:\n False: Never shorten the path\n True: Always shorten the path, create link if not existing\n None: Use shorten path only if already exists, not create\n \"\"\"\n use_always_short_paths = get_env(\"CONAN_USE_ALWAYS_SHORT_PATHS\", False)\n short_paths = use_always_short_paths or short_paths\n\n if short_paths is False or os.getenv(\"CONAN_USER_HOME_SHORT\") == \"None\":\n return path\n link = os.path.join(path, CONAN_LINK)\n if os.path.exists(link):\n return load(link)\n elif short_paths is None:\n return path\n\n if os.path.exists(path):\n rmdir(path)\n\n short_home = os.getenv(\"CONAN_USER_HOME_SHORT\")\n if not short_home:\n if OSInfo().is_cygwin:\n try:\n cmd = ['cygpath', path, '--unix']\n out, _ = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=False).communicate()\n out = decode_text(out)\n if out.startswith('/cygdrive/'): # It was a Windows 'path'\n _, _, drive, _ = out.split('/', 3)\n short_home = os.path.join('/cygdrive', drive, '.conan')\n else: # It was a cygwin path, use a path inside the user home\n short_home = os.path.join(os.path.expanduser(\"~\"), '.conan_short')\n except Exception:\n raise ConanException(\"Conan failed to create the short_paths home for path '{}'\"\n \" in Cygwin. Please report this issue. You can use environment\"\n \" variable 'CONAN_USER_HOME_SHORT' to set the short_paths\"\n \" home.\".format(path))\n else:\n drive = os.path.splitdrive(path)[0]\n short_home = os.path.join(drive, os.sep, \".conan\")\n mkdir(short_home)\n\n # Workaround for short_home living in NTFS file systems. Give full control permission\n # to current user to avoid\n # access problems in cygwin/msys2 windows subsystems when using short_home folder\n try:\n userdomain, username = os.getenv(\"USERDOMAIN\"), os.environ[\"USERNAME\"]\n domainname = \"%s\\%s\" % (userdomain, username) if userdomain else username\n cmd = r'cacls %s /E /G \"%s\":F' % (short_home, domainname)\n subprocess.check_output(cmd, stderr=subprocess.STDOUT) # Ignoring any returned output, quiet\n except (subprocess.CalledProcessError, EnvironmentError, KeyError):\n # cmd can fail if trying to set ACL in non NTFS drives, ignoring it.\n pass\n\n redirect = hashed_redirect(short_home, path)\n if not redirect:\n logger.warning(\"Failed to create a deterministic short path in %s\", short_home)\n redirect = tempfile.mkdtemp(dir=short_home, prefix=\"\")\n\n # Save the full path of the local cache directory where the redirect is from.\n # This file is for debugging purposes and not used by Conan.\n save(os.path.join(redirect, CONAN_REAL_PATH), path)\n\n # This \"1\" is the way to have a non-existing directory, so commands like\n # shutil.copytree() to it, works. 
It can be removed without compromising the\n # temp folder generator and conan-links consistency\n redirect = os.path.join(redirect, \"1\")\n save(link, redirect)\n return redirect\n\n\ndef rm_conandir(path):\n \"\"\"removal of a directory that might contain a link to a short path\"\"\"\n link = os.path.join(path, CONAN_LINK)\n if os.path.exists(link):\n short_path = load(link)\n rmdir(os.path.dirname(short_path))\n rmdir(path)\n\n\ndef hashed_redirect(base, path, min_length=6, attempts=10):\n max_length = min_length + attempts\n\n full_hash = sha256(path.encode())\n assert len(full_hash) > max_length\n\n for length in range(min_length, max_length):\n redirect = os.path.join(base, full_hash[:length])\n if not os.path.exists(redirect):\n return redirect\n else:\n return None\n", "path": "conans/util/windows.py"}]}
| 3,975 | 167 |
gh_patches_debug_53306
|
rasdani/github-patches
|
git_diff
|
privacyidea__privacyidea-2418
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update requirements for upcoming version 3.5
Push requirements to newest versions according to https://github.com/privacyidea/privacyidea/wiki/Development-workflow#requirements
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import print_function
3 from setuptools import setup, find_packages
4 import os
5 import stat
6 import sys
7
8 #VERSION = "2.1dev4"
9 VERSION = "3.4"
10
11 # Taken from kennethreitz/requests/setup.py
12 package_directory = os.path.realpath(os.path.dirname(__file__))
13
14
15 def get_file_contents(file_path):
16 """Get the context of the file using full path name."""
17 content = ""
18 try:
19 full_path = os.path.join(package_directory, file_path)
20 content = open(full_path, 'r').read()
21 except:
22 print("### could not open file {0!r}".format(file_path), file=sys.stderr)
23 return content
24
25
26 def get_file_list(file_path):
27 full_path = os.path.join(package_directory, file_path)
28 file_list = os.listdir(full_path)
29 # now we need to add the path to the files
30 return [file_path + f for f in file_list]
31
32
33 install_requires = ["beautifulsoup4[lxml]>=4.3.2",
34 "cbor2>=5.0.1",
35 "configobj>=5.0.6",
36 "croniter>=0.3.8",
37 "cryptography>=2.4.2",
38 "defusedxml>=0.4.1",
39 "ecdsa>=0.13.3",
40 "Flask>=0.10.1",
41 "Flask-Babel>=0.9",
42 "Flask-Migrate>=1.2.0",
43 "Flask-Script>=2.0.5",
44 "Flask-SQLAlchemy>=2.0",
45 "Flask-Versioned>=0.9.4",
46 "future>=0.18.2;python_version<'3.0'",
47 "huey[redis]>=1.11.0",
48 "ldap3>=2.6",
49 "netaddr>=0.7.12",
50 "oauth2client>=2.0.1",
51 "passlib[bcrypt]>=1.7.0",
52 "Pillow>=6.2.1",
53 "PyJWT>=1.3.0",
54 "PyMySQL>=0.6.6",
55 "pyOpenSSL>=17.5",
56 "pyrad>=2.0",
57 "python-dateutil>=2.7.3",
58 "python-gnupg>=0.4.4",
59 "PyYAML>=5.1",
60 "qrcode>=6.1",
61 "requests>=2.7.0",
62 "smpplib>=2.0",
63 "SQLAlchemy>=1.3.0",
64 "sqlsoup>=0.9.0"]
65
66
67 def get_man_pages(dir):
68 """
69 Get man pages in a directory.
70 :param dir:
71 :return: list of file names
72 """
73 files = os.listdir(dir)
74 r_files = []
75 for file in files:
76 if file.endswith(".1"):
77 r_files.append(dir + "/" + file)
78 return r_files
79
80
81 def get_scripts(dir):
82 """
83 Get files that are executable
84 :param dir:
85 :return: list of file names
86 """
87 files = os.listdir(dir)
88 r_files = []
89 for file in files:
90 if os.stat(dir + "/" + file)[stat.ST_MODE] & stat.S_IEXEC:
91 r_files.append(dir + "/" + file)
92 return r_files
93
94
95 setup(
96 name='privacyIDEA',
97 version=VERSION,
98 description='privacyIDEA: identity, multifactor authentication (OTP), '
99 'authorization, audit',
100 author='privacyidea.org',
101 license='AGPLv3',
102 author_email='[email protected]',
103 url='http://www.privacyidea.org',
104 keywords='OTP, two factor authentication, management, security',
105 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.9.*',
106 packages=find_packages(),
107 scripts=["pi-manage"] + get_scripts("tools"),
108 extras_require={
109 'doc': ["Sphinx>=1.3.1",
110 "sphinxcontrib-httpdomain>=1.3.0",
111 "sphinxcontrib-plantuml>=0.18"],
112 'test': ["mock>=2.0.0",
113 "pytest>=3.6.0",
114 "pytest-cov>=2.5.1",
115 "responses>=0.9.0"],
116 'postgres': ['psycopg2>=2.8.3']
117 },
118 install_requires=install_requires,
119 include_package_data=True,
120 data_files=[('etc/privacyidea/',
121 ['deploy/apache/privacyideaapp.wsgi',
122 'deploy/privacyidea/dictionary']),
123 ('share/man/man1', get_man_pages("tools")),
124 ('lib/privacyidea/migrations',
125 ["migrations/alembic.ini",
126 "migrations/env.py",
127 "migrations/README",
128 "migrations/script.py.mako"]),
129 ('lib/privacyidea/migrations/versions',
130 get_file_list("migrations/versions/")),
131 ('lib/privacyidea/', ['requirements.txt'])
132 ],
133 classifiers=["Framework :: Flask",
134 "License :: OSI Approved :: "
135 "GNU Affero General Public License v3",
136 "Programming Language :: Python",
137 "Development Status :: 5 - Production/Stable",
138 "Topic :: Internet",
139 "Topic :: Security",
140 "Topic :: System ::"
141 " Systems Administration :: Authentication/Directory",
142 'Programming Language :: Python',
143 'Programming Language :: Python :: 2',
144 'Programming Language :: Python :: 2.7',
145 'Programming Language :: Python :: 3',
146 'Programming Language :: Python :: 3.5',
147 'Programming Language :: Python :: 3.6',
148 'Programming Language :: Python :: 3.7',
149 'Programming Language :: Python :: 3.8'
150 ],
151 zip_safe=False,
152 long_description=get_file_contents('README.rst')
153 )
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -50,6 +50,7 @@
"oauth2client>=2.0.1",
"passlib[bcrypt]>=1.7.0",
"Pillow>=6.2.1",
+ "pydash>=4.7.4",
"PyJWT>=1.3.0",
"PyMySQL>=0.6.6",
"pyOpenSSL>=17.5",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -50,6 +50,7 @@\n \"oauth2client>=2.0.1\",\n \"passlib[bcrypt]>=1.7.0\",\n \"Pillow>=6.2.1\",\n+ \"pydash>=4.7.4\",\n \"PyJWT>=1.3.0\",\n \"PyMySQL>=0.6.6\",\n \"pyOpenSSL>=17.5\",\n", "issue": "Update requirements for upcoming version 3.5\nPush requirements to newest versions according to https://github.com/privacyidea/privacyidea/wiki/Development-workflow#requirements\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom setuptools import setup, find_packages\nimport os\nimport stat\nimport sys\n\n#VERSION = \"2.1dev4\"\nVERSION = \"3.4\"\n\n# Taken from kennethreitz/requests/setup.py\npackage_directory = os.path.realpath(os.path.dirname(__file__))\n\n\ndef get_file_contents(file_path):\n \"\"\"Get the context of the file using full path name.\"\"\"\n content = \"\"\n try:\n full_path = os.path.join(package_directory, file_path)\n content = open(full_path, 'r').read()\n except:\n print(\"### could not open file {0!r}\".format(file_path), file=sys.stderr)\n return content\n\n\ndef get_file_list(file_path):\n full_path = os.path.join(package_directory, file_path)\n file_list = os.listdir(full_path)\n # now we need to add the path to the files\n return [file_path + f for f in file_list]\n\n\ninstall_requires = [\"beautifulsoup4[lxml]>=4.3.2\",\n \"cbor2>=5.0.1\",\n \"configobj>=5.0.6\",\n \"croniter>=0.3.8\",\n \"cryptography>=2.4.2\",\n \"defusedxml>=0.4.1\",\n \"ecdsa>=0.13.3\",\n \"Flask>=0.10.1\",\n \"Flask-Babel>=0.9\",\n \"Flask-Migrate>=1.2.0\",\n \"Flask-Script>=2.0.5\",\n \"Flask-SQLAlchemy>=2.0\",\n \"Flask-Versioned>=0.9.4\",\n \"future>=0.18.2;python_version<'3.0'\",\n \"huey[redis]>=1.11.0\",\n \"ldap3>=2.6\",\n \"netaddr>=0.7.12\",\n \"oauth2client>=2.0.1\",\n \"passlib[bcrypt]>=1.7.0\",\n \"Pillow>=6.2.1\",\n \"PyJWT>=1.3.0\",\n \"PyMySQL>=0.6.6\",\n \"pyOpenSSL>=17.5\",\n \"pyrad>=2.0\",\n \"python-dateutil>=2.7.3\",\n \"python-gnupg>=0.4.4\",\n \"PyYAML>=5.1\",\n \"qrcode>=6.1\",\n \"requests>=2.7.0\",\n \"smpplib>=2.0\",\n \"SQLAlchemy>=1.3.0\",\n \"sqlsoup>=0.9.0\"]\n\n\ndef get_man_pages(dir):\n \"\"\"\n Get man pages in a directory.\n :param dir:\n :return: list of file names\n \"\"\"\n files = os.listdir(dir)\n r_files = []\n for file in files:\n if file.endswith(\".1\"):\n r_files.append(dir + \"/\" + file)\n return r_files\n\n\ndef get_scripts(dir):\n \"\"\"\n Get files that are executable\n :param dir:\n :return: list of file names\n \"\"\"\n files = os.listdir(dir)\n r_files = []\n for file in files:\n if os.stat(dir + \"/\" + file)[stat.ST_MODE] & stat.S_IEXEC:\n r_files.append(dir + \"/\" + file)\n return r_files\n\n\nsetup(\n name='privacyIDEA',\n version=VERSION,\n description='privacyIDEA: identity, multifactor authentication (OTP), '\n 'authorization, audit',\n author='privacyidea.org',\n license='AGPLv3',\n author_email='[email protected]',\n url='http://www.privacyidea.org',\n keywords='OTP, two factor authentication, management, security',\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.9.*',\n packages=find_packages(),\n scripts=[\"pi-manage\"] + get_scripts(\"tools\"),\n extras_require={\n 'doc': [\"Sphinx>=1.3.1\",\n \"sphinxcontrib-httpdomain>=1.3.0\",\n \"sphinxcontrib-plantuml>=0.18\"],\n 'test': [\"mock>=2.0.0\",\n \"pytest>=3.6.0\",\n \"pytest-cov>=2.5.1\",\n \"responses>=0.9.0\"],\n 'postgres': ['psycopg2>=2.8.3']\n },\n install_requires=install_requires,\n include_package_data=True,\n 
data_files=[('etc/privacyidea/',\n ['deploy/apache/privacyideaapp.wsgi',\n 'deploy/privacyidea/dictionary']),\n ('share/man/man1', get_man_pages(\"tools\")),\n ('lib/privacyidea/migrations',\n [\"migrations/alembic.ini\",\n \"migrations/env.py\",\n \"migrations/README\",\n \"migrations/script.py.mako\"]),\n ('lib/privacyidea/migrations/versions',\n get_file_list(\"migrations/versions/\")),\n ('lib/privacyidea/', ['requirements.txt'])\n ],\n classifiers=[\"Framework :: Flask\",\n \"License :: OSI Approved :: \"\n \"GNU Affero General Public License v3\",\n \"Programming Language :: Python\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Internet\",\n \"Topic :: Security\",\n \"Topic :: System ::\"\n \" Systems Administration :: Authentication/Directory\",\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8'\n ],\n zip_safe=False,\n long_description=get_file_contents('README.rst')\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom setuptools import setup, find_packages\nimport os\nimport stat\nimport sys\n\n#VERSION = \"2.1dev4\"\nVERSION = \"3.4\"\n\n# Taken from kennethreitz/requests/setup.py\npackage_directory = os.path.realpath(os.path.dirname(__file__))\n\n\ndef get_file_contents(file_path):\n \"\"\"Get the context of the file using full path name.\"\"\"\n content = \"\"\n try:\n full_path = os.path.join(package_directory, file_path)\n content = open(full_path, 'r').read()\n except:\n print(\"### could not open file {0!r}\".format(file_path), file=sys.stderr)\n return content\n\n\ndef get_file_list(file_path):\n full_path = os.path.join(package_directory, file_path)\n file_list = os.listdir(full_path)\n # now we need to add the path to the files\n return [file_path + f for f in file_list]\n\n\ninstall_requires = [\"beautifulsoup4[lxml]>=4.3.2\",\n \"cbor2>=5.0.1\",\n \"configobj>=5.0.6\",\n \"croniter>=0.3.8\",\n \"cryptography>=2.4.2\",\n \"defusedxml>=0.4.1\",\n \"ecdsa>=0.13.3\",\n \"Flask>=0.10.1\",\n \"Flask-Babel>=0.9\",\n \"Flask-Migrate>=1.2.0\",\n \"Flask-Script>=2.0.5\",\n \"Flask-SQLAlchemy>=2.0\",\n \"Flask-Versioned>=0.9.4\",\n \"future>=0.18.2;python_version<'3.0'\",\n \"huey[redis]>=1.11.0\",\n \"ldap3>=2.6\",\n \"netaddr>=0.7.12\",\n \"oauth2client>=2.0.1\",\n \"passlib[bcrypt]>=1.7.0\",\n \"Pillow>=6.2.1\",\n \"pydash>=4.7.4\",\n \"PyJWT>=1.3.0\",\n \"PyMySQL>=0.6.6\",\n \"pyOpenSSL>=17.5\",\n \"pyrad>=2.0\",\n \"python-dateutil>=2.7.3\",\n \"python-gnupg>=0.4.4\",\n \"PyYAML>=5.1\",\n \"qrcode>=6.1\",\n \"requests>=2.7.0\",\n \"smpplib>=2.0\",\n \"SQLAlchemy>=1.3.0\",\n \"sqlsoup>=0.9.0\"]\n\n\ndef get_man_pages(dir):\n \"\"\"\n Get man pages in a directory.\n :param dir:\n :return: list of file names\n \"\"\"\n files = os.listdir(dir)\n r_files = []\n for file in files:\n if file.endswith(\".1\"):\n r_files.append(dir + \"/\" + file)\n return r_files\n\n\ndef get_scripts(dir):\n \"\"\"\n Get files that are executable\n :param dir:\n :return: list of file names\n \"\"\"\n files = os.listdir(dir)\n r_files = []\n for file in files:\n if os.stat(dir + \"/\" + file)[stat.ST_MODE] & stat.S_IEXEC:\n r_files.append(dir + \"/\" + file)\n return r_files\n\n\nsetup(\n name='privacyIDEA',\n version=VERSION,\n 
description='privacyIDEA: identity, multifactor authentication (OTP), '\n 'authorization, audit',\n author='privacyidea.org',\n license='AGPLv3',\n author_email='[email protected]',\n url='http://www.privacyidea.org',\n keywords='OTP, two factor authentication, management, security',\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.9.*',\n packages=find_packages(),\n scripts=[\"pi-manage\"] + get_scripts(\"tools\"),\n extras_require={\n 'doc': [\"Sphinx>=1.3.1\",\n \"sphinxcontrib-httpdomain>=1.3.0\",\n \"sphinxcontrib-plantuml>=0.18\"],\n 'test': [\"mock>=2.0.0\",\n \"pytest>=3.6.0\",\n \"pytest-cov>=2.5.1\",\n \"responses>=0.9.0\"],\n 'postgres': ['psycopg2>=2.8.3']\n },\n install_requires=install_requires,\n include_package_data=True,\n data_files=[('etc/privacyidea/',\n ['deploy/apache/privacyideaapp.wsgi',\n 'deploy/privacyidea/dictionary']),\n ('share/man/man1', get_man_pages(\"tools\")),\n ('lib/privacyidea/migrations',\n [\"migrations/alembic.ini\",\n \"migrations/env.py\",\n \"migrations/README\",\n \"migrations/script.py.mako\"]),\n ('lib/privacyidea/migrations/versions',\n get_file_list(\"migrations/versions/\")),\n ('lib/privacyidea/', ['requirements.txt'])\n ],\n classifiers=[\"Framework :: Flask\",\n \"License :: OSI Approved :: \"\n \"GNU Affero General Public License v3\",\n \"Programming Language :: Python\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Internet\",\n \"Topic :: Security\",\n \"Topic :: System ::\"\n \" Systems Administration :: Authentication/Directory\",\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8'\n ],\n zip_safe=False,\n long_description=get_file_contents('README.rst')\n)\n", "path": "setup.py"}]}
| 1,965 | 113 |
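Editor's note on the record above: the fix simply adds `pydash>=4.7.4` to `install_requires`. The sketch below shows the kind of check such a requirements bump implies -- comparing the declared version floors against what is installed -- using `pkg_resources`, which ships with setuptools. The package list is illustrative, not the project's full requirement set.

```python
import pkg_resources

declared = ["pydash>=4.7.4", "SQLAlchemy>=1.3.0", "Flask>=0.10.1"]

for spec in declared:
    req = pkg_resources.Requirement.parse(spec)
    try:
        installed = pkg_resources.get_distribution(req.project_name).version
    except pkg_resources.DistributionNotFound:
        print(f"{req.project_name}: not installed")
        continue
    # A version string can be tested directly against the requirement's specifier.
    status = "satisfies" if installed in req else "violates"
    print(f"{req.project_name} {installed} {status} {spec}")
```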
gh_patches_debug_3635
|
rasdani/github-patches
|
git_diff
|
ansible__ansible-lint-1625
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
False positive: async jobs
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and master branch are affected too -->
##### Summary
<!--- Explain the problem briefly below -->
A `command` module task that is run as an async job is incorrectly treated as a normal sync task.
For async tasks the options like `changed_when` (and `failed_when` and so on) are not given to the async `command` task itself, they are given to the `async_status` module task that is run after the async task.
Ansible-lint does not understand this and complains for rule `no-changed-when` for the `command` task.
Example:
```yaml
---
- name: Asynchronous long task
command: alongtask.sh
async: 1000
poll: 0
register: job_sleeper
- name: Wait for asynchronous job to end
async_status:
jid: '{{ job_sleeper.ansible_job_id }}'
register: job_result
until: job_result.finished
retries: 100
delay: 10
changed_when: [....]
```
Note how the `changed_when` is given in the `async_status` task and not in the `command` task.
##### Issue Type
- Bug Report
##### Ansible and Ansible Lint details
<!--- Paste verbatim output between triple backticks -->
```console (paste below)
ansible --version
2.9.21
ansible-lint --version
5.0.8
```
- ansible installation method: pip
- ansible-lint installation method: pip
##### OS / ENVIRONMENT
<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->
EL7.9 all updated
##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
```yaml
---
- name: Asynchronous yum task
command: alongtask.sh
async: 1000
poll: 0
register: job_sleeper
- name: Wait for asynchronous job to end
async_status:
jid: '{{ job_sleeper.ansible_job_id }}'
register: job_result
until: job_result.finished
retries: 100
delay: 10
changed_when: [....]
```
<!--- Paste example playbooks or commands between triple backticks below -->
```console (paste below)
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### Desired Behaviour
<!--- Describe what you expected to happen when running the steps above -->
Ansible-lint should not report `no-changed-when` for a `command` module task run as an async job, since `changed_when` cannot be given to the `command` module task itself.
It should detect that there is a `changed_when` in the following `async_status` task.
##### Actual Behaviour
<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->
Ansible-lint reports a false-positive `no-changed-when` for a `command` module task run as an async job, even though `changed_when` cannot be given to the async task itself - it is given on the subsequent `async_status` module task instead.
<!--- Paste verbatim command output between triple backticks -->
```paste below
```
[minimum complete verifiable example]: http://stackoverflow.com/help/mcve
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/ansiblelint/rules/CommandHasChangesCheckRule.py`
Content:
```
1 # Copyright (c) 2016 Will Thames <[email protected]>
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 # THE SOFTWARE.
20
21 from typing import TYPE_CHECKING, Any, Dict, Union
22
23 from ansiblelint.rules import AnsibleLintRule
24
25 if TYPE_CHECKING:
26 from typing import Optional
27
28 from ansiblelint.file_utils import Lintable
29
30
31 class CommandHasChangesCheckRule(AnsibleLintRule):
32 id = 'no-changed-when'
33 shortdesc = 'Commands should not change things if nothing needs doing'
34 description = (
35 'Commands should either read information (and thus set '
36 '``changed_when``) or not do something if it has already been '
37 'done (using creates/removes) or only do it if another '
38 'check has a particular result (``when``)'
39 )
40 severity = 'HIGH'
41 tags = ['command-shell', 'idempotency']
42 version_added = 'historic'
43
44 _commands = ['command', 'shell', 'raw']
45
46 def matchtask(
47 self, task: Dict[str, Any], file: 'Optional[Lintable]' = None
48 ) -> Union[bool, str]:
49 if task["__ansible_action_type__"] == 'task':
50 if task["action"]["__ansible_module__"] in self._commands:
51 return (
52 'changed_when' not in task
53 and 'when' not in task
54 and 'creates' not in task['action']
55 and 'removes' not in task['action']
56 )
57 return False
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/ansiblelint/rules/CommandHasChangesCheckRule.py b/src/ansiblelint/rules/CommandHasChangesCheckRule.py
--- a/src/ansiblelint/rules/CommandHasChangesCheckRule.py
+++ b/src/ansiblelint/rules/CommandHasChangesCheckRule.py
@@ -53,5 +53,6 @@
and 'when' not in task
and 'creates' not in task['action']
and 'removes' not in task['action']
+ and not ('async' in task and task.get('poll') == 0)
)
return False
|
{"golden_diff": "diff --git a/src/ansiblelint/rules/CommandHasChangesCheckRule.py b/src/ansiblelint/rules/CommandHasChangesCheckRule.py\n--- a/src/ansiblelint/rules/CommandHasChangesCheckRule.py\n+++ b/src/ansiblelint/rules/CommandHasChangesCheckRule.py\n@@ -53,5 +53,6 @@\n and 'when' not in task\n and 'creates' not in task['action']\n and 'removes' not in task['action']\n+ and not ('async' in task and task.get('poll') == 0)\n )\n return False\n", "issue": "False positive: async jobs\n<!--- Verify first that your issue is not already reported on GitHub -->\r\n<!--- Also test if the latest release and master branch are affected too -->\r\n\r\n##### Summary\r\n<!--- Explain the problem briefly below -->\r\nA `command` module task that is run as an async job is incorrectly treated as a normal sync task.\r\n\r\nFor async tasks the options like `changed_when` (and `failed_when` and so on) are not given to the async `command` task itself, they are given to the `async_status` module task that is run after the async task.\r\n\r\nAnsible-lint does not understand this and complains for rule `no-changed-when` for the `command` task.\r\n\r\nExample:\r\n```yaml\r\n---\r\n- name: Asynchronous long task\r\n command: alongtask.sh\r\n async: 1000\r\n poll: 0\r\n register: job_sleeper\r\n\r\n- name: Wait for asynchronous job to end\r\n async_status:\r\n jid: '{{ job_sleeper.ansible_job_id }}'\r\n register: job_result\r\n until: job_result.finished\r\n retries: 100\r\n delay: 10\r\n changed_when: [....]\r\n```\r\n\r\nNote how the `changed_when` is given in the `async_status` task and not in the `command` task.\r\n\r\n##### Issue Type\r\n\r\n- Bug Report\r\n\r\n##### Ansible and Ansible Lint details\r\n<!--- Paste verbatim output between triple backticks -->\r\n```console (paste below)\r\nansible --version\r\n2.9.21\r\n\r\nansible-lint --version\r\n5.0.8\r\n\r\n```\r\n\r\n- ansible installation method: pip\r\n- ansible-lint installation method: pip\r\n\r\n##### OS / ENVIRONMENT\r\n<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->\r\nEL7.9 all updated\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->\r\n```yaml\r\n---\r\n- name: Asynchronous yum task\r\n command: alongtask.sh\r\n async: 1000\r\n poll: 0\r\n register: job_sleeper\r\n\r\n- name: Wait for asynchronous job to end\r\n async_status:\r\n jid: '{{ job_sleeper.ansible_job_id }}'\r\n register: job_result\r\n until: job_result.finished\r\n retries: 100\r\n delay: 10\r\n changed_when: [....]\r\n```\r\n\r\n<!--- Paste example playbooks or commands between triple backticks below -->\r\n```console (paste below)\r\n\r\n```\r\n\r\n<!--- HINT: You can paste gist.github.com links for larger files -->\r\n\r\n##### Desired Behaviour\r\n<!--- Describe what you expected to happen when running the steps above -->\r\nAnsible-lint should not detect `no-changed-when` for `command` module task run as async job since the `changed_when` cannot be given to the `command` module task itself.\r\n\r\nIt should detect that there is a `changed_when` in the following `async_status` task.\r\n\r\n##### Actual Behaviour\r\n<!--- Describe what actually happened. 
If possible run with extra verbosity (-vvvv) -->\r\nAnsible-lint detects false positive `no-changed-when` for `command` module task run as async job even though `changed_when` cannot be correctly given for an async task - the `changed_when` is given for the subsequent `async_status` module task.\r\n\r\n<!--- Paste verbatim command output between triple backticks -->\r\n```paste below\r\n\r\n```\r\n\r\n\r\n[minimum complete verifiable example]: http://stackoverflow.com/help/mcve\r\n\n", "before_files": [{"content": "# Copyright (c) 2016 Will Thames <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom typing import TYPE_CHECKING, Any, Dict, Union\n\nfrom ansiblelint.rules import AnsibleLintRule\n\nif TYPE_CHECKING:\n from typing import Optional\n\n from ansiblelint.file_utils import Lintable\n\n\nclass CommandHasChangesCheckRule(AnsibleLintRule):\n id = 'no-changed-when'\n shortdesc = 'Commands should not change things if nothing needs doing'\n description = (\n 'Commands should either read information (and thus set '\n '``changed_when``) or not do something if it has already been '\n 'done (using creates/removes) or only do it if another '\n 'check has a particular result (``when``)'\n )\n severity = 'HIGH'\n tags = ['command-shell', 'idempotency']\n version_added = 'historic'\n\n _commands = ['command', 'shell', 'raw']\n\n def matchtask(\n self, task: Dict[str, Any], file: 'Optional[Lintable]' = None\n ) -> Union[bool, str]:\n if task[\"__ansible_action_type__\"] == 'task':\n if task[\"action\"][\"__ansible_module__\"] in self._commands:\n return (\n 'changed_when' not in task\n and 'when' not in task\n and 'creates' not in task['action']\n and 'removes' not in task['action']\n )\n return False\n", "path": "src/ansiblelint/rules/CommandHasChangesCheckRule.py"}], "after_files": [{"content": "# Copyright (c) 2016 Will Thames <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED 
\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom typing import TYPE_CHECKING, Any, Dict, Union\n\nfrom ansiblelint.rules import AnsibleLintRule\n\nif TYPE_CHECKING:\n from typing import Optional\n\n from ansiblelint.file_utils import Lintable\n\n\nclass CommandHasChangesCheckRule(AnsibleLintRule):\n id = 'no-changed-when'\n shortdesc = 'Commands should not change things if nothing needs doing'\n description = (\n 'Commands should either read information (and thus set '\n '``changed_when``) or not do something if it has already been '\n 'done (using creates/removes) or only do it if another '\n 'check has a particular result (``when``)'\n )\n severity = 'HIGH'\n tags = ['command-shell', 'idempotency']\n version_added = 'historic'\n\n _commands = ['command', 'shell', 'raw']\n\n def matchtask(\n self, task: Dict[str, Any], file: 'Optional[Lintable]' = None\n ) -> Union[bool, str]:\n if task[\"__ansible_action_type__\"] == 'task':\n if task[\"action\"][\"__ansible_module__\"] in self._commands:\n return (\n 'changed_when' not in task\n and 'when' not in task\n and 'creates' not in task['action']\n and 'removes' not in task['action']\n and not ('async' in task and task.get('poll') == 0)\n )\n return False\n", "path": "src/ansiblelint/rules/CommandHasChangesCheckRule.py"}]}
| 1,675 | 129 |
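Editor's note on the record above: the one-line fix makes the rule skip fire-and-forget tasks, i.e. tasks launched with `async` and `poll: 0`, because for those the `changed_when` belongs on the later `async_status` task. The following is a standalone sketch of the resulting predicate -- an illustration of the logic only, not the actual `AnsibleLintRule` subclass.

```python
def needs_changed_when(task):
    """Return True when a command/shell/raw task still needs changed_when."""
    action = task.get("action", {})
    if action.get("__ansible_module__") not in ("command", "shell", "raw"):
        return False
    # Fire-and-forget async tasks report their result via a later async_status
    # task, which is where changed_when belongs.
    if "async" in task and task.get("poll") == 0:
        return False
    return (
        "changed_when" not in task
        and "when" not in task
        and "creates" not in action
        and "removes" not in action
    )


fire_and_forget = {
    "action": {"__ansible_module__": "command"},
    "async": 1000,
    "poll": 0,
    "register": "job_sleeper",
}
assert needs_changed_when(fire_and_forget) is False
```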
gh_patches_debug_42365
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-3660
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Document LogFormatter
Currently, the `LogFormatter` class is only mentioned in the [Release notes](https://docs.scrapy.org/en/latest/news.html) page of the documentation. This class should be properly documented: its API members need reference documentation, and a short section introducing it should be added to the documentation page about [Logging](https://docs.scrapy.org/en/latest/topics/logging.html).
The responses to [Scrapy - Silently drop an item](https://stackoverflow.com/q/13527921/939364) in StackOverflow would be a good starting point.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/logformatter.py`
Content:
```
1 import os
2 import logging
3
4 from twisted.python.failure import Failure
5
6 from scrapy.utils.request import referer_str
7
8 SCRAPEDMSG = u"Scraped from %(src)s" + os.linesep + "%(item)s"
9 DROPPEDMSG = u"Dropped: %(exception)s" + os.linesep + "%(item)s"
10 CRAWLEDMSG = u"Crawled (%(status)s) %(request)s%(request_flags)s (referer: %(referer)s)%(response_flags)s"
11
12
13 class LogFormatter(object):
14 """Class for generating log messages for different actions.
15
16 All methods must return a dictionary listing the parameters ``level``,
17 ``msg`` and ``args`` which are going to be used for constructing the log
18 message when calling logging.log.
19
20 Dictionary keys for the method outputs:
21 * ``level`` should be the log level for that action, you can use those
22 from the python logging library: logging.DEBUG, logging.INFO,
23 logging.WARNING, logging.ERROR and logging.CRITICAL.
24
25 * ``msg`` should be a string that can contain different formatting
26 placeholders. This string, formatted with the provided ``args``, is
27 going to be the log message for that action.
28
29 * ``args`` should be a tuple or dict with the formatting placeholders
30 for ``msg``. The final log message is computed as output['msg'] %
31 output['args'].
32 """
33
34 def crawled(self, request, response, spider):
35 request_flags = ' %s' % str(request.flags) if request.flags else ''
36 response_flags = ' %s' % str(response.flags) if response.flags else ''
37 return {
38 'level': logging.DEBUG,
39 'msg': CRAWLEDMSG,
40 'args': {
41 'status': response.status,
42 'request': request,
43 'request_flags' : request_flags,
44 'referer': referer_str(request),
45 'response_flags': response_flags,
46 # backward compatibility with Scrapy logformatter below 1.4 version
47 'flags': response_flags
48 }
49 }
50
51 def scraped(self, item, response, spider):
52 if isinstance(response, Failure):
53 src = response.getErrorMessage()
54 else:
55 src = response
56 return {
57 'level': logging.DEBUG,
58 'msg': SCRAPEDMSG,
59 'args': {
60 'src': src,
61 'item': item,
62 }
63 }
64
65 def dropped(self, item, exception, response, spider):
66 return {
67 'level': logging.WARNING,
68 'msg': DROPPEDMSG,
69 'args': {
70 'exception': exception,
71 'item': item,
72 }
73 }
74
75 @classmethod
76 def from_crawler(cls, crawler):
77 return cls()
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/logformatter.py b/scrapy/logformatter.py
--- a/scrapy/logformatter.py
+++ b/scrapy/logformatter.py
@@ -12,26 +12,40 @@
class LogFormatter(object):
"""Class for generating log messages for different actions.
-
- All methods must return a dictionary listing the parameters ``level``,
- ``msg`` and ``args`` which are going to be used for constructing the log
- message when calling logging.log.
+
+ All methods must return a dictionary listing the parameters ``level``, ``msg``
+ and ``args`` which are going to be used for constructing the log message when
+ calling ``logging.log``.
Dictionary keys for the method outputs:
- * ``level`` should be the log level for that action, you can use those
- from the python logging library: logging.DEBUG, logging.INFO,
- logging.WARNING, logging.ERROR and logging.CRITICAL.
- * ``msg`` should be a string that can contain different formatting
- placeholders. This string, formatted with the provided ``args``, is
- going to be the log message for that action.
+ * ``level`` is the log level for that action, you can use those from the
+ `python logging library <https://docs.python.org/3/library/logging.html>`_ :
+ ``logging.DEBUG``, ``logging.INFO``, ``logging.WARNING``, ``logging.ERROR``
+ and ``logging.CRITICAL``.
+ * ``msg`` should be a string that can contain different formatting placeholders.
+ This string, formatted with the provided ``args``, is going to be the long message
+ for that action.
+ * ``args`` should be a tuple or dict with the formatting placeholders for ``msg``.
+ The final log message is computed as ``msg % args``.
- * ``args`` should be a tuple or dict with the formatting placeholders
- for ``msg``. The final log message is computed as output['msg'] %
- output['args'].
- """
+ Here is an example on how to create a custom log formatter to lower the severity level of
+ the log message when an item is dropped from the pipeline::
+ class PoliteLogFormatter(logformatter.LogFormatter):
+ def dropped(self, item, exception, response, spider):
+ return {
+ 'level': logging.INFO, # lowering the level from logging.WARNING
+ 'msg': u"Dropped: %(exception)s" + os.linesep + "%(item)s",
+ 'args': {
+ 'exception': exception,
+ 'item': item,
+ }
+ }
+ """
+
def crawled(self, request, response, spider):
+ """Logs a message when the crawler finds a webpage."""
request_flags = ' %s' % str(request.flags) if request.flags else ''
response_flags = ' %s' % str(response.flags) if response.flags else ''
return {
@@ -40,7 +54,7 @@
'args': {
'status': response.status,
'request': request,
- 'request_flags' : request_flags,
+ 'request_flags': request_flags,
'referer': referer_str(request),
'response_flags': response_flags,
# backward compatibility with Scrapy logformatter below 1.4 version
@@ -49,6 +63,7 @@
}
def scraped(self, item, response, spider):
+ """Logs a message when an item is scraped by a spider."""
if isinstance(response, Failure):
src = response.getErrorMessage()
else:
@@ -63,6 +78,7 @@
}
def dropped(self, item, exception, response, spider):
+ """Logs a message when an item is dropped while it is passing through the item pipeline."""
return {
'level': logging.WARNING,
'msg': DROPPEDMSG,
|
{"golden_diff": "diff --git a/scrapy/logformatter.py b/scrapy/logformatter.py\n--- a/scrapy/logformatter.py\n+++ b/scrapy/logformatter.py\n@@ -12,26 +12,40 @@\n \n class LogFormatter(object):\n \"\"\"Class for generating log messages for different actions.\n-\n- All methods must return a dictionary listing the parameters ``level``,\n- ``msg`` and ``args`` which are going to be used for constructing the log\n- message when calling logging.log.\n+ \n+ All methods must return a dictionary listing the parameters ``level``, ``msg``\n+ and ``args`` which are going to be used for constructing the log message when\n+ calling ``logging.log``.\n \n Dictionary keys for the method outputs:\n- * ``level`` should be the log level for that action, you can use those\n- from the python logging library: logging.DEBUG, logging.INFO,\n- logging.WARNING, logging.ERROR and logging.CRITICAL.\n \n- * ``msg`` should be a string that can contain different formatting\n- placeholders. This string, formatted with the provided ``args``, is\n- going to be the log message for that action.\n+ * ``level`` is the log level for that action, you can use those from the\n+ `python logging library <https://docs.python.org/3/library/logging.html>`_ :\n+ ``logging.DEBUG``, ``logging.INFO``, ``logging.WARNING``, ``logging.ERROR``\n+ and ``logging.CRITICAL``.\n+ * ``msg`` should be a string that can contain different formatting placeholders.\n+ This string, formatted with the provided ``args``, is going to be the long message\n+ for that action.\n+ * ``args`` should be a tuple or dict with the formatting placeholders for ``msg``.\n+ The final log message is computed as ``msg % args``.\n \n- * ``args`` should be a tuple or dict with the formatting placeholders\n- for ``msg``. The final log message is computed as output['msg'] %\n- output['args'].\n- \"\"\"\n+ Here is an example on how to create a custom log formatter to lower the severity level of\n+ the log message when an item is dropped from the pipeline::\n \n+ class PoliteLogFormatter(logformatter.LogFormatter):\n+ def dropped(self, item, exception, response, spider):\n+ return {\n+ 'level': logging.INFO, # lowering the level from logging.WARNING\n+ 'msg': u\"Dropped: %(exception)s\" + os.linesep + \"%(item)s\",\n+ 'args': {\n+ 'exception': exception,\n+ 'item': item,\n+ }\n+ }\n+ \"\"\"\n+ \n def crawled(self, request, response, spider):\n+ \"\"\"Logs a message when the crawler finds a webpage.\"\"\"\n request_flags = ' %s' % str(request.flags) if request.flags else ''\n response_flags = ' %s' % str(response.flags) if response.flags else ''\n return {\n@@ -40,7 +54,7 @@\n 'args': {\n 'status': response.status,\n 'request': request,\n- 'request_flags' : request_flags,\n+ 'request_flags': request_flags,\n 'referer': referer_str(request),\n 'response_flags': response_flags,\n # backward compatibility with Scrapy logformatter below 1.4 version\n@@ -49,6 +63,7 @@\n }\n \n def scraped(self, item, response, spider):\n+ \"\"\"Logs a message when an item is scraped by a spider.\"\"\"\n if isinstance(response, Failure):\n src = response.getErrorMessage()\n else:\n@@ -63,6 +78,7 @@\n }\n \n def dropped(self, item, exception, response, spider):\n+ \"\"\"Logs a message when an item is dropped while it is passing through the item pipeline.\"\"\"\n return {\n 'level': logging.WARNING,\n 'msg': DROPPEDMSG,\n", "issue": "Document LogFormatter\nCurrently, the `LogFormatter` class is only mentioned in the [Release notes](https://docs.scrapy.org/en/latest/news.html) page of the documentation. 
This class should be properly documented, both its API members and a small section introducing it on the documentation page about [Logging](https://docs.scrapy.org/en/latest/topics/logging.html).\r\n\r\nThe responses to [Scrapy - Silently drop an item](https://stackoverflow.com/q/13527921/939364) in StackOverflow would be a good starting point.\n", "before_files": [{"content": "import os\nimport logging\n\nfrom twisted.python.failure import Failure\n\nfrom scrapy.utils.request import referer_str\n\nSCRAPEDMSG = u\"Scraped from %(src)s\" + os.linesep + \"%(item)s\"\nDROPPEDMSG = u\"Dropped: %(exception)s\" + os.linesep + \"%(item)s\"\nCRAWLEDMSG = u\"Crawled (%(status)s) %(request)s%(request_flags)s (referer: %(referer)s)%(response_flags)s\"\n\n\nclass LogFormatter(object):\n \"\"\"Class for generating log messages for different actions.\n\n All methods must return a dictionary listing the parameters ``level``,\n ``msg`` and ``args`` which are going to be used for constructing the log\n message when calling logging.log.\n\n Dictionary keys for the method outputs:\n * ``level`` should be the log level for that action, you can use those\n from the python logging library: logging.DEBUG, logging.INFO,\n logging.WARNING, logging.ERROR and logging.CRITICAL.\n\n * ``msg`` should be a string that can contain different formatting\n placeholders. This string, formatted with the provided ``args``, is\n going to be the log message for that action.\n\n * ``args`` should be a tuple or dict with the formatting placeholders\n for ``msg``. The final log message is computed as output['msg'] %\n output['args'].\n \"\"\"\n\n def crawled(self, request, response, spider):\n request_flags = ' %s' % str(request.flags) if request.flags else ''\n response_flags = ' %s' % str(response.flags) if response.flags else ''\n return {\n 'level': logging.DEBUG,\n 'msg': CRAWLEDMSG,\n 'args': {\n 'status': response.status,\n 'request': request,\n 'request_flags' : request_flags,\n 'referer': referer_str(request),\n 'response_flags': response_flags,\n # backward compatibility with Scrapy logformatter below 1.4 version\n 'flags': response_flags\n }\n }\n\n def scraped(self, item, response, spider):\n if isinstance(response, Failure):\n src = response.getErrorMessage()\n else:\n src = response\n return {\n 'level': logging.DEBUG,\n 'msg': SCRAPEDMSG,\n 'args': {\n 'src': src,\n 'item': item,\n }\n }\n\n def dropped(self, item, exception, response, spider):\n return {\n 'level': logging.WARNING,\n 'msg': DROPPEDMSG,\n 'args': {\n 'exception': exception,\n 'item': item,\n }\n }\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls()\n", "path": "scrapy/logformatter.py"}], "after_files": [{"content": "import os\nimport logging\n\nfrom twisted.python.failure import Failure\n\nfrom scrapy.utils.request import referer_str\n\nSCRAPEDMSG = u\"Scraped from %(src)s\" + os.linesep + \"%(item)s\"\nDROPPEDMSG = u\"Dropped: %(exception)s\" + os.linesep + \"%(item)s\"\nCRAWLEDMSG = u\"Crawled (%(status)s) %(request)s%(request_flags)s (referer: %(referer)s)%(response_flags)s\"\n\n\nclass LogFormatter(object):\n \"\"\"Class for generating log messages for different actions.\n \n All methods must return a dictionary listing the parameters ``level``, ``msg``\n and ``args`` which are going to be used for constructing the log message when\n calling ``logging.log``.\n\n Dictionary keys for the method outputs:\n\n * ``level`` is the log level for that action, you can use those from the\n `python logging library 
<https://docs.python.org/3/library/logging.html>`_ :\n ``logging.DEBUG``, ``logging.INFO``, ``logging.WARNING``, ``logging.ERROR``\n and ``logging.CRITICAL``.\n * ``msg`` should be a string that can contain different formatting placeholders.\n This string, formatted with the provided ``args``, is going to be the long message\n for that action.\n * ``args`` should be a tuple or dict with the formatting placeholders for ``msg``.\n The final log message is computed as ``msg % args``.\n\n Here is an example on how to create a custom log formatter to lower the severity level of\n the log message when an item is dropped from the pipeline::\n\n class PoliteLogFormatter(logformatter.LogFormatter):\n def dropped(self, item, exception, response, spider):\n return {\n 'level': logging.INFO, # lowering the level from logging.WARNING\n 'msg': u\"Dropped: %(exception)s\" + os.linesep + \"%(item)s\",\n 'args': {\n 'exception': exception,\n 'item': item,\n }\n }\n \"\"\"\n \n def crawled(self, request, response, spider):\n \"\"\"Logs a message when the crawler finds a webpage.\"\"\"\n request_flags = ' %s' % str(request.flags) if request.flags else ''\n response_flags = ' %s' % str(response.flags) if response.flags else ''\n return {\n 'level': logging.DEBUG,\n 'msg': CRAWLEDMSG,\n 'args': {\n 'status': response.status,\n 'request': request,\n 'request_flags': request_flags,\n 'referer': referer_str(request),\n 'response_flags': response_flags,\n # backward compatibility with Scrapy logformatter below 1.4 version\n 'flags': response_flags\n }\n }\n\n def scraped(self, item, response, spider):\n \"\"\"Logs a message when an item is scraped by a spider.\"\"\"\n if isinstance(response, Failure):\n src = response.getErrorMessage()\n else:\n src = response\n return {\n 'level': logging.DEBUG,\n 'msg': SCRAPEDMSG,\n 'args': {\n 'src': src,\n 'item': item,\n }\n }\n\n def dropped(self, item, exception, response, spider):\n \"\"\"Logs a message when an item is dropped while it is passing through the item pipeline.\"\"\"\n return {\n 'level': logging.WARNING,\n 'msg': DROPPEDMSG,\n 'args': {\n 'exception': exception,\n 'item': item,\n }\n }\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls()\n", "path": "scrapy/logformatter.py"}]}
| 1,115 | 858 |
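Editor's note on the record above: the new docstring documents `LogFormatter` and shows how to subclass it; wiring such a subclass into a project is done through Scrapy's `LOG_FORMATTER` setting. A short sketch follows, with an assumed module path `myproject.logformatter`.

```python
import logging
import os

from scrapy import logformatter


class PoliteLogFormatter(logformatter.LogFormatter):
    """Demote dropped-item messages from WARNING to INFO."""

    def dropped(self, item, exception, response, spider):
        return {
            "level": logging.INFO,
            "msg": "Dropped: %(exception)s" + os.linesep + "%(item)s",
            "args": {"exception": exception, "item": item},
        }


# settings.py -- assumed project layout:
# LOG_FORMATTER = "myproject.logformatter.PoliteLogFormatter"
```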
gh_patches_debug_36614
|
rasdani/github-patches
|
git_diff
|
catalyst-team__catalyst-685
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SupervisedWandbRunner logs wrong number of epochs to WandB
**Describe the bug**
Catalyst 20.02.4
`WandbRunner` is logging the wrong number of epochs to WandB
**To Reproduce**
Steps to reproduce the behavior:
```
from catalyst import dl
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
class Projector(nn.Module):
def __init__(self, input_size):
super().__init__()
self.linear = nn.Linear(input_size, 1)
def forward(self, X):
return self.linear(X).squeeze(-1)
X = torch.rand(16, 10)
y = torch.rand(X.shape[0])
model = Projector(X.shape[1])
dataset = TensorDataset(X, y)
loader = DataLoader(dataset, batch_size=8)
runner = dl.SupervisedWandbRunner()
runner.train(
model=model,
loaders={
"train": loader,
"valid": loader
},
criterion=nn.MSELoss(),
optimizer=optim.Adam(model.parameters()),
logdir="log_xxx_000",
monitoring_params={
"project": "wandb_wrong_epochs"
},
num_epochs=10
)
```
**Expected behavior**
In WandB I see two plots with `MSELoss` with exactly 10 epochs
**Actual behaviour**
In WandB I see two plots with `MSELoss` with 20 epochs (but each has exactly 10 dots)
**Screenshots**
Look at the number of steps: the run shows 20 steps, but it should have 10.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `catalyst/contrib/dl/runner/wandb.py`
Content:
```
1 from typing import Dict, List # isort:skip
2 from pathlib import Path
3 import shutil
4
5 import wandb
6
7 from catalyst.dl import utils
8 from catalyst.dl.core import Experiment, Runner
9 from catalyst.dl.experiment import ConfigExperiment
10 from catalyst.dl.runner import SupervisedRunner
11
12
13 class WandbRunner(Runner):
14 """
15 Runner wrapper with wandb integration hooks.
16 """
17 @staticmethod
18 def _log_metrics(metrics: Dict, mode: str, suffix: str = ""):
19 def key_locate(key: str):
20 """
21 Wandb uses first symbol _ for it service purposes
22 because of that fact, we can not send original metric names
23
24 Args:
25 key: metric name
26 Returns:
27 formatted metric name
28 """
29 if key.startswith("_"):
30 return key[1:]
31 return key
32
33 metrics = {
34 f"{key_locate(key)}/{mode}{suffix}": value
35 for key, value in metrics.items()
36 }
37 wandb.log(metrics)
38
39 def _init(
40 self,
41 log_on_batch_end: bool = False,
42 log_on_epoch_end: bool = True,
43 checkpoints_glob: List = None,
44 ):
45 super()._init()
46 self.log_on_batch_end = log_on_batch_end
47 self.log_on_epoch_end = log_on_epoch_end
48 self.checkpoints_glob = checkpoints_glob
49
50 if (self.log_on_batch_end and not self.log_on_epoch_end) \
51 or (not self.log_on_batch_end and self.log_on_epoch_end):
52 self.batch_log_suffix = ""
53 self.epoch_log_suffix = ""
54 else:
55 self.batch_log_suffix = "_batch"
56 self.epoch_log_suffix = "_epoch"
57
58 def _pre_experiment_hook(self, experiment: Experiment):
59 monitoring_params = experiment.monitoring_params
60 monitoring_params["dir"] = str(Path(experiment.logdir).absolute())
61
62 log_on_batch_end: bool = \
63 monitoring_params.pop("log_on_batch_end", False)
64 log_on_epoch_end: bool = \
65 monitoring_params.pop("log_on_epoch_end", True)
66 checkpoints_glob: List[str] = \
67 monitoring_params.pop("checkpoints_glob", [])
68 self._init(
69 log_on_batch_end=log_on_batch_end,
70 log_on_epoch_end=log_on_epoch_end,
71 checkpoints_glob=checkpoints_glob,
72 )
73 if isinstance(experiment, ConfigExperiment):
74 exp_config = utils.flatten_dict(experiment.stages_config)
75 wandb.init(**monitoring_params, config=exp_config)
76 else:
77 wandb.init(**monitoring_params)
78
79 def _post_experiment_hook(self, experiment: Experiment):
80 # @TODO: add params for artefacts logging
81 logdir_src = Path(experiment.logdir)
82 # logdir_dst = wandb.run.dir
83 #
84 # exclude = ["wandb", "checkpoints"]
85 # logdir_files = list(logdir_src.glob("*"))
86 # logdir_files = list(
87 # filter(
88 # lambda x: all(z not in str(x) for z in exclude), logdir_files
89 # )
90 # )
91 #
92 # for subdir in logdir_files:
93 # if subdir.is_dir():
94 # os.makedirs(f"{logdir_dst}/{subdir.name}", exist_ok=True)
95 # shutil.rmtree(f"{logdir_dst}/{subdir.name}")
96 # shutil.copytree(
97 # f"{str(subdir.absolute())}",
98 # f"{logdir_dst}/{subdir.name}"
99 # )
100 # else:
101 # shutil.copy2(
102 # f"{str(subdir.absolute())}",
103 # f"{logdir_dst}/{subdir.name}"
104 # )
105 #
106 checkpoints_src = logdir_src.joinpath("checkpoints")
107 checkpoints_dst = Path(wandb.run.dir).joinpath("checkpoints")
108 # os.makedirs(checkpoints_dst, exist_ok=True)
109
110 checkpoint_paths = []
111 for glob in self.checkpoints_glob:
112 checkpoint_paths.extend(list(checkpoints_src.glob(glob)))
113 checkpoint_paths = list(set(checkpoint_paths))
114 for checkpoint_path in checkpoint_paths:
115 shutil.copy2(
116 f"{str(checkpoint_path.absolute())}",
117 f"{checkpoints_dst}/{checkpoint_path.name}"
118 )
119
120 def _run_batch(self, batch):
121 super()._run_batch(batch=batch)
122 if self.log_on_batch_end:
123 mode = self.state.loader_name
124 metrics = self.state.batch_metrics
125 self._log_metrics(
126 metrics=metrics, mode=mode, suffix=self.batch_log_suffix
127 )
128
129 def _run_epoch(self, stage: str, epoch: int):
130 super()._run_epoch(stage=stage, epoch=epoch)
131 if self.log_on_epoch_end:
132 mode_metrics = utils.split_dict_to_subdicts(
133 dct=self.state.epoch_metrics,
134 prefixes=list(self.state.loaders.keys()),
135 extra_key="_base",
136 )
137 for mode, metrics in mode_metrics.items():
138 self._log_metrics(
139 metrics=metrics, mode=mode, suffix=self.epoch_log_suffix
140 )
141
142 def run_experiment(self, experiment: Experiment):
143 self._pre_experiment_hook(experiment=experiment)
144 super().run_experiment(experiment=experiment)
145 self._post_experiment_hook(experiment=experiment)
146
147
148 class SupervisedWandbRunner(WandbRunner, SupervisedRunner):
149 pass
150
151
152 __all__ = ["WandbRunner", "SupervisedWandbRunner"]
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/catalyst/contrib/dl/runner/wandb.py b/catalyst/contrib/dl/runner/wandb.py
--- a/catalyst/contrib/dl/runner/wandb.py
+++ b/catalyst/contrib/dl/runner/wandb.py
@@ -15,7 +15,9 @@
Runner wrapper with wandb integration hooks.
"""
@staticmethod
- def _log_metrics(metrics: Dict, mode: str, suffix: str = ""):
+ def _log_metrics(
+ metrics: Dict, mode: str, suffix: str = "", commit: bool = True
+ ):
def key_locate(key: str):
"""
Wandb uses first symbol _ for it service purposes
@@ -34,7 +36,7 @@
f"{key_locate(key)}/{mode}{suffix}": value
for key, value in metrics.items()
}
- wandb.log(metrics)
+ wandb.log(metrics, commit=commit)
def _init(
self,
@@ -123,7 +125,10 @@
mode = self.state.loader_name
metrics = self.state.batch_metrics
self._log_metrics(
- metrics=metrics, mode=mode, suffix=self.batch_log_suffix
+ metrics=metrics,
+ mode=mode,
+ suffix=self.batch_log_suffix,
+ commit=True
)
def _run_epoch(self, stage: str, epoch: int):
@@ -136,17 +141,26 @@
)
for mode, metrics in mode_metrics.items():
self._log_metrics(
- metrics=metrics, mode=mode, suffix=self.epoch_log_suffix
+ metrics=metrics,
+ mode=mode,
+ suffix=self.epoch_log_suffix,
+ commit=False
)
+ wandb.log(commit=True)
def run_experiment(self, experiment: Experiment):
+ """Starts experiment
+
+ Args:
+ experiment (Experiment): experiment class
+ """
self._pre_experiment_hook(experiment=experiment)
super().run_experiment(experiment=experiment)
self._post_experiment_hook(experiment=experiment)
class SupervisedWandbRunner(WandbRunner, SupervisedRunner):
- pass
+ """SupervisedRunner with WandB"""
__all__ = ["WandbRunner", "SupervisedWandbRunner"]
|
{"golden_diff": "diff --git a/catalyst/contrib/dl/runner/wandb.py b/catalyst/contrib/dl/runner/wandb.py\n--- a/catalyst/contrib/dl/runner/wandb.py\n+++ b/catalyst/contrib/dl/runner/wandb.py\n@@ -15,7 +15,9 @@\n Runner wrapper with wandb integration hooks.\n \"\"\"\n @staticmethod\n- def _log_metrics(metrics: Dict, mode: str, suffix: str = \"\"):\n+ def _log_metrics(\n+ metrics: Dict, mode: str, suffix: str = \"\", commit: bool = True\n+ ):\n def key_locate(key: str):\n \"\"\"\n Wandb uses first symbol _ for it service purposes\n@@ -34,7 +36,7 @@\n f\"{key_locate(key)}/{mode}{suffix}\": value\n for key, value in metrics.items()\n }\n- wandb.log(metrics)\n+ wandb.log(metrics, commit=commit)\n \n def _init(\n self,\n@@ -123,7 +125,10 @@\n mode = self.state.loader_name\n metrics = self.state.batch_metrics\n self._log_metrics(\n- metrics=metrics, mode=mode, suffix=self.batch_log_suffix\n+ metrics=metrics,\n+ mode=mode,\n+ suffix=self.batch_log_suffix,\n+ commit=True\n )\n \n def _run_epoch(self, stage: str, epoch: int):\n@@ -136,17 +141,26 @@\n )\n for mode, metrics in mode_metrics.items():\n self._log_metrics(\n- metrics=metrics, mode=mode, suffix=self.epoch_log_suffix\n+ metrics=metrics,\n+ mode=mode,\n+ suffix=self.epoch_log_suffix,\n+ commit=False\n )\n+ wandb.log(commit=True)\n \n def run_experiment(self, experiment: Experiment):\n+ \"\"\"Starts experiment\n+\n+ Args:\n+ experiment (Experiment): experiment class\n+ \"\"\"\n self._pre_experiment_hook(experiment=experiment)\n super().run_experiment(experiment=experiment)\n self._post_experiment_hook(experiment=experiment)\n \n \n class SupervisedWandbRunner(WandbRunner, SupervisedRunner):\n- pass\n+ \"\"\"SupervisedRunner with WandB\"\"\"\n \n \n __all__ = [\"WandbRunner\", \"SupervisedWandbRunner\"]\n", "issue": "SupervisedWandbRunner logs wrong number of epochs to WandB\n**Describe the bug**\r\nCatalyst 20.02.4\r\n`WandbRunner` is logging wrong number of epochs to WandB\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n```\r\nfrom catalyst import dl\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nfrom torch.utils.data import DataLoader, TensorDataset\r\n\r\nclass Projector(nn.Module):\r\n def __init__(self, input_size):\r\n super().__init__()\r\n self.linear = nn.Linear(input_size, 1)\r\n \r\n def forward(self, X):\r\n return self.linear(X).squeeze(-1)\r\n\r\nX = torch.rand(16, 10)\r\ny = torch.rand(X.shape[0])\r\nmodel = Projector(X.shape[1])\r\ndataset = TensorDataset(X, y)\r\nloader = DataLoader(dataset, batch_size=8)\r\nrunner = dl.SupervisedWandbRunner()\r\n\r\nrunner.train(\r\n model=model,\r\n loaders={\r\n \"train\": loader,\r\n \"valid\": loader\r\n },\r\n criterion=nn.MSELoss(),\r\n optimizer=optim.Adam(model.parameters()),\r\n logdir=\"log_xxx_000\",\r\n monitoring_params={\r\n \"project\": \"wandb_wrong_epochs\"\r\n },\r\n num_epochs=10\r\n)\r\n```\r\n\r\n**Expected behavior**\r\nIn WandB I see two plots with `MSELoss` with exactly 10 epochs\r\n\r\n**Actual behaviour**\r\nIn WandB I see two plots with `MSELoss` with 20 epochs (but each has exactly 10 dots)\r\n\r\n**Screenshots**\r\nLook on number of steps. It has 20 steps. 
But should have 10.\r\n\r\n\n", "before_files": [{"content": "from typing import Dict, List # isort:skip\nfrom pathlib import Path\nimport shutil\n\nimport wandb\n\nfrom catalyst.dl import utils\nfrom catalyst.dl.core import Experiment, Runner\nfrom catalyst.dl.experiment import ConfigExperiment\nfrom catalyst.dl.runner import SupervisedRunner\n\n\nclass WandbRunner(Runner):\n \"\"\"\n Runner wrapper with wandb integration hooks.\n \"\"\"\n @staticmethod\n def _log_metrics(metrics: Dict, mode: str, suffix: str = \"\"):\n def key_locate(key: str):\n \"\"\"\n Wandb uses first symbol _ for it service purposes\n because of that fact, we can not send original metric names\n\n Args:\n key: metric name\n Returns:\n formatted metric name\n \"\"\"\n if key.startswith(\"_\"):\n return key[1:]\n return key\n\n metrics = {\n f\"{key_locate(key)}/{mode}{suffix}\": value\n for key, value in metrics.items()\n }\n wandb.log(metrics)\n\n def _init(\n self,\n log_on_batch_end: bool = False,\n log_on_epoch_end: bool = True,\n checkpoints_glob: List = None,\n ):\n super()._init()\n self.log_on_batch_end = log_on_batch_end\n self.log_on_epoch_end = log_on_epoch_end\n self.checkpoints_glob = checkpoints_glob\n\n if (self.log_on_batch_end and not self.log_on_epoch_end) \\\n or (not self.log_on_batch_end and self.log_on_epoch_end):\n self.batch_log_suffix = \"\"\n self.epoch_log_suffix = \"\"\n else:\n self.batch_log_suffix = \"_batch\"\n self.epoch_log_suffix = \"_epoch\"\n\n def _pre_experiment_hook(self, experiment: Experiment):\n monitoring_params = experiment.monitoring_params\n monitoring_params[\"dir\"] = str(Path(experiment.logdir).absolute())\n\n log_on_batch_end: bool = \\\n monitoring_params.pop(\"log_on_batch_end\", False)\n log_on_epoch_end: bool = \\\n monitoring_params.pop(\"log_on_epoch_end\", True)\n checkpoints_glob: List[str] = \\\n monitoring_params.pop(\"checkpoints_glob\", [])\n self._init(\n log_on_batch_end=log_on_batch_end,\n log_on_epoch_end=log_on_epoch_end,\n checkpoints_glob=checkpoints_glob,\n )\n if isinstance(experiment, ConfigExperiment):\n exp_config = utils.flatten_dict(experiment.stages_config)\n wandb.init(**monitoring_params, config=exp_config)\n else:\n wandb.init(**monitoring_params)\n\n def _post_experiment_hook(self, experiment: Experiment):\n # @TODO: add params for artefacts logging\n logdir_src = Path(experiment.logdir)\n # logdir_dst = wandb.run.dir\n #\n # exclude = [\"wandb\", \"checkpoints\"]\n # logdir_files = list(logdir_src.glob(\"*\"))\n # logdir_files = list(\n # filter(\n # lambda x: all(z not in str(x) for z in exclude), logdir_files\n # )\n # )\n #\n # for subdir in logdir_files:\n # if subdir.is_dir():\n # os.makedirs(f\"{logdir_dst}/{subdir.name}\", exist_ok=True)\n # shutil.rmtree(f\"{logdir_dst}/{subdir.name}\")\n # shutil.copytree(\n # f\"{str(subdir.absolute())}\",\n # f\"{logdir_dst}/{subdir.name}\"\n # )\n # else:\n # shutil.copy2(\n # f\"{str(subdir.absolute())}\",\n # f\"{logdir_dst}/{subdir.name}\"\n # )\n #\n checkpoints_src = logdir_src.joinpath(\"checkpoints\")\n checkpoints_dst = Path(wandb.run.dir).joinpath(\"checkpoints\")\n # os.makedirs(checkpoints_dst, exist_ok=True)\n\n checkpoint_paths = []\n for glob in self.checkpoints_glob:\n checkpoint_paths.extend(list(checkpoints_src.glob(glob)))\n checkpoint_paths = list(set(checkpoint_paths))\n for checkpoint_path in checkpoint_paths:\n shutil.copy2(\n f\"{str(checkpoint_path.absolute())}\",\n f\"{checkpoints_dst}/{checkpoint_path.name}\"\n )\n\n def _run_batch(self, batch):\n 
super()._run_batch(batch=batch)\n if self.log_on_batch_end:\n mode = self.state.loader_name\n metrics = self.state.batch_metrics\n self._log_metrics(\n metrics=metrics, mode=mode, suffix=self.batch_log_suffix\n )\n\n def _run_epoch(self, stage: str, epoch: int):\n super()._run_epoch(stage=stage, epoch=epoch)\n if self.log_on_epoch_end:\n mode_metrics = utils.split_dict_to_subdicts(\n dct=self.state.epoch_metrics,\n prefixes=list(self.state.loaders.keys()),\n extra_key=\"_base\",\n )\n for mode, metrics in mode_metrics.items():\n self._log_metrics(\n metrics=metrics, mode=mode, suffix=self.epoch_log_suffix\n )\n\n def run_experiment(self, experiment: Experiment):\n self._pre_experiment_hook(experiment=experiment)\n super().run_experiment(experiment=experiment)\n self._post_experiment_hook(experiment=experiment)\n\n\nclass SupervisedWandbRunner(WandbRunner, SupervisedRunner):\n pass\n\n\n__all__ = [\"WandbRunner\", \"SupervisedWandbRunner\"]\n", "path": "catalyst/contrib/dl/runner/wandb.py"}], "after_files": [{"content": "from typing import Dict, List # isort:skip\nfrom pathlib import Path\nimport shutil\n\nimport wandb\n\nfrom catalyst.dl import utils\nfrom catalyst.dl.core import Experiment, Runner\nfrom catalyst.dl.experiment import ConfigExperiment\nfrom catalyst.dl.runner import SupervisedRunner\n\n\nclass WandbRunner(Runner):\n \"\"\"\n Runner wrapper with wandb integration hooks.\n \"\"\"\n @staticmethod\n def _log_metrics(\n metrics: Dict, mode: str, suffix: str = \"\", commit: bool = True\n ):\n def key_locate(key: str):\n \"\"\"\n Wandb uses first symbol _ for it service purposes\n because of that fact, we can not send original metric names\n\n Args:\n key: metric name\n Returns:\n formatted metric name\n \"\"\"\n if key.startswith(\"_\"):\n return key[1:]\n return key\n\n metrics = {\n f\"{key_locate(key)}/{mode}{suffix}\": value\n for key, value in metrics.items()\n }\n wandb.log(metrics, commit=commit)\n\n def _init(\n self,\n log_on_batch_end: bool = False,\n log_on_epoch_end: bool = True,\n checkpoints_glob: List = None,\n ):\n super()._init()\n self.log_on_batch_end = log_on_batch_end\n self.log_on_epoch_end = log_on_epoch_end\n self.checkpoints_glob = checkpoints_glob\n\n if (self.log_on_batch_end and not self.log_on_epoch_end) \\\n or (not self.log_on_batch_end and self.log_on_epoch_end):\n self.batch_log_suffix = \"\"\n self.epoch_log_suffix = \"\"\n else:\n self.batch_log_suffix = \"_batch\"\n self.epoch_log_suffix = \"_epoch\"\n\n def _pre_experiment_hook(self, experiment: Experiment):\n monitoring_params = experiment.monitoring_params\n monitoring_params[\"dir\"] = str(Path(experiment.logdir).absolute())\n\n log_on_batch_end: bool = \\\n monitoring_params.pop(\"log_on_batch_end\", False)\n log_on_epoch_end: bool = \\\n monitoring_params.pop(\"log_on_epoch_end\", True)\n checkpoints_glob: List[str] = \\\n monitoring_params.pop(\"checkpoints_glob\", [])\n self._init(\n log_on_batch_end=log_on_batch_end,\n log_on_epoch_end=log_on_epoch_end,\n checkpoints_glob=checkpoints_glob,\n )\n if isinstance(experiment, ConfigExperiment):\n exp_config = utils.flatten_dict(experiment.stages_config)\n wandb.init(**monitoring_params, config=exp_config)\n else:\n wandb.init(**monitoring_params)\n\n def _post_experiment_hook(self, experiment: Experiment):\n # @TODO: add params for artefacts logging\n logdir_src = Path(experiment.logdir)\n # logdir_dst = wandb.run.dir\n #\n # exclude = [\"wandb\", \"checkpoints\"]\n # logdir_files = list(logdir_src.glob(\"*\"))\n # logdir_files = list(\n # 
filter(\n # lambda x: all(z not in str(x) for z in exclude), logdir_files\n # )\n # )\n #\n # for subdir in logdir_files:\n # if subdir.is_dir():\n # os.makedirs(f\"{logdir_dst}/{subdir.name}\", exist_ok=True)\n # shutil.rmtree(f\"{logdir_dst}/{subdir.name}\")\n # shutil.copytree(\n # f\"{str(subdir.absolute())}\",\n # f\"{logdir_dst}/{subdir.name}\"\n # )\n # else:\n # shutil.copy2(\n # f\"{str(subdir.absolute())}\",\n # f\"{logdir_dst}/{subdir.name}\"\n # )\n #\n checkpoints_src = logdir_src.joinpath(\"checkpoints\")\n checkpoints_dst = Path(wandb.run.dir).joinpath(\"checkpoints\")\n # os.makedirs(checkpoints_dst, exist_ok=True)\n\n checkpoint_paths = []\n for glob in self.checkpoints_glob:\n checkpoint_paths.extend(list(checkpoints_src.glob(glob)))\n checkpoint_paths = list(set(checkpoint_paths))\n for checkpoint_path in checkpoint_paths:\n shutil.copy2(\n f\"{str(checkpoint_path.absolute())}\",\n f\"{checkpoints_dst}/{checkpoint_path.name}\"\n )\n\n def _run_batch(self, batch):\n super()._run_batch(batch=batch)\n if self.log_on_batch_end:\n mode = self.state.loader_name\n metrics = self.state.batch_metrics\n self._log_metrics(\n metrics=metrics,\n mode=mode,\n suffix=self.batch_log_suffix,\n commit=True\n )\n\n def _run_epoch(self, stage: str, epoch: int):\n super()._run_epoch(stage=stage, epoch=epoch)\n if self.log_on_epoch_end:\n mode_metrics = utils.split_dict_to_subdicts(\n dct=self.state.epoch_metrics,\n prefixes=list(self.state.loaders.keys()),\n extra_key=\"_base\",\n )\n for mode, metrics in mode_metrics.items():\n self._log_metrics(\n metrics=metrics,\n mode=mode,\n suffix=self.epoch_log_suffix,\n commit=False\n )\n wandb.log(commit=True)\n\n def run_experiment(self, experiment: Experiment):\n \"\"\"Starts experiment\n\n Args:\n experiment (Experiment): experiment class\n \"\"\"\n self._pre_experiment_hook(experiment=experiment)\n super().run_experiment(experiment=experiment)\n self._post_experiment_hook(experiment=experiment)\n\n\nclass SupervisedWandbRunner(WandbRunner, SupervisedRunner):\n \"\"\"SupervisedRunner with WandB\"\"\"\n\n\n__all__ = [\"WandbRunner\", \"SupervisedWandbRunner\"]\n", "path": "catalyst/contrib/dl/runner/wandb.py"}]}
| 2,210 | 529 |
gh_patches_debug_32933
|
rasdani/github-patches
|
git_diff
|
kserve__kserve-524
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sample of image_transformer does not work
/kind bug
Sample under docs/samples/transformer/image_transformer is broken, there's python error in it.
**What steps did you take and what happened:**
[A clear and concise description of what the bug is.]
It's due to PR #492, kfmodel and kfserver is refactored now but the sample still inherit from transformer which does not exist now. Also some other symbols need be renamed.
**What did you expect to happen:**
Sample still works
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/samples/transformer/image_transformer/image_transformer/__main__.py`
Content:
```
1 # Copyright 2019 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import kfserving
16 import argparse
17 from .image_transformer import ImageTransformer
18
19 DEFAULT_MODEL_NAME = "model"
20
21 parser = argparse.ArgumentParser(parents=[kfserving.server.parser])
22 parser.add_argument('--model_name', default=DEFAULT_MODEL_NAME,
23 help='The name that the model is served under.')
24 parser.add_argument('--predictor_host', help='The URL for the model predict function', required=True)
25
26 args, _ = parser.parse_known_args()
27
28 if __name__ == "__main__":
29 transformer = ImageTransformer(args.model_name, predictor_host=args.predictor_host,
30 protocol=args.protocol)
31 kfserver = kfserving.KFServer()
32 kfserver.start(models=[transformer])
33
```
Path: `docs/samples/transformer/image_transformer/image_transformer/image_transformer.py`
Content:
```
1 # Copyright 2019 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import kfserving
16 from typing import List, Dict
17 from kfserving.transformer import Transformer
18 from PIL import Image
19 import torchvision.transforms as transforms
20 import logging
21 import io
22 import numpy as np
23 import base64
24
25 logging.basicConfig(level=kfserving.constants.KFSERVING_LOGLEVEL)
26
27 transform = transforms.Compose(
28 [transforms.ToTensor(),
29 transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
30
31
32 def image_transform(instance):
33 byte_array = base64.b64decode(instance['image_bytes']['b64'])
34 image = Image.open(io.BytesIO(byte_array))
35 a = np.asarray(image)
36 im = Image.fromarray(a)
37 res = transform(im)
38 logging.info(res)
39 return res.tolist()
40
41
42 class ImageTransformer(Transformer):
43
44 def preprocess(self, inputs: Dict) -> Dict:
45 return {'instances': [image_transform(instance) for instance in inputs['instances']]}
46
47 def postprocess(self, inputs: List) -> List:
48 return inputs
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/samples/transformer/image_transformer/image_transformer/__main__.py b/docs/samples/transformer/image_transformer/image_transformer/__main__.py
--- a/docs/samples/transformer/image_transformer/image_transformer/__main__.py
+++ b/docs/samples/transformer/image_transformer/image_transformer/__main__.py
@@ -18,7 +18,7 @@
DEFAULT_MODEL_NAME = "model"
-parser = argparse.ArgumentParser(parents=[kfserving.server.parser])
+parser = argparse.ArgumentParser(parents=[kfserving.kfserver.parser])
parser.add_argument('--model_name', default=DEFAULT_MODEL_NAME,
help='The name that the model is served under.')
parser.add_argument('--predictor_host', help='The URL for the model predict function', required=True)
@@ -26,7 +26,6 @@
args, _ = parser.parse_known_args()
if __name__ == "__main__":
- transformer = ImageTransformer(args.model_name, predictor_host=args.predictor_host,
- protocol=args.protocol)
+ transformer = ImageTransformer(args.model_name, predictor_host=args.predictor_host)
kfserver = kfserving.KFServer()
kfserver.start(models=[transformer])
diff --git a/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py b/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py
--- a/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py
+++ b/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py
@@ -14,7 +14,6 @@
import kfserving
from typing import List, Dict
-from kfserving.transformer import Transformer
from PIL import Image
import torchvision.transforms as transforms
import logging
@@ -39,7 +38,10 @@
return res.tolist()
-class ImageTransformer(Transformer):
+class ImageTransformer(kfserving.KFModel):
+ def __init__(self, name: str, predictor_host: str):
+ super().__init__(name)
+ self.predictor_host = predictor_host
def preprocess(self, inputs: Dict) -> Dict:
return {'instances': [image_transform(instance) for instance in inputs['instances']]}
|
{"golden_diff": "diff --git a/docs/samples/transformer/image_transformer/image_transformer/__main__.py b/docs/samples/transformer/image_transformer/image_transformer/__main__.py\n--- a/docs/samples/transformer/image_transformer/image_transformer/__main__.py\n+++ b/docs/samples/transformer/image_transformer/image_transformer/__main__.py\n@@ -18,7 +18,7 @@\n \n DEFAULT_MODEL_NAME = \"model\"\n \n-parser = argparse.ArgumentParser(parents=[kfserving.server.parser])\n+parser = argparse.ArgumentParser(parents=[kfserving.kfserver.parser])\n parser.add_argument('--model_name', default=DEFAULT_MODEL_NAME,\n help='The name that the model is served under.')\n parser.add_argument('--predictor_host', help='The URL for the model predict function', required=True)\n@@ -26,7 +26,6 @@\n args, _ = parser.parse_known_args()\n \n if __name__ == \"__main__\":\n- transformer = ImageTransformer(args.model_name, predictor_host=args.predictor_host,\n- protocol=args.protocol)\n+ transformer = ImageTransformer(args.model_name, predictor_host=args.predictor_host)\n kfserver = kfserving.KFServer()\n kfserver.start(models=[transformer])\ndiff --git a/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py b/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py\n--- a/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py\n+++ b/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py\n@@ -14,7 +14,6 @@\n \n import kfserving\n from typing import List, Dict\n-from kfserving.transformer import Transformer\n from PIL import Image\n import torchvision.transforms as transforms\n import logging\n@@ -39,7 +38,10 @@\n return res.tolist()\n \n \n-class ImageTransformer(Transformer):\n+class ImageTransformer(kfserving.KFModel):\n+ def __init__(self, name: str, predictor_host: str):\n+ super().__init__(name)\n+ self.predictor_host = predictor_host\n \n def preprocess(self, inputs: Dict) -> Dict:\n return {'instances': [image_transform(instance) for instance in inputs['instances']]}\n", "issue": "Sample of image_transformer does not work\n/kind bug\r\nSample under docs/samples/transformer/image_transformer is broken, there's python error in it.\r\n\r\n**What steps did you take and what happened:**\r\n[A clear and concise description of what the bug is.]\r\nIt's due to PR #492, kfmodel and kfserver is refactored now but the sample still inherit from transformer which does not exist now. 
Also some other symbols need be renamed.\r\n\r\n**What did you expect to happen:**\r\nSample still works\r\n\n", "before_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport kfserving\nimport argparse\nfrom .image_transformer import ImageTransformer\n\nDEFAULT_MODEL_NAME = \"model\"\n\nparser = argparse.ArgumentParser(parents=[kfserving.server.parser])\nparser.add_argument('--model_name', default=DEFAULT_MODEL_NAME,\n help='The name that the model is served under.')\nparser.add_argument('--predictor_host', help='The URL for the model predict function', required=True)\n\nargs, _ = parser.parse_known_args()\n\nif __name__ == \"__main__\":\n transformer = ImageTransformer(args.model_name, predictor_host=args.predictor_host,\n protocol=args.protocol)\n kfserver = kfserving.KFServer()\n kfserver.start(models=[transformer])\n", "path": "docs/samples/transformer/image_transformer/image_transformer/__main__.py"}, {"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport kfserving\nfrom typing import List, Dict\nfrom kfserving.transformer import Transformer\nfrom PIL import Image\nimport torchvision.transforms as transforms\nimport logging\nimport io\nimport numpy as np\nimport base64\n\nlogging.basicConfig(level=kfserving.constants.KFSERVING_LOGLEVEL)\n\ntransform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n\ndef image_transform(instance):\n byte_array = base64.b64decode(instance['image_bytes']['b64'])\n image = Image.open(io.BytesIO(byte_array))\n a = np.asarray(image)\n im = Image.fromarray(a)\n res = transform(im)\n logging.info(res)\n return res.tolist()\n\n\nclass ImageTransformer(Transformer):\n\n def preprocess(self, inputs: Dict) -> Dict:\n return {'instances': [image_transform(instance) for instance in inputs['instances']]}\n\n def postprocess(self, inputs: List) -> List:\n return inputs\n", "path": "docs/samples/transformer/image_transformer/image_transformer/image_transformer.py"}], "after_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT 
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport kfserving\nimport argparse\nfrom .image_transformer import ImageTransformer\n\nDEFAULT_MODEL_NAME = \"model\"\n\nparser = argparse.ArgumentParser(parents=[kfserving.kfserver.parser])\nparser.add_argument('--model_name', default=DEFAULT_MODEL_NAME,\n help='The name that the model is served under.')\nparser.add_argument('--predictor_host', help='The URL for the model predict function', required=True)\n\nargs, _ = parser.parse_known_args()\n\nif __name__ == \"__main__\":\n transformer = ImageTransformer(args.model_name, predictor_host=args.predictor_host)\n kfserver = kfserving.KFServer()\n kfserver.start(models=[transformer])\n", "path": "docs/samples/transformer/image_transformer/image_transformer/__main__.py"}, {"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport kfserving\nfrom typing import List, Dict\nfrom PIL import Image\nimport torchvision.transforms as transforms\nimport logging\nimport io\nimport numpy as np\nimport base64\n\nlogging.basicConfig(level=kfserving.constants.KFSERVING_LOGLEVEL)\n\ntransform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n\ndef image_transform(instance):\n byte_array = base64.b64decode(instance['image_bytes']['b64'])\n image = Image.open(io.BytesIO(byte_array))\n a = np.asarray(image)\n im = Image.fromarray(a)\n res = transform(im)\n logging.info(res)\n return res.tolist()\n\n\nclass ImageTransformer(kfserving.KFModel):\n def __init__(self, name: str, predictor_host: str):\n super().__init__(name)\n self.predictor_host = predictor_host\n\n def preprocess(self, inputs: Dict) -> Dict:\n return {'instances': [image_transform(instance) for instance in inputs['instances']]}\n\n def postprocess(self, inputs: List) -> List:\n return inputs\n", "path": "docs/samples/transformer/image_transformer/image_transformer/image_transformer.py"}]}
| 1,203 | 487 |
gh_patches_debug_22638
|
rasdani/github-patches
|
git_diff
|
hedyorg__hedy-1308
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Class names can be duplicate
**Describe the bug**
Currently, we are able to create multiple classes with an identical name. I think this is undesirable and should be prevented. Might be best to tackle this issue at the same time as #1152.
**Paste the Hedy code & level**
It is super helpful if we can copy-paste the Hedy code to test, so please paste the code here, and don't forget to tell us what level you were in.
**Add a screenshot (optional)**
Make a picture or screenshot to show the issue. Tip! You can make a screenshot and simply paste the image into GitHub with ctrl-v.
**Expected behavior**
A clear and concise description of what you expected to happen.
**What machine and browser you were using (optional)**
If the issue concerns things in the website, let us know:
- What computer you are using (Windows, Mac, Linux?)
- What browser you were using (Chrome, Edge, Safari)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/teacher.py`
Content:
```
1 from website.auth import requires_login, is_teacher, current_user
2 import utils
3 import uuid
4 from flask import g, request, jsonify, redirect
5 from flask_helpers import render_template
6 import os
7 import hedyweb
8 TRANSLATIONS = hedyweb.Translations ()
9 from config import config
10 cookie_name = config ['session'] ['cookie_name']
11
12 def routes (app, database):
13 global DATABASE
14 DATABASE = database
15
16 from app import render_main_menu
17
18 @app.route('/classes', methods=['GET'])
19 @requires_login
20 def get_classes (user):
21 if not is_teacher(user):
22 return 'Only teachers can retrieve classes', 403
23 return jsonify (DATABASE.get_teacher_classes (user ['username'], True))
24
25 @app.route('/class/<class_id>', methods=['GET'])
26 @requires_login
27 def get_class (user, class_id):
28 if not is_teacher(user):
29 return 'Only teachers can retrieve classes', 403
30 Class = DATABASE.get_class (class_id)
31 if not Class or Class ['teacher'] != user ['username']:
32 return utils.page_404 (TRANSLATIONS, render_main_menu('my-profile'), current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('no_such_class'))
33 students = []
34 for student_username in Class.get ('students', []):
35 student = DATABASE.user_by_username (student_username)
36 programs = DATABASE.programs_for_user(student_username)
37 highest_level = max(program['level'] for program in programs) if len(programs) else 0
38 sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date']))
39 if sorted_public_programs:
40 latest_shared = sorted_public_programs[-1]
41 latest_shared['link'] = os.getenv ('BASE_URL') + f"/hedy/{latest_shared['id']}/view"
42 else:
43 latest_shared = None
44 students.append ({'username': student_username, 'last_login': utils.mstoisostring (student ['last_login']), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared})
45
46 if utils.is_testing_request (request):
47 return jsonify ({'students': students, 'link': Class ['link'], 'name': Class ['name'], 'id': Class ['id']})
48 return render_template ('class-overview.html', auth=TRANSLATIONS.get_translations (g.lang, 'Auth'), menu=render_main_menu('my-profile'), current_page='my-profile', class_info={'students': students, 'link': os.getenv ('BASE_URL') + '/hedy/l/' + Class ['link'], 'name': Class ['name'], 'id': Class ['id']})
49
50 @app.route('/class', methods=['POST'])
51 @requires_login
52 def create_class (user):
53 if not is_teacher(user):
54 return 'Only teachers can create classes', 403
55
56 body = request.json
57 # Validations
58 if not isinstance(body, dict):
59 return 'body must be an object', 400
60 if not isinstance(body.get('name'), str):
61 return 'name must be a string', 400
62
63 Class = {
64 'id': uuid.uuid4().hex,
65 'date': utils.timems (),
66 'teacher': user ['username'],
67 'link': utils.random_id_generator (7),
68 'name': body ['name']
69 }
70
71 DATABASE.store_class (Class)
72
73 return {}, 200
74
75 @app.route('/class/<class_id>', methods=['PUT'])
76 @requires_login
77 def update_class (user, class_id):
78 if not is_teacher(user):
79 return 'Only teachers can update classes', 403
80
81 body = request.json
82 # Validations
83 if not isinstance(body, dict):
84 return 'body must be an object', 400
85 if not isinstance(body.get('name'), str):
86 return 'name must be a string', 400
87
88 Class = DATABASE.get_class (class_id)
89 if not Class or Class ['teacher'] != user ['username']:
90 return 'No such class', 404
91
92 Class = DATABASE.update_class (class_id, body ['name'])
93
94 return {}, 200
95
96 @app.route('/class/<class_id>', methods=['DELETE'])
97 @requires_login
98 def delete_class (user, class_id):
99 Class = DATABASE.get_class (class_id)
100 if not Class or Class ['teacher'] != user ['username']:
101 return 'No such class', 404
102
103 DATABASE.delete_class (Class)
104
105 return {}, 200
106
107 @app.route('/class/<class_id>/prejoin/<link>', methods=['GET'])
108 def prejoin_class (class_id, link):
109 Class = DATABASE.get_class (class_id)
110 if not Class or Class ['link'] != link:
111 return utils.page_404 (TRANSLATIONS, render_main_menu('my-profile'), current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('invalid_class_link'))
112 user = {}
113 if request.cookies.get (cookie_name):
114 token = DATABASE.get_token(request.cookies.get (cookie_name))
115 if token:
116 if token ['username'] in Class.get ('students', []):
117 return render_template ('class-already-joined.html', auth=TRANSLATIONS.get_translations (g.lang, 'Auth'), menu=render_main_menu('my-profile'), current_page='my-profile', class_info={'name': Class ['name']})
118 user = DATABASE.user_by_username(token ['username'])
119
120 return render_template ('class-prejoin.html',
121 auth=TRANSLATIONS.get_translations (g.lang, 'Auth'),
122 menu=render_main_menu('my-profile'),
123 current_page='my-profile',
124 class_info={
125 'link': os.getenv ('BASE_URL') + '/class/' + Class ['id'] + '/join/' + Class ['link'] + '?lang=' + g.lang,
126 'name': Class ['name'],
127 })
128
129 @app.route('/class/<class_id>/join/<link>', methods=['GET'])
130 @requires_login
131 def join_class (user, class_id, link):
132 Class = DATABASE.get_class (class_id)
133 if not Class or Class ['link'] != link:
134 return utils.page_404 (TRANSLATIONS, render_main_menu('my-profile'), current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('invalid_class_link'))
135
136 DATABASE.add_student_to_class (Class ['id'], user ['username'])
137
138 return redirect(request.url.replace('/class/' + class_id + '/join/' + link, '/my-profile'), code=302)
139
140 @app.route('/class/<class_id>/student/<student_id>', methods=['DELETE'])
141 @requires_login
142 def leave_class (user, class_id, student_id):
143
144 Class = DATABASE.get_class (class_id)
145 if not Class or Class ['teacher'] != user ['username']:
146 return 'No such class', 404
147
148 DATABASE.remove_student_from_class (Class ['id'], student_id)
149
150 return {}, 200
151
152 @app.route('/hedy/l/<link_id>', methods=['GET'])
153 def resolve_class_link (link_id):
154 Class = DATABASE.resolve_class_link (link_id)
155 if not Class:
156 return utils.page_404 (TRANSLATIONS, render_main_menu('my-profile'), current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('invalid_class_link'))
157 return redirect(request.url.replace('/hedy/l/' + link_id, '/class/' + Class ['id'] + '/prejoin/' + link_id), code=302)
158
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/teacher.py b/website/teacher.py
--- a/website/teacher.py
+++ b/website/teacher.py
@@ -60,6 +60,12 @@
if not isinstance(body.get('name'), str):
return 'name must be a string', 400
+ # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate
+ Classes = DATABASE.get_teacher_classes(user['username'], True)
+ for Class in Classes:
+ if Class['name'] == body['name']:
+ return "duplicate", 200
+
Class = {
'id': uuid.uuid4().hex,
'date': utils.timems (),
@@ -89,6 +95,12 @@
if not Class or Class ['teacher'] != user ['username']:
return 'No such class', 404
+ # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate
+ Classes = DATABASE.get_teacher_classes(user ['username'], True)
+ for Class in Classes:
+ if Class['name'] == body['name']:
+ return "duplicate", 200
+
Class = DATABASE.update_class (class_id, body ['name'])
return {}, 200
|
{"golden_diff": "diff --git a/website/teacher.py b/website/teacher.py\n--- a/website/teacher.py\n+++ b/website/teacher.py\n@@ -60,6 +60,12 @@\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n \n+ # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate\n+ Classes = DATABASE.get_teacher_classes(user['username'], True)\n+ for Class in Classes:\n+ if Class['name'] == body['name']:\n+ return \"duplicate\", 200\n+\n Class = {\n 'id': uuid.uuid4().hex,\n 'date': utils.timems (),\n@@ -89,6 +95,12 @@\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n \n+ # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate\n+ Classes = DATABASE.get_teacher_classes(user ['username'], True)\n+ for Class in Classes:\n+ if Class['name'] == body['name']:\n+ return \"duplicate\", 200\n+\n Class = DATABASE.update_class (class_id, body ['name'])\n \n return {}, 200\n", "issue": "[BUG] Class names can be duplicate\n**Describe the bug**\r\nCurrently, we are able to create multiple classes with an identical name. I think this is undesirable and should be prevented. Might be best to tackle this issue at the same time as #1152.\r\n\r\n**Paste the Hedy code & level**\r\nIt is super helpful if we can copy-paste the Hedy code to test, so please paste the code here, and don't forget to tell us what level you were in.\r\n\r\n**Add a screenshot (optional)**\r\nMake a picture or screenshot to show the issue. Tip! You can make a screenshot and simply paste the image into GitHub with ctrl-v.\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**What machine and browser you were using (optional)**\r\nIf the issue concerns things in the website, let us know:\r\n- What computer you are using (Windows, Mac, Linux?)\r\n- What browser you were using (Chrome, Edge, Safari)\r\n\n", "before_files": [{"content": "from website.auth import requires_login, is_teacher, current_user\nimport utils\nimport uuid\nfrom flask import g, request, jsonify, redirect\nfrom flask_helpers import render_template\nimport os\nimport hedyweb\nTRANSLATIONS = hedyweb.Translations ()\nfrom config import config\ncookie_name = config ['session'] ['cookie_name']\n\ndef routes (app, database):\n global DATABASE\n DATABASE = database\n\n from app import render_main_menu\n\n @app.route('/classes', methods=['GET'])\n @requires_login\n def get_classes (user):\n if not is_teacher(user):\n return 'Only teachers can retrieve classes', 403\n return jsonify (DATABASE.get_teacher_classes (user ['username'], True))\n\n @app.route('/class/<class_id>', methods=['GET'])\n @requires_login\n def get_class (user, class_id):\n if not is_teacher(user):\n return 'Only teachers can retrieve classes', 403\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return utils.page_404 (TRANSLATIONS, render_main_menu('my-profile'), current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('no_such_class'))\n students = []\n for student_username in Class.get ('students', []):\n student = DATABASE.user_by_username (student_username)\n programs = DATABASE.programs_for_user(student_username)\n highest_level = max(program['level'] for program in programs) if len(programs) else 0\n sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date']))\n if 
sorted_public_programs:\n latest_shared = sorted_public_programs[-1]\n latest_shared['link'] = os.getenv ('BASE_URL') + f\"/hedy/{latest_shared['id']}/view\"\n else:\n latest_shared = None\n students.append ({'username': student_username, 'last_login': utils.mstoisostring (student ['last_login']), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared})\n\n if utils.is_testing_request (request):\n return jsonify ({'students': students, 'link': Class ['link'], 'name': Class ['name'], 'id': Class ['id']})\n return render_template ('class-overview.html', auth=TRANSLATIONS.get_translations (g.lang, 'Auth'), menu=render_main_menu('my-profile'), current_page='my-profile', class_info={'students': students, 'link': os.getenv ('BASE_URL') + '/hedy/l/' + Class ['link'], 'name': Class ['name'], 'id': Class ['id']})\n\n @app.route('/class', methods=['POST'])\n @requires_login\n def create_class (user):\n if not is_teacher(user):\n return 'Only teachers can create classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n Class = {\n 'id': uuid.uuid4().hex,\n 'date': utils.timems (),\n 'teacher': user ['username'],\n 'link': utils.random_id_generator (7),\n 'name': body ['name']\n }\n\n DATABASE.store_class (Class)\n\n return {}, 200\n\n @app.route('/class/<class_id>', methods=['PUT'])\n @requires_login\n def update_class (user, class_id):\n if not is_teacher(user):\n return 'Only teachers can update classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n Class = DATABASE.update_class (class_id, body ['name'])\n\n return {}, 200\n\n @app.route('/class/<class_id>', methods=['DELETE'])\n @requires_login\n def delete_class (user, class_id):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n DATABASE.delete_class (Class)\n\n return {}, 200\n\n @app.route('/class/<class_id>/prejoin/<link>', methods=['GET'])\n def prejoin_class (class_id, link):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['link'] != link:\n return utils.page_404 (TRANSLATIONS, render_main_menu('my-profile'), current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('invalid_class_link'))\n user = {}\n if request.cookies.get (cookie_name):\n token = DATABASE.get_token(request.cookies.get (cookie_name))\n if token:\n if token ['username'] in Class.get ('students', []):\n return render_template ('class-already-joined.html', auth=TRANSLATIONS.get_translations (g.lang, 'Auth'), menu=render_main_menu('my-profile'), current_page='my-profile', class_info={'name': Class ['name']})\n user = DATABASE.user_by_username(token ['username'])\n\n return render_template ('class-prejoin.html',\n auth=TRANSLATIONS.get_translations (g.lang, 'Auth'),\n menu=render_main_menu('my-profile'),\n current_page='my-profile',\n class_info={\n 'link': os.getenv ('BASE_URL') + '/class/' + Class ['id'] + '/join/' + Class ['link'] + '?lang=' + g.lang,\n 'name': Class ['name'],\n })\n\n @app.route('/class/<class_id>/join/<link>', methods=['GET'])\n @requires_login\n def join_class 
(user, class_id, link):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['link'] != link:\n return utils.page_404 (TRANSLATIONS, render_main_menu('my-profile'), current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('invalid_class_link'))\n\n DATABASE.add_student_to_class (Class ['id'], user ['username'])\n\n return redirect(request.url.replace('/class/' + class_id + '/join/' + link, '/my-profile'), code=302)\n\n @app.route('/class/<class_id>/student/<student_id>', methods=['DELETE'])\n @requires_login\n def leave_class (user, class_id, student_id):\n\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n DATABASE.remove_student_from_class (Class ['id'], student_id)\n\n return {}, 200\n\n @app.route('/hedy/l/<link_id>', methods=['GET'])\n def resolve_class_link (link_id):\n Class = DATABASE.resolve_class_link (link_id)\n if not Class:\n return utils.page_404 (TRANSLATIONS, render_main_menu('my-profile'), current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('invalid_class_link'))\n return redirect(request.url.replace('/hedy/l/' + link_id, '/class/' + Class ['id'] + '/prejoin/' + link_id), code=302)\n", "path": "website/teacher.py"}], "after_files": [{"content": "from website.auth import requires_login, is_teacher, current_user\nimport utils\nimport uuid\nfrom flask import g, request, jsonify, redirect\nfrom flask_helpers import render_template\nimport os\nimport hedyweb\nTRANSLATIONS = hedyweb.Translations ()\nfrom config import config\ncookie_name = config ['session'] ['cookie_name']\n\ndef routes (app, database):\n global DATABASE\n DATABASE = database\n\n from app import render_main_menu\n\n @app.route('/classes', methods=['GET'])\n @requires_login\n def get_classes (user):\n if not is_teacher(user):\n return 'Only teachers can retrieve classes', 403\n return jsonify (DATABASE.get_teacher_classes (user ['username'], True))\n\n @app.route('/class/<class_id>', methods=['GET'])\n @requires_login\n def get_class (user, class_id):\n if not is_teacher(user):\n return 'Only teachers can retrieve classes', 403\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return utils.page_404 (TRANSLATIONS, render_main_menu('my-profile'), current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('no_such_class'))\n students = []\n for student_username in Class.get ('students', []):\n student = DATABASE.user_by_username (student_username)\n programs = DATABASE.programs_for_user(student_username)\n highest_level = max(program['level'] for program in programs) if len(programs) else 0\n sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date']))\n if sorted_public_programs:\n latest_shared = sorted_public_programs[-1]\n latest_shared['link'] = os.getenv ('BASE_URL') + f\"/hedy/{latest_shared['id']}/view\"\n else:\n latest_shared = None\n students.append ({'username': student_username, 'last_login': utils.mstoisostring (student ['last_login']), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared})\n\n if utils.is_testing_request (request):\n return jsonify ({'students': students, 'link': Class ['link'], 'name': Class ['name'], 'id': Class ['id']})\n return render_template ('class-overview.html', auth=TRANSLATIONS.get_translations (g.lang, 'Auth'), menu=render_main_menu('my-profile'), 
current_page='my-profile', class_info={'students': students, 'link': os.getenv ('BASE_URL') + '/hedy/l/' + Class ['link'], 'name': Class ['name'], 'id': Class ['id']})\n\n @app.route('/class', methods=['POST'])\n @requires_login\n def create_class (user):\n if not is_teacher(user):\n return 'Only teachers can create classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate\n Classes = DATABASE.get_teacher_classes(user['username'], True)\n for Class in Classes:\n if Class['name'] == body['name']:\n return \"duplicate\", 200\n\n Class = {\n 'id': uuid.uuid4().hex,\n 'date': utils.timems (),\n 'teacher': user ['username'],\n 'link': utils.random_id_generator (7),\n 'name': body ['name']\n }\n\n DATABASE.store_class (Class)\n\n return {}, 200\n\n @app.route('/class/<class_id>', methods=['PUT'])\n @requires_login\n def update_class (user, class_id):\n if not is_teacher(user):\n return 'Only teachers can update classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate\n Classes = DATABASE.get_teacher_classes(user ['username'], True)\n for Class in Classes:\n if Class['name'] == body['name']:\n return \"duplicate\", 200\n\n Class = DATABASE.update_class (class_id, body ['name'])\n\n return {}, 200\n\n @app.route('/class/<class_id>', methods=['DELETE'])\n @requires_login\n def delete_class (user, class_id):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n DATABASE.delete_class (Class)\n\n return {}, 200\n\n @app.route('/class/<class_id>/prejoin/<link>', methods=['GET'])\n def prejoin_class (class_id, link):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['link'] != link:\n return utils.page_404 (TRANSLATIONS, render_main_menu('my-profile'), current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('invalid_class_link'))\n user = {}\n if request.cookies.get (cookie_name):\n token = DATABASE.get_token(request.cookies.get (cookie_name))\n if token:\n if token ['username'] in Class.get ('students', []):\n return render_template ('class-already-joined.html', auth=TRANSLATIONS.get_translations (g.lang, 'Auth'), menu=render_main_menu('my-profile'), current_page='my-profile', class_info={'name': Class ['name']})\n user = DATABASE.user_by_username(token ['username'])\n\n return render_template ('class-prejoin.html',\n auth=TRANSLATIONS.get_translations (g.lang, 'Auth'),\n menu=render_main_menu('my-profile'),\n current_page='my-profile',\n class_info={\n 'link': os.getenv ('BASE_URL') + '/class/' + Class ['id'] + '/join/' + Class ['link'] + '?lang=' + g.lang,\n 'name': Class ['name'],\n })\n\n @app.route('/class/<class_id>/join/<link>', methods=['GET'])\n @requires_login\n def join_class (user, class_id, link):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['link'] != link:\n return utils.page_404 (TRANSLATIONS, 
render_main_menu('my-profile'), current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('invalid_class_link'))\n\n DATABASE.add_student_to_class (Class ['id'], user ['username'])\n\n return redirect(request.url.replace('/class/' + class_id + '/join/' + link, '/my-profile'), code=302)\n\n @app.route('/class/<class_id>/student/<student_id>', methods=['DELETE'])\n @requires_login\n def leave_class (user, class_id, student_id):\n\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n DATABASE.remove_student_from_class (Class ['id'], student_id)\n\n return {}, 200\n\n @app.route('/hedy/l/<link_id>', methods=['GET'])\n def resolve_class_link (link_id):\n Class = DATABASE.resolve_class_link (link_id)\n if not Class:\n return utils.page_404 (TRANSLATIONS, render_main_menu('my-profile'), current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('invalid_class_link'))\n return redirect(request.url.replace('/hedy/l/' + link_id, '/class/' + Class ['id'] + '/prejoin/' + link_id), code=302)\n", "path": "website/teacher.py"}]}
| 2,527 | 296 |
gh_patches_debug_53989
|
rasdani/github-patches
|
git_diff
|
mkdocs__mkdocs-1329
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Provide length of TableOfContents
Currently, you can only iter over `TableOfContents`. I would like to know the length of it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkdocs/toc.py`
Content:
```
1 # coding: utf-8
2
3 """
4 Deals with generating the per-page table of contents.
5
6 For the sake of simplicity we use an existing markdown extension to generate
7 an HTML table of contents, and then parse that into the underlying data.
8
9 The steps we take to generate a table of contents are:
10
11 * Pre-process the markdown, injecting a [TOC] marker.
12 * Generate HTML from markdown.
13 * Post-process the HTML, spliting the content and the table of contents.
14 * Parse table of contents HTML into the underlying data structure.
15 """
16
17 from __future__ import unicode_literals
18
19 try: # pragma: no cover
20 from html.parser import HTMLParser # noqa
21 except ImportError: # pragma: no cover
22 from HTMLParser import HTMLParser # noqa
23
24
25 class TableOfContents(object):
26 """
27 Represents the table of contents for a given page.
28 """
29 def __init__(self, html):
30 self.items = _parse_html_table_of_contents(html)
31
32 def __iter__(self):
33 return iter(self.items)
34
35 def __str__(self):
36 return ''.join([str(item) for item in self])
37
38
39 class AnchorLink(object):
40 """
41 A single entry in the table of contents.
42 """
43 def __init__(self, title, url):
44 self.title, self.url = title, url
45 self.children = []
46
47 def __str__(self):
48 return self.indent_print()
49
50 def indent_print(self, depth=0):
51 indent = ' ' * depth
52 ret = '%s%s - %s\n' % (indent, self.title, self.url)
53 for item in self.children:
54 ret += item.indent_print(depth + 1)
55 return ret
56
57
58 class TOCParser(HTMLParser):
59
60 def __init__(self):
61 HTMLParser.__init__(self)
62 self.links = []
63
64 self.in_anchor = False
65 self.attrs = None
66 self.title = ''
67
68 # Prior to Python3.4 no convert_charrefs keyword existed.
69 # However, in Python3.5 the default was changed to True.
70 # We need the False behavior in all versions but can only
71 # set it if it exists.
72 if hasattr(self, 'convert_charrefs'):
73 self.convert_charrefs = False
74
75 def handle_starttag(self, tag, attrs):
76
77 if not self.in_anchor:
78 if tag == 'a':
79 self.in_anchor = True
80 self.attrs = dict(attrs)
81
82 def handle_endtag(self, tag):
83 if tag == 'a':
84 self.in_anchor = False
85
86 def handle_data(self, data):
87
88 if self.in_anchor:
89 self.title += data
90
91 def handle_charref(self, ref):
92 self.handle_entityref("#" + ref)
93
94 def handle_entityref(self, ref):
95 self.handle_data("&%s;" % ref)
96
97
98 def _parse_html_table_of_contents(html):
99 """
100 Given a table of contents string that has been automatically generated by
101 the markdown library, parse it into a tree of AnchorLink instances.
102
103 Returns a list of all the parent AnchorLink instances.
104 """
105 lines = html.splitlines()[2:-2]
106 parents = []
107 ret = []
108 for line in lines:
109 parser = TOCParser()
110 parser.feed(line)
111 if parser.title:
112 try:
113 href = parser.attrs['href']
114 except KeyError:
115 continue
116 title = parser.title
117 nav = AnchorLink(title, href)
118 # Add the item to its parent if required. If it is a topmost
119 # item then instead append it to our return value.
120 if parents:
121 parents[-1].children.append(nav)
122 else:
123 ret.append(nav)
124 # If this item has children, store it as the current parent
125 if line.endswith('<ul>'):
126 parents.append(nav)
127 elif line.startswith('</ul>'):
128 if parents:
129 parents.pop()
130
131 # For the table of contents, always mark the first element as active
132 if ret:
133 ret[0].active = True
134
135 return ret
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mkdocs/toc.py b/mkdocs/toc.py
--- a/mkdocs/toc.py
+++ b/mkdocs/toc.py
@@ -32,6 +32,9 @@
def __iter__(self):
return iter(self.items)
+ def __len__(self):
+ return len(self.items)
+
def __str__(self):
return ''.join([str(item) for item in self])
|
{"golden_diff": "diff --git a/mkdocs/toc.py b/mkdocs/toc.py\n--- a/mkdocs/toc.py\n+++ b/mkdocs/toc.py\n@@ -32,6 +32,9 @@\n def __iter__(self):\n return iter(self.items)\n \n+ def __len__(self):\n+ return len(self.items)\n+\n def __str__(self):\n return ''.join([str(item) for item in self])\n", "issue": "Provide length of TableOfContents\nCurrently, you can only iter over `TableOfContents`. I would like to know the length of it.\n", "before_files": [{"content": "# coding: utf-8\n\n\"\"\"\nDeals with generating the per-page table of contents.\n\nFor the sake of simplicity we use an existing markdown extension to generate\nan HTML table of contents, and then parse that into the underlying data.\n\nThe steps we take to generate a table of contents are:\n\n* Pre-process the markdown, injecting a [TOC] marker.\n* Generate HTML from markdown.\n* Post-process the HTML, spliting the content and the table of contents.\n* Parse table of contents HTML into the underlying data structure.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\ntry: # pragma: no cover\n from html.parser import HTMLParser # noqa\nexcept ImportError: # pragma: no cover\n from HTMLParser import HTMLParser # noqa\n\n\nclass TableOfContents(object):\n \"\"\"\n Represents the table of contents for a given page.\n \"\"\"\n def __init__(self, html):\n self.items = _parse_html_table_of_contents(html)\n\n def __iter__(self):\n return iter(self.items)\n\n def __str__(self):\n return ''.join([str(item) for item in self])\n\n\nclass AnchorLink(object):\n \"\"\"\n A single entry in the table of contents.\n \"\"\"\n def __init__(self, title, url):\n self.title, self.url = title, url\n self.children = []\n\n def __str__(self):\n return self.indent_print()\n\n def indent_print(self, depth=0):\n indent = ' ' * depth\n ret = '%s%s - %s\\n' % (indent, self.title, self.url)\n for item in self.children:\n ret += item.indent_print(depth + 1)\n return ret\n\n\nclass TOCParser(HTMLParser):\n\n def __init__(self):\n HTMLParser.__init__(self)\n self.links = []\n\n self.in_anchor = False\n self.attrs = None\n self.title = ''\n\n # Prior to Python3.4 no convert_charrefs keyword existed.\n # However, in Python3.5 the default was changed to True.\n # We need the False behavior in all versions but can only\n # set it if it exists.\n if hasattr(self, 'convert_charrefs'):\n self.convert_charrefs = False\n\n def handle_starttag(self, tag, attrs):\n\n if not self.in_anchor:\n if tag == 'a':\n self.in_anchor = True\n self.attrs = dict(attrs)\n\n def handle_endtag(self, tag):\n if tag == 'a':\n self.in_anchor = False\n\n def handle_data(self, data):\n\n if self.in_anchor:\n self.title += data\n\n def handle_charref(self, ref):\n self.handle_entityref(\"#\" + ref)\n\n def handle_entityref(self, ref):\n self.handle_data(\"&%s;\" % ref)\n\n\ndef _parse_html_table_of_contents(html):\n \"\"\"\n Given a table of contents string that has been automatically generated by\n the markdown library, parse it into a tree of AnchorLink instances.\n\n Returns a list of all the parent AnchorLink instances.\n \"\"\"\n lines = html.splitlines()[2:-2]\n parents = []\n ret = []\n for line in lines:\n parser = TOCParser()\n parser.feed(line)\n if parser.title:\n try:\n href = parser.attrs['href']\n except KeyError:\n continue\n title = parser.title\n nav = AnchorLink(title, href)\n # Add the item to its parent if required. 
If it is a topmost\n # item then instead append it to our return value.\n if parents:\n parents[-1].children.append(nav)\n else:\n ret.append(nav)\n # If this item has children, store it as the current parent\n if line.endswith('<ul>'):\n parents.append(nav)\n elif line.startswith('</ul>'):\n if parents:\n parents.pop()\n\n # For the table of contents, always mark the first element as active\n if ret:\n ret[0].active = True\n\n return ret\n", "path": "mkdocs/toc.py"}], "after_files": [{"content": "# coding: utf-8\n\n\"\"\"\nDeals with generating the per-page table of contents.\n\nFor the sake of simplicity we use an existing markdown extension to generate\nan HTML table of contents, and then parse that into the underlying data.\n\nThe steps we take to generate a table of contents are:\n\n* Pre-process the markdown, injecting a [TOC] marker.\n* Generate HTML from markdown.\n* Post-process the HTML, spliting the content and the table of contents.\n* Parse table of contents HTML into the underlying data structure.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\ntry: # pragma: no cover\n from html.parser import HTMLParser # noqa\nexcept ImportError: # pragma: no cover\n from HTMLParser import HTMLParser # noqa\n\n\nclass TableOfContents(object):\n \"\"\"\n Represents the table of contents for a given page.\n \"\"\"\n def __init__(self, html):\n self.items = _parse_html_table_of_contents(html)\n\n def __iter__(self):\n return iter(self.items)\n\n def __len__(self):\n return len(self.items)\n\n def __str__(self):\n return ''.join([str(item) for item in self])\n\n\nclass AnchorLink(object):\n \"\"\"\n A single entry in the table of contents.\n \"\"\"\n def __init__(self, title, url):\n self.title, self.url = title, url\n self.children = []\n\n def __str__(self):\n return self.indent_print()\n\n def indent_print(self, depth=0):\n indent = ' ' * depth\n ret = '%s%s - %s\\n' % (indent, self.title, self.url)\n for item in self.children:\n ret += item.indent_print(depth + 1)\n return ret\n\n\nclass TOCParser(HTMLParser):\n\n def __init__(self):\n HTMLParser.__init__(self)\n self.links = []\n\n self.in_anchor = False\n self.attrs = None\n self.title = ''\n\n # Prior to Python3.4 no convert_charrefs keyword existed.\n # However, in Python3.5 the default was changed to True.\n # We need the False behavior in all versions but can only\n # set it if it exists.\n if hasattr(self, 'convert_charrefs'):\n self.convert_charrefs = False\n\n def handle_starttag(self, tag, attrs):\n\n if not self.in_anchor:\n if tag == 'a':\n self.in_anchor = True\n self.attrs = dict(attrs)\n\n def handle_endtag(self, tag):\n if tag == 'a':\n self.in_anchor = False\n\n def handle_data(self, data):\n\n if self.in_anchor:\n self.title += data\n\n def handle_charref(self, ref):\n self.handle_entityref(\"#\" + ref)\n\n def handle_entityref(self, ref):\n self.handle_data(\"&%s;\" % ref)\n\n\ndef _parse_html_table_of_contents(html):\n \"\"\"\n Given a table of contents string that has been automatically generated by\n the markdown library, parse it into a tree of AnchorLink instances.\n\n Returns a list of all the parent AnchorLink instances.\n \"\"\"\n lines = html.splitlines()[2:-2]\n parents = []\n ret = []\n for line in lines:\n parser = TOCParser()\n parser.feed(line)\n if parser.title:\n try:\n href = parser.attrs['href']\n except KeyError:\n continue\n title = parser.title\n nav = AnchorLink(title, href)\n # Add the item to its parent if required. 
If it is a topmost\n # item then instead append it to our return value.\n if parents:\n parents[-1].children.append(nav)\n else:\n ret.append(nav)\n # If this item has children, store it as the current parent\n if line.endswith('<ul>'):\n parents.append(nav)\n elif line.startswith('</ul>'):\n if parents:\n parents.pop()\n\n # For the table of contents, always mark the first element as active\n if ret:\n ret[0].active = True\n\n return ret\n", "path": "mkdocs/toc.py"}]}
| 1,468 | 98 |
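The one-line `__len__` in the record above just delegates to the wrapped `items` list. As a generic illustration of the protocol involved (a standalone sketch, not mkdocs code), adding `__len__` to an iterable wrapper is what makes `len()` work on it, and it also changes how empty instances behave in boolean context:

```python
class Items:
    """Minimal stand-in for a wrapper like TableOfContents."""

    def __init__(self, items):
        self.items = items

    def __iter__(self):
        return iter(self.items)

    def __len__(self):
        # mirrors the one-line change in the patch above
        return len(self.items)


seq = Items(["intro", "usage"])
print(len(seq))         # 2 -- len() is now supported
print(bool(Items([])))  # False -- __len__ also makes empty wrappers falsy
```

The truthiness side effect is worth keeping in mind when such a wrapper was previously always truthy.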
gh_patches_debug_1235 | rasdani/github-patches | git_diff | chainer__chainer-5586 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docstring of `functions.forget` is incorrect as `+` doesn't retain inputs anymore
The docstring says that `(x + y) + x` retains the intermediate variable holding `x + y`. 
```
Let ``f`` be a function defined as:
>>> def f(a, b):
... return a + b + a
and, ``x`` and ``y`` be :class:`~chainer.Variable`\\ s:
>>> x = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))
>>> y = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))
When ``z`` is calculated as ``z = f(x, y)``, its intermediate result
``x + y`` is stored in memory. Instead, if you call ``f`` with
``F.forget``:
>>> z = F.forget(f, x, y)
intermediate ``x + y`` is forgotten.
```
But this isn't true for the new-style function of `+`, because addition doesn't require book-kept inputs for backpropagation.
I checked the behavior by the following script, which traverses retained variables.
```python
import chainer
import chainer.functions as F
import numpy as np
def f(a, b):
return (a + b) + a
def recur_check_vars(v, x, y):
creator = v.creator_node
if creator is None:
return
for pnode in creator.inputs:
p = pnode.get_variable()
assert p.data is None or p is x or p is y
print(p)
recur_check_vars(p, x, y)
def main():
x = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))
y = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))
print(x)
print(y)
print()
z = f(x, y)
recur_check_vars(z, x, y)
if __name__ == '__main__':
main()
```
The script doesn't fail, and the output is as follows. We can see that `x + y` is discarded. Living variables `x` and `y` are retrieved, as each `VariableNode` instance has a weakref to the corresponding variable.
```
variable([-0.7699733 -0.50523347 -0.20869003 -0.7912116 0.92058474])
variable([ 0.58832335 -0.06183117 0.1939743 0.9021316 -0.19973369])
variable(None)
variable([-0.7699733 -0.50523347 -0.20869003 -0.7912116 0.92058474])
variable([ 0.58832335 -0.06183117 0.1939743 0.9021316 -0.19973369])
variable([-0.7699733 -0.50523347 -0.20869003 -0.7912116 0.92058474])
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/util/forget.py`
Content:
```
1 import chainer
2 from chainer import function
3 from chainer import function_node
4 from chainer import variable
5
6
7 def _call_func(func, xs):
8 outs = func(*xs)
9
10 if isinstance(outs, tuple):
11 for i, out in enumerate(outs):
12 if isinstance(out, variable.Variable):
13 continue
14 n = i + 1
15 suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(
16 n if n < 20 else n % 10, 'th')
17 msg = ('{}{} element of a returned tuple is not Variable, '
18 'but is {}').format(n, suffix, type(out))
19 raise RuntimeError(msg)
20 elif isinstance(outs, variable.Variable):
21 outs = (outs,)
22 else:
23 msg = ('A tuple of Variables or a Variable are expected, but {} '
24 'is returned.'.format(type(outs)))
25 raise RuntimeError(msg)
26
27 return outs
28
29
30 class Forget(function_node.FunctionNode):
31
32 def __init__(self, func):
33 if not callable(func):
34 raise TypeError('func must be callable')
35 self.func = func
36
37 def forward(self, inputs):
38 self.retain_inputs(tuple(range(len(inputs))))
39 with function.no_backprop_mode():
40 xs = [variable.Variable(x) for x in inputs]
41 outs = _call_func(self.func, xs)
42 return tuple(out.data for out in outs)
43
44 def backward(self, indexes, grad_outputs):
45 # Double backprop is not allowed
46 if chainer.config.enable_backprop:
47 raise RuntimeError('double backpropagation in functions.forget is '
48 'not allowed.')
49
50 inputs = self.get_retained_inputs()
51 # Create new variables that have no creators
52 dummy_inputs = tuple([variable.Variable(inp.array) for inp in inputs])
53
54 with function.force_backprop_mode():
55 outs = _call_func(self.func, dummy_inputs)
56 assert len(outs) == len(grad_outputs)
57 if len(outs) > 1:
58 # Avoid doing backward multiple times when `outs` is a tuple
59 outs = chainer.functions.identity(*outs)
60
61 for out, grad_output in zip(outs, grad_outputs):
62 out.grad_var = grad_output
63 outs[0].backward()
64
65 return tuple([inp.grad_var for inp in dummy_inputs])
66
67
68 def forget(func, *xs):
69 """Calls a function without storing intermediate results.
70
71 On a forward propagation, Chainer normally stores all intermediate results
72 of :class:`~chainer.variable.VariableNode`\\ s on a computational graph as
73 they are required on backward propagation.
74 Sometimes these results consume too much memory.
75 ``F.forget`` *forgets* such intermediate results on forward propagation,
76 and still supports backpropagation with recalculation.
77
78 On a forward propagation, ``F.forget`` calls a given function with given
79 variables without creating a computational graph. That means, no
80 intermediate results are stored.
81 On a backward propagation, ``F.forget`` calls the given function again to
82 create a computational graph for backpropagation.
83
84 ``F.forget`` reduces internal memory usage, whereas it requires more
85 calculation time as it calls the function twice.
86
87 .. admonition:: Example
88
89 Let ``f`` be a function defined as:
90
91 >>> def f(a, b):
92 ... return a + b + a
93
94 and, ``x`` and ``y`` be :class:`~chainer.Variable`\\ s:
95
96 >>> x = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))
97 >>> y = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))
98
99 When ``z`` is calculated as ``z = f(x, y)``, its intermediate result
100 ``x + y`` is stored in memory. Instead, if you call ``f`` with
101 ``F.forget``:
102
103 >>> z = F.forget(f, x, y)
104
105 intermediate ``x + y`` is forgotten.
106
107 .. note::
108
109 ``F.forget`` does not support functions which behave differently in
110 multiple calls with the same inputs, such as
111 :meth:`F.dropout() <chainer.functions.dropout>` and
112 :meth:`F.negative_sampling() <chainer.functions.negative_sampling>`.
113
114 .. note::
115
116 In case input argument variables are of class :class:`numpy.ndarray` or
117 :class:`cupy.ndarray` objects, arguments will automatically be
118 converted to :class:`~chainer.Variable`\\ s.
119 This conversion takes place to ensure that this function is included
120 in the computational graph to enable backward computations.
121
122 .. note::
123
124 ``F.forget`` does not support double backpropagation.
125
126 Args:
127 func (callable): A function to call. It needs to be called with
128 :class:`~chainer.Variable` object(s) and to return a
129 :class:`~chainer.Variable` object or a tuple of
130 :class:`~chainer.Variable` objects.
131 xs (~chainer.Variable): Argument variables of the function.
132
133 Returns:
134 ~chainer.Variable: A variable ``func`` returns. If it returns a tuple,
135 the method returns a tuple too.
136
137 """
138 xs = tuple(x if isinstance(x, variable.Variable) else
139 variable.Variable(x, requires_grad=True) for x in xs)
140 y = Forget(func).apply(xs)
141 if len(y) == 1:
142 y, = y
143 return y
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainer/functions/util/forget.py b/chainer/functions/util/forget.py
--- a/chainer/functions/util/forget.py
+++ b/chainer/functions/util/forget.py
@@ -89,7 +89,7 @@
Let ``f`` be a function defined as:
>>> def f(a, b):
- ... return a + b + a
+ ... return (a + b) * a
and, ``x`` and ``y`` be :class:`~chainer.Variable`\\ s:
|
{"golden_diff": "diff --git a/chainer/functions/util/forget.py b/chainer/functions/util/forget.py\n--- a/chainer/functions/util/forget.py\n+++ b/chainer/functions/util/forget.py\n@@ -89,7 +89,7 @@\n Let ``f`` be a function defined as:\n \n >>> def f(a, b):\n- ... return a + b + a\n+ ... return (a + b) * a\n \n and, ``x`` and ``y`` be :class:`~chainer.Variable`\\\\ s:\n", "issue": "Docstring of `functions.forget` is incorrect as `+` doesn't retain inputs anymore\nThe docstring says that `(x + y) + x` retains the immediate variable holding `x + y`. \r\n\r\n```\r\n Let ``f`` be a function defined as:\r\n >>> def f(a, b):\r\n ... return a + b + a\r\n and, ``x`` and ``y`` be :class:`~chainer.Variable`\\\\ s:\r\n >>> x = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))\r\n >>> y = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))\r\n When ``z`` is calculated as ``z = f(x, y)``, its intermediate result\r\n ``x + y`` is stored in memory. Instead, if you call ``f`` with\r\n ``F.forget``:\r\n >>> z = F.forget(f, x, y)\r\n intermediate ``x + y`` is forgotten.\r\n```\r\n\r\nBut this isn't true for new-style function of `+`, because addition don't requires book-kept inputs for backpropagation.\r\n\r\nI checked the behavior by the following script, which traverses retained variables.\r\n\r\n```python\r\nimport chainer\r\nimport chainer.functions as F\r\nimport numpy as np \r\n\r\n\r\ndef f(a, b):\r\n return (a + b) + a\r\n\r\n\r\ndef recur_check_vars(v, x, y):\r\n creator = v.creator_node\r\n if creator is None:\r\n return\r\n for pnode in creator.inputs:\r\n p = pnode.get_variable()\r\n assert p.data is None or p is x or p is y\r\n print(p)\r\n recur_check_vars(p, x, y) \r\n\r\n\r\ndef main():\r\n x = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))\r\n y = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))\r\n print(x)\r\n print(y)\r\n print()\r\n z = f(x, y) \r\n recur_check_vars(z, x, y) \r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n```\r\n\r\nThe script doesn't fail, and the output is as follows. We can see that`x + y` is discarded. 
Living variables `x` and `y` are retrieved, as each `VariableNode` instance has a weakref to the corresponding variable.\r\n\r\n```\r\nvariable([-0.7699733 -0.50523347 -0.20869003 -0.7912116 0.92058474])\r\nvariable([ 0.58832335 -0.06183117 0.1939743 0.9021316 -0.19973369])\r\n\r\nvariable(None)\r\nvariable([-0.7699733 -0.50523347 -0.20869003 -0.7912116 0.92058474])\r\nvariable([ 0.58832335 -0.06183117 0.1939743 0.9021316 -0.19973369])\r\nvariable([-0.7699733 -0.50523347 -0.20869003 -0.7912116 0.92058474])\r\n```\n", "before_files": [{"content": "import chainer\nfrom chainer import function\nfrom chainer import function_node\nfrom chainer import variable\n\n\ndef _call_func(func, xs):\n outs = func(*xs)\n\n if isinstance(outs, tuple):\n for i, out in enumerate(outs):\n if isinstance(out, variable.Variable):\n continue\n n = i + 1\n suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(\n n if n < 20 else n % 10, 'th')\n msg = ('{}{} element of a returned tuple is not Variable, '\n 'but is {}').format(n, suffix, type(out))\n raise RuntimeError(msg)\n elif isinstance(outs, variable.Variable):\n outs = (outs,)\n else:\n msg = ('A tuple of Variables or a Variable are expected, but {} '\n 'is returned.'.format(type(outs)))\n raise RuntimeError(msg)\n\n return outs\n\n\nclass Forget(function_node.FunctionNode):\n\n def __init__(self, func):\n if not callable(func):\n raise TypeError('func must be callable')\n self.func = func\n\n def forward(self, inputs):\n self.retain_inputs(tuple(range(len(inputs))))\n with function.no_backprop_mode():\n xs = [variable.Variable(x) for x in inputs]\n outs = _call_func(self.func, xs)\n return tuple(out.data for out in outs)\n\n def backward(self, indexes, grad_outputs):\n # Double backprop is not allowed\n if chainer.config.enable_backprop:\n raise RuntimeError('double backpropagation in functions.forget is '\n 'not allowed.')\n\n inputs = self.get_retained_inputs()\n # Create new variables that have no creators\n dummy_inputs = tuple([variable.Variable(inp.array) for inp in inputs])\n\n with function.force_backprop_mode():\n outs = _call_func(self.func, dummy_inputs)\n assert len(outs) == len(grad_outputs)\n if len(outs) > 1:\n # Avoid doing backward multiple times when `outs` is a tuple\n outs = chainer.functions.identity(*outs)\n\n for out, grad_output in zip(outs, grad_outputs):\n out.grad_var = grad_output\n outs[0].backward()\n\n return tuple([inp.grad_var for inp in dummy_inputs])\n\n\ndef forget(func, *xs):\n \"\"\"Calls a function without storing intermediate results.\n\n On a forward propagation, Chainer normally stores all intermediate results\n of :class:`~chainer.variable.VariableNode`\\\\ s on a computational graph as\n they are required on backward propagation.\n Sometimes these results consume too much memory.\n ``F.forget`` *forgets* such intermediate results on forward propagation,\n and still supports backpropagation with recalculation.\n\n On a forward propagation, ``F.forget`` calls a given function with given\n variables without creating a computational graph. That means, no\n intermediate results are stored.\n On a backward propagation, ``F.forget`` calls the given function again to\n create a computational graph for backpropagation.\n\n ``F.forget`` reduces internal memory usage, whereas it requires more\n calculation time as it calls the function twice.\n\n .. admonition:: Example\n\n Let ``f`` be a function defined as:\n\n >>> def f(a, b):\n ... 
return a + b + a\n\n and, ``x`` and ``y`` be :class:`~chainer.Variable`\\\\ s:\n\n >>> x = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))\n >>> y = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))\n\n When ``z`` is calculated as ``z = f(x, y)``, its intermediate result\n ``x + y`` is stored in memory. Instead, if you call ``f`` with\n ``F.forget``:\n\n >>> z = F.forget(f, x, y)\n\n intermediate ``x + y`` is forgotten.\n\n .. note::\n\n ``F.forget`` does not support functions which behave differently in\n multiple calls with the same inputs, such as\n :meth:`F.dropout() <chainer.functions.dropout>` and\n :meth:`F.negative_sampling() <chainer.functions.negative_sampling>`.\n\n .. note::\n\n In case input argument variables are of class :class:`numpy.ndarray` or\n :class:`cupy.ndarray` objects, arguments will automatically be\n converted to :class:`~chainer.Variable`\\\\ s.\n This conversion takes place to ensure that this function is included\n in the computational graph to enable backward computations.\n\n .. note::\n\n ``F.forget`` does not support double backpropagation.\n\n Args:\n func (callable): A function to call. It needs to be called with\n :class:`~chainer.Variable` object(s) and to return a\n :class:`~chainer.Variable` object or a tuple of\n :class:`~chainer.Variable` objects.\n xs (~chainer.Variable): Argument variables of the function.\n\n Returns:\n ~chainer.Variable: A variable ``func`` returns. If it returns a tuple,\n the method returns a tuple too.\n\n \"\"\"\n xs = tuple(x if isinstance(x, variable.Variable) else\n variable.Variable(x, requires_grad=True) for x in xs)\n y = Forget(func).apply(xs)\n if len(y) == 1:\n y, = y\n return y\n", "path": "chainer/functions/util/forget.py"}], "after_files": [{"content": "import chainer\nfrom chainer import function\nfrom chainer import function_node\nfrom chainer import variable\n\n\ndef _call_func(func, xs):\n outs = func(*xs)\n\n if isinstance(outs, tuple):\n for i, out in enumerate(outs):\n if isinstance(out, variable.Variable):\n continue\n n = i + 1\n suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(\n n if n < 20 else n % 10, 'th')\n msg = ('{}{} element of a returned tuple is not Variable, '\n 'but is {}').format(n, suffix, type(out))\n raise RuntimeError(msg)\n elif isinstance(outs, variable.Variable):\n outs = (outs,)\n else:\n msg = ('A tuple of Variables or a Variable are expected, but {} '\n 'is returned.'.format(type(outs)))\n raise RuntimeError(msg)\n\n return outs\n\n\nclass Forget(function_node.FunctionNode):\n\n def __init__(self, func):\n if not callable(func):\n raise TypeError('func must be callable')\n self.func = func\n\n def forward(self, inputs):\n self.retain_inputs(tuple(range(len(inputs))))\n with function.no_backprop_mode():\n xs = [variable.Variable(x) for x in inputs]\n outs = _call_func(self.func, xs)\n return tuple(out.data for out in outs)\n\n def backward(self, indexes, grad_outputs):\n # Double backprop is not allowed\n if chainer.config.enable_backprop:\n raise RuntimeError('double backpropagation in functions.forget is '\n 'not allowed.')\n\n inputs = self.get_retained_inputs()\n # Create new variables that have no creators\n dummy_inputs = tuple([variable.Variable(inp.array) for inp in inputs])\n\n with function.force_backprop_mode():\n outs = _call_func(self.func, dummy_inputs)\n assert len(outs) == len(grad_outputs)\n if len(outs) > 1:\n # Avoid doing backward multiple times when `outs` is a tuple\n outs = chainer.functions.identity(*outs)\n\n for out, grad_output in 
zip(outs, grad_outputs):\n out.grad_var = grad_output\n outs[0].backward()\n\n return tuple([inp.grad_var for inp in dummy_inputs])\n\n\ndef forget(func, *xs):\n \"\"\"Calls a function without storing intermediate results.\n\n On a forward propagation, Chainer normally stores all intermediate results\n of :class:`~chainer.variable.VariableNode`\\\\ s on a computational graph as\n they are required on backward propagation.\n Sometimes these results consume too much memory.\n ``F.forget`` *forgets* such intermediate results on forward propagation,\n and still supports backpropagation with recalculation.\n\n On a forward propagation, ``F.forget`` calls a given function with given\n variables without creating a computational graph. That means, no\n intermediate results are stored.\n On a backward propagation, ``F.forget`` calls the given function again to\n create a computational graph for backpropagation.\n\n ``F.forget`` reduces internal memory usage, whereas it requires more\n calculation time as it calls the function twice.\n\n .. admonition:: Example\n\n Let ``f`` be a function defined as:\n\n >>> def f(a, b):\n ... return (a + b) * a\n\n and, ``x`` and ``y`` be :class:`~chainer.Variable`\\\\ s:\n\n >>> x = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))\n >>> y = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))\n\n When ``z`` is calculated as ``z = f(x, y)``, its intermediate result\n ``x + y`` is stored in memory. Instead, if you call ``f`` with\n ``F.forget``:\n\n >>> z = F.forget(f, x, y)\n\n intermediate ``x + y`` is forgotten.\n\n .. note::\n\n ``F.forget`` does not support functions which behave differently in\n multiple calls with the same inputs, such as\n :meth:`F.dropout() <chainer.functions.dropout>` and\n :meth:`F.negative_sampling() <chainer.functions.negative_sampling>`.\n\n .. note::\n\n In case input argument variables are of class :class:`numpy.ndarray` or\n :class:`cupy.ndarray` objects, arguments will automatically be\n converted to :class:`~chainer.Variable`\\\\ s.\n This conversion takes place to ensure that this function is included\n in the computational graph to enable backward computations.\n\n .. note::\n\n ``F.forget`` does not support double backpropagation.\n\n Args:\n func (callable): A function to call. It needs to be called with\n :class:`~chainer.Variable` object(s) and to return a\n :class:`~chainer.Variable` object or a tuple of\n :class:`~chainer.Variable` objects.\n xs (~chainer.Variable): Argument variables of the function.\n\n Returns:\n ~chainer.Variable: A variable ``func`` returns. If it returns a tuple,\n the method returns a tuple too.\n\n \"\"\"\n xs = tuple(x if isinstance(x, variable.Variable) else\n variable.Variable(x, requires_grad=True) for x in xs)\n y = Forget(func).apply(xs)\n if len(y) == 1:\n y, = y\n return y\n", "path": "chainer/functions/util/forget.py"}]}
| 2,608 | 117 |
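The reason the accepted fix swaps `a + b + a` for `(a + b) * a` is that multiplication, unlike addition, needs its inputs at backward time (the gradient with respect to one factor involves the other factor), so Chainer's new-style multiplication retains them while addition does not. A rough way to check this reuses the traversal idea from the issue's own script; this is a sketch under the assumption that `creator_node.inputs[...].get_variable()` exposes retained data in the same way the issue's script relies on:

```python
import numpy as np
import chainer

x = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))
y = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))

z_add = (x + y) + x   # addition: gradients are pass-through, inputs not needed
z_mul = (x + y) * x   # multiplication: backward needs both factors

# the first input node of the top-level op is the intermediate (x + y)
inter_add = z_add.creator_node.inputs[0].get_variable()
inter_mul = z_mul.creator_node.inputs[0].get_variable()

print(inter_add.data)  # expected: None      -- the intermediate was discarded
print(inter_mul.data)  # expected: an array  -- the intermediate was retained
```

That retained intermediate is exactly what the corrected docstring example needs in order to show what `F.forget` discards.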
gh_patches_debug_24326 | rasdani/github-patches | git_diff | ansible-collections__community.general-2731 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
xfconf fails to set double value when LC_NUMERIC is set to nb_NO.UTF-8
### Summary
In https://github.com/ansible-collections/community.general/pull/744 `LANGUAGE` is used to force `xfconf-query` to return doubles using the expected format. This fails when `LC_NUMERIC` is set. From the article linked to in https://github.com/ansible-collections/community.general/pull/744, it seems like setting `LANGUAGE` should override `LC_NUMERIC`, but that isn't actually the case.
The correct variable to use in this case is probably `LC_ALL`.
I've attached a terminal recording showing the results. You'll notice that in the first run, the `previous_value` is `0,200000`, while after setting `LC_ALL=C`, the `previous_value` becomes `0.200000` which matches the input and no change is needed.
I've also attached the test-play I used (with a `.txt` extension because github doesn't like `.yml`).
[recording.txt](https://github.com/ansible-collections/community.general/files/6597487/recording.txt)
[test_play.yml](https://github.com/ansible-collections/community.general/files/6597488/test_play.yml.txt)
### Issue Type
Bug Report
### Component Name
xfconf
### Ansible Version
```console (paste below)
$ ansible --version
ansible 2.10.5
config file = /home/mortenlj/.ansible.cfg
configured module search path = ['/home/mortenlj/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3/dist-packages/ansible
executable location = /usr/bin/ansible
python version = 3.9.5 (default, May 11 2021, 08:20:37) [GCC 10.3.0]
```
### Configuration
```console (paste below)
$ ansible-config dump --only-changed
DEFAULT_GATHERING(/home/mortenlj/.ansible.cfg) = smart
DEFAULT_HOST_LIST(/home/mortenlj/.ansible.cfg) = ['/home/mortenlj/code/personal/ansible/hosts']
DEFAULT_VAULT_PASSWORD_FILE(/home/mortenlj/.ansible.cfg) = /home/mortenlj/.ansible/vault-pass
```
### OS / Environment
```
$ cat /etc/os-release
NAME="Ubuntu"
VERSION="21.04 (Hirsute Hippo)"
ID=ubuntu
ID_LIKE=debian
PRETTY_NAME="Ubuntu 21.04"
VERSION_ID="21.04"
HOME_URL="https://www.ubuntu.com/"
SUPPORT_URL="https://help.ubuntu.com/"
BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
VERSION_CODENAME=hirsute
UBUNTU_CODENAME=hirsute
$ apt list xfce4-settings
Listing... Done
xfce4-settings/hirsute,now 4.16.0-1ubuntu1 amd64 [installed,automatic]
```
### Steps to Reproduce
See summary for links.
### Expected Results
I expect the play to not "change" the xfconf property on every run, because the new value should match the already set value.
### Actual Results
See summary. I realise I only used `-vvv`, but I don't think the extra `v` would make it any clearer.
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/module_utils/mh/mixins/cmd.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # (c) 2020, Alexei Znamensky <[email protected]>
3 # Copyright: (c) 2020, Ansible Project
4 # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
5
6 from __future__ import absolute_import, division, print_function
7 __metaclass__ = type
8
9 from functools import partial
10
11
12 class ArgFormat(object):
13 """
14 Argument formatter for use as a command line parameter. Used in CmdMixin.
15 """
16 BOOLEAN = 0
17 PRINTF = 1
18 FORMAT = 2
19
20 @staticmethod
21 def stars_deco(num):
22 if num == 1:
23 def deco(f):
24 return lambda v: f(*v)
25 return deco
26 elif num == 2:
27 def deco(f):
28 return lambda v: f(**v)
29 return deco
30
31 return lambda f: f
32
33 def __init__(self, name, fmt=None, style=FORMAT, stars=0):
34 """
35 Creates a CLI-formatter for one specific argument. The argument may be a module parameter or just a named parameter for
36 the CLI command execution.
37 :param name: Name of the argument to be formatted
38 :param fmt: Either a str to be formatted (using or not printf-style) or a callable that does that
39 :param style: Whether arg_format (as str) should use printf-style formatting.
40 Ignored if arg_format is None or not a str (should be callable).
41 :param stars: A int with 0, 1 or 2 value, indicating to formatting the value as: value, *value or **value
42 """
43 def printf_fmt(_fmt, v):
44 try:
45 return [_fmt % v]
46 except TypeError as e:
47 if e.args[0] != 'not all arguments converted during string formatting':
48 raise
49 return [_fmt]
50
51 _fmts = {
52 ArgFormat.BOOLEAN: lambda _fmt, v: ([_fmt] if bool(v) else []),
53 ArgFormat.PRINTF: printf_fmt,
54 ArgFormat.FORMAT: lambda _fmt, v: [_fmt.format(v)],
55 }
56
57 self.name = name
58 self.stars = stars
59
60 if fmt is None:
61 fmt = "{0}"
62 style = ArgFormat.FORMAT
63
64 if isinstance(fmt, str):
65 func = _fmts[style]
66 self.arg_format = partial(func, fmt)
67 elif isinstance(fmt, list) or isinstance(fmt, tuple):
68 self.arg_format = lambda v: [_fmts[style](f, v)[0] for f in fmt]
69 elif hasattr(fmt, '__call__'):
70 self.arg_format = fmt
71 else:
72 raise TypeError('Parameter fmt must be either: a string, a list/tuple of '
73 'strings or a function: type={0}, value={1}'.format(type(fmt), fmt))
74
75 if stars:
76 self.arg_format = (self.stars_deco(stars))(self.arg_format)
77
78 def to_text(self, value):
79 if value is None:
80 return []
81 func = self.arg_format
82 return [str(p) for p in func(value)]
83
84
85 class CmdMixin(object):
86 """
87 Mixin for mapping module options to running a CLI command with its arguments.
88 """
89 command = None
90 command_args_formats = {}
91 run_command_fixed_options = {}
92 check_rc = False
93 force_lang = "C"
94
95 @property
96 def module_formats(self):
97 result = {}
98 for param in self.module.params.keys():
99 result[param] = ArgFormat(param)
100 return result
101
102 @property
103 def custom_formats(self):
104 result = {}
105 for param, fmt_spec in self.command_args_formats.items():
106 result[param] = ArgFormat(param, **fmt_spec)
107 return result
108
109 def _calculate_args(self, extra_params=None, params=None):
110 def add_arg_formatted_param(_cmd_args, arg_format, _value):
111 args = list(arg_format.to_text(_value))
112 return _cmd_args + args
113
114 def find_format(_param):
115 return self.custom_formats.get(_param, self.module_formats.get(_param))
116
117 extra_params = extra_params or dict()
118 cmd_args = list([self.command]) if isinstance(self.command, str) else list(self.command)
119 try:
120 cmd_args[0] = self.module.get_bin_path(cmd_args[0], required=True)
121 except ValueError:
122 pass
123 param_list = params if params else self.vars.keys()
124
125 for param in param_list:
126 if isinstance(param, dict):
127 if len(param) != 1:
128 raise self.ModuleHelperException("run_command parameter as a dict must "
129 "contain only one key: {0}".format(param))
130 _param = list(param.keys())[0]
131 fmt = find_format(_param)
132 value = param[_param]
133 elif isinstance(param, str):
134 if param in self.vars.keys():
135 fmt = find_format(param)
136 value = self.vars[param]
137 elif param in extra_params:
138 fmt = find_format(param)
139 value = extra_params[param]
140 else:
141 self.module.deprecate("Cannot determine value for parameter: {0}. "
142 "From version 4.0.0 onwards this will generate an exception".format(param),
143 version="4.0.0", collection_name="community.general")
144 continue
145
146 else:
147 raise self.ModuleHelperException("run_command parameter must be either a str or a dict: {0}".format(param))
148 cmd_args = add_arg_formatted_param(cmd_args, fmt, value)
149
150 return cmd_args
151
152 def process_command_output(self, rc, out, err):
153 return rc, out, err
154
155 def run_command(self, extra_params=None, params=None, process_output=None, *args, **kwargs):
156 self.vars.cmd_args = self._calculate_args(extra_params, params)
157 options = dict(self.run_command_fixed_options)
158 env_update = dict(options.get('environ_update', {}))
159 options['check_rc'] = options.get('check_rc', self.check_rc)
160 if self.force_lang:
161 env_update.update({'LANGUAGE': self.force_lang})
162 self.update_output(force_lang=self.force_lang)
163 options['environ_update'] = env_update
164 options.update(kwargs)
165 rc, out, err = self.module.run_command(self.vars.cmd_args, *args, **options)
166 self.update_output(rc=rc, stdout=out, stderr=err)
167 if process_output is None:
168 _process = self.process_command_output
169 else:
170 _process = process_output
171
172 return _process(rc, out, err)
173
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugins/module_utils/mh/mixins/cmd.py b/plugins/module_utils/mh/mixins/cmd.py
--- a/plugins/module_utils/mh/mixins/cmd.py
+++ b/plugins/module_utils/mh/mixins/cmd.py
@@ -155,13 +155,16 @@
def run_command(self, extra_params=None, params=None, process_output=None, *args, **kwargs):
self.vars.cmd_args = self._calculate_args(extra_params, params)
options = dict(self.run_command_fixed_options)
- env_update = dict(options.get('environ_update', {}))
options['check_rc'] = options.get('check_rc', self.check_rc)
+ options.update(kwargs)
+ env_update = dict(options.get('environ_update', {}))
if self.force_lang:
- env_update.update({'LANGUAGE': self.force_lang})
+ env_update.update({
+ 'LANGUAGE': self.force_lang,
+ 'LC_ALL': self.force_lang,
+ })
self.update_output(force_lang=self.force_lang)
options['environ_update'] = env_update
- options.update(kwargs)
rc, out, err = self.module.run_command(self.vars.cmd_args, *args, **options)
self.update_output(rc=rc, stdout=out, stderr=err)
if process_output is None:
|
{"golden_diff": "diff --git a/plugins/module_utils/mh/mixins/cmd.py b/plugins/module_utils/mh/mixins/cmd.py\n--- a/plugins/module_utils/mh/mixins/cmd.py\n+++ b/plugins/module_utils/mh/mixins/cmd.py\n@@ -155,13 +155,16 @@\n def run_command(self, extra_params=None, params=None, process_output=None, *args, **kwargs):\n self.vars.cmd_args = self._calculate_args(extra_params, params)\n options = dict(self.run_command_fixed_options)\n- env_update = dict(options.get('environ_update', {}))\n options['check_rc'] = options.get('check_rc', self.check_rc)\n+ options.update(kwargs)\n+ env_update = dict(options.get('environ_update', {}))\n if self.force_lang:\n- env_update.update({'LANGUAGE': self.force_lang})\n+ env_update.update({\n+ 'LANGUAGE': self.force_lang,\n+ 'LC_ALL': self.force_lang,\n+ })\n self.update_output(force_lang=self.force_lang)\n options['environ_update'] = env_update\n- options.update(kwargs)\n rc, out, err = self.module.run_command(self.vars.cmd_args, *args, **options)\n self.update_output(rc=rc, stdout=out, stderr=err)\n if process_output is None:\n", "issue": "xfconf fails to set double value when LC_NUMERIC is set to nb_NO.UTF-8\n### Summary\r\n\r\nIn https://github.com/ansible-collections/community.general/pull/744 `LANGUAGE` is used to force `xfconf-query` to return doubles using the expected format. This fails when `LC_NUMERIC` is set. From the article linked to in https://github.com/ansible-collections/community.general/pull/744, it seems like setting `LANGUAGE` should override `LC_NUMERIC`, but that isn't actually the case.\r\n\r\nThe correct variable to use in this case is probably `LC_ALL`.\r\n\r\nI've attached a terminal recording showing the results. You'll notice that in the first run, the `previous_value` is `0,200000`, while after setting `LC_ALL=C`, the `previous_value` becomes `0.200000` which matches the input and no change is needed.\r\n\r\nI've also attached the test-play I used (with a `.txt` extension because github doesn't like `.yml`).\r\n\r\n[recording.txt](https://github.com/ansible-collections/community.general/files/6597487/recording.txt)\r\n\r\n[test_play.yml](https://github.com/ansible-collections/community.general/files/6597488/test_play.yml.txt)\r\n\r\n\r\n### Issue Type\r\n\r\nBug Report\r\n\r\n### Component Name\r\n\r\nxfconf\r\n\r\n### Ansible Version\r\n\r\n```console (paste below)\r\n$ ansible --version\r\nansible 2.10.5\r\n config file = /home/mortenlj/.ansible.cfg\r\n configured module search path = ['/home/mortenlj/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/lib/python3/dist-packages/ansible\r\n executable location = /usr/bin/ansible\r\n python version = 3.9.5 (default, May 11 2021, 08:20:37) [GCC 10.3.0]\r\n```\r\n\r\n\r\n### Configuration\r\n\r\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\nDEFAULT_GATHERING(/home/mortenlj/.ansible.cfg) = smart\r\nDEFAULT_HOST_LIST(/home/mortenlj/.ansible.cfg) = ['/home/mortenlj/code/personal/ansible/hosts']\r\nDEFAULT_VAULT_PASSWORD_FILE(/home/mortenlj/.ansible.cfg) = /home/mortenlj/.ansible/vault-pass\r\n```\r\n\r\n\r\n### OS / Environment\r\n\r\n```\r\n$ cat /etc/os-release \r\nNAME=\"Ubuntu\"\r\nVERSION=\"21.04 (Hirsute Hippo)\"\r\nID=ubuntu\r\nID_LIKE=debian\r\nPRETTY_NAME=\"Ubuntu 
21.04\"\r\nVERSION_ID=\"21.04\"\r\nHOME_URL=\"https://www.ubuntu.com/\"\r\nSUPPORT_URL=\"https://help.ubuntu.com/\"\r\nBUG_REPORT_URL=\"https://bugs.launchpad.net/ubuntu/\"\r\nPRIVACY_POLICY_URL=\"https://www.ubuntu.com/legal/terms-and-policies/privacy-policy\"\r\nVERSION_CODENAME=hirsute\r\nUBUNTU_CODENAME=hirsute\r\n\r\n$ apt list xfce4-settings\r\nListing... Done\r\nxfce4-settings/hirsute,now 4.16.0-1ubuntu1 amd64 [installed,automatic]\r\n```\r\n\r\n\r\n### Steps to Reproduce\r\n\r\nSee summary for links.\r\n\r\n### Expected Results\r\n\r\nI expect the play to not \"change\" the xfconf property on every run, because the new value should match the already set value.\r\n\r\n### Actual Results\r\n\r\nSee summary. I realise I only used `-vvv`, but I don't think the extra `v` would make it any clearer.\r\n\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# (c) 2020, Alexei Znamensky <[email protected]>\n# Copyright: (c) 2020, Ansible Project\n# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nfrom functools import partial\n\n\nclass ArgFormat(object):\n \"\"\"\n Argument formatter for use as a command line parameter. Used in CmdMixin.\n \"\"\"\n BOOLEAN = 0\n PRINTF = 1\n FORMAT = 2\n\n @staticmethod\n def stars_deco(num):\n if num == 1:\n def deco(f):\n return lambda v: f(*v)\n return deco\n elif num == 2:\n def deco(f):\n return lambda v: f(**v)\n return deco\n\n return lambda f: f\n\n def __init__(self, name, fmt=None, style=FORMAT, stars=0):\n \"\"\"\n Creates a CLI-formatter for one specific argument. The argument may be a module parameter or just a named parameter for\n the CLI command execution.\n :param name: Name of the argument to be formatted\n :param fmt: Either a str to be formatted (using or not printf-style) or a callable that does that\n :param style: Whether arg_format (as str) should use printf-style formatting.\n Ignored if arg_format is None or not a str (should be callable).\n :param stars: A int with 0, 1 or 2 value, indicating to formatting the value as: value, *value or **value\n \"\"\"\n def printf_fmt(_fmt, v):\n try:\n return [_fmt % v]\n except TypeError as e:\n if e.args[0] != 'not all arguments converted during string formatting':\n raise\n return [_fmt]\n\n _fmts = {\n ArgFormat.BOOLEAN: lambda _fmt, v: ([_fmt] if bool(v) else []),\n ArgFormat.PRINTF: printf_fmt,\n ArgFormat.FORMAT: lambda _fmt, v: [_fmt.format(v)],\n }\n\n self.name = name\n self.stars = stars\n\n if fmt is None:\n fmt = \"{0}\"\n style = ArgFormat.FORMAT\n\n if isinstance(fmt, str):\n func = _fmts[style]\n self.arg_format = partial(func, fmt)\n elif isinstance(fmt, list) or isinstance(fmt, tuple):\n self.arg_format = lambda v: [_fmts[style](f, v)[0] for f in fmt]\n elif hasattr(fmt, '__call__'):\n self.arg_format = fmt\n else:\n raise TypeError('Parameter fmt must be either: a string, a list/tuple of '\n 'strings or a function: type={0}, value={1}'.format(type(fmt), fmt))\n\n if stars:\n self.arg_format = (self.stars_deco(stars))(self.arg_format)\n\n def to_text(self, value):\n if value is None:\n return []\n func = self.arg_format\n return [str(p) for p in func(value)]\n\n\nclass CmdMixin(object):\n \"\"\"\n Mixin for mapping module options to running a CLI command with its arguments.\n \"\"\"\n command = None\n command_args_formats = {}\n 
run_command_fixed_options = {}\n check_rc = False\n force_lang = \"C\"\n\n @property\n def module_formats(self):\n result = {}\n for param in self.module.params.keys():\n result[param] = ArgFormat(param)\n return result\n\n @property\n def custom_formats(self):\n result = {}\n for param, fmt_spec in self.command_args_formats.items():\n result[param] = ArgFormat(param, **fmt_spec)\n return result\n\n def _calculate_args(self, extra_params=None, params=None):\n def add_arg_formatted_param(_cmd_args, arg_format, _value):\n args = list(arg_format.to_text(_value))\n return _cmd_args + args\n\n def find_format(_param):\n return self.custom_formats.get(_param, self.module_formats.get(_param))\n\n extra_params = extra_params or dict()\n cmd_args = list([self.command]) if isinstance(self.command, str) else list(self.command)\n try:\n cmd_args[0] = self.module.get_bin_path(cmd_args[0], required=True)\n except ValueError:\n pass\n param_list = params if params else self.vars.keys()\n\n for param in param_list:\n if isinstance(param, dict):\n if len(param) != 1:\n raise self.ModuleHelperException(\"run_command parameter as a dict must \"\n \"contain only one key: {0}\".format(param))\n _param = list(param.keys())[0]\n fmt = find_format(_param)\n value = param[_param]\n elif isinstance(param, str):\n if param in self.vars.keys():\n fmt = find_format(param)\n value = self.vars[param]\n elif param in extra_params:\n fmt = find_format(param)\n value = extra_params[param]\n else:\n self.module.deprecate(\"Cannot determine value for parameter: {0}. \"\n \"From version 4.0.0 onwards this will generate an exception\".format(param),\n version=\"4.0.0\", collection_name=\"community.general\")\n continue\n\n else:\n raise self.ModuleHelperException(\"run_command parameter must be either a str or a dict: {0}\".format(param))\n cmd_args = add_arg_formatted_param(cmd_args, fmt, value)\n\n return cmd_args\n\n def process_command_output(self, rc, out, err):\n return rc, out, err\n\n def run_command(self, extra_params=None, params=None, process_output=None, *args, **kwargs):\n self.vars.cmd_args = self._calculate_args(extra_params, params)\n options = dict(self.run_command_fixed_options)\n env_update = dict(options.get('environ_update', {}))\n options['check_rc'] = options.get('check_rc', self.check_rc)\n if self.force_lang:\n env_update.update({'LANGUAGE': self.force_lang})\n self.update_output(force_lang=self.force_lang)\n options['environ_update'] = env_update\n options.update(kwargs)\n rc, out, err = self.module.run_command(self.vars.cmd_args, *args, **options)\n self.update_output(rc=rc, stdout=out, stderr=err)\n if process_output is None:\n _process = self.process_command_output\n else:\n _process = process_output\n\n return _process(rc, out, err)\n", "path": "plugins/module_utils/mh/mixins/cmd.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# (c) 2020, Alexei Znamensky <[email protected]>\n# Copyright: (c) 2020, Ansible Project\n# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nfrom functools import partial\n\n\nclass ArgFormat(object):\n \"\"\"\n Argument formatter for use as a command line parameter. 
Used in CmdMixin.\n \"\"\"\n BOOLEAN = 0\n PRINTF = 1\n FORMAT = 2\n\n @staticmethod\n def stars_deco(num):\n if num == 1:\n def deco(f):\n return lambda v: f(*v)\n return deco\n elif num == 2:\n def deco(f):\n return lambda v: f(**v)\n return deco\n\n return lambda f: f\n\n def __init__(self, name, fmt=None, style=FORMAT, stars=0):\n \"\"\"\n Creates a CLI-formatter for one specific argument. The argument may be a module parameter or just a named parameter for\n the CLI command execution.\n :param name: Name of the argument to be formatted\n :param fmt: Either a str to be formatted (using or not printf-style) or a callable that does that\n :param style: Whether arg_format (as str) should use printf-style formatting.\n Ignored if arg_format is None or not a str (should be callable).\n :param stars: A int with 0, 1 or 2 value, indicating to formatting the value as: value, *value or **value\n \"\"\"\n def printf_fmt(_fmt, v):\n try:\n return [_fmt % v]\n except TypeError as e:\n if e.args[0] != 'not all arguments converted during string formatting':\n raise\n return [_fmt]\n\n _fmts = {\n ArgFormat.BOOLEAN: lambda _fmt, v: ([_fmt] if bool(v) else []),\n ArgFormat.PRINTF: printf_fmt,\n ArgFormat.FORMAT: lambda _fmt, v: [_fmt.format(v)],\n }\n\n self.name = name\n self.stars = stars\n\n if fmt is None:\n fmt = \"{0}\"\n style = ArgFormat.FORMAT\n\n if isinstance(fmt, str):\n func = _fmts[style]\n self.arg_format = partial(func, fmt)\n elif isinstance(fmt, list) or isinstance(fmt, tuple):\n self.arg_format = lambda v: [_fmts[style](f, v)[0] for f in fmt]\n elif hasattr(fmt, '__call__'):\n self.arg_format = fmt\n else:\n raise TypeError('Parameter fmt must be either: a string, a list/tuple of '\n 'strings or a function: type={0}, value={1}'.format(type(fmt), fmt))\n\n if stars:\n self.arg_format = (self.stars_deco(stars))(self.arg_format)\n\n def to_text(self, value):\n if value is None:\n return []\n func = self.arg_format\n return [str(p) for p in func(value)]\n\n\nclass CmdMixin(object):\n \"\"\"\n Mixin for mapping module options to running a CLI command with its arguments.\n \"\"\"\n command = None\n command_args_formats = {}\n run_command_fixed_options = {}\n check_rc = False\n force_lang = \"C\"\n\n @property\n def module_formats(self):\n result = {}\n for param in self.module.params.keys():\n result[param] = ArgFormat(param)\n return result\n\n @property\n def custom_formats(self):\n result = {}\n for param, fmt_spec in self.command_args_formats.items():\n result[param] = ArgFormat(param, **fmt_spec)\n return result\n\n def _calculate_args(self, extra_params=None, params=None):\n def add_arg_formatted_param(_cmd_args, arg_format, _value):\n args = list(arg_format.to_text(_value))\n return _cmd_args + args\n\n def find_format(_param):\n return self.custom_formats.get(_param, self.module_formats.get(_param))\n\n extra_params = extra_params or dict()\n cmd_args = list([self.command]) if isinstance(self.command, str) else list(self.command)\n try:\n cmd_args[0] = self.module.get_bin_path(cmd_args[0], required=True)\n except ValueError:\n pass\n param_list = params if params else self.vars.keys()\n\n for param in param_list:\n if isinstance(param, dict):\n if len(param) != 1:\n raise self.ModuleHelperException(\"run_command parameter as a dict must \"\n \"contain only one key: {0}\".format(param))\n _param = list(param.keys())[0]\n fmt = find_format(_param)\n value = param[_param]\n elif isinstance(param, str):\n if param in self.vars.keys():\n fmt = find_format(param)\n value = 
self.vars[param]\n elif param in extra_params:\n fmt = find_format(param)\n value = extra_params[param]\n else:\n self.module.deprecate(\"Cannot determine value for parameter: {0}. \"\n \"From version 4.0.0 onwards this will generate an exception\".format(param),\n version=\"4.0.0\", collection_name=\"community.general\")\n continue\n\n else:\n raise self.ModuleHelperException(\"run_command parameter must be either a str or a dict: {0}\".format(param))\n cmd_args = add_arg_formatted_param(cmd_args, fmt, value)\n\n return cmd_args\n\n def process_command_output(self, rc, out, err):\n return rc, out, err\n\n def run_command(self, extra_params=None, params=None, process_output=None, *args, **kwargs):\n self.vars.cmd_args = self._calculate_args(extra_params, params)\n options = dict(self.run_command_fixed_options)\n options['check_rc'] = options.get('check_rc', self.check_rc)\n options.update(kwargs)\n env_update = dict(options.get('environ_update', {}))\n if self.force_lang:\n env_update.update({\n 'LANGUAGE': self.force_lang,\n 'LC_ALL': self.force_lang,\n })\n self.update_output(force_lang=self.force_lang)\n options['environ_update'] = env_update\n rc, out, err = self.module.run_command(self.vars.cmd_args, *args, **options)\n self.update_output(rc=rc, stdout=out, stderr=err)\n if process_output is None:\n _process = self.process_command_output\n else:\n _process = process_output\n\n return _process(rc, out, err)\n", "path": "plugins/module_utils/mh/mixins/cmd.py"}]}
| 2,916 | 286 |
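The locale behaviour this patch relies on is that `LC_ALL` overrides every `LC_*` category, while `LANGUAGE` only affects message translation, so it never changes how `xfconf-query` prints doubles. A small Python sketch of the same effect (it assumes the `nb_NO.UTF-8` locale is generated on the machine):

```python
import locale

# with a Norwegian numeric locale, the decimal separator becomes a comma,
# which is why the module saw previous_value == '0,200000'
locale.setlocale(locale.LC_NUMERIC, "nb_NO.UTF-8")
print(locale.format_string("%f", 0.2))   # -> 0,200000

# forcing the C locale for LC_NUMERIC (what LC_ALL=C achieves for a child
# process) restores the dot and the comparison matches again
locale.setlocale(locale.LC_NUMERIC, "C")
print(locale.format_string("%f", 0.2))   # -> 0.200000
```

In the module itself the same effect is obtained by putting both `LANGUAGE` and `LC_ALL` into `environ_update` before `run_command`, as the golden diff above shows.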
gh_patches_debug_20847 | rasdani/github-patches | git_diff | pytorch__vision-2754 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Has anyone successfully compiled the C++ API on Windows, including the cuda code?
I have tried many versions of torchvision, none seems to work for me. (on Windows)
1, The old version 0.5.0 simply does not include cuda code into the build.
2, Then I tried 0.6.0, it has the "-openmp" not support error. After a lot of searching and trying, I solved it by deleting one line in the torchvision.vcxproj.
3, 0.6.0 finally gets to compile, but it gives "unresolved external symbol" error if i want to use nms_cuda, according to this issue #2139, the cuda impl of operators are not included in torchvision.lib.
4, Then I tried the recent 0.7.0 tag and it gives "A single input file is required for a non-link phase when an outputfile is specified" error, this one (#2677) simply no one here knows how to solve it. Other resources suggests it might due to misplaced white space...
Could you point me to a version that works on Windows? Or simply no support for Windows?
cc @peterjc123 @nbcsm @guyang3532 @maxluk @gunandrose4u @smartcat2010 @mszhanyi
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `.circleci/regenerate.py`
Content:
```
1 #!/usr/bin/env python3
2
3 """
4 This script should use a very simple, functional programming style.
5 Avoid Jinja macros in favor of native Python functions.
6
7 Don't go overboard on code generation; use Python only to generate
8 content that can't be easily declared statically using CircleCI's YAML API.
9
10 Data declarations (e.g. the nested loops for defining the configuration matrix)
11 should be at the top of the file for easy updating.
12
13 See this comment for design rationale:
14 https://github.com/pytorch/vision/pull/1321#issuecomment-531033978
15 """
16
17 import jinja2
18 import yaml
19 import os.path
20
21
22 PYTHON_VERSIONS = ["3.6", "3.7", "3.8"]
23
24
25 def build_workflows(prefix='', filter_branch=None, upload=False, indentation=6, windows_latest_only=False):
26 w = []
27 for btype in ["wheel", "conda"]:
28 for os_type in ["linux", "macos", "win"]:
29 python_versions = PYTHON_VERSIONS
30 cu_versions_dict = {"linux": ["cpu", "cu92", "cu101", "cu102", "cu110"],
31 "win": ["cpu", "cu101", "cu102", "cu110"],
32 "macos": ["cpu"]}
33 cu_versions = cu_versions_dict[os_type]
34 for python_version in python_versions:
35 for cu_version in cu_versions:
36 for unicode in ([False, True] if btype == "wheel" and python_version == "2.7" else [False]):
37 fb = filter_branch
38 if windows_latest_only and os_type == "win" and filter_branch is None and \
39 (python_version != python_versions[-1] or
40 (cu_version not in [cu_versions[0], cu_versions[-1]])):
41 fb = "master"
42 w += workflow_pair(
43 btype, os_type, python_version, cu_version,
44 unicode, prefix, upload, filter_branch=fb)
45
46 return indent(indentation, w)
47
48
49 def workflow_pair(btype, os_type, python_version, cu_version, unicode, prefix='', upload=False, *, filter_branch=None):
50
51 w = []
52 unicode_suffix = "u" if unicode else ""
53 base_workflow_name = f"{prefix}binary_{os_type}_{btype}_py{python_version}{unicode_suffix}_{cu_version}"
54
55 w.append(generate_base_workflow(
56 base_workflow_name, python_version, cu_version,
57 unicode, os_type, btype, filter_branch=filter_branch))
58
59 if upload:
60 w.append(generate_upload_workflow(base_workflow_name, os_type, btype, cu_version, filter_branch=filter_branch))
61 if filter_branch == 'nightly' and os_type in ['linux', 'win']:
62 pydistro = 'pip' if btype == 'wheel' else 'conda'
63 w.append(generate_smoketest_workflow(pydistro, base_workflow_name, filter_branch, python_version, os_type))
64
65 return w
66
67
68 manylinux_images = {
69 "cu92": "pytorch/manylinux-cuda92",
70 "cu101": "pytorch/manylinux-cuda101",
71 "cu102": "pytorch/manylinux-cuda102",
72 "cu110": "pytorch/manylinux-cuda110",
73 }
74
75
76 def get_manylinux_image(cu_version):
77 cu_suffix = "102"
78 if cu_version.startswith('cu'):
79 cu_suffix = cu_version[len('cu'):]
80 return f"pytorch/manylinux-cuda{cu_suffix}"
81
82
83 def generate_base_workflow(base_workflow_name, python_version, cu_version,
84 unicode, os_type, btype, *, filter_branch=None):
85
86 d = {
87 "name": base_workflow_name,
88 "python_version": python_version,
89 "cu_version": cu_version,
90 }
91
92 if os_type != "win" and unicode:
93 d["unicode_abi"] = '1'
94
95 if os_type != "win":
96 d["wheel_docker_image"] = get_manylinux_image(cu_version)
97
98 if filter_branch is not None:
99 d["filters"] = {
100 "branches": {
101 "only": filter_branch
102 },
103 "tags": {
104 # Using a raw string here to avoid having to escape
105 # anything
106 "only": r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"
107 }
108 }
109
110 w = f"binary_{os_type}_{btype}"
111 return {w: d}
112
113
114 def gen_filter_branch_tree(*branches):
115 return {"branches": {"only": [b for b in branches]}}
116
117
118 def generate_upload_workflow(base_workflow_name, os_type, btype, cu_version, *, filter_branch=None):
119 d = {
120 "name": f"{base_workflow_name}_upload",
121 "context": "org-member",
122 "requires": [base_workflow_name],
123 }
124
125 if btype == 'wheel':
126 d["subfolder"] = "" if os_type == 'macos' else cu_version + "/"
127
128 if filter_branch is not None:
129 d["filters"] = {
130 "branches": {
131 "only": filter_branch
132 },
133 "tags": {
134 # Using a raw string here to avoid having to escape
135 # anything
136 "only": r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"
137 }
138 }
139
140 return {f"binary_{btype}_upload": d}
141
142
143 def generate_smoketest_workflow(pydistro, base_workflow_name, filter_branch, python_version, os_type):
144
145 required_build_suffix = "_upload"
146 required_build_name = base_workflow_name + required_build_suffix
147
148 smoke_suffix = f"smoke_test_{pydistro}"
149 d = {
150 "name": f"{base_workflow_name}_{smoke_suffix}",
151 "requires": [required_build_name],
152 "python_version": python_version,
153 }
154
155 if filter_branch:
156 d["filters"] = gen_filter_branch_tree(filter_branch)
157
158 return {"smoke_test_{os_type}_{pydistro}".format(os_type=os_type, pydistro=pydistro): d}
159
160
161 def indent(indentation, data_list):
162 return ("\n" + " " * indentation).join(
163 yaml.dump(data_list, default_flow_style=False).splitlines())
164
165
166 def unittest_workflows(indentation=6):
167 jobs = []
168 for os_type in ["linux", "windows", "macos"]:
169 for device_type in ["cpu", "gpu"]:
170 if os_type == "macos" and device_type == "gpu":
171 continue
172 for i, python_version in enumerate(PYTHON_VERSIONS):
173 job = {
174 "name": f"unittest_{os_type}_{device_type}_py{python_version}",
175 "python_version": python_version,
176 }
177
178 if device_type == 'gpu':
179 if python_version != "3.8":
180 job['filters'] = gen_filter_branch_tree('master', 'nightly')
181 job['cu_version'] = 'cu101'
182 else:
183 job['cu_version'] = 'cpu'
184
185 jobs.append({f"unittest_{os_type}_{device_type}": job})
186
187 return indent(indentation, jobs)
188
189
190 def cmake_workflows(indentation=6):
191 jobs = []
192 python_version = '3.8'
193 for os_type in ['linux', 'windows', 'macos']:
194 # Right now CMake builds are failling on Windows (GPU)
195 device_types = ['cpu', 'gpu'] if os_type == 'linux' else ['cpu']
196 for device in device_types:
197 job = {
198 'name': f'cmake_{os_type}_{device}',
199 'python_version': python_version
200 }
201
202 job['cu_version'] = 'cu101' if device == 'gpu' else 'cpu'
203 if device == 'gpu':
204 job['wheel_docker_image'] = 'pytorch/manylinux-cuda101'
205 jobs.append({f'cmake_{os_type}_{device}': job})
206 return indent(indentation, jobs)
207
208
209 if __name__ == "__main__":
210 d = os.path.dirname(__file__)
211 env = jinja2.Environment(
212 loader=jinja2.FileSystemLoader(d),
213 lstrip_blocks=True,
214 autoescape=False,
215 )
216
217 with open(os.path.join(d, 'config.yml'), 'w') as f:
218 f.write(env.get_template('config.yml.in').render(
219 build_workflows=build_workflows,
220 unittest_workflows=unittest_workflows,
221 cmake_workflows=cmake_workflows,
222 ))
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/.circleci/regenerate.py b/.circleci/regenerate.py
--- a/.circleci/regenerate.py
+++ b/.circleci/regenerate.py
@@ -191,8 +191,8 @@
jobs = []
python_version = '3.8'
for os_type in ['linux', 'windows', 'macos']:
- # Right now CMake builds are failling on Windows (GPU)
- device_types = ['cpu', 'gpu'] if os_type == 'linux' else ['cpu']
+ # Skip OSX CUDA
+ device_types = ['cpu', 'gpu'] if os_type != 'macos' else ['cpu']
for device in device_types:
job = {
'name': f'cmake_{os_type}_{device}',
@@ -200,7 +200,7 @@
}
job['cu_version'] = 'cu101' if device == 'gpu' else 'cpu'
- if device == 'gpu':
+ if device == 'gpu' and os_type == 'linux':
job['wheel_docker_image'] = 'pytorch/manylinux-cuda101'
jobs.append({f'cmake_{os_type}_{device}': job})
return indent(indentation, jobs)
|
{"golden_diff": "diff --git a/.circleci/regenerate.py b/.circleci/regenerate.py\n--- a/.circleci/regenerate.py\n+++ b/.circleci/regenerate.py\n@@ -191,8 +191,8 @@\n jobs = []\n python_version = '3.8'\n for os_type in ['linux', 'windows', 'macos']:\n- # Right now CMake builds are failling on Windows (GPU)\n- device_types = ['cpu', 'gpu'] if os_type == 'linux' else ['cpu']\n+ # Skip OSX CUDA\n+ device_types = ['cpu', 'gpu'] if os_type != 'macos' else ['cpu']\n for device in device_types:\n job = {\n 'name': f'cmake_{os_type}_{device}',\n@@ -200,7 +200,7 @@\n }\n \n job['cu_version'] = 'cu101' if device == 'gpu' else 'cpu'\n- if device == 'gpu':\n+ if device == 'gpu' and os_type == 'linux':\n job['wheel_docker_image'] = 'pytorch/manylinux-cuda101'\n jobs.append({f'cmake_{os_type}_{device}': job})\n return indent(indentation, jobs)\n", "issue": "Has anyone successfully compiled the C++ API on Windows, including the cuda code?\nI have tried many versions of torchvision, none seems to work for me. (on Windows)\r\n1, The old version 0.5.0 simply does not include cuda code into the build. \r\n2, Then I tried 0.6.0, it has the \"-openmp\" not support error. After a lot of searching and trying, I solved it by deleting one line in the torchvision.vcxproj.\r\n3, 0.6.0 finally gets to compile, but it gives \"unresolved external symbol\" error if i want to use nms_cuda, according to this issue #2139, the cuda impl of operators are not included in torchvision.lib.\r\n4, Then I tried the recent 0.7.0 tag and it gives \"A single input file is required for a non-link phase when an outputfile is specified\" error, this one (#2677) simply no one here knows how to solve it. Other resources suggests it might due to misplaced white space...\r\n\r\nCould you point me to a version that works on Windows? Or simply no support for Windows?\r\n\n\ncc @peterjc123 @nbcsm @guyang3532 @maxluk @gunandrose4u @smartcat2010 @mszhanyi\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"\nThis script should use a very simple, functional programming style.\nAvoid Jinja macros in favor of native Python functions.\n\nDon't go overboard on code generation; use Python only to generate\ncontent that can't be easily declared statically using CircleCI's YAML API.\n\nData declarations (e.g. 
the nested loops for defining the configuration matrix)\nshould be at the top of the file for easy updating.\n\nSee this comment for design rationale:\nhttps://github.com/pytorch/vision/pull/1321#issuecomment-531033978\n\"\"\"\n\nimport jinja2\nimport yaml\nimport os.path\n\n\nPYTHON_VERSIONS = [\"3.6\", \"3.7\", \"3.8\"]\n\n\ndef build_workflows(prefix='', filter_branch=None, upload=False, indentation=6, windows_latest_only=False):\n w = []\n for btype in [\"wheel\", \"conda\"]:\n for os_type in [\"linux\", \"macos\", \"win\"]:\n python_versions = PYTHON_VERSIONS\n cu_versions_dict = {\"linux\": [\"cpu\", \"cu92\", \"cu101\", \"cu102\", \"cu110\"],\n \"win\": [\"cpu\", \"cu101\", \"cu102\", \"cu110\"],\n \"macos\": [\"cpu\"]}\n cu_versions = cu_versions_dict[os_type]\n for python_version in python_versions:\n for cu_version in cu_versions:\n for unicode in ([False, True] if btype == \"wheel\" and python_version == \"2.7\" else [False]):\n fb = filter_branch\n if windows_latest_only and os_type == \"win\" and filter_branch is None and \\\n (python_version != python_versions[-1] or\n (cu_version not in [cu_versions[0], cu_versions[-1]])):\n fb = \"master\"\n w += workflow_pair(\n btype, os_type, python_version, cu_version,\n unicode, prefix, upload, filter_branch=fb)\n\n return indent(indentation, w)\n\n\ndef workflow_pair(btype, os_type, python_version, cu_version, unicode, prefix='', upload=False, *, filter_branch=None):\n\n w = []\n unicode_suffix = \"u\" if unicode else \"\"\n base_workflow_name = f\"{prefix}binary_{os_type}_{btype}_py{python_version}{unicode_suffix}_{cu_version}\"\n\n w.append(generate_base_workflow(\n base_workflow_name, python_version, cu_version,\n unicode, os_type, btype, filter_branch=filter_branch))\n\n if upload:\n w.append(generate_upload_workflow(base_workflow_name, os_type, btype, cu_version, filter_branch=filter_branch))\n if filter_branch == 'nightly' and os_type in ['linux', 'win']:\n pydistro = 'pip' if btype == 'wheel' else 'conda'\n w.append(generate_smoketest_workflow(pydistro, base_workflow_name, filter_branch, python_version, os_type))\n\n return w\n\n\nmanylinux_images = {\n \"cu92\": \"pytorch/manylinux-cuda92\",\n \"cu101\": \"pytorch/manylinux-cuda101\",\n \"cu102\": \"pytorch/manylinux-cuda102\",\n \"cu110\": \"pytorch/manylinux-cuda110\",\n}\n\n\ndef get_manylinux_image(cu_version):\n cu_suffix = \"102\"\n if cu_version.startswith('cu'):\n cu_suffix = cu_version[len('cu'):]\n return f\"pytorch/manylinux-cuda{cu_suffix}\"\n\n\ndef generate_base_workflow(base_workflow_name, python_version, cu_version,\n unicode, os_type, btype, *, filter_branch=None):\n\n d = {\n \"name\": base_workflow_name,\n \"python_version\": python_version,\n \"cu_version\": cu_version,\n }\n\n if os_type != \"win\" and unicode:\n d[\"unicode_abi\"] = '1'\n\n if os_type != \"win\":\n d[\"wheel_docker_image\"] = get_manylinux_image(cu_version)\n\n if filter_branch is not None:\n d[\"filters\"] = {\n \"branches\": {\n \"only\": filter_branch\n },\n \"tags\": {\n # Using a raw string here to avoid having to escape\n # anything\n \"only\": r\"/v[0-9]+(\\.[0-9]+)*-rc[0-9]+/\"\n }\n }\n\n w = f\"binary_{os_type}_{btype}\"\n return {w: d}\n\n\ndef gen_filter_branch_tree(*branches):\n return {\"branches\": {\"only\": [b for b in branches]}}\n\n\ndef generate_upload_workflow(base_workflow_name, os_type, btype, cu_version, *, filter_branch=None):\n d = {\n \"name\": f\"{base_workflow_name}_upload\",\n \"context\": \"org-member\",\n \"requires\": [base_workflow_name],\n }\n\n if btype 
== 'wheel':\n d[\"subfolder\"] = \"\" if os_type == 'macos' else cu_version + \"/\"\n\n if filter_branch is not None:\n d[\"filters\"] = {\n \"branches\": {\n \"only\": filter_branch\n },\n \"tags\": {\n # Using a raw string here to avoid having to escape\n # anything\n \"only\": r\"/v[0-9]+(\\.[0-9]+)*-rc[0-9]+/\"\n }\n }\n\n return {f\"binary_{btype}_upload\": d}\n\n\ndef generate_smoketest_workflow(pydistro, base_workflow_name, filter_branch, python_version, os_type):\n\n required_build_suffix = \"_upload\"\n required_build_name = base_workflow_name + required_build_suffix\n\n smoke_suffix = f\"smoke_test_{pydistro}\"\n d = {\n \"name\": f\"{base_workflow_name}_{smoke_suffix}\",\n \"requires\": [required_build_name],\n \"python_version\": python_version,\n }\n\n if filter_branch:\n d[\"filters\"] = gen_filter_branch_tree(filter_branch)\n\n return {\"smoke_test_{os_type}_{pydistro}\".format(os_type=os_type, pydistro=pydistro): d}\n\n\ndef indent(indentation, data_list):\n return (\"\\n\" + \" \" * indentation).join(\n yaml.dump(data_list, default_flow_style=False).splitlines())\n\n\ndef unittest_workflows(indentation=6):\n jobs = []\n for os_type in [\"linux\", \"windows\", \"macos\"]:\n for device_type in [\"cpu\", \"gpu\"]:\n if os_type == \"macos\" and device_type == \"gpu\":\n continue\n for i, python_version in enumerate(PYTHON_VERSIONS):\n job = {\n \"name\": f\"unittest_{os_type}_{device_type}_py{python_version}\",\n \"python_version\": python_version,\n }\n\n if device_type == 'gpu':\n if python_version != \"3.8\":\n job['filters'] = gen_filter_branch_tree('master', 'nightly')\n job['cu_version'] = 'cu101'\n else:\n job['cu_version'] = 'cpu'\n\n jobs.append({f\"unittest_{os_type}_{device_type}\": job})\n\n return indent(indentation, jobs)\n\n\ndef cmake_workflows(indentation=6):\n jobs = []\n python_version = '3.8'\n for os_type in ['linux', 'windows', 'macos']:\n # Right now CMake builds are failling on Windows (GPU)\n device_types = ['cpu', 'gpu'] if os_type == 'linux' else ['cpu']\n for device in device_types:\n job = {\n 'name': f'cmake_{os_type}_{device}',\n 'python_version': python_version\n }\n\n job['cu_version'] = 'cu101' if device == 'gpu' else 'cpu'\n if device == 'gpu':\n job['wheel_docker_image'] = 'pytorch/manylinux-cuda101'\n jobs.append({f'cmake_{os_type}_{device}': job})\n return indent(indentation, jobs)\n\n\nif __name__ == \"__main__\":\n d = os.path.dirname(__file__)\n env = jinja2.Environment(\n loader=jinja2.FileSystemLoader(d),\n lstrip_blocks=True,\n autoescape=False,\n )\n\n with open(os.path.join(d, 'config.yml'), 'w') as f:\n f.write(env.get_template('config.yml.in').render(\n build_workflows=build_workflows,\n unittest_workflows=unittest_workflows,\n cmake_workflows=cmake_workflows,\n ))\n", "path": ".circleci/regenerate.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"\nThis script should use a very simple, functional programming style.\nAvoid Jinja macros in favor of native Python functions.\n\nDon't go overboard on code generation; use Python only to generate\ncontent that can't be easily declared statically using CircleCI's YAML API.\n\nData declarations (e.g. 
the nested loops for defining the configuration matrix)\nshould be at the top of the file for easy updating.\n\nSee this comment for design rationale:\nhttps://github.com/pytorch/vision/pull/1321#issuecomment-531033978\n\"\"\"\n\nimport jinja2\nimport yaml\nimport os.path\n\n\nPYTHON_VERSIONS = [\"3.6\", \"3.7\", \"3.8\"]\n\n\ndef build_workflows(prefix='', filter_branch=None, upload=False, indentation=6, windows_latest_only=False):\n w = []\n for btype in [\"wheel\", \"conda\"]:\n for os_type in [\"linux\", \"macos\", \"win\"]:\n python_versions = PYTHON_VERSIONS\n cu_versions_dict = {\"linux\": [\"cpu\", \"cu92\", \"cu101\", \"cu102\", \"cu110\"],\n \"win\": [\"cpu\", \"cu101\", \"cu102\", \"cu110\"],\n \"macos\": [\"cpu\"]}\n cu_versions = cu_versions_dict[os_type]\n for python_version in python_versions:\n for cu_version in cu_versions:\n for unicode in ([False, True] if btype == \"wheel\" and python_version == \"2.7\" else [False]):\n fb = filter_branch\n if windows_latest_only and os_type == \"win\" and filter_branch is None and \\\n (python_version != python_versions[-1] or\n (cu_version not in [cu_versions[0], cu_versions[-1]])):\n fb = \"master\"\n w += workflow_pair(\n btype, os_type, python_version, cu_version,\n unicode, prefix, upload, filter_branch=fb)\n\n return indent(indentation, w)\n\n\ndef workflow_pair(btype, os_type, python_version, cu_version, unicode, prefix='', upload=False, *, filter_branch=None):\n\n w = []\n unicode_suffix = \"u\" if unicode else \"\"\n base_workflow_name = f\"{prefix}binary_{os_type}_{btype}_py{python_version}{unicode_suffix}_{cu_version}\"\n\n w.append(generate_base_workflow(\n base_workflow_name, python_version, cu_version,\n unicode, os_type, btype, filter_branch=filter_branch))\n\n if upload:\n w.append(generate_upload_workflow(base_workflow_name, os_type, btype, cu_version, filter_branch=filter_branch))\n if filter_branch == 'nightly' and os_type in ['linux', 'win']:\n pydistro = 'pip' if btype == 'wheel' else 'conda'\n w.append(generate_smoketest_workflow(pydistro, base_workflow_name, filter_branch, python_version, os_type))\n\n return w\n\n\nmanylinux_images = {\n \"cu92\": \"pytorch/manylinux-cuda92\",\n \"cu101\": \"pytorch/manylinux-cuda101\",\n \"cu102\": \"pytorch/manylinux-cuda102\",\n \"cu110\": \"pytorch/manylinux-cuda110\",\n}\n\n\ndef get_manylinux_image(cu_version):\n cu_suffix = \"102\"\n if cu_version.startswith('cu'):\n cu_suffix = cu_version[len('cu'):]\n return f\"pytorch/manylinux-cuda{cu_suffix}\"\n\n\ndef generate_base_workflow(base_workflow_name, python_version, cu_version,\n unicode, os_type, btype, *, filter_branch=None):\n\n d = {\n \"name\": base_workflow_name,\n \"python_version\": python_version,\n \"cu_version\": cu_version,\n }\n\n if os_type != \"win\" and unicode:\n d[\"unicode_abi\"] = '1'\n\n if os_type != \"win\":\n d[\"wheel_docker_image\"] = get_manylinux_image(cu_version)\n\n if filter_branch is not None:\n d[\"filters\"] = {\n \"branches\": {\n \"only\": filter_branch\n },\n \"tags\": {\n # Using a raw string here to avoid having to escape\n # anything\n \"only\": r\"/v[0-9]+(\\.[0-9]+)*-rc[0-9]+/\"\n }\n }\n\n w = f\"binary_{os_type}_{btype}\"\n return {w: d}\n\n\ndef gen_filter_branch_tree(*branches):\n return {\"branches\": {\"only\": [b for b in branches]}}\n\n\ndef generate_upload_workflow(base_workflow_name, os_type, btype, cu_version, *, filter_branch=None):\n d = {\n \"name\": f\"{base_workflow_name}_upload\",\n \"context\": \"org-member\",\n \"requires\": [base_workflow_name],\n }\n\n if btype 
== 'wheel':\n d[\"subfolder\"] = \"\" if os_type == 'macos' else cu_version + \"/\"\n\n if filter_branch is not None:\n d[\"filters\"] = {\n \"branches\": {\n \"only\": filter_branch\n },\n \"tags\": {\n # Using a raw string here to avoid having to escape\n # anything\n \"only\": r\"/v[0-9]+(\\.[0-9]+)*-rc[0-9]+/\"\n }\n }\n\n return {f\"binary_{btype}_upload\": d}\n\n\ndef generate_smoketest_workflow(pydistro, base_workflow_name, filter_branch, python_version, os_type):\n\n required_build_suffix = \"_upload\"\n required_build_name = base_workflow_name + required_build_suffix\n\n smoke_suffix = f\"smoke_test_{pydistro}\"\n d = {\n \"name\": f\"{base_workflow_name}_{smoke_suffix}\",\n \"requires\": [required_build_name],\n \"python_version\": python_version,\n }\n\n if filter_branch:\n d[\"filters\"] = gen_filter_branch_tree(filter_branch)\n\n return {\"smoke_test_{os_type}_{pydistro}\".format(os_type=os_type, pydistro=pydistro): d}\n\n\ndef indent(indentation, data_list):\n return (\"\\n\" + \" \" * indentation).join(\n yaml.dump(data_list, default_flow_style=False).splitlines())\n\n\ndef unittest_workflows(indentation=6):\n jobs = []\n for os_type in [\"linux\", \"windows\", \"macos\"]:\n for device_type in [\"cpu\", \"gpu\"]:\n if os_type == \"macos\" and device_type == \"gpu\":\n continue\n for i, python_version in enumerate(PYTHON_VERSIONS):\n job = {\n \"name\": f\"unittest_{os_type}_{device_type}_py{python_version}\",\n \"python_version\": python_version,\n }\n\n if device_type == 'gpu':\n if python_version != \"3.8\":\n job['filters'] = gen_filter_branch_tree('master', 'nightly')\n job['cu_version'] = 'cu101'\n else:\n job['cu_version'] = 'cpu'\n\n jobs.append({f\"unittest_{os_type}_{device_type}\": job})\n\n return indent(indentation, jobs)\n\n\ndef cmake_workflows(indentation=6):\n jobs = []\n python_version = '3.8'\n for os_type in ['linux', 'windows', 'macos']:\n # Skip OSX CUDA\n device_types = ['cpu', 'gpu'] if os_type != 'macos' else ['cpu']\n for device in device_types:\n job = {\n 'name': f'cmake_{os_type}_{device}',\n 'python_version': python_version\n }\n\n job['cu_version'] = 'cu101' if device == 'gpu' else 'cpu'\n if device == 'gpu' and os_type == 'linux':\n job['wheel_docker_image'] = 'pytorch/manylinux-cuda101'\n jobs.append({f'cmake_{os_type}_{device}': job})\n return indent(indentation, jobs)\n\n\nif __name__ == \"__main__\":\n d = os.path.dirname(__file__)\n env = jinja2.Environment(\n loader=jinja2.FileSystemLoader(d),\n lstrip_blocks=True,\n autoescape=False,\n )\n\n with open(os.path.join(d, 'config.yml'), 'w') as f:\n f.write(env.get_template('config.yml.in').render(\n build_workflows=build_workflows,\n unittest_workflows=unittest_workflows,\n cmake_workflows=cmake_workflows,\n ))\n", "path": ".circleci/regenerate.py"}]}
| 3,009 | 280 |
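
The record above patches the CircleCI job matrix so CUDA jobs are skipped only on macOS and the manylinux CUDA image is attached only to Linux GPU jobs. As a quick illustration, here is a hypothetical, self-contained Python sketch of that selection rule; the function names and printed output are illustrative only and are not part of the dataset record:

```python
# Hypothetical sketch of the matrix rule applied in the golden diff above:
# GPU jobs are generated for every OS except macOS, and the manylinux CUDA
# docker image is only attached to Linux GPU jobs.
def device_types(os_type: str) -> list:
    return ['cpu', 'gpu'] if os_type != 'macos' else ['cpu']


def wheel_docker_image(os_type: str, device: str):
    if device == 'gpu' and os_type == 'linux':
        return 'pytorch/manylinux-cuda101'
    return None


if __name__ == '__main__':
    for os_type in ['linux', 'windows', 'macos']:
        for device in device_types(os_type):
            print(f'cmake_{os_type}_{device}', wheel_docker_image(os_type, device))
```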
gh_patches_debug_47845
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-550
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
About page requires login
**Describe the bug**
Accessing the "About this server" link (https://bookwyrm.social/about) redirects to login
**To Reproduce**
Steps to reproduce the behavior:
1. Go to https://bookwyrm.social/about
2. redirected to login instead of seeing an about page (the URL is login/?next=/about)
**Expected behavior**
Access to information about this site / server
**Desktop (please complete the following information):**
- OS: linux
- Browser firefox
- Version 85 (developer edition)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/views/landing.py`
Content:
```
1 ''' non-interactive pages '''
2 from django.contrib.auth.decorators import login_required
3 from django.core.paginator import Paginator
4 from django.db.models import Avg, Max
5 from django.template.response import TemplateResponse
6 from django.utils import timezone
7 from django.utils.decorators import method_decorator
8 from django.views import View
9
10 from bookwyrm import forms, models
11 from bookwyrm.settings import PAGE_LENGTH
12 from .helpers import get_activity_feed
13
14
15 # pylint: disable= no-self-use
16 @method_decorator(login_required, name='dispatch')
17 class About(View):
18 ''' create invites '''
19 def get(self, request):
20 ''' more information about the instance '''
21 data = {
22 'title': 'About',
23 }
24 return TemplateResponse(request, 'about.html', data)
25
26 class Home(View):
27 ''' discover page or home feed depending on auth '''
28 def get(self, request):
29 ''' this is the same as the feed on the home tab '''
30 if request.user.is_authenticated:
31 feed_view = Feed.as_view()
32 return feed_view(request, 'home')
33 discover_view = Discover.as_view()
34 return discover_view(request)
35
36 class Discover(View):
37 ''' preview of recently reviewed books '''
38 def get(self, request):
39 ''' tiled book activity page '''
40 books = models.Edition.objects.filter(
41 review__published_date__isnull=False,
42 review__user__local=True,
43 review__privacy__in=['public', 'unlisted'],
44 ).exclude(
45 cover__exact=''
46 ).annotate(
47 Max('review__published_date')
48 ).order_by('-review__published_date__max')[:6]
49
50 ratings = {}
51 for book in books:
52 reviews = models.Review.objects.filter(
53 book__in=book.parent_work.editions.all()
54 )
55 reviews = get_activity_feed(
56 request.user, ['public', 'unlisted'], queryset=reviews)
57 ratings[book.id] = reviews.aggregate(Avg('rating'))['rating__avg']
58 data = {
59 'title': 'Discover',
60 'register_form': forms.RegisterForm(),
61 'books': list(set(books)),
62 'ratings': ratings
63 }
64 return TemplateResponse(request, 'discover.html', data)
65
66
67 @method_decorator(login_required, name='dispatch')
68 class Feed(View):
69 ''' activity stream '''
70 def get(self, request, tab):
71 ''' user's homepage with activity feed '''
72 try:
73 page = int(request.GET.get('page', 1))
74 except ValueError:
75 page = 1
76
77 suggested_books = get_suggested_books(request.user)
78
79 if tab == 'home':
80 activities = get_activity_feed(
81 request.user, ['public', 'unlisted', 'followers'],
82 following_only=True)
83 elif tab == 'local':
84 activities = get_activity_feed(
85 request.user, ['public', 'followers'], local_only=True)
86 else:
87 activities = get_activity_feed(
88 request.user, ['public', 'followers'])
89 paginated = Paginator(activities, PAGE_LENGTH)
90
91 goal = models.AnnualGoal.objects.filter(
92 user=request.user, year=timezone.now().year
93 ).first()
94 data = {
95 'title': 'Updates Feed',
96 'user': request.user,
97 'suggested_books': suggested_books,
98 'activities': paginated.page(page),
99 'tab': tab,
100 'goal': goal,
101 'goal_form': forms.GoalForm(),
102 }
103 return TemplateResponse(request, 'feed.html', data)
104
105
106 def get_suggested_books(user, max_books=5):
107 ''' helper to get a user's recent books '''
108 book_count = 0
109 preset_shelves = [
110 ('reading', max_books), ('read', 2), ('to-read', max_books)
111 ]
112 suggested_books = []
113 for (preset, shelf_max) in preset_shelves:
114 limit = shelf_max if shelf_max < (max_books - book_count) \
115 else max_books - book_count
116 shelf = user.shelf_set.get(identifier=preset)
117
118 shelf_books = shelf.shelfbook_set.order_by(
119 '-updated_date'
120 ).all()[:limit]
121 if not shelf_books:
122 continue
123 shelf_preview = {
124 'name': shelf.name,
125 'books': [s.book for s in shelf_books]
126 }
127 suggested_books.append(shelf_preview)
128 book_count += len(shelf_preview['books'])
129 return suggested_books
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bookwyrm/views/landing.py b/bookwyrm/views/landing.py
--- a/bookwyrm/views/landing.py
+++ b/bookwyrm/views/landing.py
@@ -13,7 +13,6 @@
# pylint: disable= no-self-use
-@method_decorator(login_required, name='dispatch')
class About(View):
''' create invites '''
def get(self, request):
|
{"golden_diff": "diff --git a/bookwyrm/views/landing.py b/bookwyrm/views/landing.py\n--- a/bookwyrm/views/landing.py\n+++ b/bookwyrm/views/landing.py\n@@ -13,7 +13,6 @@\n \n \n # pylint: disable= no-self-use\n-@method_decorator(login_required, name='dispatch')\n class About(View):\n ''' create invites '''\n def get(self, request):\n", "issue": "About page requires login\n**Describe the bug**\r\nAccessing the \"About this server\" link (https://bookwyrm.social/about) redirects to login\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to https://bookwyrm.social/about\r\n2. redirected to login instead of seeing an about page (the URL is login/?next=/about)\r\n\r\n**Expected behavior**\r\nAccess to information about this site / server\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: linux\r\n - Browser firefox\r\n - Version 85 (developer edition)\r\n\n", "before_files": [{"content": "''' non-interactive pages '''\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db.models import Avg, Max\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom .helpers import get_activity_feed\n\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name='dispatch')\nclass About(View):\n ''' create invites '''\n def get(self, request):\n ''' more information about the instance '''\n data = {\n 'title': 'About',\n }\n return TemplateResponse(request, 'about.html', data)\n\nclass Home(View):\n ''' discover page or home feed depending on auth '''\n def get(self, request):\n ''' this is the same as the feed on the home tab '''\n if request.user.is_authenticated:\n feed_view = Feed.as_view()\n return feed_view(request, 'home')\n discover_view = Discover.as_view()\n return discover_view(request)\n\nclass Discover(View):\n ''' preview of recently reviewed books '''\n def get(self, request):\n ''' tiled book activity page '''\n books = models.Edition.objects.filter(\n review__published_date__isnull=False,\n review__user__local=True,\n review__privacy__in=['public', 'unlisted'],\n ).exclude(\n cover__exact=''\n ).annotate(\n Max('review__published_date')\n ).order_by('-review__published_date__max')[:6]\n\n ratings = {}\n for book in books:\n reviews = models.Review.objects.filter(\n book__in=book.parent_work.editions.all()\n )\n reviews = get_activity_feed(\n request.user, ['public', 'unlisted'], queryset=reviews)\n ratings[book.id] = reviews.aggregate(Avg('rating'))['rating__avg']\n data = {\n 'title': 'Discover',\n 'register_form': forms.RegisterForm(),\n 'books': list(set(books)),\n 'ratings': ratings\n }\n return TemplateResponse(request, 'discover.html', data)\n\n\n@method_decorator(login_required, name='dispatch')\nclass Feed(View):\n ''' activity stream '''\n def get(self, request, tab):\n ''' user's homepage with activity feed '''\n try:\n page = int(request.GET.get('page', 1))\n except ValueError:\n page = 1\n\n suggested_books = get_suggested_books(request.user)\n\n if tab == 'home':\n activities = get_activity_feed(\n request.user, ['public', 'unlisted', 'followers'],\n following_only=True)\n elif tab == 'local':\n activities = get_activity_feed(\n request.user, ['public', 'followers'], local_only=True)\n else:\n activities = get_activity_feed(\n request.user, ['public', 
'followers'])\n paginated = Paginator(activities, PAGE_LENGTH)\n\n goal = models.AnnualGoal.objects.filter(\n user=request.user, year=timezone.now().year\n ).first()\n data = {\n 'title': 'Updates Feed',\n 'user': request.user,\n 'suggested_books': suggested_books,\n 'activities': paginated.page(page),\n 'tab': tab,\n 'goal': goal,\n 'goal_form': forms.GoalForm(),\n }\n return TemplateResponse(request, 'feed.html', data)\n\n\ndef get_suggested_books(user, max_books=5):\n ''' helper to get a user's recent books '''\n book_count = 0\n preset_shelves = [\n ('reading', max_books), ('read', 2), ('to-read', max_books)\n ]\n suggested_books = []\n for (preset, shelf_max) in preset_shelves:\n limit = shelf_max if shelf_max < (max_books - book_count) \\\n else max_books - book_count\n shelf = user.shelf_set.get(identifier=preset)\n\n shelf_books = shelf.shelfbook_set.order_by(\n '-updated_date'\n ).all()[:limit]\n if not shelf_books:\n continue\n shelf_preview = {\n 'name': shelf.name,\n 'books': [s.book for s in shelf_books]\n }\n suggested_books.append(shelf_preview)\n book_count += len(shelf_preview['books'])\n return suggested_books\n", "path": "bookwyrm/views/landing.py"}], "after_files": [{"content": "''' non-interactive pages '''\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db.models import Avg, Max\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom .helpers import get_activity_feed\n\n\n# pylint: disable= no-self-use\nclass About(View):\n ''' create invites '''\n def get(self, request):\n ''' more information about the instance '''\n data = {\n 'title': 'About',\n }\n return TemplateResponse(request, 'about.html', data)\n\nclass Home(View):\n ''' discover page or home feed depending on auth '''\n def get(self, request):\n ''' this is the same as the feed on the home tab '''\n if request.user.is_authenticated:\n feed_view = Feed.as_view()\n return feed_view(request, 'home')\n discover_view = Discover.as_view()\n return discover_view(request)\n\nclass Discover(View):\n ''' preview of recently reviewed books '''\n def get(self, request):\n ''' tiled book activity page '''\n books = models.Edition.objects.filter(\n review__published_date__isnull=False,\n review__user__local=True,\n review__privacy__in=['public', 'unlisted'],\n ).exclude(\n cover__exact=''\n ).annotate(\n Max('review__published_date')\n ).order_by('-review__published_date__max')[:6]\n\n ratings = {}\n for book in books:\n reviews = models.Review.objects.filter(\n book__in=book.parent_work.editions.all()\n )\n reviews = get_activity_feed(\n request.user, ['public', 'unlisted'], queryset=reviews)\n ratings[book.id] = reviews.aggregate(Avg('rating'))['rating__avg']\n data = {\n 'title': 'Discover',\n 'register_form': forms.RegisterForm(),\n 'books': list(set(books)),\n 'ratings': ratings\n }\n return TemplateResponse(request, 'discover.html', data)\n\n\n@method_decorator(login_required, name='dispatch')\nclass Feed(View):\n ''' activity stream '''\n def get(self, request, tab):\n ''' user's homepage with activity feed '''\n try:\n page = int(request.GET.get('page', 1))\n except ValueError:\n page = 1\n\n suggested_books = get_suggested_books(request.user)\n\n if tab == 'home':\n activities = get_activity_feed(\n request.user, ['public', 'unlisted', 
'followers'],\n following_only=True)\n elif tab == 'local':\n activities = get_activity_feed(\n request.user, ['public', 'followers'], local_only=True)\n else:\n activities = get_activity_feed(\n request.user, ['public', 'followers'])\n paginated = Paginator(activities, PAGE_LENGTH)\n\n goal = models.AnnualGoal.objects.filter(\n user=request.user, year=timezone.now().year\n ).first()\n data = {\n 'title': 'Updates Feed',\n 'user': request.user,\n 'suggested_books': suggested_books,\n 'activities': paginated.page(page),\n 'tab': tab,\n 'goal': goal,\n 'goal_form': forms.GoalForm(),\n }\n return TemplateResponse(request, 'feed.html', data)\n\n\ndef get_suggested_books(user, max_books=5):\n ''' helper to get a user's recent books '''\n book_count = 0\n preset_shelves = [\n ('reading', max_books), ('read', 2), ('to-read', max_books)\n ]\n suggested_books = []\n for (preset, shelf_max) in preset_shelves:\n limit = shelf_max if shelf_max < (max_books - book_count) \\\n else max_books - book_count\n shelf = user.shelf_set.get(identifier=preset)\n\n shelf_books = shelf.shelfbook_set.order_by(\n '-updated_date'\n ).all()[:limit]\n if not shelf_books:\n continue\n shelf_preview = {\n 'name': shelf.name,\n 'books': [s.book for s in shelf_books]\n }\n suggested_books.append(shelf_preview)\n book_count += len(shelf_preview['books'])\n return suggested_books\n", "path": "bookwyrm/views/landing.py"}]}
| 1,591 | 89 |
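
The bookwyrm record above resolves the login redirect by dropping the class-level `login_required` decorator from the public `About` view while keeping it on the authenticated `Feed` view. A minimal, hypothetical sketch of that pattern is shown below; it assumes a configured Django project and trims the views down to the decorator difference only, so it is not the project's actual code:

```python
# Hypothetical reduction of the fix above: the dispatch-level login_required
# decorator is what forces the redirect, so the public view simply omits it.
from django.contrib.auth.decorators import login_required
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.views import View


class About(View):
    ''' publicly reachable: no login_required on dispatch '''
    def get(self, request):
        return TemplateResponse(request, 'about.html', {'title': 'About'})


@method_decorator(login_required, name='dispatch')
class Feed(View):
    ''' still requires authentication '''
    def get(self, request, tab):
        return TemplateResponse(request, 'feed.html', {'tab': tab})
```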
gh_patches_debug_15620
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-451
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Length limit check on Route53 TXT records is two characters short
*cfn-lint version: (`cfn-lint --version`)* master
*Description of issue.*
The length limit check on TXT records takes into account the starting and ending double quote characters, but these aren't counted on the API, so cfn-lint is really restricting to 253 characters rather than 255.
```
$ cat test.yml
Resources:
Example:
Type: AWS::Route53::RecordSet
Properties:
HostedZoneId: abc123
Name: example.com.
Type: TXT
TTL: '14400'
ResourceRecords:
# 255 "a" characters within appropriate quotes
- '"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"'
$ cfn-lint test.yml
E3020 The length of the TXT record (257) exceeds the limit (255)
test.yml:9:7
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/route53/RecordSet.py`
Content:
```
1 """
2 Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import re
18 from cfnlint import CloudFormationLintRule
19 from cfnlint import RuleMatch
20
21 from cfnlint.helpers import REGEX_IPV4, REGEX_IPV6, REGEX_ALPHANUMERIC
22
23 class RecordSet(CloudFormationLintRule):
24 """Check Route53 Recordset Configuration"""
25 id = 'E3020'
26 shortdesc = 'Validate Route53 RecordSets'
27 description = 'Check if all RecordSets are correctly configured'
28 source_url = 'https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html'
29 tags = ['resources', 'route53', 'record_set']
30
31 # https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html
32 VALID_RECORD_TYPES = [
33 'A',
34 'AAAA',
35 'CAA',
36 'CNAME',
37 'MX',
38 'NAPTR',
39 'NS',
40 'PTR',
41 'SOA'
42 'SPF',
43 'SRV',
44 'TXT'
45 ]
46
47 REGEX_CNAME = re.compile(r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])(.)$')
48
49 def check_a_record(self, path, recordset):
50 """Check A record Configuration"""
51 matches = []
52
53 resource_records = recordset.get('ResourceRecords')
54 for index, record in enumerate(resource_records):
55
56 if not isinstance(record, dict):
57 tree = path[:] + ['ResourceRecords', index]
58
59 # Check if a valid IPv4 address is specified
60 if not re.match(REGEX_IPV4, record):
61 message = 'A record ({}) is not a valid IPv4 address'
62 matches.append(RuleMatch(tree, message.format(record)))
63
64 return matches
65
66 def check_aaaa_record(self, path, recordset):
67 """Check AAAA record Configuration"""
68 matches = []
69
70 resource_records = recordset.get('ResourceRecords')
71 for index, record in enumerate(resource_records):
72
73 if not isinstance(record, dict):
74 tree = path[:] + ['ResourceRecords', index]
75
76 # Check if a valid IPv4 address is specified
77 if not re.match(REGEX_IPV6, record):
78 message = 'AAAA record ({}) is not a valid IPv6 address'
79 matches.append(RuleMatch(tree, message.format(record)))
80
81 return matches
82
83 def check_caa_record(self, path, recordset):
84 """Check CAA record Configuration"""
85 matches = []
86
87 resource_records = recordset.get('ResourceRecords')
88
89 for index, record in enumerate(resource_records):
90 tree = path[:] + ['ResourceRecords', index]
91
92 if not isinstance(record, dict):
93 # Split the record up to the mandatory settings (flags tag "value")
94 items = record.split(' ', 2)
95
96 # Check if the 3 settings are given.
97 if len(items) != 3:
98 message = 'CAA record must contain 3 settings (flags tag "value"), record contains {} settings.'
99 matches.append(RuleMatch(tree, message.format(len(items))))
100 else:
101 # Check the flag value
102 if not items[0].isdigit():
103 message = 'CAA record flag setting ({}) should be of type Integer.'
104 matches.append(RuleMatch(tree, message.format(items[0])))
105 else:
106 if int(items[0]) not in [0, 128]:
107 message = 'Invalid CAA record flag setting ({}) given, must be 0 or 128.'
108 matches.append(RuleMatch(tree, message.format(items[0])))
109
110 # Check the tag value
111 if not re.match(REGEX_ALPHANUMERIC, items[1]):
112 message = 'Invalid CAA record tag setting {}. Value has to be alphanumeric.'
113 matches.append(RuleMatch(tree, message.format(items[0])))
114
115 # Check the value
116 if not items[2].startswith('"') or not items[2].endswith('"'):
117 message = 'CAA record value setting has to be enclosed in double quotation marks (").'
118 matches.append(RuleMatch(tree, message))
119
120 return matches
121
122 def check_cname_record(self, path, recordset):
123 """Check CNAME record Configuration"""
124 matches = []
125
126 resource_records = recordset.get('ResourceRecords')
127 if len(resource_records) > 1:
128 message = 'A CNAME recordset can only contain 1 value'
129 matches.append(RuleMatch(path + ['ResourceRecords'], message))
130 else:
131 for index, record in enumerate(resource_records):
132 if not isinstance(record, dict):
133 tree = path[:] + ['ResourceRecords', index]
134 if (not re.match(self.REGEX_CNAME, record)
135 # ACM Route 53 validation uses invalid CNAMEs starting with `_`,
136 # special-case them rather than complicate the regex.
137 and not record.endswith('.acm-validations.aws.')):
138 message = 'CNAME record ({}) does not contain a valid domain name'
139 matches.append(RuleMatch(tree, message.format(record)))
140
141 return matches
142
143 def check_txt_record(self, path, recordset):
144 """Check TXT record Configuration"""
145 matches = []
146
147 # Check quotation of the records
148 resource_records = recordset.get('ResourceRecords')
149
150 for index, record in enumerate(resource_records):
151 tree = path[:] + ['ResourceRecords', index]
152
153 if not isinstance(record, dict):
154 if not record.startswith('"') or not record.endswith('"'):
155 message = 'TXT record ({}) has to be enclosed in double quotation marks (")'
156 matches.append(RuleMatch(tree, message.format(record)))
157 elif len(record) > 255:
158 message = 'The length of the TXT record ({}) exceeds the limit (255)'
159 matches.append(RuleMatch(tree, message.format(len(record))))
160
161 return matches
162
163 def check_recordset(self, path, recordset):
164 """Check record configuration"""
165
166 matches = []
167 recordset_type = recordset.get('Type')
168
169 # Skip Intrinsic functions
170 if not isinstance(recordset_type, dict):
171 if recordset_type not in self.VALID_RECORD_TYPES:
172 message = 'Invalid record type "{0}" specified'
173 matches.append(RuleMatch(path + ['Type'], message.format(recordset_type)))
174 elif not recordset.get('AliasTarget'):
175 # Record type specific checks
176 if recordset_type == 'A':
177 matches.extend(self.check_a_record(path, recordset))
178 elif recordset_type == 'AAAA':
179 matches.extend(self.check_aaaa_record(path, recordset))
180 elif recordset_type == 'CAA':
181 matches.extend(self.check_caa_record(path, recordset))
182 elif recordset_type == 'CNAME':
183 matches.extend(self.check_cname_record(path, recordset))
184 elif recordset_type == 'TXT':
185 matches.extend(self.check_txt_record(path, recordset))
186
187 return matches
188
189 def match(self, cfn):
190 """Check RecordSets and RecordSetGroups Properties"""
191
192 matches = []
193
194 recordsets = cfn.get_resources(['AWS::Route53::RecordSet'])
195
196 for name, recordset in recordsets.items():
197 path = ['Resources', name, 'Properties']
198
199 if isinstance(recordset, dict):
200 props = recordset.get('Properties')
201 if props:
202 matches.extend(self.check_recordset(path, props))
203
204 recordsetgroups = cfn.get_resource_properties(['AWS::Route53::RecordSetGroup', 'RecordSets'])
205
206 for recordsetgroup in recordsetgroups:
207 path = recordsetgroup['Path']
208 value = recordsetgroup['Value']
209 if isinstance(value, list):
210 for index, recordset in enumerate(value):
211 tree = path[:] + [index]
212 matches.extend(self.check_recordset(tree, recordset))
213
214 return matches
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cfnlint/rules/resources/route53/RecordSet.py b/src/cfnlint/rules/resources/route53/RecordSet.py
--- a/src/cfnlint/rules/resources/route53/RecordSet.py
+++ b/src/cfnlint/rules/resources/route53/RecordSet.py
@@ -154,7 +154,7 @@
if not record.startswith('"') or not record.endswith('"'):
message = 'TXT record ({}) has to be enclosed in double quotation marks (")'
matches.append(RuleMatch(tree, message.format(record)))
- elif len(record) > 255:
+ elif len(record) > 257: # 2 extra characters for start and end double quotation marks
message = 'The length of the TXT record ({}) exceeds the limit (255)'
matches.append(RuleMatch(tree, message.format(len(record))))
|
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/route53/RecordSet.py b/src/cfnlint/rules/resources/route53/RecordSet.py\n--- a/src/cfnlint/rules/resources/route53/RecordSet.py\n+++ b/src/cfnlint/rules/resources/route53/RecordSet.py\n@@ -154,7 +154,7 @@\n if not record.startswith('\"') or not record.endswith('\"'):\n message = 'TXT record ({}) has to be enclosed in double quotation marks (\")'\n matches.append(RuleMatch(tree, message.format(record)))\n- elif len(record) > 255:\n+ elif len(record) > 257: # 2 extra characters for start and end double quotation marks\n message = 'The length of the TXT record ({}) exceeds the limit (255)'\n matches.append(RuleMatch(tree, message.format(len(record))))\n", "issue": "Length limit check on Route53 TXT records is two characters short\n*cfn-lint version: (`cfn-lint --version`)* master\r\n\r\n*Description of issue.*\r\n\r\nThe length limit check on TXT records takes into account the starting and ending double quote characters, but these aren't counted on the API, so cfn-lint is really restricting to 253 characters rather than 255.\r\n\r\n```\r\n$ cat test.yml\r\nResources:\r\n Example:\r\n Type: AWS::Route53::RecordSet\r\n Properties:\r\n HostedZoneId: abc123\r\n Name: example.com.\r\n Type: TXT\r\n TTL: '14400'\r\n ResourceRecords:\r\n # 255 \"a\" characters within appropriate quotes\r\n - '\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"'\r\n$ cfn-lint test.yml\r\nE3020 The length of the TXT record (257) exceeds the limit (255)\r\ntest.yml:9:7\r\n```\n", "before_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport re\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\nfrom cfnlint.helpers import REGEX_IPV4, REGEX_IPV6, REGEX_ALPHANUMERIC\n\nclass RecordSet(CloudFormationLintRule):\n \"\"\"Check Route53 Recordset Configuration\"\"\"\n id = 'E3020'\n shortdesc = 'Validate Route53 RecordSets'\n description = 'Check if all RecordSets are correctly configured'\n source_url = 'https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html'\n tags = ['resources', 'route53', 'record_set']\n\n # https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html\n VALID_RECORD_TYPES = [\n 'A',\n 'AAAA',\n 'CAA',\n 'CNAME',\n 'MX',\n 'NAPTR',\n 'NS',\n 'PTR',\n 'SOA'\n 'SPF',\n 'SRV',\n 'TXT'\n ]\n\n REGEX_CNAME = re.compile(r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9])(.)$')\n\n def check_a_record(self, path, recordset):\n \"\"\"Check A record Configuration\"\"\"\n matches = []\n\n resource_records = recordset.get('ResourceRecords')\n for index, record in enumerate(resource_records):\n\n if not isinstance(record, dict):\n tree = path[:] + ['ResourceRecords', index]\n\n # Check if a valid IPv4 address is specified\n if not re.match(REGEX_IPV4, record):\n message = 'A record ({}) is not a valid IPv4 address'\n matches.append(RuleMatch(tree, message.format(record)))\n\n return matches\n\n def check_aaaa_record(self, path, recordset):\n \"\"\"Check AAAA record Configuration\"\"\"\n matches = []\n\n resource_records = recordset.get('ResourceRecords')\n for index, record in enumerate(resource_records):\n\n if not isinstance(record, dict):\n tree = path[:] + ['ResourceRecords', index]\n\n # Check if a valid IPv4 address is specified\n if not re.match(REGEX_IPV6, record):\n message = 'AAAA record ({}) is not a valid IPv6 address'\n matches.append(RuleMatch(tree, message.format(record)))\n\n return matches\n\n def check_caa_record(self, path, recordset):\n \"\"\"Check CAA record Configuration\"\"\"\n matches = []\n\n resource_records = recordset.get('ResourceRecords')\n\n for index, record in enumerate(resource_records):\n tree = path[:] + ['ResourceRecords', index]\n\n if not isinstance(record, dict):\n # Split the record up to the mandatory settings (flags tag \"value\")\n items = record.split(' ', 2)\n\n # Check if the 3 settings are given.\n if len(items) != 3:\n message = 'CAA record must contain 3 settings (flags tag \"value\"), record contains {} settings.'\n matches.append(RuleMatch(tree, message.format(len(items))))\n else:\n # Check the flag value\n if not items[0].isdigit():\n message = 'CAA record flag setting ({}) should be of type Integer.'\n matches.append(RuleMatch(tree, message.format(items[0])))\n else:\n if int(items[0]) not in [0, 128]:\n message = 'Invalid CAA record flag setting ({}) given, must be 0 or 128.'\n matches.append(RuleMatch(tree, message.format(items[0])))\n\n # Check the tag value\n if not re.match(REGEX_ALPHANUMERIC, items[1]):\n message = 'Invalid CAA record tag setting {}. 
Value has to be alphanumeric.'\n matches.append(RuleMatch(tree, message.format(items[0])))\n\n # Check the value\n if not items[2].startswith('\"') or not items[2].endswith('\"'):\n message = 'CAA record value setting has to be enclosed in double quotation marks (\").'\n matches.append(RuleMatch(tree, message))\n\n return matches\n\n def check_cname_record(self, path, recordset):\n \"\"\"Check CNAME record Configuration\"\"\"\n matches = []\n\n resource_records = recordset.get('ResourceRecords')\n if len(resource_records) > 1:\n message = 'A CNAME recordset can only contain 1 value'\n matches.append(RuleMatch(path + ['ResourceRecords'], message))\n else:\n for index, record in enumerate(resource_records):\n if not isinstance(record, dict):\n tree = path[:] + ['ResourceRecords', index]\n if (not re.match(self.REGEX_CNAME, record)\n # ACM Route 53 validation uses invalid CNAMEs starting with `_`,\n # special-case them rather than complicate the regex.\n and not record.endswith('.acm-validations.aws.')):\n message = 'CNAME record ({}) does not contain a valid domain name'\n matches.append(RuleMatch(tree, message.format(record)))\n\n return matches\n\n def check_txt_record(self, path, recordset):\n \"\"\"Check TXT record Configuration\"\"\"\n matches = []\n\n # Check quotation of the records\n resource_records = recordset.get('ResourceRecords')\n\n for index, record in enumerate(resource_records):\n tree = path[:] + ['ResourceRecords', index]\n\n if not isinstance(record, dict):\n if not record.startswith('\"') or not record.endswith('\"'):\n message = 'TXT record ({}) has to be enclosed in double quotation marks (\")'\n matches.append(RuleMatch(tree, message.format(record)))\n elif len(record) > 255:\n message = 'The length of the TXT record ({}) exceeds the limit (255)'\n matches.append(RuleMatch(tree, message.format(len(record))))\n\n return matches\n\n def check_recordset(self, path, recordset):\n \"\"\"Check record configuration\"\"\"\n\n matches = []\n recordset_type = recordset.get('Type')\n\n # Skip Intrinsic functions\n if not isinstance(recordset_type, dict):\n if recordset_type not in self.VALID_RECORD_TYPES:\n message = 'Invalid record type \"{0}\" specified'\n matches.append(RuleMatch(path + ['Type'], message.format(recordset_type)))\n elif not recordset.get('AliasTarget'):\n # Record type specific checks\n if recordset_type == 'A':\n matches.extend(self.check_a_record(path, recordset))\n elif recordset_type == 'AAAA':\n matches.extend(self.check_aaaa_record(path, recordset))\n elif recordset_type == 'CAA':\n matches.extend(self.check_caa_record(path, recordset))\n elif recordset_type == 'CNAME':\n matches.extend(self.check_cname_record(path, recordset))\n elif recordset_type == 'TXT':\n matches.extend(self.check_txt_record(path, recordset))\n\n return matches\n\n def match(self, cfn):\n \"\"\"Check RecordSets and RecordSetGroups Properties\"\"\"\n\n matches = []\n\n recordsets = cfn.get_resources(['AWS::Route53::RecordSet'])\n\n for name, recordset in recordsets.items():\n path = ['Resources', name, 'Properties']\n\n if isinstance(recordset, dict):\n props = recordset.get('Properties')\n if props:\n matches.extend(self.check_recordset(path, props))\n\n recordsetgroups = cfn.get_resource_properties(['AWS::Route53::RecordSetGroup', 'RecordSets'])\n\n for recordsetgroup in recordsetgroups:\n path = recordsetgroup['Path']\n value = recordsetgroup['Value']\n if isinstance(value, list):\n for index, recordset in enumerate(value):\n tree = path[:] + [index]\n 
matches.extend(self.check_recordset(tree, recordset))\n\n return matches\n", "path": "src/cfnlint/rules/resources/route53/RecordSet.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport re\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\nfrom cfnlint.helpers import REGEX_IPV4, REGEX_IPV6, REGEX_ALPHANUMERIC\n\nclass RecordSet(CloudFormationLintRule):\n \"\"\"Check Route53 Recordset Configuration\"\"\"\n id = 'E3020'\n shortdesc = 'Validate Route53 RecordSets'\n description = 'Check if all RecordSets are correctly configured'\n source_url = 'https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html'\n tags = ['resources', 'route53', 'record_set']\n\n # https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html\n VALID_RECORD_TYPES = [\n 'A',\n 'AAAA',\n 'CAA',\n 'CNAME',\n 'MX',\n 'NAPTR',\n 'NS',\n 'PTR',\n 'SOA'\n 'SPF',\n 'SRV',\n 'TXT'\n ]\n\n REGEX_CNAME = re.compile(r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9])(.)$')\n\n def check_a_record(self, path, recordset):\n \"\"\"Check A record Configuration\"\"\"\n matches = []\n\n resource_records = recordset.get('ResourceRecords')\n for index, record in enumerate(resource_records):\n\n if not isinstance(record, dict):\n tree = path[:] + ['ResourceRecords', index]\n\n # Check if a valid IPv4 address is specified\n if not re.match(REGEX_IPV4, record):\n message = 'A record ({}) is not a valid IPv4 address'\n matches.append(RuleMatch(tree, message.format(record)))\n\n return matches\n\n def check_aaaa_record(self, path, recordset):\n \"\"\"Check AAAA record Configuration\"\"\"\n matches = []\n\n resource_records = recordset.get('ResourceRecords')\n for index, record in enumerate(resource_records):\n\n if not isinstance(record, dict):\n tree = path[:] + ['ResourceRecords', index]\n\n # Check if a valid IPv4 address is specified\n if not re.match(REGEX_IPV6, record):\n message = 'AAAA record ({}) is not a valid IPv6 address'\n matches.append(RuleMatch(tree, message.format(record)))\n\n return matches\n\n def check_caa_record(self, path, recordset):\n \"\"\"Check CAA record Configuration\"\"\"\n matches = []\n\n resource_records = recordset.get('ResourceRecords')\n\n for index, record in enumerate(resource_records):\n tree = path[:] + ['ResourceRecords', index]\n\n if not isinstance(record, dict):\n # Split the record up to the mandatory settings (flags tag \"value\")\n items = record.split(' ', 2)\n\n # Check if the 3 settings are given.\n if len(items) != 
3:\n message = 'CAA record must contain 3 settings (flags tag \"value\"), record contains {} settings.'\n matches.append(RuleMatch(tree, message.format(len(items))))\n else:\n # Check the flag value\n if not items[0].isdigit():\n message = 'CAA record flag setting ({}) should be of type Integer.'\n matches.append(RuleMatch(tree, message.format(items[0])))\n else:\n if int(items[0]) not in [0, 128]:\n message = 'Invalid CAA record flag setting ({}) given, must be 0 or 128.'\n matches.append(RuleMatch(tree, message.format(items[0])))\n\n # Check the tag value\n if not re.match(REGEX_ALPHANUMERIC, items[1]):\n message = 'Invalid CAA record tag setting {}. Value has to be alphanumeric.'\n matches.append(RuleMatch(tree, message.format(items[0])))\n\n # Check the value\n if not items[2].startswith('\"') or not items[2].endswith('\"'):\n message = 'CAA record value setting has to be enclosed in double quotation marks (\").'\n matches.append(RuleMatch(tree, message))\n\n return matches\n\n def check_cname_record(self, path, recordset):\n \"\"\"Check CNAME record Configuration\"\"\"\n matches = []\n\n resource_records = recordset.get('ResourceRecords')\n if len(resource_records) > 1:\n message = 'A CNAME recordset can only contain 1 value'\n matches.append(RuleMatch(path + ['ResourceRecords'], message))\n else:\n for index, record in enumerate(resource_records):\n if not isinstance(record, dict):\n tree = path[:] + ['ResourceRecords', index]\n if (not re.match(self.REGEX_CNAME, record)\n # ACM Route 53 validation uses invalid CNAMEs starting with `_`,\n # special-case them rather than complicate the regex.\n and not record.endswith('.acm-validations.aws.')):\n message = 'CNAME record ({}) does not contain a valid domain name'\n matches.append(RuleMatch(tree, message.format(record)))\n\n return matches\n\n def check_txt_record(self, path, recordset):\n \"\"\"Check TXT record Configuration\"\"\"\n matches = []\n\n # Check quotation of the records\n resource_records = recordset.get('ResourceRecords')\n\n for index, record in enumerate(resource_records):\n tree = path[:] + ['ResourceRecords', index]\n\n if not isinstance(record, dict):\n if not record.startswith('\"') or not record.endswith('\"'):\n message = 'TXT record ({}) has to be enclosed in double quotation marks (\")'\n matches.append(RuleMatch(tree, message.format(record)))\n elif len(record) > 257: # 2 extra characters for start and end double quotation marks\n message = 'The length of the TXT record ({}) exceeds the limit (255)'\n matches.append(RuleMatch(tree, message.format(len(record))))\n\n return matches\n\n def check_recordset(self, path, recordset):\n \"\"\"Check record configuration\"\"\"\n\n matches = []\n recordset_type = recordset.get('Type')\n\n # Skip Intrinsic functions\n if not isinstance(recordset_type, dict):\n if recordset_type not in self.VALID_RECORD_TYPES:\n message = 'Invalid record type \"{0}\" specified'\n matches.append(RuleMatch(path + ['Type'], message.format(recordset_type)))\n elif not recordset.get('AliasTarget'):\n # Record type specific checks\n if recordset_type == 'A':\n matches.extend(self.check_a_record(path, recordset))\n elif recordset_type == 'AAAA':\n matches.extend(self.check_aaaa_record(path, recordset))\n elif recordset_type == 'CAA':\n matches.extend(self.check_caa_record(path, recordset))\n elif recordset_type == 'CNAME':\n matches.extend(self.check_cname_record(path, recordset))\n elif recordset_type == 'TXT':\n matches.extend(self.check_txt_record(path, recordset))\n\n return matches\n\n def 
match(self, cfn):\n \"\"\"Check RecordSets and RecordSetGroups Properties\"\"\"\n\n matches = []\n\n recordsets = cfn.get_resources(['AWS::Route53::RecordSet'])\n\n for name, recordset in recordsets.items():\n path = ['Resources', name, 'Properties']\n\n if isinstance(recordset, dict):\n props = recordset.get('Properties')\n if props:\n matches.extend(self.check_recordset(path, props))\n\n recordsetgroups = cfn.get_resource_properties(['AWS::Route53::RecordSetGroup', 'RecordSets'])\n\n for recordsetgroup in recordsetgroups:\n path = recordsetgroup['Path']\n value = recordsetgroup['Value']\n if isinstance(value, list):\n for index, recordset in enumerate(value):\n tree = path[:] + [index]\n matches.extend(self.check_recordset(tree, recordset))\n\n return matches\n", "path": "src/cfnlint/rules/resources/route53/RecordSet.py"}]}
| 2,979 | 197 |
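
The cfn-lint record above widens the raw length check to 257 characters because the quoted TXT value still carries its two surrounding quote marks. An equivalent formulation is to subtract the quotes before comparing; the sketch below is a hypothetical standalone version of that rule, not the project's actual implementation:

```python
# Hypothetical sketch of the TXT length rule from the record above: the value
# is stored with its enclosing double quotes, so only the inner payload counts
# toward the 255-character limit.
def txt_record_errors(record: str, limit: int = 255) -> list:
    errors = []
    if not (record.startswith('"') and record.endswith('"')):
        errors.append('TXT record has to be enclosed in double quotation marks (")')
    elif len(record) - 2 > limit:  # drop the two quote characters before comparing
        errors.append(f'TXT record length ({len(record) - 2}) exceeds the limit ({limit})')
    return errors


if __name__ == '__main__':
    ok = '"' + 'a' * 255 + '"'        # 255 payload characters -> allowed
    too_long = '"' + 'a' * 256 + '"'  # 256 payload characters -> rejected
    print(txt_record_errors(ok))        # []
    print(txt_record_errors(too_long))  # one error message
```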
gh_patches_debug_16896
|
rasdani/github-patches
|
git_diff
|
webkom__lego-1069
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong penalty count in email
The counter in the penalty email is still wrong:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lego/apps/feed/feed_handlers/penalty_handler.py`
Content:
```
1 from lego.apps.feed.activities import Activity
2 from lego.apps.feed.feed_handlers.base_handler import BaseHandler
3 from lego.apps.feed.feed_manager import feed_manager
4 from lego.apps.feed.feeds.notification_feed import NotificationFeed
5 from lego.apps.feed.registry import register_handler
6 from lego.apps.feed.verbs import PenaltyVerb
7 from lego.apps.users.models import Penalty
8 from lego.apps.users.notifications import PenaltyNotification
9
10
11 class PenaltyHandler(BaseHandler):
12 model = Penalty
13 manager = feed_manager
14
15 def get_activity(self, penalty):
16 return Activity(
17 actor=penalty.source_event, verb=PenaltyVerb, object=penalty, target=penalty.user,
18 time=penalty.created_at, extra_context={
19 'reason': penalty.reason,
20 'weight': penalty.weight,
21 'total': penalty.user.number_of_penalties()
22 }
23 )
24
25 def handle_create(self, penalty):
26 activity = self.get_activity(penalty)
27 self.manager.add_activity(activity, [penalty.user.pk], [NotificationFeed])
28
29 # Send Notification
30 notification = PenaltyNotification(penalty.user, penalty=penalty)
31 notification.notify()
32
33 def handle_update(self, penalty):
34 activity = self.get_activity(penalty)
35 self.manager.add_activity(activity, [penalty.user.pk], [NotificationFeed])
36
37 def handle_delete(self, penalty):
38 activity = self.get_activity(penalty)
39 self.manager.remove_activity(activity, [penalty.user.pk], [NotificationFeed])
40
41
42 register_handler(PenaltyHandler)
43
```
Path: `lego/apps/users/notifications.py`
Content:
```
1 from lego.apps.notifications.constants import PENALTY_CREATION
2 from lego.apps.notifications.notification import Notification
3
4
5 class PenaltyNotification(Notification):
6
7 name = PENALTY_CREATION
8
9 def generate_mail(self):
10 penalty = self.kwargs['penalty']
11
12 return self._delay_mail(
13 to_email=self.user.email,
14 context={
15 'name': self.user.full_name,
16 'weight': penalty.weight,
17 'event': penalty.source_event.title,
18 'reason': penalty.reason,
19 'total': self.user.number_of_penalties()
20 },
21 subject=f'Du har fått en ny prikk',
22 plain_template='users/email/penalty.txt',
23 html_template='users/email/penalty.html',
24 )
25
26 def generate_push(self):
27 penalty = self.kwargs['penalty']
28
29 return self._delay_push(
30 template='users/push/penalty.txt', context={
31 'weight': penalty.weight,
32 'event': penalty.source_event.title,
33 }, instance=penalty
34 )
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lego/apps/feed/feed_handlers/penalty_handler.py b/lego/apps/feed/feed_handlers/penalty_handler.py
--- a/lego/apps/feed/feed_handlers/penalty_handler.py
+++ b/lego/apps/feed/feed_handlers/penalty_handler.py
@@ -18,7 +18,6 @@
time=penalty.created_at, extra_context={
'reason': penalty.reason,
'weight': penalty.weight,
- 'total': penalty.user.number_of_penalties()
}
)
diff --git a/lego/apps/users/notifications.py b/lego/apps/users/notifications.py
--- a/lego/apps/users/notifications.py
+++ b/lego/apps/users/notifications.py
@@ -16,7 +16,6 @@
'weight': penalty.weight,
'event': penalty.source_event.title,
'reason': penalty.reason,
- 'total': self.user.number_of_penalties()
},
subject=f'Du har fått en ny prikk',
plain_template='users/email/penalty.txt',
|
{"golden_diff": "diff --git a/lego/apps/feed/feed_handlers/penalty_handler.py b/lego/apps/feed/feed_handlers/penalty_handler.py\n--- a/lego/apps/feed/feed_handlers/penalty_handler.py\n+++ b/lego/apps/feed/feed_handlers/penalty_handler.py\n@@ -18,7 +18,6 @@\n time=penalty.created_at, extra_context={\n 'reason': penalty.reason,\n 'weight': penalty.weight,\n- 'total': penalty.user.number_of_penalties()\n }\n )\n \ndiff --git a/lego/apps/users/notifications.py b/lego/apps/users/notifications.py\n--- a/lego/apps/users/notifications.py\n+++ b/lego/apps/users/notifications.py\n@@ -16,7 +16,6 @@\n 'weight': penalty.weight,\n 'event': penalty.source_event.title,\n 'reason': penalty.reason,\n- 'total': self.user.number_of_penalties()\n },\n subject=f'Du har f\u00e5tt en ny prikk',\n plain_template='users/email/penalty.txt',\n", "issue": "Wrong penalty count in email\nThe counter in the penalty email is still wrong:\r\n\r\n\r\n\n", "before_files": [{"content": "from lego.apps.feed.activities import Activity\nfrom lego.apps.feed.feed_handlers.base_handler import BaseHandler\nfrom lego.apps.feed.feed_manager import feed_manager\nfrom lego.apps.feed.feeds.notification_feed import NotificationFeed\nfrom lego.apps.feed.registry import register_handler\nfrom lego.apps.feed.verbs import PenaltyVerb\nfrom lego.apps.users.models import Penalty\nfrom lego.apps.users.notifications import PenaltyNotification\n\n\nclass PenaltyHandler(BaseHandler):\n model = Penalty\n manager = feed_manager\n\n def get_activity(self, penalty):\n return Activity(\n actor=penalty.source_event, verb=PenaltyVerb, object=penalty, target=penalty.user,\n time=penalty.created_at, extra_context={\n 'reason': penalty.reason,\n 'weight': penalty.weight,\n 'total': penalty.user.number_of_penalties()\n }\n )\n\n def handle_create(self, penalty):\n activity = self.get_activity(penalty)\n self.manager.add_activity(activity, [penalty.user.pk], [NotificationFeed])\n\n # Send Notification\n notification = PenaltyNotification(penalty.user, penalty=penalty)\n notification.notify()\n\n def handle_update(self, penalty):\n activity = self.get_activity(penalty)\n self.manager.add_activity(activity, [penalty.user.pk], [NotificationFeed])\n\n def handle_delete(self, penalty):\n activity = self.get_activity(penalty)\n self.manager.remove_activity(activity, [penalty.user.pk], [NotificationFeed])\n\n\nregister_handler(PenaltyHandler)\n", "path": "lego/apps/feed/feed_handlers/penalty_handler.py"}, {"content": "from lego.apps.notifications.constants import PENALTY_CREATION\nfrom lego.apps.notifications.notification import Notification\n\n\nclass PenaltyNotification(Notification):\n\n name = PENALTY_CREATION\n\n def generate_mail(self):\n penalty = self.kwargs['penalty']\n\n return self._delay_mail(\n to_email=self.user.email,\n context={\n 'name': self.user.full_name,\n 'weight': penalty.weight,\n 'event': penalty.source_event.title,\n 'reason': penalty.reason,\n 'total': self.user.number_of_penalties()\n },\n subject=f'Du har f\u00e5tt en ny prikk',\n plain_template='users/email/penalty.txt',\n html_template='users/email/penalty.html',\n )\n\n def generate_push(self):\n penalty = self.kwargs['penalty']\n\n return self._delay_push(\n template='users/push/penalty.txt', context={\n 'weight': penalty.weight,\n 'event': penalty.source_event.title,\n }, instance=penalty\n )\n", "path": "lego/apps/users/notifications.py"}], "after_files": [{"content": "from lego.apps.feed.activities import Activity\nfrom lego.apps.feed.feed_handlers.base_handler import 
BaseHandler\nfrom lego.apps.feed.feed_manager import feed_manager\nfrom lego.apps.feed.feeds.notification_feed import NotificationFeed\nfrom lego.apps.feed.registry import register_handler\nfrom lego.apps.feed.verbs import PenaltyVerb\nfrom lego.apps.users.models import Penalty\nfrom lego.apps.users.notifications import PenaltyNotification\n\n\nclass PenaltyHandler(BaseHandler):\n model = Penalty\n manager = feed_manager\n\n def get_activity(self, penalty):\n return Activity(\n actor=penalty.source_event, verb=PenaltyVerb, object=penalty, target=penalty.user,\n time=penalty.created_at, extra_context={\n 'reason': penalty.reason,\n 'weight': penalty.weight,\n }\n )\n\n def handle_create(self, penalty):\n activity = self.get_activity(penalty)\n self.manager.add_activity(activity, [penalty.user.pk], [NotificationFeed])\n\n # Send Notification\n notification = PenaltyNotification(penalty.user, penalty=penalty)\n notification.notify()\n\n def handle_update(self, penalty):\n activity = self.get_activity(penalty)\n self.manager.add_activity(activity, [penalty.user.pk], [NotificationFeed])\n\n def handle_delete(self, penalty):\n activity = self.get_activity(penalty)\n self.manager.remove_activity(activity, [penalty.user.pk], [NotificationFeed])\n\n\nregister_handler(PenaltyHandler)\n", "path": "lego/apps/feed/feed_handlers/penalty_handler.py"}, {"content": "from lego.apps.notifications.constants import PENALTY_CREATION\nfrom lego.apps.notifications.notification import Notification\n\n\nclass PenaltyNotification(Notification):\n\n name = PENALTY_CREATION\n\n def generate_mail(self):\n penalty = self.kwargs['penalty']\n\n return self._delay_mail(\n to_email=self.user.email,\n context={\n 'name': self.user.full_name,\n 'weight': penalty.weight,\n 'event': penalty.source_event.title,\n 'reason': penalty.reason,\n },\n subject=f'Du har f\u00e5tt en ny prikk',\n plain_template='users/email/penalty.txt',\n html_template='users/email/penalty.html',\n )\n\n def generate_push(self):\n penalty = self.kwargs['penalty']\n\n return self._delay_push(\n template='users/push/penalty.txt', context={\n 'weight': penalty.weight,\n 'event': penalty.source_event.title,\n }, instance=penalty\n )\n", "path": "lego/apps/users/notifications.py"}]}
| 1,064 | 231 |
gh_patches_debug_5569
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-803
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`stages: [commit]` hooks will run with `pre-commit run otherhookid`
minor logic bug, good new-contributor ticket
Easy to reproduce on pre-commit itself:
```diff
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index a146bd2..7bb382d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -3,6 +3,7 @@ repos:
rev: v1.2.3
hooks:
- id: trailing-whitespace
+ stages: [commit]
- id: end-of-file-fixer
- id: autopep8-wrapper
- id: check-docstring-first
```
```console
$ pre-commit run end-of-file-fixer --all-files
Trim Trailing Whitespace.................................................Passed
Fix End of Files.........................................................Passed
```
(it should have only run `end-of-file-fixer` but also run `trailing-whitespace` due to a logic error).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/commands/run.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import logging
4 import os
5 import re
6 import subprocess
7 import sys
8
9 from identify.identify import tags_from_path
10
11 from pre_commit import color
12 from pre_commit import git
13 from pre_commit import output
14 from pre_commit.output import get_hook_message
15 from pre_commit.repository import repositories
16 from pre_commit.staged_files_only import staged_files_only
17 from pre_commit.util import cmd_output
18 from pre_commit.util import memoize_by_cwd
19 from pre_commit.util import noop_context
20
21
22 logger = logging.getLogger('pre_commit')
23
24
25 tags_from_path = memoize_by_cwd(tags_from_path)
26
27
28 def _get_skips(environ):
29 skips = environ.get('SKIP', '')
30 return {skip.strip() for skip in skips.split(',') if skip.strip()}
31
32
33 def _hook_msg_start(hook, verbose):
34 return '{}{}'.format(
35 '[{}] '.format(hook['id']) if verbose else '', hook['name'],
36 )
37
38
39 def _filter_by_include_exclude(filenames, include, exclude):
40 include_re, exclude_re = re.compile(include), re.compile(exclude)
41 return [
42 filename for filename in filenames
43 if (
44 include_re.search(filename) and
45 not exclude_re.search(filename) and
46 os.path.lexists(filename)
47 )
48 ]
49
50
51 def _filter_by_types(filenames, types, exclude_types):
52 types, exclude_types = frozenset(types), frozenset(exclude_types)
53 ret = []
54 for filename in filenames:
55 tags = tags_from_path(filename)
56 if tags >= types and not tags & exclude_types:
57 ret.append(filename)
58 return tuple(ret)
59
60
61 SKIPPED = 'Skipped'
62 NO_FILES = '(no files to check)'
63
64
65 def _run_single_hook(filenames, hook, repo, args, skips, cols):
66 include, exclude = hook['files'], hook['exclude']
67 filenames = _filter_by_include_exclude(filenames, include, exclude)
68 types, exclude_types = hook['types'], hook['exclude_types']
69 filenames = _filter_by_types(filenames, types, exclude_types)
70
71 if hook['language'] == 'pcre':
72 logger.warning(
73 '`{}` (from {}) uses the deprecated pcre language.\n'
74 'The pcre language is scheduled for removal in pre-commit 2.x.\n'
75 'The pygrep language is a more portable (and usually drop-in) '
76 'replacement.'.format(hook['id'], repo.repo_config['repo']),
77 )
78
79 if hook['id'] in skips:
80 output.write(get_hook_message(
81 _hook_msg_start(hook, args.verbose),
82 end_msg=SKIPPED,
83 end_color=color.YELLOW,
84 use_color=args.color,
85 cols=cols,
86 ))
87 return 0
88 elif not filenames and not hook['always_run']:
89 output.write(get_hook_message(
90 _hook_msg_start(hook, args.verbose),
91 postfix=NO_FILES,
92 end_msg=SKIPPED,
93 end_color=color.TURQUOISE,
94 use_color=args.color,
95 cols=cols,
96 ))
97 return 0
98
99 # Print the hook and the dots first in case the hook takes hella long to
100 # run.
101 output.write(get_hook_message(
102 _hook_msg_start(hook, args.verbose), end_len=6, cols=cols,
103 ))
104 sys.stdout.flush()
105
106 diff_before = cmd_output(
107 'git', 'diff', '--no-ext-diff', retcode=None, encoding=None,
108 )
109 retcode, stdout, stderr = repo.run_hook(
110 hook, tuple(filenames) if hook['pass_filenames'] else (),
111 )
112 diff_after = cmd_output(
113 'git', 'diff', '--no-ext-diff', retcode=None, encoding=None,
114 )
115
116 file_modifications = diff_before != diff_after
117
118 # If the hook makes changes, fail the commit
119 if file_modifications:
120 retcode = 1
121
122 if retcode:
123 retcode = 1
124 print_color = color.RED
125 pass_fail = 'Failed'
126 else:
127 retcode = 0
128 print_color = color.GREEN
129 pass_fail = 'Passed'
130
131 output.write_line(color.format_color(pass_fail, print_color, args.color))
132
133 if (
134 (stdout or stderr or file_modifications) and
135 (retcode or args.verbose or hook['verbose'])
136 ):
137 output.write_line('hookid: {}\n'.format(hook['id']))
138
139 # Print a message if failing due to file modifications
140 if file_modifications:
141 output.write('Files were modified by this hook.')
142
143 if stdout or stderr:
144 output.write_line(' Additional output:')
145
146 output.write_line()
147
148 for out in (stdout, stderr):
149 assert type(out) is bytes, type(out)
150 if out.strip():
151 output.write_line(out.strip(), logfile_name=hook['log_file'])
152 output.write_line()
153
154 return retcode
155
156
157 def _compute_cols(hooks, verbose):
158 """Compute the number of columns to display hook messages. The widest
159 that will be displayed is in the no files skipped case:
160
161 Hook name...(no files to check) Skipped
162
163 or in the verbose case
164
165 Hook name [hookid]...(no files to check) Skipped
166 """
167 if hooks:
168 name_len = max(len(_hook_msg_start(hook, verbose)) for hook in hooks)
169 else:
170 name_len = 0
171
172 cols = name_len + 3 + len(NO_FILES) + 1 + len(SKIPPED)
173 return max(cols, 80)
174
175
176 def _all_filenames(args):
177 if args.origin and args.source:
178 return git.get_changed_files(args.origin, args.source)
179 elif args.hook_stage == 'commit-msg':
180 return (args.commit_msg_filename,)
181 elif args.files:
182 return args.files
183 elif args.all_files:
184 return git.get_all_files()
185 elif git.is_in_merge_conflict():
186 return git.get_conflicted_files()
187 else:
188 return git.get_staged_files()
189
190
191 def _run_hooks(config, repo_hooks, args, environ):
192 """Actually run the hooks."""
193 skips = _get_skips(environ)
194 cols = _compute_cols([hook for _, hook in repo_hooks], args.verbose)
195 filenames = _all_filenames(args)
196 filenames = _filter_by_include_exclude(filenames, '', config['exclude'])
197 retval = 0
198 for repo, hook in repo_hooks:
199 retval |= _run_single_hook(filenames, hook, repo, args, skips, cols)
200 if retval and config['fail_fast']:
201 break
202 if (
203 retval and
204 args.show_diff_on_failure and
205 subprocess.call(('git', 'diff', '--quiet', '--no-ext-diff')) != 0
206 ):
207 output.write_line('All changes made by hooks:')
208 subprocess.call(('git', '--no-pager', 'diff', '--no-ext-diff'))
209 return retval
210
211
212 def _has_unmerged_paths():
213 _, stdout, _ = cmd_output('git', 'ls-files', '--unmerged')
214 return bool(stdout.strip())
215
216
217 def _has_unstaged_config(runner):
218 retcode, _, _ = cmd_output(
219 'git', 'diff', '--no-ext-diff', '--exit-code', runner.config_file_path,
220 retcode=None,
221 )
222 # be explicit, other git errors don't mean it has an unstaged config.
223 return retcode == 1
224
225
226 def run(runner, store, args, environ=os.environ):
227 no_stash = args.all_files or bool(args.files)
228
229 # Check if we have unresolved merge conflict files and fail fast.
230 if _has_unmerged_paths():
231 logger.error('Unmerged files. Resolve before committing.')
232 return 1
233 if bool(args.source) != bool(args.origin):
234 logger.error('Specify both --origin and --source.')
235 return 1
236 if _has_unstaged_config(runner) and not no_stash:
237 logger.error(
238 'Your pre-commit configuration is unstaged.\n'
239 '`git add {}` to fix this.'.format(runner.config_file),
240 )
241 return 1
242
243 # Expose origin / source as environment variables for hooks to consume
244 if args.origin and args.source:
245 environ['PRE_COMMIT_ORIGIN'] = args.origin
246 environ['PRE_COMMIT_SOURCE'] = args.source
247
248 if no_stash:
249 ctx = noop_context()
250 else:
251 ctx = staged_files_only(store.directory)
252
253 with ctx:
254 repo_hooks = []
255 for repo in repositories(runner.config, store):
256 for _, hook in repo.hooks:
257 if (
258 (not args.hook or hook['id'] == args.hook) and
259 not hook['stages'] or args.hook_stage in hook['stages']
260 ):
261 repo_hooks.append((repo, hook))
262
263 if args.hook and not repo_hooks:
264 output.write_line('No hook with id `{}`'.format(args.hook))
265 return 1
266
267 for repo in {repo for repo, _ in repo_hooks}:
268 repo.require_installed()
269
270 return _run_hooks(runner.config, repo_hooks, args, environ)
271
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py
--- a/pre_commit/commands/run.py
+++ b/pre_commit/commands/run.py
@@ -256,7 +256,7 @@
for _, hook in repo.hooks:
if (
(not args.hook or hook['id'] == args.hook) and
- not hook['stages'] or args.hook_stage in hook['stages']
+ (not hook['stages'] or args.hook_stage in hook['stages'])
):
repo_hooks.append((repo, hook))
|
{"golden_diff": "diff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py\n--- a/pre_commit/commands/run.py\n+++ b/pre_commit/commands/run.py\n@@ -256,7 +256,7 @@\n for _, hook in repo.hooks:\n if (\n (not args.hook or hook['id'] == args.hook) and\n- not hook['stages'] or args.hook_stage in hook['stages']\n+ (not hook['stages'] or args.hook_stage in hook['stages'])\n ):\n repo_hooks.append((repo, hook))\n", "issue": "`stages: [commit]` hooks will run with `pre-commit run otherhookid`\nminor logic bug, good new-contributor ticket\r\n\r\nEasy to reproduce on pre-commit itself:\r\n\r\n```diff\r\ndiff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml\r\nindex a146bd2..7bb382d 100644\r\n--- a/.pre-commit-config.yaml\r\n+++ b/.pre-commit-config.yaml\r\n@@ -3,6 +3,7 @@ repos:\r\n rev: v1.2.3\r\n hooks:\r\n - id: trailing-whitespace\r\n+ stages: [commit]\r\n - id: end-of-file-fixer\r\n - id: autopep8-wrapper\r\n - id: check-docstring-first\r\n```\r\n\r\n```console\r\n$ pre-commit run end-of-file-fixer --all-files\r\nTrim Trailing Whitespace.................................................Passed\r\nFix End of Files.........................................................Passed\r\n```\r\n\r\n(it should have only run `end-of-file-fixer` but also run `trailing-whitespace` due to a logic error).\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\nimport os\nimport re\nimport subprocess\nimport sys\n\nfrom identify.identify import tags_from_path\n\nfrom pre_commit import color\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.output import get_hook_message\nfrom pre_commit.repository import repositories\nfrom pre_commit.staged_files_only import staged_files_only\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import memoize_by_cwd\nfrom pre_commit.util import noop_context\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ntags_from_path = memoize_by_cwd(tags_from_path)\n\n\ndef _get_skips(environ):\n skips = environ.get('SKIP', '')\n return {skip.strip() for skip in skips.split(',') if skip.strip()}\n\n\ndef _hook_msg_start(hook, verbose):\n return '{}{}'.format(\n '[{}] '.format(hook['id']) if verbose else '', hook['name'],\n )\n\n\ndef _filter_by_include_exclude(filenames, include, exclude):\n include_re, exclude_re = re.compile(include), re.compile(exclude)\n return [\n filename for filename in filenames\n if (\n include_re.search(filename) and\n not exclude_re.search(filename) and\n os.path.lexists(filename)\n )\n ]\n\n\ndef _filter_by_types(filenames, types, exclude_types):\n types, exclude_types = frozenset(types), frozenset(exclude_types)\n ret = []\n for filename in filenames:\n tags = tags_from_path(filename)\n if tags >= types and not tags & exclude_types:\n ret.append(filename)\n return tuple(ret)\n\n\nSKIPPED = 'Skipped'\nNO_FILES = '(no files to check)'\n\n\ndef _run_single_hook(filenames, hook, repo, args, skips, cols):\n include, exclude = hook['files'], hook['exclude']\n filenames = _filter_by_include_exclude(filenames, include, exclude)\n types, exclude_types = hook['types'], hook['exclude_types']\n filenames = _filter_by_types(filenames, types, exclude_types)\n\n if hook['language'] == 'pcre':\n logger.warning(\n '`{}` (from {}) uses the deprecated pcre language.\\n'\n 'The pcre language is scheduled for removal in pre-commit 2.x.\\n'\n 'The pygrep language is a more portable (and usually drop-in) '\n 'replacement.'.format(hook['id'], repo.repo_config['repo']),\n )\n\n if hook['id'] in 
skips:\n output.write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n end_msg=SKIPPED,\n end_color=color.YELLOW,\n use_color=args.color,\n cols=cols,\n ))\n return 0\n elif not filenames and not hook['always_run']:\n output.write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n postfix=NO_FILES,\n end_msg=SKIPPED,\n end_color=color.TURQUOISE,\n use_color=args.color,\n cols=cols,\n ))\n return 0\n\n # Print the hook and the dots first in case the hook takes hella long to\n # run.\n output.write(get_hook_message(\n _hook_msg_start(hook, args.verbose), end_len=6, cols=cols,\n ))\n sys.stdout.flush()\n\n diff_before = cmd_output(\n 'git', 'diff', '--no-ext-diff', retcode=None, encoding=None,\n )\n retcode, stdout, stderr = repo.run_hook(\n hook, tuple(filenames) if hook['pass_filenames'] else (),\n )\n diff_after = cmd_output(\n 'git', 'diff', '--no-ext-diff', retcode=None, encoding=None,\n )\n\n file_modifications = diff_before != diff_after\n\n # If the hook makes changes, fail the commit\n if file_modifications:\n retcode = 1\n\n if retcode:\n retcode = 1\n print_color = color.RED\n pass_fail = 'Failed'\n else:\n retcode = 0\n print_color = color.GREEN\n pass_fail = 'Passed'\n\n output.write_line(color.format_color(pass_fail, print_color, args.color))\n\n if (\n (stdout or stderr or file_modifications) and\n (retcode or args.verbose or hook['verbose'])\n ):\n output.write_line('hookid: {}\\n'.format(hook['id']))\n\n # Print a message if failing due to file modifications\n if file_modifications:\n output.write('Files were modified by this hook.')\n\n if stdout or stderr:\n output.write_line(' Additional output:')\n\n output.write_line()\n\n for out in (stdout, stderr):\n assert type(out) is bytes, type(out)\n if out.strip():\n output.write_line(out.strip(), logfile_name=hook['log_file'])\n output.write_line()\n\n return retcode\n\n\ndef _compute_cols(hooks, verbose):\n \"\"\"Compute the number of columns to display hook messages. 
The widest\n that will be displayed is in the no files skipped case:\n\n Hook name...(no files to check) Skipped\n\n or in the verbose case\n\n Hook name [hookid]...(no files to check) Skipped\n \"\"\"\n if hooks:\n name_len = max(len(_hook_msg_start(hook, verbose)) for hook in hooks)\n else:\n name_len = 0\n\n cols = name_len + 3 + len(NO_FILES) + 1 + len(SKIPPED)\n return max(cols, 80)\n\n\ndef _all_filenames(args):\n if args.origin and args.source:\n return git.get_changed_files(args.origin, args.source)\n elif args.hook_stage == 'commit-msg':\n return (args.commit_msg_filename,)\n elif args.files:\n return args.files\n elif args.all_files:\n return git.get_all_files()\n elif git.is_in_merge_conflict():\n return git.get_conflicted_files()\n else:\n return git.get_staged_files()\n\n\ndef _run_hooks(config, repo_hooks, args, environ):\n \"\"\"Actually run the hooks.\"\"\"\n skips = _get_skips(environ)\n cols = _compute_cols([hook for _, hook in repo_hooks], args.verbose)\n filenames = _all_filenames(args)\n filenames = _filter_by_include_exclude(filenames, '', config['exclude'])\n retval = 0\n for repo, hook in repo_hooks:\n retval |= _run_single_hook(filenames, hook, repo, args, skips, cols)\n if retval and config['fail_fast']:\n break\n if (\n retval and\n args.show_diff_on_failure and\n subprocess.call(('git', 'diff', '--quiet', '--no-ext-diff')) != 0\n ):\n output.write_line('All changes made by hooks:')\n subprocess.call(('git', '--no-pager', 'diff', '--no-ext-diff'))\n return retval\n\n\ndef _has_unmerged_paths():\n _, stdout, _ = cmd_output('git', 'ls-files', '--unmerged')\n return bool(stdout.strip())\n\n\ndef _has_unstaged_config(runner):\n retcode, _, _ = cmd_output(\n 'git', 'diff', '--no-ext-diff', '--exit-code', runner.config_file_path,\n retcode=None,\n )\n # be explicit, other git errors don't mean it has an unstaged config.\n return retcode == 1\n\n\ndef run(runner, store, args, environ=os.environ):\n no_stash = args.all_files or bool(args.files)\n\n # Check if we have unresolved merge conflict files and fail fast.\n if _has_unmerged_paths():\n logger.error('Unmerged files. 
Resolve before committing.')\n return 1\n if bool(args.source) != bool(args.origin):\n logger.error('Specify both --origin and --source.')\n return 1\n if _has_unstaged_config(runner) and not no_stash:\n logger.error(\n 'Your pre-commit configuration is unstaged.\\n'\n '`git add {}` to fix this.'.format(runner.config_file),\n )\n return 1\n\n # Expose origin / source as environment variables for hooks to consume\n if args.origin and args.source:\n environ['PRE_COMMIT_ORIGIN'] = args.origin\n environ['PRE_COMMIT_SOURCE'] = args.source\n\n if no_stash:\n ctx = noop_context()\n else:\n ctx = staged_files_only(store.directory)\n\n with ctx:\n repo_hooks = []\n for repo in repositories(runner.config, store):\n for _, hook in repo.hooks:\n if (\n (not args.hook or hook['id'] == args.hook) and\n not hook['stages'] or args.hook_stage in hook['stages']\n ):\n repo_hooks.append((repo, hook))\n\n if args.hook and not repo_hooks:\n output.write_line('No hook with id `{}`'.format(args.hook))\n return 1\n\n for repo in {repo for repo, _ in repo_hooks}:\n repo.require_installed()\n\n return _run_hooks(runner.config, repo_hooks, args, environ)\n", "path": "pre_commit/commands/run.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\nimport os\nimport re\nimport subprocess\nimport sys\n\nfrom identify.identify import tags_from_path\n\nfrom pre_commit import color\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.output import get_hook_message\nfrom pre_commit.repository import repositories\nfrom pre_commit.staged_files_only import staged_files_only\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import memoize_by_cwd\nfrom pre_commit.util import noop_context\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ntags_from_path = memoize_by_cwd(tags_from_path)\n\n\ndef _get_skips(environ):\n skips = environ.get('SKIP', '')\n return {skip.strip() for skip in skips.split(',') if skip.strip()}\n\n\ndef _hook_msg_start(hook, verbose):\n return '{}{}'.format(\n '[{}] '.format(hook['id']) if verbose else '', hook['name'],\n )\n\n\ndef _filter_by_include_exclude(filenames, include, exclude):\n include_re, exclude_re = re.compile(include), re.compile(exclude)\n return [\n filename for filename in filenames\n if (\n include_re.search(filename) and\n not exclude_re.search(filename) and\n os.path.lexists(filename)\n )\n ]\n\n\ndef _filter_by_types(filenames, types, exclude_types):\n types, exclude_types = frozenset(types), frozenset(exclude_types)\n ret = []\n for filename in filenames:\n tags = tags_from_path(filename)\n if tags >= types and not tags & exclude_types:\n ret.append(filename)\n return tuple(ret)\n\n\nSKIPPED = 'Skipped'\nNO_FILES = '(no files to check)'\n\n\ndef _run_single_hook(filenames, hook, repo, args, skips, cols):\n include, exclude = hook['files'], hook['exclude']\n filenames = _filter_by_include_exclude(filenames, include, exclude)\n types, exclude_types = hook['types'], hook['exclude_types']\n filenames = _filter_by_types(filenames, types, exclude_types)\n\n if hook['language'] == 'pcre':\n logger.warning(\n '`{}` (from {}) uses the deprecated pcre language.\\n'\n 'The pcre language is scheduled for removal in pre-commit 2.x.\\n'\n 'The pygrep language is a more portable (and usually drop-in) '\n 'replacement.'.format(hook['id'], repo.repo_config['repo']),\n )\n\n if hook['id'] in skips:\n output.write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n end_msg=SKIPPED,\n end_color=color.YELLOW,\n 
use_color=args.color,\n cols=cols,\n ))\n return 0\n elif not filenames and not hook['always_run']:\n output.write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n postfix=NO_FILES,\n end_msg=SKIPPED,\n end_color=color.TURQUOISE,\n use_color=args.color,\n cols=cols,\n ))\n return 0\n\n # Print the hook and the dots first in case the hook takes hella long to\n # run.\n output.write(get_hook_message(\n _hook_msg_start(hook, args.verbose), end_len=6, cols=cols,\n ))\n sys.stdout.flush()\n\n diff_before = cmd_output(\n 'git', 'diff', '--no-ext-diff', retcode=None, encoding=None,\n )\n retcode, stdout, stderr = repo.run_hook(\n hook, tuple(filenames) if hook['pass_filenames'] else (),\n )\n diff_after = cmd_output(\n 'git', 'diff', '--no-ext-diff', retcode=None, encoding=None,\n )\n\n file_modifications = diff_before != diff_after\n\n # If the hook makes changes, fail the commit\n if file_modifications:\n retcode = 1\n\n if retcode:\n retcode = 1\n print_color = color.RED\n pass_fail = 'Failed'\n else:\n retcode = 0\n print_color = color.GREEN\n pass_fail = 'Passed'\n\n output.write_line(color.format_color(pass_fail, print_color, args.color))\n\n if (\n (stdout or stderr or file_modifications) and\n (retcode or args.verbose or hook['verbose'])\n ):\n output.write_line('hookid: {}\\n'.format(hook['id']))\n\n # Print a message if failing due to file modifications\n if file_modifications:\n output.write('Files were modified by this hook.')\n\n if stdout or stderr:\n output.write_line(' Additional output:')\n\n output.write_line()\n\n for out in (stdout, stderr):\n assert type(out) is bytes, type(out)\n if out.strip():\n output.write_line(out.strip(), logfile_name=hook['log_file'])\n output.write_line()\n\n return retcode\n\n\ndef _compute_cols(hooks, verbose):\n \"\"\"Compute the number of columns to display hook messages. 
The widest\n that will be displayed is in the no files skipped case:\n\n Hook name...(no files to check) Skipped\n\n or in the verbose case\n\n Hook name [hookid]...(no files to check) Skipped\n \"\"\"\n if hooks:\n name_len = max(len(_hook_msg_start(hook, verbose)) for hook in hooks)\n else:\n name_len = 0\n\n cols = name_len + 3 + len(NO_FILES) + 1 + len(SKIPPED)\n return max(cols, 80)\n\n\ndef _all_filenames(args):\n if args.origin and args.source:\n return git.get_changed_files(args.origin, args.source)\n elif args.hook_stage == 'commit-msg':\n return (args.commit_msg_filename,)\n elif args.files:\n return args.files\n elif args.all_files:\n return git.get_all_files()\n elif git.is_in_merge_conflict():\n return git.get_conflicted_files()\n else:\n return git.get_staged_files()\n\n\ndef _run_hooks(config, repo_hooks, args, environ):\n \"\"\"Actually run the hooks.\"\"\"\n skips = _get_skips(environ)\n cols = _compute_cols([hook for _, hook in repo_hooks], args.verbose)\n filenames = _all_filenames(args)\n filenames = _filter_by_include_exclude(filenames, '', config['exclude'])\n retval = 0\n for repo, hook in repo_hooks:\n retval |= _run_single_hook(filenames, hook, repo, args, skips, cols)\n if retval and config['fail_fast']:\n break\n if (\n retval and\n args.show_diff_on_failure and\n subprocess.call(('git', 'diff', '--quiet', '--no-ext-diff')) != 0\n ):\n output.write_line('All changes made by hooks:')\n subprocess.call(('git', '--no-pager', 'diff', '--no-ext-diff'))\n return retval\n\n\ndef _has_unmerged_paths():\n _, stdout, _ = cmd_output('git', 'ls-files', '--unmerged')\n return bool(stdout.strip())\n\n\ndef _has_unstaged_config(runner):\n retcode, _, _ = cmd_output(\n 'git', 'diff', '--no-ext-diff', '--exit-code', runner.config_file_path,\n retcode=None,\n )\n # be explicit, other git errors don't mean it has an unstaged config.\n return retcode == 1\n\n\ndef run(runner, store, args, environ=os.environ):\n no_stash = args.all_files or bool(args.files)\n\n # Check if we have unresolved merge conflict files and fail fast.\n if _has_unmerged_paths():\n logger.error('Unmerged files. Resolve before committing.')\n return 1\n if bool(args.source) != bool(args.origin):\n logger.error('Specify both --origin and --source.')\n return 1\n if _has_unstaged_config(runner) and not no_stash:\n logger.error(\n 'Your pre-commit configuration is unstaged.\\n'\n '`git add {}` to fix this.'.format(runner.config_file),\n )\n return 1\n\n # Expose origin / source as environment variables for hooks to consume\n if args.origin and args.source:\n environ['PRE_COMMIT_ORIGIN'] = args.origin\n environ['PRE_COMMIT_SOURCE'] = args.source\n\n if no_stash:\n ctx = noop_context()\n else:\n ctx = staged_files_only(store.directory)\n\n with ctx:\n repo_hooks = []\n for repo in repositories(runner.config, store):\n for _, hook in repo.hooks:\n if (\n (not args.hook or hook['id'] == args.hook) and\n (not hook['stages'] or args.hook_stage in hook['stages'])\n ):\n repo_hooks.append((repo, hook))\n\n if args.hook and not repo_hooks:\n output.write_line('No hook with id `{}`'.format(args.hook))\n return 1\n\n for repo in {repo for repo, _ in repo_hooks}:\n repo.require_installed()\n\n return _run_hooks(runner.config, repo_hooks, args, environ)\n", "path": "pre_commit/commands/run.py"}]}
| 3,191 | 130 |
gh_patches_debug_24988
|
rasdani/github-patches
|
git_diff
|
dotkom__onlineweb4-1681
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deleting a Careeropportunity in the dashboard does not actually delete
When trying to delete a career opportunity in the dashboard, it does not actually delete it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/careeropportunity/dashboard/views.py`
Content:
```
1 # -*- encoding: utf-8 -*-
2 from django.contrib import messages
3 from django.contrib.auth.decorators import login_required
4 from django.core.exceptions import PermissionDenied
5 from django.shortcuts import get_object_or_404, redirect, render
6 from django.utils import timezone
7 from guardian.decorators import permission_required
8
9 from apps.careeropportunity.forms import AddCareerOpportunityForm
10 from apps.careeropportunity.models import CareerOpportunity
11 from apps.dashboard.tools import get_base_context, has_access
12
13
14 @login_required
15 @permission_required('careeropportunity.view_careeropportunity', return_403=True)
16 def index(request):
17
18 if not has_access(request):
19 raise PermissionDenied
20
21 context = get_base_context(request)
22
23 # "cops" is short for "careeropportunities" which is a fucking long word
24 # "cop" is short for "careeropportunity" which also is a fucking long word
25 cops = CareerOpportunity.objects.all()
26 context['cops'] = cops.filter(end__gte=timezone.now()).order_by('end')
27 context['archive'] = cops.filter(end__lte=timezone.now()).order_by('-id')
28
29 return render(request, 'careeropportunity/dashboard/index.html', context)
30
31
32 @login_required
33 @permission_required('careeropportunity.change_careeropportunity', return_403=True)
34 def detail(request, opportunity_id=None):
35
36 if not has_access(request):
37 raise PermissionDenied
38
39 context = get_base_context(request)
40 cop = None
41 if opportunity_id:
42 cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)
43 context['cop'] = cop
44 context['form'] = AddCareerOpportunityForm(instance=cop)
45 else:
46 context['form'] = AddCareerOpportunityForm()
47
48 if request.method == 'POST':
49 if cop:
50 form = AddCareerOpportunityForm(data=request.POST, instance=cop)
51 else:
52 form = AddCareerOpportunityForm(data=request.POST)
53
54 if form.is_valid():
55 form.save()
56 messages.success(request, 'La til ny karrieremulighet')
57 return redirect(index)
58 else:
59 context['form'] = form
60 messages.error(request,
61 'Skjemaet ble ikke korrekt utfylt. Se etter markerte felter for å se hva som gikk galt.')
62
63 return render(request, 'careeropportunity/dashboard/detail.html', context)
64
65
66 @login_required
67 @permission_required('careeropportunity.change_careeropportunity', return_403=True)
68 def delete(request, opportunity_id=None):
69 if not has_access(request):
70 raise PermissionDenied
71
72 cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)
73 cop.delete()
74 messages.success(request, 'Slettet karrieremuligheten')
75 return redirect(index)
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/apps/careeropportunity/dashboard/views.py b/apps/careeropportunity/dashboard/views.py
--- a/apps/careeropportunity/dashboard/views.py
+++ b/apps/careeropportunity/dashboard/views.py
@@ -1,4 +1,6 @@
# -*- encoding: utf-8 -*-
+import logging
+
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
@@ -32,6 +34,8 @@
@login_required
@permission_required('careeropportunity.change_careeropportunity', return_403=True)
def detail(request, opportunity_id=None):
+ logger = logging.getLogger(__name__)
+ logger.debug('Editing careeropportunity with id: %s' % (opportunity_id))
if not has_access(request):
raise PermissionDenied
@@ -66,6 +70,8 @@
@login_required
@permission_required('careeropportunity.change_careeropportunity', return_403=True)
def delete(request, opportunity_id=None):
+ logger = logging.getLogger(__name__)
+ logger.debug('Deleting careeropportunitywith id: %s' % (opportunity_id))
if not has_access(request):
raise PermissionDenied
|
{"golden_diff": "diff --git a/apps/careeropportunity/dashboard/views.py b/apps/careeropportunity/dashboard/views.py\n--- a/apps/careeropportunity/dashboard/views.py\n+++ b/apps/careeropportunity/dashboard/views.py\n@@ -1,4 +1,6 @@\n # -*- encoding: utf-8 -*-\n+import logging\n+\n from django.contrib import messages\n from django.contrib.auth.decorators import login_required\n from django.core.exceptions import PermissionDenied\n@@ -32,6 +34,8 @@\n @login_required\n @permission_required('careeropportunity.change_careeropportunity', return_403=True)\n def detail(request, opportunity_id=None):\n+ logger = logging.getLogger(__name__)\n+ logger.debug('Editing careeropportunity with id: %s' % (opportunity_id))\n \n if not has_access(request):\n raise PermissionDenied\n@@ -66,6 +70,8 @@\n @login_required\n @permission_required('careeropportunity.change_careeropportunity', return_403=True)\n def delete(request, opportunity_id=None):\n+ logger = logging.getLogger(__name__)\n+ logger.debug('Deleting careeropportunitywith id: %s' % (opportunity_id))\n if not has_access(request):\n raise PermissionDenied\n", "issue": "Deleting a Careeropportunity in the dashboard does not actually delete\nWhen trying to delete a career opportunity in the dashboard, it does not actually delete it.\n\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils import timezone\nfrom guardian.decorators import permission_required\n\nfrom apps.careeropportunity.forms import AddCareerOpportunityForm\nfrom apps.careeropportunity.models import CareerOpportunity\nfrom apps.dashboard.tools import get_base_context, has_access\n\n\n@login_required\n@permission_required('careeropportunity.view_careeropportunity', return_403=True)\ndef index(request):\n\n if not has_access(request):\n raise PermissionDenied\n\n context = get_base_context(request)\n\n # \"cops\" is short for \"careeropportunities\" which is a fucking long word\n # \"cop\" is short for \"careeropportunity\" which also is a fucking long word\n cops = CareerOpportunity.objects.all()\n context['cops'] = cops.filter(end__gte=timezone.now()).order_by('end')\n context['archive'] = cops.filter(end__lte=timezone.now()).order_by('-id')\n\n return render(request, 'careeropportunity/dashboard/index.html', context)\n\n\n@login_required\n@permission_required('careeropportunity.change_careeropportunity', return_403=True)\ndef detail(request, opportunity_id=None):\n\n if not has_access(request):\n raise PermissionDenied\n\n context = get_base_context(request)\n cop = None\n if opportunity_id:\n cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)\n context['cop'] = cop\n context['form'] = AddCareerOpportunityForm(instance=cop)\n else:\n context['form'] = AddCareerOpportunityForm()\n\n if request.method == 'POST':\n if cop:\n form = AddCareerOpportunityForm(data=request.POST, instance=cop)\n else:\n form = AddCareerOpportunityForm(data=request.POST)\n\n if form.is_valid():\n form.save()\n messages.success(request, 'La til ny karrieremulighet')\n return redirect(index)\n else:\n context['form'] = form\n messages.error(request,\n 'Skjemaet ble ikke korrekt utfylt. 
Se etter markerte felter for \u00e5 se hva som gikk galt.')\n\n return render(request, 'careeropportunity/dashboard/detail.html', context)\n\n\n@login_required\n@permission_required('careeropportunity.change_careeropportunity', return_403=True)\ndef delete(request, opportunity_id=None):\n if not has_access(request):\n raise PermissionDenied\n\n cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)\n cop.delete()\n messages.success(request, 'Slettet karrieremuligheten')\n return redirect(index)\n", "path": "apps/careeropportunity/dashboard/views.py"}], "after_files": [{"content": "# -*- encoding: utf-8 -*-\nimport logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils import timezone\nfrom guardian.decorators import permission_required\n\nfrom apps.careeropportunity.forms import AddCareerOpportunityForm\nfrom apps.careeropportunity.models import CareerOpportunity\nfrom apps.dashboard.tools import get_base_context, has_access\n\n\n@login_required\n@permission_required('careeropportunity.view_careeropportunity', return_403=True)\ndef index(request):\n\n if not has_access(request):\n raise PermissionDenied\n\n context = get_base_context(request)\n\n # \"cops\" is short for \"careeropportunities\" which is a fucking long word\n # \"cop\" is short for \"careeropportunity\" which also is a fucking long word\n cops = CareerOpportunity.objects.all()\n context['cops'] = cops.filter(end__gte=timezone.now()).order_by('end')\n context['archive'] = cops.filter(end__lte=timezone.now()).order_by('-id')\n\n return render(request, 'careeropportunity/dashboard/index.html', context)\n\n\n@login_required\n@permission_required('careeropportunity.change_careeropportunity', return_403=True)\ndef detail(request, opportunity_id=None):\n logger = logging.getLogger(__name__)\n logger.debug('Editing careeropportunity with id: %s' % (opportunity_id))\n\n if not has_access(request):\n raise PermissionDenied\n\n context = get_base_context(request)\n cop = None\n if opportunity_id:\n cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)\n context['cop'] = cop\n context['form'] = AddCareerOpportunityForm(instance=cop)\n else:\n context['form'] = AddCareerOpportunityForm()\n\n if request.method == 'POST':\n if cop:\n form = AddCareerOpportunityForm(data=request.POST, instance=cop)\n else:\n form = AddCareerOpportunityForm(data=request.POST)\n\n if form.is_valid():\n form.save()\n messages.success(request, 'La til ny karrieremulighet')\n return redirect(index)\n else:\n context['form'] = form\n messages.error(request,\n 'Skjemaet ble ikke korrekt utfylt. Se etter markerte felter for \u00e5 se hva som gikk galt.')\n\n return render(request, 'careeropportunity/dashboard/detail.html', context)\n\n\n@login_required\n@permission_required('careeropportunity.change_careeropportunity', return_403=True)\ndef delete(request, opportunity_id=None):\n logger = logging.getLogger(__name__)\n logger.debug('Deleting careeropportunitywith id: %s' % (opportunity_id))\n if not has_access(request):\n raise PermissionDenied\n\n cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)\n cop.delete()\n messages.success(request, 'Slettet karrieremuligheten')\n return redirect(index)\n", "path": "apps/careeropportunity/dashboard/views.py"}]}
| 1,060 | 269 |
gh_patches_debug_13184
|
rasdani/github-patches
|
git_diff
|
huggingface__huggingface_hub-1218
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: 'NoneType' object has no attribute 'split'
### Describe the bug
When I try to log into my account with the token via `huggingface-cli login`, I get this error:
```
Exception in thread Thread-1 (_readerthread):
Traceback (most recent call last):
File "C:\Users\mikwee\AppData\Local\Programs\Python\Python310\lib\threading.py", line 1009, in _bootstrap_inner
self.run()
File "C:\Users\mikwee\AppData\Local\Programs\Python\Python310\lib\threading.py", line 946, in run
self._target(*self._args, **self._kwargs)
File "C:\Users\mikwee\AppData\Local\Programs\Python\Python310\lib\subprocess.py", line 1494, in _readerthread
buffer.append(fh.read())
File "C:\Users\mikwee\AppData\Local\Programs\Python\Python310\lib\codecs.py", line 322, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xee in position 387: invalid continuation byte
Traceback (most recent call last):
File "C:\Users\mikwee\AppData\Local\Programs\Python\Python310\lib\runpy.py", line 196, in _run_module_as_main
return _run_code(code, main_globals, None,
File "C:\Users\mikwee\AppData\Local\Programs\Python\Python310\lib\runpy.py", line 86, in _run_code
exec(code, run_globals)
File "C:\Users\mikwee\AppData\Local\Programs\Python\Python310\Scripts\huggingface-cli.exe\__main__.py", line 7, in <module>
File "C:\Users\mikwee\AppData\Local\Programs\Python\Python310\lib\site-packages\huggingface_hub\commands\huggingface_cli.py", line 47, in main
service.run()
File "C:\Users\mikwee\AppData\Local\Programs\Python\Python310\lib\site-packages\huggingface_hub\commands\user.py", line 117, in run
login()
File "C:\Users\mikwee\AppData\Local\Programs\Python\Python310\lib\site-packages\huggingface_hub\_login.py", line 91, in login
interpreter_login()
File "C:\Users\mikwee\AppData\Local\Programs\Python\Python310\lib\site-packages\huggingface_hub\_login.py", line 137, in interpreter_login
_login(token=token, add_to_git_credential=add_to_git_credential)
File "C:\Users\mikwee\AppData\Local\Programs\Python\Python310\lib\site-packages\huggingface_hub\_login.py", line 231, in _login
if _is_git_credential_helper_configured():
File "C:\Users\mikwee\AppData\Local\Programs\Python\Python310\lib\site-packages\huggingface_hub\_login.py", line 251, in _is_git_credential_helper_configured
helpers = list_credential_helpers()
File "C:\Users\mikwee\AppData\Local\Programs\Python\Python310\lib\site-packages\huggingface_hub\utils\_git_credential.py", line 44, in list_credential_helpers
for line in output.split("\n")
AttributeError: 'NoneType' object has no attribute 'split'
```
Also, when using `huggingface-cli env`, I got this error. I dunno if it's related, but here it is anyway:
```
Exception in thread Thread-1 (_readerthread):
Traceback (most recent call last):
File "C:\Users\mikwee\AppData\Local\Programs\Python\Python310\lib\threading.py", line 1009, in _bootstrap_inner
self.run()
File "C:\Users\mikwee\AppData\Local\Programs\Python\Python310\lib\threading.py", line 946, in run
self._target(*self._args, **self._kwargs)
File "C:\Users\mikwee\AppData\Local\Programs\Python\Python310\lib\subprocess.py", line 1494, in _readerthread
buffer.append(fh.read())
File "C:\Users\mikwee\AppData\Local\Programs\Python\Python310\lib\codecs.py", line 322, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xee in position 387: invalid continuation byte
```
### Reproduction
The commands I've used:
```
git clone https://github.com/harishanand95/diffusers.git
cd diffusers && git checkout dml && pip install -e .
pip install transformers ftfy scipy
pip install ort_nightly_directml-1.13.0.dev20220901005-cp310-cp310-win_amd64.whl
cd ./diffusers/examples/inference
huggingface-cli login
```
### Logs
_No response_
### System info
```shell
- huggingface_hub version: 0.11.0
- Platform: Windows-10-10.0.19044-SP0
- Python version: 3.10.4
- Running in iPython ?: No
- Running in notebook ?: No
- Running in Google Colab ?: No
- Token path ?: C:\Users\mikwee\.huggingface\token
- Has saved token ?: False
- FastAI: N/A
- Tensorflow: N/A
- Torch: 1.13.0
- Jinja2: N/A
- Graphviz: N/A
- Pydot: N/A
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/huggingface_hub/utils/_subprocess.py`
Content:
```
1 #!/usr/bin/env python
2 # coding=utf-8
3 # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License
16 """Contains utilities to easily handle subprocesses in `huggingface_hub`."""
17 import os
18 import subprocess
19 from contextlib import contextmanager
20 from pathlib import Path
21 from typing import IO, Generator, List, Optional, Tuple, Union
22
23 from .logging import get_logger
24
25
26 logger = get_logger(__name__)
27
28
29 def run_subprocess(
30 command: Union[str, List[str]],
31 folder: Optional[Union[str, Path]] = None,
32 check=True,
33 **kwargs,
34 ) -> subprocess.CompletedProcess:
35 """
36 Method to run subprocesses. Calling this will capture the `stderr` and `stdout`,
37 please call `subprocess.run` manually in case you would like for them not to
38 be captured.
39
40 Args:
41 command (`str` or `List[str]`):
42 The command to execute as a string or list of strings.
43 folder (`str`, *optional*):
44 The folder in which to run the command. Defaults to current working
45 directory (from `os.getcwd()`).
46 check (`bool`, *optional*, defaults to `True`):
47 Setting `check` to `True` will raise a `subprocess.CalledProcessError`
48 when the subprocess has a non-zero exit code.
49 kwargs (`Dict[str]`):
50 Keyword arguments to be passed to the `subprocess.run` underlying command.
51
52 Returns:
53 `subprocess.CompletedProcess`: The completed process.
54 """
55 if isinstance(command, str):
56 command = command.split()
57
58 if isinstance(folder, Path):
59 folder = str(folder)
60
61 return subprocess.run(
62 command,
63 stderr=subprocess.PIPE,
64 stdout=subprocess.PIPE,
65 check=check,
66 encoding="utf-8",
67 cwd=folder or os.getcwd(),
68 **kwargs,
69 )
70
71
72 @contextmanager
73 def run_interactive_subprocess(
74 command: Union[str, List[str]],
75 folder: Optional[Union[str, Path]] = None,
76 **kwargs,
77 ) -> Generator[Tuple[IO[str], IO[str]], None, None]:
78 """Run a subprocess in an interactive mode in a context manager.
79
80 Args:
81 command (`str` or `List[str]`):
82 The command to execute as a string or list of strings.
83 folder (`str`, *optional*):
84 The folder in which to run the command. Defaults to current working
85 directory (from `os.getcwd()`).
86 kwargs (`Dict[str]`):
87 Keyword arguments to be passed to the `subprocess.run` underlying command.
88
89 Returns:
90 `Tuple[IO[str], IO[str]]`: A tuple with `stdin` and `stdout` to interact
91 with the process (input and output are utf-8 encoded).
92
93 Example:
94 ```python
95 with _interactive_subprocess("git credential-store get") as (stdin, stdout):
96 # Write to stdin
97 stdin.write("url=hf.co\nusername=obama\n".encode("utf-8"))
98 stdin.flush()
99
100 # Read from stdout
101 output = stdout.read().decode("utf-8")
102 ```
103 """
104 if isinstance(command, str):
105 command = command.split()
106
107 with subprocess.Popen(
108 command,
109 stdin=subprocess.PIPE,
110 stdout=subprocess.PIPE,
111 stderr=subprocess.STDOUT,
112 encoding="utf-8",
113 cwd=folder or os.getcwd(),
114 **kwargs,
115 ) as process:
116 assert process.stdin is not None, "subprocess is opened as subprocess.PIPE"
117 assert process.stdout is not None, "subprocess is opened as subprocess.PIPE"
118 yield process.stdin, process.stdout
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/huggingface_hub/utils/_subprocess.py b/src/huggingface_hub/utils/_subprocess.py
--- a/src/huggingface_hub/utils/_subprocess.py
+++ b/src/huggingface_hub/utils/_subprocess.py
@@ -64,6 +64,7 @@
stdout=subprocess.PIPE,
check=check,
encoding="utf-8",
+ errors="replace", # if not utf-8, replace char by �
cwd=folder or os.getcwd(),
**kwargs,
)
@@ -110,6 +111,7 @@
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding="utf-8",
+ errors="replace", # if not utf-8, replace char by �
cwd=folder or os.getcwd(),
**kwargs,
) as process:
|
{"golden_diff": "diff --git a/src/huggingface_hub/utils/_subprocess.py b/src/huggingface_hub/utils/_subprocess.py\n--- a/src/huggingface_hub/utils/_subprocess.py\n+++ b/src/huggingface_hub/utils/_subprocess.py\n@@ -64,6 +64,7 @@\n stdout=subprocess.PIPE,\n check=check,\n encoding=\"utf-8\",\n+ errors=\"replace\", # if not utf-8, replace char by \ufffd\n cwd=folder or os.getcwd(),\n **kwargs,\n )\n@@ -110,6 +111,7 @@\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n encoding=\"utf-8\",\n+ errors=\"replace\", # if not utf-8, replace char by \ufffd\n cwd=folder or os.getcwd(),\n **kwargs,\n ) as process:\n", "issue": "AttributeError: 'NoneType' object has no attribute 'split'\n### Describe the bug\r\n\r\nWhen I try to log into my account with the token via `huggingface-cli login`, I get this error:\r\n```\r\nException in thread Thread-1 (_readerthread):\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\mikwee\\AppData\\Local\\Programs\\Python\\Python310\\lib\\threading.py\", line 1009, in _bootstrap_inner\r\n self.run()\r\n File \"C:\\Users\\mikwee\\AppData\\Local\\Programs\\Python\\Python310\\lib\\threading.py\", line 946, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"C:\\Users\\mikwee\\AppData\\Local\\Programs\\Python\\Python310\\lib\\subprocess.py\", line 1494, in _readerthread\r\n buffer.append(fh.read())\r\n File \"C:\\Users\\mikwee\\AppData\\Local\\Programs\\Python\\Python310\\lib\\codecs.py\", line 322, in decode\r\n (result, consumed) = self._buffer_decode(data, self.errors, final)\r\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0xee in position 387: invalid continuation byte\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\mikwee\\AppData\\Local\\Programs\\Python\\Python310\\lib\\runpy.py\", line 196, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"C:\\Users\\mikwee\\AppData\\Local\\Programs\\Python\\Python310\\lib\\runpy.py\", line 86, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\mikwee\\AppData\\Local\\Programs\\Python\\Python310\\Scripts\\huggingface-cli.exe\\__main__.py\", line 7, in <module>\r\n File \"C:\\Users\\mikwee\\AppData\\Local\\Programs\\Python\\Python310\\lib\\site-packages\\huggingface_hub\\commands\\huggingface_cli.py\", line 47, in main\r\n service.run()\r\n File \"C:\\Users\\mikwee\\AppData\\Local\\Programs\\Python\\Python310\\lib\\site-packages\\huggingface_hub\\commands\\user.py\", line 117, in run\r\n login()\r\n File \"C:\\Users\\mikwee\\AppData\\Local\\Programs\\Python\\Python310\\lib\\site-packages\\huggingface_hub\\_login.py\", line 91, in login\r\n interpreter_login()\r\n File \"C:\\Users\\mikwee\\AppData\\Local\\Programs\\Python\\Python310\\lib\\site-packages\\huggingface_hub\\_login.py\", line 137, in interpreter_login\r\n _login(token=token, add_to_git_credential=add_to_git_credential)\r\n File \"C:\\Users\\mikwee\\AppData\\Local\\Programs\\Python\\Python310\\lib\\site-packages\\huggingface_hub\\_login.py\", line 231, in _login\r\n if _is_git_credential_helper_configured():\r\n File \"C:\\Users\\mikwee\\AppData\\Local\\Programs\\Python\\Python310\\lib\\site-packages\\huggingface_hub\\_login.py\", line 251, in _is_git_credential_helper_configured\r\n helpers = list_credential_helpers()\r\n File \"C:\\Users\\mikwee\\AppData\\Local\\Programs\\Python\\Python310\\lib\\site-packages\\huggingface_hub\\utils\\_git_credential.py\", line 44, in list_credential_helpers\r\n for line in output.split(\"\\n\")\r\nAttributeError: 'NoneType' object has no attribute 
'split'\r\n```\r\nAlso, when using `huggingface-cli env`, I got this error. I dunno if it's related, but here it is anyway:\r\n```\r\nException in thread Thread-1 (_readerthread):\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\mikwee\\AppData\\Local\\Programs\\Python\\Python310\\lib\\threading.py\", line 1009, in _bootstrap_inner\r\n self.run()\r\n File \"C:\\Users\\mikwee\\AppData\\Local\\Programs\\Python\\Python310\\lib\\threading.py\", line 946, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"C:\\Users\\mikwee\\AppData\\Local\\Programs\\Python\\Python310\\lib\\subprocess.py\", line 1494, in _readerthread\r\n buffer.append(fh.read())\r\n File \"C:\\Users\\mikwee\\AppData\\Local\\Programs\\Python\\Python310\\lib\\codecs.py\", line 322, in decode\r\n (result, consumed) = self._buffer_decode(data, self.errors, final)\r\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0xee in position 387: invalid continuation byte\r\n```\r\n\r\n### Reproduction\r\n\r\nThe commands I've used:\r\n```\r\ngit clone https://github.com/harishanand95/diffusers.git\r\ncd diffusers && git checkout dml && pip install -e .\r\npip install transformers ftfy scipy\r\npip install ort_nightly_directml-1.13.0.dev20220901005-cp310-cp310-win_amd64.whl\r\ncd ./diffusers/examples/inference\r\nhuggingface-cli login\r\n```\r\n\r\n### Logs\r\n\r\n_No response_\r\n\r\n### System info\r\n\r\n```shell\r\n- huggingface_hub version: 0.11.0\r\n- Platform: Windows-10-10.0.19044-SP0\r\n- Python version: 3.10.4\r\n- Running in iPython ?: No\r\n- Running in notebook ?: No\r\n- Running in Google Colab ?: No\r\n- Token path ?: C:\\Users\\mikwee\\.huggingface\\token\r\n- Has saved token ?: False\r\n- FastAI: N/A\r\n- Tensorflow: N/A\r\n- Torch: 1.13.0\r\n- Jinja2: N/A\r\n- Graphviz: N/A\r\n- Pydot: N/A\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# coding=utf-8\n# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\"\"\"Contains utilities to easily handle subprocesses in `huggingface_hub`.\"\"\"\nimport os\nimport subprocess\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import IO, Generator, List, Optional, Tuple, Union\n\nfrom .logging import get_logger\n\n\nlogger = get_logger(__name__)\n\n\ndef run_subprocess(\n command: Union[str, List[str]],\n folder: Optional[Union[str, Path]] = None,\n check=True,\n **kwargs,\n) -> subprocess.CompletedProcess:\n \"\"\"\n Method to run subprocesses. Calling this will capture the `stderr` and `stdout`,\n please call `subprocess.run` manually in case you would like for them not to\n be captured.\n\n Args:\n command (`str` or `List[str]`):\n The command to execute as a string or list of strings.\n folder (`str`, *optional*):\n The folder in which to run the command. 
Defaults to current working\n directory (from `os.getcwd()`).\n check (`bool`, *optional*, defaults to `True`):\n Setting `check` to `True` will raise a `subprocess.CalledProcessError`\n when the subprocess has a non-zero exit code.\n kwargs (`Dict[str]`):\n Keyword arguments to be passed to the `subprocess.run` underlying command.\n\n Returns:\n `subprocess.CompletedProcess`: The completed process.\n \"\"\"\n if isinstance(command, str):\n command = command.split()\n\n if isinstance(folder, Path):\n folder = str(folder)\n\n return subprocess.run(\n command,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n check=check,\n encoding=\"utf-8\",\n cwd=folder or os.getcwd(),\n **kwargs,\n )\n\n\n@contextmanager\ndef run_interactive_subprocess(\n command: Union[str, List[str]],\n folder: Optional[Union[str, Path]] = None,\n **kwargs,\n) -> Generator[Tuple[IO[str], IO[str]], None, None]:\n \"\"\"Run a subprocess in an interactive mode in a context manager.\n\n Args:\n command (`str` or `List[str]`):\n The command to execute as a string or list of strings.\n folder (`str`, *optional*):\n The folder in which to run the command. Defaults to current working\n directory (from `os.getcwd()`).\n kwargs (`Dict[str]`):\n Keyword arguments to be passed to the `subprocess.run` underlying command.\n\n Returns:\n `Tuple[IO[str], IO[str]]`: A tuple with `stdin` and `stdout` to interact\n with the process (input and output are utf-8 encoded).\n\n Example:\n ```python\n with _interactive_subprocess(\"git credential-store get\") as (stdin, stdout):\n # Write to stdin\n stdin.write(\"url=hf.co\\nusername=obama\\n\".encode(\"utf-8\"))\n stdin.flush()\n\n # Read from stdout\n output = stdout.read().decode(\"utf-8\")\n ```\n \"\"\"\n if isinstance(command, str):\n command = command.split()\n\n with subprocess.Popen(\n command,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n encoding=\"utf-8\",\n cwd=folder or os.getcwd(),\n **kwargs,\n ) as process:\n assert process.stdin is not None, \"subprocess is opened as subprocess.PIPE\"\n assert process.stdout is not None, \"subprocess is opened as subprocess.PIPE\"\n yield process.stdin, process.stdout\n", "path": "src/huggingface_hub/utils/_subprocess.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# coding=utf-8\n# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\"\"\"Contains utilities to easily handle subprocesses in `huggingface_hub`.\"\"\"\nimport os\nimport subprocess\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import IO, Generator, List, Optional, Tuple, Union\n\nfrom .logging import get_logger\n\n\nlogger = get_logger(__name__)\n\n\ndef run_subprocess(\n command: Union[str, List[str]],\n folder: Optional[Union[str, Path]] = None,\n check=True,\n **kwargs,\n) -> subprocess.CompletedProcess:\n \"\"\"\n Method to run subprocesses. 
Calling this will capture the `stderr` and `stdout`,\n please call `subprocess.run` manually in case you would like for them not to\n be captured.\n\n Args:\n command (`str` or `List[str]`):\n The command to execute as a string or list of strings.\n folder (`str`, *optional*):\n The folder in which to run the command. Defaults to current working\n directory (from `os.getcwd()`).\n check (`bool`, *optional*, defaults to `True`):\n Setting `check` to `True` will raise a `subprocess.CalledProcessError`\n when the subprocess has a non-zero exit code.\n kwargs (`Dict[str]`):\n Keyword arguments to be passed to the `subprocess.run` underlying command.\n\n Returns:\n `subprocess.CompletedProcess`: The completed process.\n \"\"\"\n if isinstance(command, str):\n command = command.split()\n\n if isinstance(folder, Path):\n folder = str(folder)\n\n return subprocess.run(\n command,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n check=check,\n encoding=\"utf-8\",\n errors=\"replace\", # if not utf-8, replace char by \ufffd\n cwd=folder or os.getcwd(),\n **kwargs,\n )\n\n\n@contextmanager\ndef run_interactive_subprocess(\n command: Union[str, List[str]],\n folder: Optional[Union[str, Path]] = None,\n **kwargs,\n) -> Generator[Tuple[IO[str], IO[str]], None, None]:\n \"\"\"Run a subprocess in an interactive mode in a context manager.\n\n Args:\n command (`str` or `List[str]`):\n The command to execute as a string or list of strings.\n folder (`str`, *optional*):\n The folder in which to run the command. Defaults to current working\n directory (from `os.getcwd()`).\n kwargs (`Dict[str]`):\n Keyword arguments to be passed to the `subprocess.run` underlying command.\n\n Returns:\n `Tuple[IO[str], IO[str]]`: A tuple with `stdin` and `stdout` to interact\n with the process (input and output are utf-8 encoded).\n\n Example:\n ```python\n with _interactive_subprocess(\"git credential-store get\") as (stdin, stdout):\n # Write to stdin\n stdin.write(\"url=hf.co\\nusername=obama\\n\".encode(\"utf-8\"))\n stdin.flush()\n\n # Read from stdout\n output = stdout.read().decode(\"utf-8\")\n ```\n \"\"\"\n if isinstance(command, str):\n command = command.split()\n\n with subprocess.Popen(\n command,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n encoding=\"utf-8\",\n errors=\"replace\", # if not utf-8, replace char by \ufffd\n cwd=folder or os.getcwd(),\n **kwargs,\n ) as process:\n assert process.stdin is not None, \"subprocess is opened as subprocess.PIPE\"\n assert process.stdout is not None, \"subprocess is opened as subprocess.PIPE\"\n yield process.stdin, process.stdout\n", "path": "src/huggingface_hub/utils/_subprocess.py"}]}
| 2,861 | 183 |
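A minimal, standalone sketch of the decoding behaviour the `errors="replace"` patch in the record above addresses, using only the Python standard library; the child command here is purely illustrative and just emits one byte that is not valid UTF-8, the way a git command can on a machine with a non-UTF-8 locale:

```python
import subprocess
import sys

# Child process that writes output containing the invalid byte 0xEE,
# mimicking the traceback in the issue ("invalid continuation byte").
child = [
    sys.executable,
    "-c",
    "import sys; sys.stdout.buffer.write(b'credential.helper=manager\\xee\\n')",
]

# With encoding="utf-8" alone, decoding this output fails with
# UnicodeDecodeError (on Windows it surfaces from subprocess's reader
# thread, as in the reported traceback). errors="replace" keeps the run
# alive and substitutes U+FFFD for the undecodable byte.
result = subprocess.run(
    child,
    stdout=subprocess.PIPE,
    encoding="utf-8",
    errors="replace",
    check=True,
)
print(result.stdout)  # credential.helper=manager followed by the replacement char
```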
gh_patches_debug_5191
|
rasdani/github-patches
|
git_diff
|
nf-core__tools-1333
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Space missing in tip message for --fix files_unchanged
<!--
# nf-core/tools bug report
Hi there!
Thanks for telling us about a problem with the nf-core/tools package.
Please delete this text and anything that's not relevant from the template below:
-->
## Description of the bug
a space is missing before `--fix files_unchanged`
```
Tip: Some of these linting errors can automatically be resolved with the
following command:
nf-core lint --dir /home/runner/work/rnavar/rnavar--fix files_unchanged
```
## Steps to reproduce
https://github.com/nf-core/rnavar/runs/4317868056?check_suite_focus=true#step:6:100
## Expected behaviour
<!-- A clear and concise description of what you expected to happen. -->
## System
- Hardware: <!-- [e.g. HPC, Desktop, Cloud...] -->
- Executor: <!-- [e.g. slurm, local, awsbatch...] -->
- OS: <!-- [e.g. CentOS Linux, macOS, Linux Mint...] -->
- Version of nf-core/tools: <!-- [e.g. 1.1, 1.5, 1.8.2...] -->
- Python version: <!-- [e.g. 3.7, 3.8...] -->
## Nextflow Installation
- Version: <!-- [e.g. 19.10.0] -->
## Additional context
<!-- Add any other context about the problem here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nf_core/lint_utils.py`
Content:
```
1 import rich
2 from rich.console import Console
3 from rich.table import Table
4 import logging
5
6 import nf_core.utils
7
8 log = logging.getLogger(__name__)
9
10 # Create a console used by all lint tests
11 console = Console(force_terminal=nf_core.utils.rich_force_colors())
12
13
14 def print_joint_summary(lint_obj, module_lint_obj):
15 """Print a joint summary of the general pipe lint tests and the module lint tests"""
16 nbr_passed = len(lint_obj.passed) + len(module_lint_obj.passed)
17 nbr_ignored = len(lint_obj.ignored)
18 nbr_fixed = len(lint_obj.fixed)
19 nbr_warned = len(lint_obj.warned) + len(module_lint_obj.warned)
20 nbr_failed = len(lint_obj.failed) + len(module_lint_obj.failed)
21
22 def _s(some_length):
23 return "" if some_length == 1 else "s"
24
25 summary_colour = "red" if nbr_failed > 0 else "green"
26 table = Table(box=rich.box.ROUNDED, style=summary_colour)
27 table.add_column(f"LINT RESULTS SUMMARY".format(nbr_passed), no_wrap=True)
28 table.add_row(r"[green][✔] {:>3} Test{} Passed".format(nbr_passed, _s(nbr_passed)))
29 if nbr_fixed:
30 table.add_row(r"[bright blue][?] {:>3} Test{} Fixed".format(nbr_fixed, _s(nbr_fixed)))
31 table.add_row(r"[grey58][?] {:>3} Test{} Ignored".format(nbr_ignored, _s(nbr_ignored)))
32 table.add_row(r"[yellow][!] {:>3} Test Warning{}".format(nbr_warned, _s(nbr_warned)))
33 table.add_row(r"[red][✗] {:>3} Test{} Failed".format(nbr_failed, _s(nbr_failed)))
34 console.print(table)
35
36
37 def print_fixes(lint_obj, module_lint_obj):
38 """Prints available and applied fixes"""
39
40 if len(lint_obj.could_fix):
41 fix_cmd = "nf-core lint {}--fix {}".format(
42 "" if lint_obj.wf_path == "." else f"--dir {lint_obj.wf_path}", " --fix ".join(lint_obj.could_fix)
43 )
44 console.print(
45 f"\nTip: Some of these linting errors can automatically be resolved with the following command:\n\n[blue] {fix_cmd}\n"
46 )
47 if len(lint_obj.fix):
48 console.print(
49 "Automatic fixes applied. Please check with 'git diff' and revert any changes you do not want with 'git checkout <file>'."
50 )
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nf_core/lint_utils.py b/nf_core/lint_utils.py
--- a/nf_core/lint_utils.py
+++ b/nf_core/lint_utils.py
@@ -38,7 +38,7 @@
"""Prints available and applied fixes"""
if len(lint_obj.could_fix):
- fix_cmd = "nf-core lint {}--fix {}".format(
+ fix_cmd = "nf-core lint {} --fix {}".format(
"" if lint_obj.wf_path == "." else f"--dir {lint_obj.wf_path}", " --fix ".join(lint_obj.could_fix)
)
console.print(
|
{"golden_diff": "diff --git a/nf_core/lint_utils.py b/nf_core/lint_utils.py\n--- a/nf_core/lint_utils.py\n+++ b/nf_core/lint_utils.py\n@@ -38,7 +38,7 @@\n \"\"\"Prints available and applied fixes\"\"\"\n \n if len(lint_obj.could_fix):\n- fix_cmd = \"nf-core lint {}--fix {}\".format(\n+ fix_cmd = \"nf-core lint {} --fix {}\".format(\n \"\" if lint_obj.wf_path == \".\" else f\"--dir {lint_obj.wf_path}\", \" --fix \".join(lint_obj.could_fix)\n )\n console.print(\n", "issue": "Space missing in tip message for --fix files_unchanged\n<!--\r\n# nf-core/tools bug report\r\n\r\nHi there!\r\n\r\nThanks for telling us about a problem with the nf-core/tools package.\r\nPlease delete this text and anything that's not relevant from the template below:\r\n-->\r\n\r\n## Description of the bug\r\n\r\na space is missing before `--fix files_unchanged`\r\n\r\n```\r\nTip: Some of these linting errors can automatically be resolved with the \r\nfollowing command:\r\n\r\n nf-core lint --dir /home/runner/work/rnavar/rnavar--fix files_unchanged\r\n```\r\n\r\n## Steps to reproduce\r\n\r\nhttps://github.com/nf-core/rnavar/runs/4317868056?check_suite_focus=true#step:6:100\r\n\r\n## Expected behaviour\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n## System\r\n\r\n- Hardware: <!-- [e.g. HPC, Desktop, Cloud...] -->\r\n- Executor: <!-- [e.g. slurm, local, awsbatch...] -->\r\n- OS: <!-- [e.g. CentOS Linux, macOS, Linux Mint...] -->\r\n- Version of nf-core/tools: <!-- [e.g. 1.1, 1.5, 1.8.2...] -->\r\n- Python version: <!-- [e.g. 3.7, 3.8...] -->\r\n\r\n## Nextflow Installation\r\n\r\n- Version: <!-- [e.g. 19.10.0] -->\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "import rich\nfrom rich.console import Console\nfrom rich.table import Table\nimport logging\n\nimport nf_core.utils\n\nlog = logging.getLogger(__name__)\n\n# Create a console used by all lint tests\nconsole = Console(force_terminal=nf_core.utils.rich_force_colors())\n\n\ndef print_joint_summary(lint_obj, module_lint_obj):\n \"\"\"Print a joint summary of the general pipe lint tests and the module lint tests\"\"\"\n nbr_passed = len(lint_obj.passed) + len(module_lint_obj.passed)\n nbr_ignored = len(lint_obj.ignored)\n nbr_fixed = len(lint_obj.fixed)\n nbr_warned = len(lint_obj.warned) + len(module_lint_obj.warned)\n nbr_failed = len(lint_obj.failed) + len(module_lint_obj.failed)\n\n def _s(some_length):\n return \"\" if some_length == 1 else \"s\"\n\n summary_colour = \"red\" if nbr_failed > 0 else \"green\"\n table = Table(box=rich.box.ROUNDED, style=summary_colour)\n table.add_column(f\"LINT RESULTS SUMMARY\".format(nbr_passed), no_wrap=True)\n table.add_row(r\"[green][\u2714] {:>3} Test{} Passed\".format(nbr_passed, _s(nbr_passed)))\n if nbr_fixed:\n table.add_row(r\"[bright blue][?] {:>3} Test{} Fixed\".format(nbr_fixed, _s(nbr_fixed)))\n table.add_row(r\"[grey58][?] {:>3} Test{} Ignored\".format(nbr_ignored, _s(nbr_ignored)))\n table.add_row(r\"[yellow][!] 
{:>3} Test Warning{}\".format(nbr_warned, _s(nbr_warned)))\n table.add_row(r\"[red][\u2717] {:>3} Test{} Failed\".format(nbr_failed, _s(nbr_failed)))\n console.print(table)\n\n\ndef print_fixes(lint_obj, module_lint_obj):\n \"\"\"Prints available and applied fixes\"\"\"\n\n if len(lint_obj.could_fix):\n fix_cmd = \"nf-core lint {}--fix {}\".format(\n \"\" if lint_obj.wf_path == \".\" else f\"--dir {lint_obj.wf_path}\", \" --fix \".join(lint_obj.could_fix)\n )\n console.print(\n f\"\\nTip: Some of these linting errors can automatically be resolved with the following command:\\n\\n[blue] {fix_cmd}\\n\"\n )\n if len(lint_obj.fix):\n console.print(\n \"Automatic fixes applied. Please check with 'git diff' and revert any changes you do not want with 'git checkout <file>'.\"\n )\n", "path": "nf_core/lint_utils.py"}], "after_files": [{"content": "import rich\nfrom rich.console import Console\nfrom rich.table import Table\nimport logging\n\nimport nf_core.utils\n\nlog = logging.getLogger(__name__)\n\n# Create a console used by all lint tests\nconsole = Console(force_terminal=nf_core.utils.rich_force_colors())\n\n\ndef print_joint_summary(lint_obj, module_lint_obj):\n \"\"\"Print a joint summary of the general pipe lint tests and the module lint tests\"\"\"\n nbr_passed = len(lint_obj.passed) + len(module_lint_obj.passed)\n nbr_ignored = len(lint_obj.ignored)\n nbr_fixed = len(lint_obj.fixed)\n nbr_warned = len(lint_obj.warned) + len(module_lint_obj.warned)\n nbr_failed = len(lint_obj.failed) + len(module_lint_obj.failed)\n\n def _s(some_length):\n return \"\" if some_length == 1 else \"s\"\n\n summary_colour = \"red\" if nbr_failed > 0 else \"green\"\n table = Table(box=rich.box.ROUNDED, style=summary_colour)\n table.add_column(f\"LINT RESULTS SUMMARY\".format(nbr_passed), no_wrap=True)\n table.add_row(r\"[green][\u2714] {:>3} Test{} Passed\".format(nbr_passed, _s(nbr_passed)))\n if nbr_fixed:\n table.add_row(r\"[bright blue][?] {:>3} Test{} Fixed\".format(nbr_fixed, _s(nbr_fixed)))\n table.add_row(r\"[grey58][?] {:>3} Test{} Ignored\".format(nbr_ignored, _s(nbr_ignored)))\n table.add_row(r\"[yellow][!] {:>3} Test Warning{}\".format(nbr_warned, _s(nbr_warned)))\n table.add_row(r\"[red][\u2717] {:>3} Test{} Failed\".format(nbr_failed, _s(nbr_failed)))\n console.print(table)\n\n\ndef print_fixes(lint_obj, module_lint_obj):\n \"\"\"Prints available and applied fixes\"\"\"\n\n if len(lint_obj.could_fix):\n fix_cmd = \"nf-core lint {} --fix {}\".format(\n \"\" if lint_obj.wf_path == \".\" else f\"--dir {lint_obj.wf_path}\", \" --fix \".join(lint_obj.could_fix)\n )\n console.print(\n f\"\\nTip: Some of these linting errors can automatically be resolved with the following command:\\n\\n[blue] {fix_cmd}\\n\"\n )\n if len(lint_obj.fix):\n console.print(\n \"Automatic fixes applied. Please check with 'git diff' and revert any changes you do not want with 'git checkout <file>'.\"\n )\n", "path": "nf_core/lint_utils.py"}]}
| 1,252 | 141 |
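A quick plain-Python illustration of why the one-character change in the diff above matters; the path and test name are taken from the issue report, and the two templates are copied verbatim from the old and the patched code:

```python
wf_path = "/home/runner/work/rnavar/rnavar"
could_fix = ["files_unchanged"]

dir_part = "" if wf_path == "." else f"--dir {wf_path}"
fix_list = " --fix ".join(could_fix)

broken = "nf-core lint {}--fix {}".format(dir_part, fix_list)   # old template
fixed = "nf-core lint {} --fix {}".format(dir_part, fix_list)   # patched template

print(broken)  # nf-core lint --dir /home/runner/work/rnavar/rnavar--fix files_unchanged
print(fixed)   # nf-core lint --dir /home/runner/work/rnavar/rnavar --fix files_unchanged
```

When the pipeline is linted from its own directory (`wf_path == "."`), the patched template prints a doubled space; that is cosmetic, and evidently acceptable compared with the fused path shown in the issue.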
gh_patches_debug_15169
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-1171
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pending import csv lines displayed under "Successful" title until tried
Importing a CSV into Bookwyrm shows titles being "successfully imported" but they do not show up in the library.
Here's screenshots of the import results, neither the successful nor the failed imports seem to show up:
[two screenshots of the import results omitted]
[Attached is the file which I attempted to import.](https://github.com/bookwyrm-social/bookwyrm/files/6523421/Tomat0.s.Library.csv)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/views/import_data.py`
Content:
```
1 """ import books from another app """
2 from io import TextIOWrapper
3
4 from django.contrib.auth.decorators import login_required
5 from django.core.exceptions import PermissionDenied
6 from django.http import HttpResponseBadRequest
7 from django.shortcuts import get_object_or_404, redirect
8 from django.template.response import TemplateResponse
9 from django.utils.decorators import method_decorator
10 from django.utils.translation import gettext_lazy as _
11 from django.views import View
12
13 from bookwyrm import forms, models
14 from bookwyrm.importers import (
15 Importer,
16 LibrarythingImporter,
17 GoodreadsImporter,
18 StorygraphImporter,
19 )
20 from bookwyrm.tasks import app
21
22 # pylint: disable= no-self-use
23 @method_decorator(login_required, name="dispatch")
24 class Import(View):
25 """import view"""
26
27 def get(self, request):
28 """load import page"""
29 return TemplateResponse(
30 request,
31 "import.html",
32 {
33 "import_form": forms.ImportForm(),
34 "jobs": models.ImportJob.objects.filter(user=request.user).order_by(
35 "-created_date"
36 ),
37 },
38 )
39
40 def post(self, request):
41 """ingest a goodreads csv"""
42 form = forms.ImportForm(request.POST, request.FILES)
43 if form.is_valid():
44 include_reviews = request.POST.get("include_reviews") == "on"
45 privacy = request.POST.get("privacy")
46 source = request.POST.get("source")
47
48 importer = None
49 if source == "LibraryThing":
50 importer = LibrarythingImporter()
51 elif source == "Storygraph":
52 importer = StorygraphImporter()
53 else:
54 # Default : GoodReads
55 importer = GoodreadsImporter()
56
57 try:
58 job = importer.create_job(
59 request.user,
60 TextIOWrapper(
61 request.FILES["csv_file"], encoding=importer.encoding
62 ),
63 include_reviews,
64 privacy,
65 )
66 except (UnicodeDecodeError, ValueError, KeyError):
67 return HttpResponseBadRequest(_("Not a valid csv file"))
68
69 importer.start_import(job)
70
71 return redirect("/import/%d" % job.id)
72 return HttpResponseBadRequest()
73
74
75 @method_decorator(login_required, name="dispatch")
76 class ImportStatus(View):
77 """status of an existing import"""
78
79 def get(self, request, job_id):
80 """status of an import job"""
81 job = models.ImportJob.objects.get(id=job_id)
82 if job.user != request.user:
83 raise PermissionDenied
84 try:
85 task = app.AsyncResult(job.task_id)
86 except ValueError:
87 task = None
88 items = job.items.order_by("index").all()
89 failed_items = [i for i in items if i.fail_reason]
90 items = [i for i in items if not i.fail_reason]
91 return TemplateResponse(
92 request,
93 "import_status.html",
94 {"job": job, "items": items, "failed_items": failed_items, "task": task},
95 )
96
97 def post(self, request, job_id):
98 """retry lines from an import"""
99 job = get_object_or_404(models.ImportJob, id=job_id)
100 items = []
101 for item in request.POST.getlist("import_item"):
102 items.append(get_object_or_404(models.ImportItem, id=item))
103
104 importer = Importer()
105 job = importer.create_retry_job(
106 request.user,
107 job,
108 items,
109 )
110 importer.start_import(job)
111 return redirect("/import/%d" % job.id)
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bookwyrm/views/import_data.py b/bookwyrm/views/import_data.py
--- a/bookwyrm/views/import_data.py
+++ b/bookwyrm/views/import_data.py
@@ -78,13 +78,15 @@
def get(self, request, job_id):
"""status of an import job"""
- job = models.ImportJob.objects.get(id=job_id)
+ job = get_object_or_404(models.ImportJob, id=job_id)
if job.user != request.user:
raise PermissionDenied
+
try:
task = app.AsyncResult(job.task_id)
except ValueError:
task = None
+
items = job.items.order_by("index").all()
failed_items = [i for i in items if i.fail_reason]
items = [i for i in items if not i.fail_reason]
|
{"golden_diff": "diff --git a/bookwyrm/views/import_data.py b/bookwyrm/views/import_data.py\n--- a/bookwyrm/views/import_data.py\n+++ b/bookwyrm/views/import_data.py\n@@ -78,13 +78,15 @@\n \n def get(self, request, job_id):\n \"\"\"status of an import job\"\"\"\n- job = models.ImportJob.objects.get(id=job_id)\n+ job = get_object_or_404(models.ImportJob, id=job_id)\n if job.user != request.user:\n raise PermissionDenied\n+\n try:\n task = app.AsyncResult(job.task_id)\n except ValueError:\n task = None\n+\n items = job.items.order_by(\"index\").all()\n failed_items = [i for i in items if i.fail_reason]\n items = [i for i in items if not i.fail_reason]\n", "issue": "Pending import csv lines displayed under \"Successful\" title until tried\nImporting a CSV into Bookwyrm shows titles being \"successfully imported\" but they do not show up in the library.\r\n\r\nHere's screenshots of the import results, neither the successful nor the failed imports seem to show up:\r\n\r\n\r\n\r\n\r\n[Attached is the file which I attempted to import.](https://github.com/bookwyrm-social/bookwyrm/files/6523421/Tomat0.s.Library.csv)\r\n\r\n\n", "before_files": [{"content": "\"\"\" import books from another app \"\"\"\nfrom io import TextIOWrapper\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.importers import (\n Importer,\n LibrarythingImporter,\n GoodreadsImporter,\n StorygraphImporter,\n)\nfrom bookwyrm.tasks import app\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass Import(View):\n \"\"\"import view\"\"\"\n\n def get(self, request):\n \"\"\"load import page\"\"\"\n return TemplateResponse(\n request,\n \"import.html\",\n {\n \"import_form\": forms.ImportForm(),\n \"jobs\": models.ImportJob.objects.filter(user=request.user).order_by(\n \"-created_date\"\n ),\n },\n )\n\n def post(self, request):\n \"\"\"ingest a goodreads csv\"\"\"\n form = forms.ImportForm(request.POST, request.FILES)\n if form.is_valid():\n include_reviews = request.POST.get(\"include_reviews\") == \"on\"\n privacy = request.POST.get(\"privacy\")\n source = request.POST.get(\"source\")\n\n importer = None\n if source == \"LibraryThing\":\n importer = LibrarythingImporter()\n elif source == \"Storygraph\":\n importer = StorygraphImporter()\n else:\n # Default : GoodReads\n importer = GoodreadsImporter()\n\n try:\n job = importer.create_job(\n request.user,\n TextIOWrapper(\n request.FILES[\"csv_file\"], encoding=importer.encoding\n ),\n include_reviews,\n privacy,\n )\n except (UnicodeDecodeError, ValueError, KeyError):\n return HttpResponseBadRequest(_(\"Not a valid csv file\"))\n\n importer.start_import(job)\n\n return redirect(\"/import/%d\" % job.id)\n return HttpResponseBadRequest()\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass ImportStatus(View):\n \"\"\"status of an existing import\"\"\"\n\n def get(self, request, job_id):\n \"\"\"status of an import job\"\"\"\n job = models.ImportJob.objects.get(id=job_id)\n if job.user != request.user:\n raise PermissionDenied\n try:\n task = app.AsyncResult(job.task_id)\n except ValueError:\n task = None\n items = 
job.items.order_by(\"index\").all()\n failed_items = [i for i in items if i.fail_reason]\n items = [i for i in items if not i.fail_reason]\n return TemplateResponse(\n request,\n \"import_status.html\",\n {\"job\": job, \"items\": items, \"failed_items\": failed_items, \"task\": task},\n )\n\n def post(self, request, job_id):\n \"\"\"retry lines from an import\"\"\"\n job = get_object_or_404(models.ImportJob, id=job_id)\n items = []\n for item in request.POST.getlist(\"import_item\"):\n items.append(get_object_or_404(models.ImportItem, id=item))\n\n importer = Importer()\n job = importer.create_retry_job(\n request.user,\n job,\n items,\n )\n importer.start_import(job)\n return redirect(\"/import/%d\" % job.id)\n", "path": "bookwyrm/views/import_data.py"}], "after_files": [{"content": "\"\"\" import books from another app \"\"\"\nfrom io import TextIOWrapper\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.importers import (\n Importer,\n LibrarythingImporter,\n GoodreadsImporter,\n StorygraphImporter,\n)\nfrom bookwyrm.tasks import app\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass Import(View):\n \"\"\"import view\"\"\"\n\n def get(self, request):\n \"\"\"load import page\"\"\"\n return TemplateResponse(\n request,\n \"import.html\",\n {\n \"import_form\": forms.ImportForm(),\n \"jobs\": models.ImportJob.objects.filter(user=request.user).order_by(\n \"-created_date\"\n ),\n },\n )\n\n def post(self, request):\n \"\"\"ingest a goodreads csv\"\"\"\n form = forms.ImportForm(request.POST, request.FILES)\n if form.is_valid():\n include_reviews = request.POST.get(\"include_reviews\") == \"on\"\n privacy = request.POST.get(\"privacy\")\n source = request.POST.get(\"source\")\n\n importer = None\n if source == \"LibraryThing\":\n importer = LibrarythingImporter()\n elif source == \"Storygraph\":\n importer = StorygraphImporter()\n else:\n # Default : GoodReads\n importer = GoodreadsImporter()\n\n try:\n job = importer.create_job(\n request.user,\n TextIOWrapper(\n request.FILES[\"csv_file\"], encoding=importer.encoding\n ),\n include_reviews,\n privacy,\n )\n except (UnicodeDecodeError, ValueError, KeyError):\n return HttpResponseBadRequest(_(\"Not a valid csv file\"))\n\n importer.start_import(job)\n\n return redirect(\"/import/%d\" % job.id)\n return HttpResponseBadRequest()\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass ImportStatus(View):\n \"\"\"status of an existing import\"\"\"\n\n def get(self, request, job_id):\n \"\"\"status of an import job\"\"\"\n job = get_object_or_404(models.ImportJob, id=job_id)\n if job.user != request.user:\n raise PermissionDenied\n\n try:\n task = app.AsyncResult(job.task_id)\n except ValueError:\n task = None\n\n items = job.items.order_by(\"index\").all()\n failed_items = [i for i in items if i.fail_reason]\n items = [i for i in items if not i.fail_reason]\n return TemplateResponse(\n request,\n \"import_status.html\",\n {\"job\": job, \"items\": items, \"failed_items\": failed_items, \"task\": task},\n )\n\n def post(self, request, job_id):\n \"\"\"retry lines from an 
import\"\"\"\n job = get_object_or_404(models.ImportJob, id=job_id)\n items = []\n for item in request.POST.getlist(\"import_item\"):\n items.append(get_object_or_404(models.ImportItem, id=item))\n\n importer = Importer()\n job = importer.create_retry_job(\n request.user,\n job,\n items,\n )\n importer.start_import(job)\n return redirect(\"/import/%d\" % job.id)\n", "path": "bookwyrm/views/import_data.py"}]}
| 1,347 | 186 |
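For context on the `get_object_or_404` swap in the record above, here is a dependency-free analogue of that Django helper (the dict and class names are invented for illustration; the real helper lives in `django.shortcuts` and raises `django.http.Http404`):

```python
class Http404(Exception):
    """Stand-in for django.http.Http404."""


# Toy replacement for the ImportJob table.
JOBS = {1: {"id": 1, "user": "mouse"}}


def get_object_or_404(table, object_id):
    """Return the row, or raise Http404 instead of a bare KeyError."""
    try:
        return table[object_id]
    except KeyError:
        raise Http404(f"no object with id {object_id}") from None


print(get_object_or_404(JOBS, 1))   # {'id': 1, 'user': 'mouse'}
try:
    get_object_or_404(JOBS, 99)
except Http404 as err:
    print("rendered as a 404 page:", err)
```

In the view itself the same idea means a request for a deleted or mistyped job id produces a normal 404 response rather than an unhandled `ImportJob.DoesNotExist` (an HTTP 500).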
gh_patches_debug_16337
|
rasdani/github-patches
|
git_diff
|
ansible__ansible-31514
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[rpm_key] When no key is installed module fail to install any new key
<!---
Verify first that your issue/request is not already reported on GitHub.
Also test if the latest release, and master branch are affected too.
-->
##### ISSUE TYPE
<!--- Pick one below and delete the rest: -->
- Bug Report
##### COMPONENT NAME
<!--- Name of the module/plugin/task/feature -->
rpm_key
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes below -->
```
ansible 2.4.0.0
config file = /etc/ansible/ansible.cfg
configured module search path = [u'/home/lbednar/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
ansible python module location = /home/lbednar/work/kubevirt-org/kubevirt-ansible/E/lib/python2.7/site-packages/ansible
executable location = /home/lbednar/work/kubevirt-org/kubevirt-ansible/E/bin/ansible
python version = 2.7.13 (default, May 10 2017, 20:04:28) [GCC 6.3.1 20161221 (Red Hat 6.3.1-1)]
```
##### CONFIGURATION
<!---
If using Ansible 2.4 or above, paste the results of "ansible-config dump --only-changed"
Otherwise, mention any settings you have changed/added/removed in ansible.cfg
(or using the ANSIBLE_* environment variables).
-->
```
DEFAULT_ROLES_PATH(env: ANSIBLE_ROLES_PATH) = [u'/home/lbednar/work/kubevirt-org/kubevirt-ansible/galaxy-roles']
```
##### OS / ENVIRONMENT
<!---
Mention the OS you are running Ansible from, and the OS you are
managing, or say "N/A" for anything that is not platform-specific.
Also mention the specific version of what you are trying to control,
e.g. if this is a network bug the version of firmware on the network device.
-->
Target system CentOS 7.3
##### SUMMARY
<!--- Explain the problem briefly -->
When running `rpm_key` to add new key on system which doesn't have any keys installed yet (I want to install first key), then module fails to add new key during execution following line:
https://github.com/ansible/ansible/blob/e609618274db6a7e3c273abde457f53de8c9976c/lib/ansible/modules/packaging/os/rpm_key.py#L173
the command fails with:
```
$ /usr/bin/rpm -q gpg-pubkey --qf "%{description}"
package gpg-pubkey is not installed
```
and then following command in shell pipe fails on
```
$ /usr/bin/gpg --no-tty --batch --with-colons --fixed-list-mode -
gpg: no valid OpenPGP data found.
gpg: processing message failed: Unknown system error
```
At this point module stops execution and fail.
##### STEPS TO REPRODUCE
<!---
For bugs, show exactly how to reproduce the problem, using a minimal test-case.
For new features, show how the feature would be used.
-->
**You need to make sure that you don't have any RPM KEY installed yet.**
This issue is reproducible only in case of when you are adding first rpm key.
<!--- Paste example playbooks or commands between quotes below -->
```yaml
- name: import rpm keys
rpm_key:
state: present
key: "{{ item }}"
with_items:
- "https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg"
```
<!--- You can also paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- What did you expect to happen when running the steps above? -->
I would like to get rpm keys imported.
##### ACTUAL RESULTS
<!--- What actually happened? If possible run with extra verbosity (-vvvv) -->
No key is added and getting following output instead.
<!--- Paste verbatim command output between quotes below -->
```
failed: [vm-69-15.qa.lab.tlv.redhat.com] (item=https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg) => {"changed": false, "failed": true, "item": "https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg", "msg": "gpg: no valid OpenPGP data found.\ngpg: processing message failed: Unknown system error\n"}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/ansible/modules/packaging/os/rpm_key.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # Ansible module to import third party repo keys to your rpm db
5 # (c) 2013, Héctor Acosta <[email protected]>
6 #
7 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
8
9 from __future__ import absolute_import, division, print_function
10 __metaclass__ = type
11
12
13 ANSIBLE_METADATA = {'metadata_version': '1.1',
14 'status': ['preview'],
15 'supported_by': 'core'}
16
17
18 DOCUMENTATION = '''
19 ---
20 module: rpm_key
21 author: "Hector Acosta (@hacosta) <[email protected]>"
22 short_description: Adds or removes a gpg key from the rpm db
23 description:
24 - Adds or removes (rpm --import) a gpg key to your rpm database.
25 version_added: "1.3"
26 options:
27 key:
28 required: true
29 default: null
30 aliases: []
31 description:
32 - Key that will be modified. Can be a url, a file, or a keyid if the key already exists in the database.
33 state:
34 required: false
35 default: "present"
36 choices: [present, absent]
37 description:
38 - If the key will be imported or removed from the rpm db.
39 validate_certs:
40 description:
41 - If C(no) and the C(key) is a url starting with https, SSL certificates will not be validated. This should only be used
42 on personally controlled sites using self-signed certificates.
43 required: false
44 default: 'yes'
45 choices: ['yes', 'no']
46
47 '''
48
49 EXAMPLES = '''
50 # Example action to import a key from a url
51 - rpm_key:
52 state: present
53 key: http://apt.sw.be/RPM-GPG-KEY.dag.txt
54
55 # Example action to import a key from a file
56 - rpm_key:
57 state: present
58 key: /path/to/key.gpg
59
60 # Example action to ensure a key is not present in the db
61 - rpm_key:
62 state: absent
63 key: DEADB33F
64 '''
65 import re
66 import os.path
67 import tempfile
68
69 # import module snippets
70 from ansible.module_utils.basic import AnsibleModule
71 from ansible.module_utils.urls import fetch_url
72 from ansible.module_utils._text import to_native
73
74
75 def is_pubkey(string):
76 """Verifies if string is a pubkey"""
77 pgp_regex = ".*?(-----BEGIN PGP PUBLIC KEY BLOCK-----.*?-----END PGP PUBLIC KEY BLOCK-----).*"
78 return bool(re.match(pgp_regex, to_native(string, errors='surrogate_or_strict'), re.DOTALL))
79
80
81 class RpmKey(object):
82
83 def __init__(self, module):
84 # If the key is a url, we need to check if it's present to be idempotent,
85 # to do that, we need to check the keyid, which we can get from the armor.
86 keyfile = None
87 should_cleanup_keyfile = False
88 self.module = module
89 self.rpm = self.module.get_bin_path('rpm', True)
90 state = module.params['state']
91 key = module.params['key']
92
93 self.gpg = self.module.get_bin_path('gpg')
94 if not self.gpg:
95 self.gpg = self.module.get_bin_path('gpg2',required=True)
96
97 if '://' in key:
98 keyfile = self.fetch_key(key)
99 keyid = self.getkeyid(keyfile)
100 should_cleanup_keyfile = True
101 elif self.is_keyid(key):
102 keyid = key
103 elif os.path.isfile(key):
104 keyfile = key
105 keyid = self.getkeyid(keyfile)
106 else:
107 self.module.fail_json(msg="Not a valid key %s" % key)
108 keyid = self.normalize_keyid(keyid)
109
110 if state == 'present':
111 if self.is_key_imported(keyid):
112 module.exit_json(changed=False)
113 else:
114 if not keyfile:
115 self.module.fail_json(msg="When importing a key, a valid file must be given")
116 self.import_key(keyfile)
117 if should_cleanup_keyfile:
118 self.module.cleanup(keyfile)
119 module.exit_json(changed=True)
120 else:
121 if self.is_key_imported(keyid):
122 self.drop_key(keyid)
123 module.exit_json(changed=True)
124 else:
125 module.exit_json(changed=False)
126
127 def fetch_key(self, url):
128 """Downloads a key from url, returns a valid path to a gpg key"""
129 rsp, info = fetch_url(self.module, url)
130 if info['status'] != 200:
131 self.module.fail_json(msg="failed to fetch key at %s , error was: %s" % (url, info['msg']))
132
133 key = rsp.read()
134 if not is_pubkey(key):
135 self.module.fail_json(msg="Not a public key: %s" % url)
136 tmpfd, tmpname = tempfile.mkstemp()
137 self.module.add_cleanup_file(tmpname)
138 tmpfile = os.fdopen(tmpfd, "w+b")
139 tmpfile.write(key)
140 tmpfile.close()
141 return tmpname
142
143 def normalize_keyid(self, keyid):
144 """Ensure a keyid doesn't have a leading 0x, has leading or trailing whitespace, and make sure is uppercase"""
145 ret = keyid.strip().upper()
146 if ret.startswith('0x'):
147 return ret[2:]
148 elif ret.startswith('0X'):
149 return ret[2:]
150 else:
151 return ret
152
153 def getkeyid(self, keyfile):
154 stdout, stderr = self.execute_command([self.gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', keyfile])
155 for line in stdout.splitlines():
156 line = line.strip()
157 if line.startswith('pub:'):
158 return line.split(':')[4]
159
160 self.module.fail_json(msg="Unexpected gpg output")
161
162 def is_keyid(self, keystr):
163 """Verifies if a key, as provided by the user is a keyid"""
164 return re.match('(0x)?[0-9a-f]{8}', keystr, flags=re.IGNORECASE)
165
166 def execute_command(self, cmd):
167 rc, stdout, stderr = self.module.run_command(cmd, use_unsafe_shell=True)
168 if rc != 0:
169 self.module.fail_json(msg=stderr)
170 return stdout, stderr
171
172 def is_key_imported(self, keyid):
173 cmd=self.rpm + ' -q gpg-pubkey --qf "%{description}" | ' + self.gpg + ' --no-tty --batch --with-colons --fixed-list-mode -'
174 stdout, stderr = self.execute_command(cmd)
175 for line in stdout.splitlines():
176 if keyid in line.split(':')[4]:
177 return True
178 return False
179
180 def import_key(self, keyfile):
181 if not self.module.check_mode:
182 self.execute_command([self.rpm, '--import', keyfile])
183
184 def drop_key(self, keyid):
185 if not self.module.check_mode:
186 self.execute_command([self.rpm, '--erase', '--allmatches', "gpg-pubkey-%s" % keyid[-8:].lower()])
187
188
189 def main():
190 module = AnsibleModule(
191 argument_spec = dict(
192 state=dict(default='present', choices=['present', 'absent'], type='str'),
193 key=dict(required=True, type='str'),
194 validate_certs=dict(default='yes', type='bool'),
195 ),
196 supports_check_mode=True
197 )
198
199 RpmKey(module)
200
201
202 if __name__ == '__main__':
203 main()
204
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/ansible/modules/packaging/os/rpm_key.py b/lib/ansible/modules/packaging/os/rpm_key.py
--- a/lib/ansible/modules/packaging/os/rpm_key.py
+++ b/lib/ansible/modules/packaging/os/rpm_key.py
@@ -170,11 +170,15 @@
return stdout, stderr
def is_key_imported(self, keyid):
- cmd=self.rpm + ' -q gpg-pubkey --qf "%{description}" | ' + self.gpg + ' --no-tty --batch --with-colons --fixed-list-mode -'
+ cmd = self.rpm + ' -q gpg-pubkey'
+ rc, stdout, stderr = self.module.run_command(cmd)
+ if rc != 0: # No key is installed on system
+ return False
+ cmd += ' --qf "%{description}" | ' + self.gpg + ' --no-tty --batch --with-colons --fixed-list-mode -'
stdout, stderr = self.execute_command(cmd)
for line in stdout.splitlines():
if keyid in line.split(':')[4]:
- return True
+ return True
return False
def import_key(self, keyfile):
|
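A sketch of the guard this golden diff introduces, pulled out as a standalone function; the binary paths and the key-id handling are illustrative, it assumes a host that actually has `rpm` and `gpg` installed, and it is not meant as a drop-in replacement for the module:

```python
import subprocess


def is_key_imported(keyid, rpm="/usr/bin/rpm", gpg="/usr/bin/gpg"):
    # Probe first: on a machine with no gpg keys in the rpm db,
    # "rpm -q gpg-pubkey" exits non-zero with
    # "package gpg-pubkey is not installed", so there is nothing to pipe.
    probe = subprocess.run([rpm, "-q", "gpg-pubkey"], capture_output=True)
    if probe.returncode != 0:
        return False

    # Only now is it safe to feed the key descriptions into gpg.
    listing = subprocess.run(
        f'{rpm} -q gpg-pubkey --qf "%{{description}}" | '
        f"{gpg} --no-tty --batch --with-colons --fixed-list-mode -",
        shell=True,
        capture_output=True,
        text=True,
    )
    return any(
        keyid in line.split(":")[4]
        for line in listing.stdout.splitlines()
        if line.count(":") >= 4
    )
```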
{"golden_diff": "diff --git a/lib/ansible/modules/packaging/os/rpm_key.py b/lib/ansible/modules/packaging/os/rpm_key.py\n--- a/lib/ansible/modules/packaging/os/rpm_key.py\n+++ b/lib/ansible/modules/packaging/os/rpm_key.py\n@@ -170,11 +170,15 @@\n return stdout, stderr\n \n def is_key_imported(self, keyid):\n- cmd=self.rpm + ' -q gpg-pubkey --qf \"%{description}\" | ' + self.gpg + ' --no-tty --batch --with-colons --fixed-list-mode -'\n+ cmd = self.rpm + ' -q gpg-pubkey'\n+ rc, stdout, stderr = self.module.run_command(cmd)\n+ if rc != 0: # No key is installed on system\n+ return False\n+ cmd += ' --qf \"%{description}\" | ' + self.gpg + ' --no-tty --batch --with-colons --fixed-list-mode -'\n stdout, stderr = self.execute_command(cmd)\n for line in stdout.splitlines():\n if keyid in line.split(':')[4]:\n- return True\n+ return True\n return False\n \n def import_key(self, keyfile):\n", "issue": "[rpm_key] When no key is installed module fail to install any new key\n<!---\r\nVerify first that your issue/request is not already reported on GitHub.\r\nAlso test if the latest release, and master branch are affected too.\r\n-->\r\n\r\n##### ISSUE TYPE\r\n<!--- Pick one below and delete the rest: -->\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\n<!--- Name of the module/plugin/task/feature -->\r\nrpm_key\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste verbatim output from \"ansible --version\" between quotes below -->\r\n```\r\nansible 2.4.0.0\r\n config file = /etc/ansible/ansible.cfg\r\n configured module search path = [u'/home/lbednar/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']\r\n ansible python module location = /home/lbednar/work/kubevirt-org/kubevirt-ansible/E/lib/python2.7/site-packages/ansible\r\n executable location = /home/lbednar/work/kubevirt-org/kubevirt-ansible/E/bin/ansible\r\n python version = 2.7.13 (default, May 10 2017, 20:04:28) [GCC 6.3.1 20161221 (Red Hat 6.3.1-1)]\r\n```\r\n\r\n##### CONFIGURATION\r\n<!---\r\nIf using Ansible 2.4 or above, paste the results of \"ansible-config dump --only-changed\"\r\n\r\nOtherwise, mention any settings you have changed/added/removed in ansible.cfg\r\n(or using the ANSIBLE_* environment variables).\r\n\r\n-->\r\n```\r\nDEFAULT_ROLES_PATH(env: ANSIBLE_ROLES_PATH) = [u'/home/lbednar/work/kubevirt-org/kubevirt-ansible/galaxy-roles']\r\n```\r\n\r\n##### OS / ENVIRONMENT\r\n<!---\r\nMention the OS you are running Ansible from, and the OS you are\r\nmanaging, or say \"N/A\" for anything that is not platform-specific.\r\nAlso mention the specific version of what you are trying to control,\r\ne.g. 
if this is a network bug the version of firmware on the network device.\r\n-->\r\nTarget system CentOS 7.3\r\n\r\n##### SUMMARY\r\n<!--- Explain the problem briefly -->\r\n\r\nWhen running `rpm_key` to add new key on system which doesn't have any keys installed yet (I want to install first key), then module fails to add new key during execution following line:\r\n\r\nhttps://github.com/ansible/ansible/blob/e609618274db6a7e3c273abde457f53de8c9976c/lib/ansible/modules/packaging/os/rpm_key.py#L173\r\n\r\nthe command fails with:\r\n```\r\n$ /usr/bin/rpm -q gpg-pubkey --qf \"%{description}\"\r\npackage gpg-pubkey is not installed\r\n```\r\nand then following command in shell pipe fails on\r\n```\r\n$ /usr/bin/gpg --no-tty --batch --with-colons --fixed-list-mode -\r\ngpg: no valid OpenPGP data found.\r\ngpg: processing message failed: Unknown system error\r\n```\r\nAt this point module stops execution and fail.\r\n\r\n##### STEPS TO REPRODUCE\r\n<!---\r\nFor bugs, show exactly how to reproduce the problem, using a minimal test-case.\r\nFor new features, show how the feature would be used.\r\n-->\r\n\r\n**You need to make sure that you don't have any RPM KEY installed yet.**\r\nThis issue is reproducible only in case of when you are adding first rpm key.\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n- name: import rpm keys\r\n rpm_key: \r\n state: present\r\n key: \"{{ item }}\"\r\n with_items:\r\n - \"https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg\"\r\n```\r\n\r\n<!--- You can also paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- What did you expect to happen when running the steps above? -->\r\n\r\nI would like to get rpm keys imported.\r\n\r\n##### ACTUAL RESULTS\r\n<!--- What actually happened? If possible run with extra verbosity (-vvvv) -->\r\n\r\nNo key is added and getting following output instead.\r\n\r\n<!--- Paste verbatim command output between quotes below -->\r\n```\r\nfailed: [vm-69-15.qa.lab.tlv.redhat.com] (item=https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg) => {\"changed\": false, \"failed\": true, \"item\": \"https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg\", \"msg\": \"gpg: no valid OpenPGP data found.\\ngpg: processing message failed: Unknown system error\\n\"}\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Ansible module to import third party repo keys to your rpm db\n# (c) 2013, H\u00e9ctor Acosta <[email protected]>\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'core'}\n\n\nDOCUMENTATION = '''\n---\nmodule: rpm_key\nauthor: \"Hector Acosta (@hacosta) <[email protected]>\"\nshort_description: Adds or removes a gpg key from the rpm db\ndescription:\n - Adds or removes (rpm --import) a gpg key to your rpm database.\nversion_added: \"1.3\"\noptions:\n key:\n required: true\n default: null\n aliases: []\n description:\n - Key that will be modified. 
Can be a url, a file, or a keyid if the key already exists in the database.\n state:\n required: false\n default: \"present\"\n choices: [present, absent]\n description:\n - If the key will be imported or removed from the rpm db.\n validate_certs:\n description:\n - If C(no) and the C(key) is a url starting with https, SSL certificates will not be validated. This should only be used\n on personally controlled sites using self-signed certificates.\n required: false\n default: 'yes'\n choices: ['yes', 'no']\n\n'''\n\nEXAMPLES = '''\n# Example action to import a key from a url\n- rpm_key:\n state: present\n key: http://apt.sw.be/RPM-GPG-KEY.dag.txt\n\n# Example action to import a key from a file\n- rpm_key:\n state: present\n key: /path/to/key.gpg\n\n# Example action to ensure a key is not present in the db\n- rpm_key:\n state: absent\n key: DEADB33F\n'''\nimport re\nimport os.path\nimport tempfile\n\n# import module snippets\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.urls import fetch_url\nfrom ansible.module_utils._text import to_native\n\n\ndef is_pubkey(string):\n \"\"\"Verifies if string is a pubkey\"\"\"\n pgp_regex = \".*?(-----BEGIN PGP PUBLIC KEY BLOCK-----.*?-----END PGP PUBLIC KEY BLOCK-----).*\"\n return bool(re.match(pgp_regex, to_native(string, errors='surrogate_or_strict'), re.DOTALL))\n\n\nclass RpmKey(object):\n\n def __init__(self, module):\n # If the key is a url, we need to check if it's present to be idempotent,\n # to do that, we need to check the keyid, which we can get from the armor.\n keyfile = None\n should_cleanup_keyfile = False\n self.module = module\n self.rpm = self.module.get_bin_path('rpm', True)\n state = module.params['state']\n key = module.params['key']\n\n self.gpg = self.module.get_bin_path('gpg')\n if not self.gpg:\n self.gpg = self.module.get_bin_path('gpg2',required=True)\n\n if '://' in key:\n keyfile = self.fetch_key(key)\n keyid = self.getkeyid(keyfile)\n should_cleanup_keyfile = True\n elif self.is_keyid(key):\n keyid = key\n elif os.path.isfile(key):\n keyfile = key\n keyid = self.getkeyid(keyfile)\n else:\n self.module.fail_json(msg=\"Not a valid key %s\" % key)\n keyid = self.normalize_keyid(keyid)\n\n if state == 'present':\n if self.is_key_imported(keyid):\n module.exit_json(changed=False)\n else:\n if not keyfile:\n self.module.fail_json(msg=\"When importing a key, a valid file must be given\")\n self.import_key(keyfile)\n if should_cleanup_keyfile:\n self.module.cleanup(keyfile)\n module.exit_json(changed=True)\n else:\n if self.is_key_imported(keyid):\n self.drop_key(keyid)\n module.exit_json(changed=True)\n else:\n module.exit_json(changed=False)\n\n def fetch_key(self, url):\n \"\"\"Downloads a key from url, returns a valid path to a gpg key\"\"\"\n rsp, info = fetch_url(self.module, url)\n if info['status'] != 200:\n self.module.fail_json(msg=\"failed to fetch key at %s , error was: %s\" % (url, info['msg']))\n\n key = rsp.read()\n if not is_pubkey(key):\n self.module.fail_json(msg=\"Not a public key: %s\" % url)\n tmpfd, tmpname = tempfile.mkstemp()\n self.module.add_cleanup_file(tmpname)\n tmpfile = os.fdopen(tmpfd, \"w+b\")\n tmpfile.write(key)\n tmpfile.close()\n return tmpname\n\n def normalize_keyid(self, keyid):\n \"\"\"Ensure a keyid doesn't have a leading 0x, has leading or trailing whitespace, and make sure is uppercase\"\"\"\n ret = keyid.strip().upper()\n if ret.startswith('0x'):\n return ret[2:]\n elif ret.startswith('0X'):\n return ret[2:]\n else:\n return ret\n\n def getkeyid(self, 
keyfile):\n stdout, stderr = self.execute_command([self.gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', keyfile])\n for line in stdout.splitlines():\n line = line.strip()\n if line.startswith('pub:'):\n return line.split(':')[4]\n\n self.module.fail_json(msg=\"Unexpected gpg output\")\n\n def is_keyid(self, keystr):\n \"\"\"Verifies if a key, as provided by the user is a keyid\"\"\"\n return re.match('(0x)?[0-9a-f]{8}', keystr, flags=re.IGNORECASE)\n\n def execute_command(self, cmd):\n rc, stdout, stderr = self.module.run_command(cmd, use_unsafe_shell=True)\n if rc != 0:\n self.module.fail_json(msg=stderr)\n return stdout, stderr\n\n def is_key_imported(self, keyid):\n cmd=self.rpm + ' -q gpg-pubkey --qf \"%{description}\" | ' + self.gpg + ' --no-tty --batch --with-colons --fixed-list-mode -'\n stdout, stderr = self.execute_command(cmd)\n for line in stdout.splitlines():\n if keyid in line.split(':')[4]:\n return True\n return False\n\n def import_key(self, keyfile):\n if not self.module.check_mode:\n self.execute_command([self.rpm, '--import', keyfile])\n\n def drop_key(self, keyid):\n if not self.module.check_mode:\n self.execute_command([self.rpm, '--erase', '--allmatches', \"gpg-pubkey-%s\" % keyid[-8:].lower()])\n\n\ndef main():\n module = AnsibleModule(\n argument_spec = dict(\n state=dict(default='present', choices=['present', 'absent'], type='str'),\n key=dict(required=True, type='str'),\n validate_certs=dict(default='yes', type='bool'),\n ),\n supports_check_mode=True\n )\n\n RpmKey(module)\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/ansible/modules/packaging/os/rpm_key.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Ansible module to import third party repo keys to your rpm db\n# (c) 2013, H\u00e9ctor Acosta <[email protected]>\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'core'}\n\n\nDOCUMENTATION = '''\n---\nmodule: rpm_key\nauthor: \"Hector Acosta (@hacosta) <[email protected]>\"\nshort_description: Adds or removes a gpg key from the rpm db\ndescription:\n - Adds or removes (rpm --import) a gpg key to your rpm database.\nversion_added: \"1.3\"\noptions:\n key:\n required: true\n default: null\n aliases: []\n description:\n - Key that will be modified. Can be a url, a file, or a keyid if the key already exists in the database.\n state:\n required: false\n default: \"present\"\n choices: [present, absent]\n description:\n - If the key will be imported or removed from the rpm db.\n validate_certs:\n description:\n - If C(no) and the C(key) is a url starting with https, SSL certificates will not be validated. 
This should only be used\n on personally controlled sites using self-signed certificates.\n required: false\n default: 'yes'\n choices: ['yes', 'no']\n\n'''\n\nEXAMPLES = '''\n# Example action to import a key from a url\n- rpm_key:\n state: present\n key: http://apt.sw.be/RPM-GPG-KEY.dag.txt\n\n# Example action to import a key from a file\n- rpm_key:\n state: present\n key: /path/to/key.gpg\n\n# Example action to ensure a key is not present in the db\n- rpm_key:\n state: absent\n key: DEADB33F\n'''\nimport re\nimport os.path\nimport tempfile\n\n# import module snippets\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.urls import fetch_url\nfrom ansible.module_utils._text import to_native\n\n\ndef is_pubkey(string):\n \"\"\"Verifies if string is a pubkey\"\"\"\n pgp_regex = \".*?(-----BEGIN PGP PUBLIC KEY BLOCK-----.*?-----END PGP PUBLIC KEY BLOCK-----).*\"\n return bool(re.match(pgp_regex, to_native(string, errors='surrogate_or_strict'), re.DOTALL))\n\n\nclass RpmKey(object):\n\n def __init__(self, module):\n # If the key is a url, we need to check if it's present to be idempotent,\n # to do that, we need to check the keyid, which we can get from the armor.\n keyfile = None\n should_cleanup_keyfile = False\n self.module = module\n self.rpm = self.module.get_bin_path('rpm', True)\n state = module.params['state']\n key = module.params['key']\n\n self.gpg = self.module.get_bin_path('gpg')\n if not self.gpg:\n self.gpg = self.module.get_bin_path('gpg2',required=True)\n\n if '://' in key:\n keyfile = self.fetch_key(key)\n keyid = self.getkeyid(keyfile)\n should_cleanup_keyfile = True\n elif self.is_keyid(key):\n keyid = key\n elif os.path.isfile(key):\n keyfile = key\n keyid = self.getkeyid(keyfile)\n else:\n self.module.fail_json(msg=\"Not a valid key %s\" % key)\n keyid = self.normalize_keyid(keyid)\n\n if state == 'present':\n if self.is_key_imported(keyid):\n module.exit_json(changed=False)\n else:\n if not keyfile:\n self.module.fail_json(msg=\"When importing a key, a valid file must be given\")\n self.import_key(keyfile)\n if should_cleanup_keyfile:\n self.module.cleanup(keyfile)\n module.exit_json(changed=True)\n else:\n if self.is_key_imported(keyid):\n self.drop_key(keyid)\n module.exit_json(changed=True)\n else:\n module.exit_json(changed=False)\n\n def fetch_key(self, url):\n \"\"\"Downloads a key from url, returns a valid path to a gpg key\"\"\"\n rsp, info = fetch_url(self.module, url)\n if info['status'] != 200:\n self.module.fail_json(msg=\"failed to fetch key at %s , error was: %s\" % (url, info['msg']))\n\n key = rsp.read()\n if not is_pubkey(key):\n self.module.fail_json(msg=\"Not a public key: %s\" % url)\n tmpfd, tmpname = tempfile.mkstemp()\n self.module.add_cleanup_file(tmpname)\n tmpfile = os.fdopen(tmpfd, \"w+b\")\n tmpfile.write(key)\n tmpfile.close()\n return tmpname\n\n def normalize_keyid(self, keyid):\n \"\"\"Ensure a keyid doesn't have a leading 0x, has leading or trailing whitespace, and make sure is uppercase\"\"\"\n ret = keyid.strip().upper()\n if ret.startswith('0x'):\n return ret[2:]\n elif ret.startswith('0X'):\n return ret[2:]\n else:\n return ret\n\n def getkeyid(self, keyfile):\n stdout, stderr = self.execute_command([self.gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', keyfile])\n for line in stdout.splitlines():\n line = line.strip()\n if line.startswith('pub:'):\n return line.split(':')[4]\n\n self.module.fail_json(msg=\"Unexpected gpg output\")\n\n def is_keyid(self, keystr):\n \"\"\"Verifies if a key, 
as provided by the user is a keyid\"\"\"\n return re.match('(0x)?[0-9a-f]{8}', keystr, flags=re.IGNORECASE)\n\n def execute_command(self, cmd):\n rc, stdout, stderr = self.module.run_command(cmd, use_unsafe_shell=True)\n if rc != 0:\n self.module.fail_json(msg=stderr)\n return stdout, stderr\n\n def is_key_imported(self, keyid):\n cmd = self.rpm + ' -q gpg-pubkey'\n rc, stdout, stderr = self.module.run_command(cmd)\n if rc != 0: # No key is installed on system\n return False\n cmd += ' --qf \"%{description}\" | ' + self.gpg + ' --no-tty --batch --with-colons --fixed-list-mode -'\n stdout, stderr = self.execute_command(cmd)\n for line in stdout.splitlines():\n if keyid in line.split(':')[4]:\n return True\n return False\n\n def import_key(self, keyfile):\n if not self.module.check_mode:\n self.execute_command([self.rpm, '--import', keyfile])\n\n def drop_key(self, keyid):\n if not self.module.check_mode:\n self.execute_command([self.rpm, '--erase', '--allmatches', \"gpg-pubkey-%s\" % keyid[-8:].lower()])\n\n\ndef main():\n module = AnsibleModule(\n argument_spec = dict(\n state=dict(default='present', choices=['present', 'absent'], type='str'),\n key=dict(required=True, type='str'),\n validate_certs=dict(default='yes', type='bool'),\n ),\n supports_check_mode=True\n )\n\n RpmKey(module)\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/ansible/modules/packaging/os/rpm_key.py"}]}
| 3,461 | 279 |
gh_patches_debug_30804
|
rasdani/github-patches
|
git_diff
|
easybuilders__easybuild-easyblocks-2153
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MESA on arm/aarch64 lacks gallium drivers and fails to build with -Dlibunwind=true
`easybuild/easyblocks/m/mesa.py` specifies the following:
```
if not gallium_drivers:
# Add appropriate Gallium drivers for current architecture
arch = get_cpu_architecture()
arch_gallium_drivers = {
'x86_64': ['swrast', 'swr'],
'POWER': ['swrast'],
}
```
this leads to:
```
== processing EasyBuild easyconfig /home/terjekv/easybuild/software/EasyBuild/4.2.2/easybuild/easyconfigs/m/Mesa/Mesa-20.0.2-GCCcore-9.3.0.eb
ERROR: Traceback (most recent call last):
File "/home/terjekv/easybuild/software/EasyBuild/4.2.2/lib/python3.6/site-packages/easybuild/main.py", line 115, in build_and_install_software
(ec_res['success'], app_log, err) = build_and_install_one(ec, init_env)
File "/home/terjekv/easybuild/software/EasyBuild/4.2.2/lib/python3.6/site-packages/easybuild/framework/easyblock.py", line 3264, in build_and_install_one
app = app_class(ecdict['ec'])
File "/home/terjekv/easybuild/software/EasyBuild/4.2.2/lib/python3.6/site-packages/easybuild/easyblocks/m/mesa.py", line 66, in __init__
self.log.debug('Gallium driver(s) included in the installation: %s' % ', '.join(gallium_drivers))
TypeError: can only join an iterable
```
Adding `'aarch64': ['swrast']` should be enough. Will patch and test.
--- END ISSUE ---
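For reference, a minimal standalone sketch of the driver-selection logic this issue describes. The function name `pick_gallium_drivers` and the fallback behaviour are illustrative assumptions, not part of the actual easyblock (which appears under FILES below); the mapping mirrors the issue text with the proposed `'aarch64'` entry added.

```python
def pick_gallium_drivers(arch, user_drivers=None):
    # Architecture-to-driver mapping from the issue text; without the
    # 'aarch64' entry the lookup returns None and the later
    # ', '.join(gallium_drivers) raises "can only join an iterable".
    arch_gallium_drivers = {
        'x86_64': ['swrast', 'swr'],
        'POWER': ['swrast'],
        'aarch64': ['swrast'],
    }
    drivers = user_drivers or arch_gallium_drivers.get(arch)
    if drivers is None:
        raise ValueError("no Gallium drivers known for architecture %r" % arch)
    return drivers


print(pick_gallium_drivers('aarch64'))  # ['swrast']
```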
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `easybuild/easyblocks/m/mesa.py`
Content:
```
1 ##
2 # Copyright 2009-2020 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
6 # with support of Ghent University (http://ugent.be/hpc),
7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
10 #
11 # https://github.com/easybuilders/easybuild
12 #
13 # EasyBuild is free software: you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation v2.
16 #
17 # EasyBuild is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
24 ##
25 """
26 EasyBuild support for installing Mesa, implemented as an easyblock
27
28 @author: Andrew Edmondson (University of Birmingham)
29 @author: Kenneth Hoste (HPC-UGent)
30 @author: Alex Domingo (Vrije Universiteit Brussel)
31 @author: Alexander Grund (TU Dresden)
32 """
33 import os
34 from distutils.version import LooseVersion
35
36 from easybuild.easyblocks.generic.mesonninja import MesonNinja
37 from easybuild.tools.filetools import copy_dir
38 from easybuild.tools.systemtools import POWER, X86_64, get_cpu_architecture, get_cpu_features, get_shared_lib_ext
39
40
41 class EB_Mesa(MesonNinja):
42 """Custom easyblock for building and installing Mesa."""
43
44 def __init__(self, *args, **kwargs):
45 """Constructor for custom Mesa easyblock: figure out which values to pass to swr-arches configuration option."""
46
47 super(EB_Mesa, self).__init__(*args, **kwargs)
48
49 self.gallium_configopts = []
50
51 # Check user-defined Gallium drivers
52 gallium_drivers = self.get_configopt_value('gallium-drivers')
53
54 if not gallium_drivers:
55 # Add appropriate Gallium drivers for current architecture
56 arch = get_cpu_architecture()
57 arch_gallium_drivers = {
58 X86_64: ['swrast', 'swr'],
59 POWER: ['swrast'],
60 }
61 if arch in arch_gallium_drivers:
62 gallium_drivers = arch_gallium_drivers[arch]
63 # Add configopt for additional Gallium drivers
64 self.gallium_configopts.append('-Dgallium-drivers=' + ','.join(gallium_drivers))
65
66 self.log.debug('Gallium driver(s) included in the installation: %s' % ', '.join(gallium_drivers))
67
68 self.swr_arches = []
69
70 if 'swr' in gallium_drivers:
71 # Check user-defined SWR arches
72 self.swr_arches = self.get_configopt_value('swr-arches')
73
74 if not self.swr_arches:
75 # Set cpu features of SWR for current micro-architecture
76 feat_to_swrarch = {
77 'avx': 'avx',
78 'avx1.0': 'avx', # on macOS, AVX is indicated with 'avx1.0' rather than 'avx'
79 'avx2': 'avx2',
80 'avx512f': 'skx', # AVX-512 Foundation - introduced in Skylake
81 'avx512er': 'knl', # AVX-512 Exponential and Reciprocal Instructions implemented in Knights Landing
82 }
83 # Determine list of values to pass to swr-arches configuration option
84 cpu_features = get_cpu_features()
85 self.swr_arches = sorted([swrarch for feat, swrarch in feat_to_swrarch.items() if feat in cpu_features])
86 # Add configopt for additional SWR arches
87 self.gallium_configopts.append('-Dswr-arches=' + ','.join(self.swr_arches))
88
89 self.log.debug('SWR Gallium driver will support: %s' % ', '.join(self.swr_arches))
90
91 def get_configopt_value(self, configopt_name):
92 """
93 Return list of values for the given configuration option in configopts
94 """
95 configopt_args = [opt for opt in self.cfg['configopts'].split() if opt.startswith('-D%s=' % configopt_name)]
96
97 if configopt_args:
98 if len(configopt_args) > 1:
99 self.log.warning("Found multiple instances of %s in configopts, using last one: %s",
100 configopt_name, configopt_args[-1])
101 # Get value of last option added
102 configopt_value = configopt_args[-1].split('=')[-1]
103 # Remove quotes and extract individual values
104 configopt_value = configopt_value.strip('"\'').split(',')
105 else:
106 configopt_value = None
107
108 return configopt_value
109
110 def configure_step(self):
111 """
112 Customise the configure options based on the processor architecture of the host
113 (Gallium drivers installed, SWR CPU features, ...)
114 """
115
116 if self.gallium_configopts:
117 self.cfg.update('configopts', self.gallium_configopts)
118
119 return super(EB_Mesa, self).configure_step()
120
121 def install_step(self):
122 """Also copy additional header files after installing Mesa."""
123
124 super(EB_Mesa, self).install_step()
125
126 # also install header files located in include/GL/internal, unless they're available already;
127 # we can't enable both DRI and Gallium drivers,
128 # but we can provide the DRI header file (GL/internal/dri_interface.h)
129 target_inc_GL_internal = os.path.join(self.installdir, 'include', 'GL', 'internal')
130 if not os.path.exists(target_inc_GL_internal):
131 src_inc_GL_internal = os.path.join(self.start_dir, 'include', 'GL', 'internal')
132 copy_dir(src_inc_GL_internal, target_inc_GL_internal)
133 self.log.info("Copied %s to %s" % (src_inc_GL_internal, target_inc_GL_internal))
134
135 def sanity_check_step(self):
136 """Custom sanity check for Mesa."""
137
138 shlib_ext = get_shared_lib_ext()
139
140 if LooseVersion(self.version) >= LooseVersion('20.0'):
141 header_files = [os.path.join('include', 'EGL', x) for x in ['eglmesaext.h', 'eglextchromium.h']]
142 header_files.extend([
143 os.path.join('include', 'GL', 'osmesa.h'),
144 os.path.join('include', 'GL', 'internal', 'dri_interface.h'),
145 ])
146 else:
147 gl_inc_files = ['glext.h', 'gl_mangle.h', 'glx.h', 'osmesa.h', 'gl.h', 'glxext.h', 'glx_mangle.h']
148 gles_inc_files = [('GLES', 'gl.h'), ('GLES2', 'gl2.h'), ('GLES3', 'gl3.h')]
149 header_files = [os.path.join('include', 'GL', x) for x in gl_inc_files]
150 header_files.extend([os.path.join('include', x, y) for (x, y) in gles_inc_files])
151
152 custom_paths = {
153 'files': [os.path.join('lib', 'libOSMesa.%s' % shlib_ext)] + header_files,
154 'dirs': [os.path.join('include', 'GL', 'internal')],
155 }
156
157 if self.swr_arches:
158 swr_arch_libs = [os.path.join('lib', 'libswr%s.%s' % (a.upper(), shlib_ext)) for a in self.swr_arches]
159 custom_paths['files'].extend(swr_arch_libs)
160
161 super(EB_Mesa, self).sanity_check_step(custom_paths=custom_paths)
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/easybuild/easyblocks/m/mesa.py b/easybuild/easyblocks/m/mesa.py
--- a/easybuild/easyblocks/m/mesa.py
+++ b/easybuild/easyblocks/m/mesa.py
@@ -35,7 +35,8 @@
from easybuild.easyblocks.generic.mesonninja import MesonNinja
from easybuild.tools.filetools import copy_dir
-from easybuild.tools.systemtools import POWER, X86_64, get_cpu_architecture, get_cpu_features, get_shared_lib_ext
+from easybuild.tools.systemtools import POWER, X86_64, AARCH64
+from easybuild.tools.systemtools import get_cpu_architecture, get_cpu_features, get_shared_lib_ext
class EB_Mesa(MesonNinja):
@@ -48,6 +49,16 @@
self.gallium_configopts = []
+ # Mesa fails to build with libunwind on aarch64
+ # See https://github.com/easybuilders/easybuild-easyblocks/issues/2150
+ if get_cpu_architecture() == AARCH64:
+ given_config_opts = self.cfg.get('configopts')
+ if "-Dlibunwind=true" in given_config_opts:
+ self.log.warning('libunwind not supported on aarch64, stripping from configopts!')
+ configopts_libunwind_stripped = given_config_opts.replace('-Dlibunwind=true', '-Dlibunwind=false')
+ self.cfg.set_keys({'configopts': configopts_libunwind_stripped})
+ self.log.warning('New configopts after stripping: ' + self.cfg.get('configopts'))
+
# Check user-defined Gallium drivers
gallium_drivers = self.get_configopt_value('gallium-drivers')
@@ -57,6 +68,7 @@
arch_gallium_drivers = {
X86_64: ['swrast', 'swr'],
POWER: ['swrast'],
+ AARCH64: ['swrast'],
}
if arch in arch_gallium_drivers:
gallium_drivers = arch_gallium_drivers[arch]
|
{"golden_diff": "diff --git a/easybuild/easyblocks/m/mesa.py b/easybuild/easyblocks/m/mesa.py\n--- a/easybuild/easyblocks/m/mesa.py\n+++ b/easybuild/easyblocks/m/mesa.py\n@@ -35,7 +35,8 @@\n \n from easybuild.easyblocks.generic.mesonninja import MesonNinja\n from easybuild.tools.filetools import copy_dir\n-from easybuild.tools.systemtools import POWER, X86_64, get_cpu_architecture, get_cpu_features, get_shared_lib_ext\n+from easybuild.tools.systemtools import POWER, X86_64, AARCH64\n+from easybuild.tools.systemtools import get_cpu_architecture, get_cpu_features, get_shared_lib_ext\n \n \n class EB_Mesa(MesonNinja):\n@@ -48,6 +49,16 @@\n \n self.gallium_configopts = []\n \n+ # Mesa fails to build with libunwind on aarch64\n+ # See https://github.com/easybuilders/easybuild-easyblocks/issues/2150\n+ if get_cpu_architecture() == AARCH64:\n+ given_config_opts = self.cfg.get('configopts')\n+ if \"-Dlibunwind=true\" in given_config_opts:\n+ self.log.warning('libunwind not supported on aarch64, stripping from configopts!')\n+ configopts_libunwind_stripped = given_config_opts.replace('-Dlibunwind=true', '-Dlibunwind=false')\n+ self.cfg.set_keys({'configopts': configopts_libunwind_stripped})\n+ self.log.warning('New configopts after stripping: ' + self.cfg.get('configopts'))\n+\n # Check user-defined Gallium drivers\n gallium_drivers = self.get_configopt_value('gallium-drivers')\n \n@@ -57,6 +68,7 @@\n arch_gallium_drivers = {\n X86_64: ['swrast', 'swr'],\n POWER: ['swrast'],\n+ AARCH64: ['swrast'],\n }\n if arch in arch_gallium_drivers:\n gallium_drivers = arch_gallium_drivers[arch]\n", "issue": "MESA on arm/aarch64 lacks gallium drivers and fails to build with -Dlibunwind=true\n`easybuild/easyblocks/m/mesa.py` specifies the following:\r\n```\r\n if not gallium_drivers:\r\n # Add appropriate Gallium drivers for current architecture\r\n arch = get_cpu_architecture()\r\n arch_gallium_drivers = {\r\n 'x86_64': ['swrast', 'swr'],\r\n 'POWER': ['swrast'],\r\n }\r\n```\r\nthis leads to:\r\n```\r\n== processing EasyBuild easyconfig /home/terjekv/easybuild/software/EasyBuild/4.2.2/easybuild/easyconfigs/m/Mesa/Mesa-20.0.2-GCCcore-9.3.0.eb\r\nERROR: Traceback (most recent call last):\r\n File \"/home/terjekv/easybuild/software/EasyBuild/4.2.2/lib/python3.6/site-packages/easybuild/main.py\", line 115, in build_and_install_software\r\n (ec_res['success'], app_log, err) = build_and_install_one(ec, init_env)\r\n File \"/home/terjekv/easybuild/software/EasyBuild/4.2.2/lib/python3.6/site-packages/easybuild/framework/easyblock.py\", line 3264, in build_and_install_one\r\n app = app_class(ecdict['ec'])\r\n File \"/home/terjekv/easybuild/software/EasyBuild/4.2.2/lib/python3.6/site-packages/easybuild/easyblocks/m/mesa.py\", line 66, in __init__\r\n self.log.debug('Gallium driver(s) included in the installation: %s' % ', '.join(gallium_drivers))\r\nTypeError: can only join an iterable\r\n```\r\nAdding `'aarch64': ['swrast']` should be enough. 
Will patch and test.\n", "before_files": [{"content": "##\n# Copyright 2009-2020 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for installing Mesa, implemented as an easyblock\n\n@author: Andrew Edmondson (University of Birmingham)\n@author: Kenneth Hoste (HPC-UGent)\n@author: Alex Domingo (Vrije Universiteit Brussel)\n@author: Alexander Grund (TU Dresden)\n\"\"\"\nimport os\nfrom distutils.version import LooseVersion\n\nfrom easybuild.easyblocks.generic.mesonninja import MesonNinja\nfrom easybuild.tools.filetools import copy_dir\nfrom easybuild.tools.systemtools import POWER, X86_64, get_cpu_architecture, get_cpu_features, get_shared_lib_ext\n\n\nclass EB_Mesa(MesonNinja):\n \"\"\"Custom easyblock for building and installing Mesa.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Constructor for custom Mesa easyblock: figure out which values to pass to swr-arches configuration option.\"\"\"\n\n super(EB_Mesa, self).__init__(*args, **kwargs)\n\n self.gallium_configopts = []\n\n # Check user-defined Gallium drivers\n gallium_drivers = self.get_configopt_value('gallium-drivers')\n\n if not gallium_drivers:\n # Add appropriate Gallium drivers for current architecture\n arch = get_cpu_architecture()\n arch_gallium_drivers = {\n X86_64: ['swrast', 'swr'],\n POWER: ['swrast'],\n }\n if arch in arch_gallium_drivers:\n gallium_drivers = arch_gallium_drivers[arch]\n # Add configopt for additional Gallium drivers\n self.gallium_configopts.append('-Dgallium-drivers=' + ','.join(gallium_drivers))\n\n self.log.debug('Gallium driver(s) included in the installation: %s' % ', '.join(gallium_drivers))\n\n self.swr_arches = []\n\n if 'swr' in gallium_drivers:\n # Check user-defined SWR arches\n self.swr_arches = self.get_configopt_value('swr-arches')\n\n if not self.swr_arches:\n # Set cpu features of SWR for current micro-architecture\n feat_to_swrarch = {\n 'avx': 'avx',\n 'avx1.0': 'avx', # on macOS, AVX is indicated with 'avx1.0' rather than 'avx'\n 'avx2': 'avx2',\n 'avx512f': 'skx', # AVX-512 Foundation - introduced in Skylake\n 'avx512er': 'knl', # AVX-512 Exponential and Reciprocal Instructions implemented in Knights Landing\n }\n # Determine list of values to pass to swr-arches configuration option\n cpu_features = get_cpu_features()\n self.swr_arches = sorted([swrarch for feat, swrarch in feat_to_swrarch.items() if feat in cpu_features])\n # Add configopt for additional SWR arches\n self.gallium_configopts.append('-Dswr-arches=' + ','.join(self.swr_arches))\n\n self.log.debug('SWR 
Gallium driver will support: %s' % ', '.join(self.swr_arches))\n\n def get_configopt_value(self, configopt_name):\n \"\"\"\n Return list of values for the given configuration option in configopts\n \"\"\"\n configopt_args = [opt for opt in self.cfg['configopts'].split() if opt.startswith('-D%s=' % configopt_name)]\n\n if configopt_args:\n if len(configopt_args) > 1:\n self.log.warning(\"Found multiple instances of %s in configopts, using last one: %s\",\n configopt_name, configopt_args[-1])\n # Get value of last option added\n configopt_value = configopt_args[-1].split('=')[-1]\n # Remove quotes and extract individual values\n configopt_value = configopt_value.strip('\"\\'').split(',')\n else:\n configopt_value = None\n\n return configopt_value\n\n def configure_step(self):\n \"\"\"\n Customise the configure options based on the processor architecture of the host\n (Gallium drivers installed, SWR CPU features, ...)\n \"\"\"\n\n if self.gallium_configopts:\n self.cfg.update('configopts', self.gallium_configopts)\n\n return super(EB_Mesa, self).configure_step()\n\n def install_step(self):\n \"\"\"Also copy additional header files after installing Mesa.\"\"\"\n\n super(EB_Mesa, self).install_step()\n\n # also install header files located in include/GL/internal, unless they're available already;\n # we can't enable both DRI and Gallium drivers,\n # but we can provide the DRI header file (GL/internal/dri_interface.h)\n target_inc_GL_internal = os.path.join(self.installdir, 'include', 'GL', 'internal')\n if not os.path.exists(target_inc_GL_internal):\n src_inc_GL_internal = os.path.join(self.start_dir, 'include', 'GL', 'internal')\n copy_dir(src_inc_GL_internal, target_inc_GL_internal)\n self.log.info(\"Copied %s to %s\" % (src_inc_GL_internal, target_inc_GL_internal))\n\n def sanity_check_step(self):\n \"\"\"Custom sanity check for Mesa.\"\"\"\n\n shlib_ext = get_shared_lib_ext()\n\n if LooseVersion(self.version) >= LooseVersion('20.0'):\n header_files = [os.path.join('include', 'EGL', x) for x in ['eglmesaext.h', 'eglextchromium.h']]\n header_files.extend([\n os.path.join('include', 'GL', 'osmesa.h'),\n os.path.join('include', 'GL', 'internal', 'dri_interface.h'),\n ])\n else:\n gl_inc_files = ['glext.h', 'gl_mangle.h', 'glx.h', 'osmesa.h', 'gl.h', 'glxext.h', 'glx_mangle.h']\n gles_inc_files = [('GLES', 'gl.h'), ('GLES2', 'gl2.h'), ('GLES3', 'gl3.h')]\n header_files = [os.path.join('include', 'GL', x) for x in gl_inc_files]\n header_files.extend([os.path.join('include', x, y) for (x, y) in gles_inc_files])\n\n custom_paths = {\n 'files': [os.path.join('lib', 'libOSMesa.%s' % shlib_ext)] + header_files,\n 'dirs': [os.path.join('include', 'GL', 'internal')],\n }\n\n if self.swr_arches:\n swr_arch_libs = [os.path.join('lib', 'libswr%s.%s' % (a.upper(), shlib_ext)) for a in self.swr_arches]\n custom_paths['files'].extend(swr_arch_libs)\n\n super(EB_Mesa, self).sanity_check_step(custom_paths=custom_paths)\n", "path": "easybuild/easyblocks/m/mesa.py"}], "after_files": [{"content": "##\n# Copyright 2009-2020 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free 
software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for installing Mesa, implemented as an easyblock\n\n@author: Andrew Edmondson (University of Birmingham)\n@author: Kenneth Hoste (HPC-UGent)\n@author: Alex Domingo (Vrije Universiteit Brussel)\n@author: Alexander Grund (TU Dresden)\n\"\"\"\nimport os\nfrom distutils.version import LooseVersion\n\nfrom easybuild.easyblocks.generic.mesonninja import MesonNinja\nfrom easybuild.tools.filetools import copy_dir\nfrom easybuild.tools.systemtools import POWER, X86_64, AARCH64\nfrom easybuild.tools.systemtools import get_cpu_architecture, get_cpu_features, get_shared_lib_ext\n\n\nclass EB_Mesa(MesonNinja):\n \"\"\"Custom easyblock for building and installing Mesa.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Constructor for custom Mesa easyblock: figure out which values to pass to swr-arches configuration option.\"\"\"\n\n super(EB_Mesa, self).__init__(*args, **kwargs)\n\n self.gallium_configopts = []\n\n # Mesa fails to build with libunwind on aarch64\n # See https://github.com/easybuilders/easybuild-easyblocks/issues/2150\n if get_cpu_architecture() == AARCH64:\n given_config_opts = self.cfg.get('configopts')\n if \"-Dlibunwind=true\" in given_config_opts:\n self.log.warning('libunwind not supported on aarch64, stripping from configopts!')\n configopts_libunwind_stripped = given_config_opts.replace('-Dlibunwind=true', '-Dlibunwind=false')\n self.cfg.set_keys({'configopts': configopts_libunwind_stripped})\n self.log.warning('New configopts after stripping: ' + self.cfg.get('configopts'))\n\n # Check user-defined Gallium drivers\n gallium_drivers = self.get_configopt_value('gallium-drivers')\n\n if not gallium_drivers:\n # Add appropriate Gallium drivers for current architecture\n arch = get_cpu_architecture()\n arch_gallium_drivers = {\n X86_64: ['swrast', 'swr'],\n POWER: ['swrast'],\n AARCH64: ['swrast'],\n }\n if arch in arch_gallium_drivers:\n gallium_drivers = arch_gallium_drivers[arch]\n # Add configopt for additional Gallium drivers\n self.gallium_configopts.append('-Dgallium-drivers=' + ','.join(gallium_drivers))\n\n self.log.debug('Gallium driver(s) included in the installation: %s' % ', '.join(gallium_drivers))\n\n self.swr_arches = []\n\n if 'swr' in gallium_drivers:\n # Check user-defined SWR arches\n self.swr_arches = self.get_configopt_value('swr-arches')\n\n if not self.swr_arches:\n # Set cpu features of SWR for current micro-architecture\n feat_to_swrarch = {\n 'avx': 'avx',\n 'avx1.0': 'avx', # on macOS, AVX is indicated with 'avx1.0' rather than 'avx'\n 'avx2': 'avx2',\n 'avx512f': 'skx', # AVX-512 Foundation - introduced in Skylake\n 'avx512er': 'knl', # AVX-512 Exponential and Reciprocal Instructions implemented in Knights Landing\n }\n # Determine list of values to pass to swr-arches configuration option\n cpu_features = get_cpu_features()\n self.swr_arches = sorted([swrarch for feat, swrarch in feat_to_swrarch.items() if feat in cpu_features])\n # Add configopt for additional SWR arches\n 
self.gallium_configopts.append('-Dswr-arches=' + ','.join(self.swr_arches))\n\n self.log.debug('SWR Gallium driver will support: %s' % ', '.join(self.swr_arches))\n\n def get_configopt_value(self, configopt_name):\n \"\"\"\n Return list of values for the given configuration option in configopts\n \"\"\"\n configopt_args = [opt for opt in self.cfg['configopts'].split() if opt.startswith('-D%s=' % configopt_name)]\n\n if configopt_args:\n if len(configopt_args) > 1:\n self.log.warning(\"Found multiple instances of %s in configopts, using last one: %s\",\n configopt_name, configopt_args[-1])\n # Get value of last option added\n configopt_value = configopt_args[-1].split('=')[-1]\n # Remove quotes and extract individual values\n configopt_value = configopt_value.strip('\"\\'').split(',')\n else:\n configopt_value = None\n\n return configopt_value\n\n def configure_step(self):\n \"\"\"\n Customise the configure options based on the processor architecture of the host\n (Gallium drivers installed, SWR CPU features, ...)\n \"\"\"\n\n if self.gallium_configopts:\n self.cfg.update('configopts', self.gallium_configopts)\n\n return super(EB_Mesa, self).configure_step()\n\n def install_step(self):\n \"\"\"Also copy additional header files after installing Mesa.\"\"\"\n\n super(EB_Mesa, self).install_step()\n\n # also install header files located in include/GL/internal, unless they're available already;\n # we can't enable both DRI and Gallium drivers,\n # but we can provide the DRI header file (GL/internal/dri_interface.h)\n target_inc_GL_internal = os.path.join(self.installdir, 'include', 'GL', 'internal')\n if not os.path.exists(target_inc_GL_internal):\n src_inc_GL_internal = os.path.join(self.start_dir, 'include', 'GL', 'internal')\n copy_dir(src_inc_GL_internal, target_inc_GL_internal)\n self.log.info(\"Copied %s to %s\" % (src_inc_GL_internal, target_inc_GL_internal))\n\n def sanity_check_step(self):\n \"\"\"Custom sanity check for Mesa.\"\"\"\n\n shlib_ext = get_shared_lib_ext()\n\n if LooseVersion(self.version) >= LooseVersion('20.0'):\n header_files = [os.path.join('include', 'EGL', x) for x in ['eglmesaext.h', 'eglextchromium.h']]\n header_files.extend([\n os.path.join('include', 'GL', 'osmesa.h'),\n os.path.join('include', 'GL', 'internal', 'dri_interface.h'),\n ])\n else:\n gl_inc_files = ['glext.h', 'gl_mangle.h', 'glx.h', 'osmesa.h', 'gl.h', 'glxext.h', 'glx_mangle.h']\n gles_inc_files = [('GLES', 'gl.h'), ('GLES2', 'gl2.h'), ('GLES3', 'gl3.h')]\n header_files = [os.path.join('include', 'GL', x) for x in gl_inc_files]\n header_files.extend([os.path.join('include', x, y) for (x, y) in gles_inc_files])\n\n custom_paths = {\n 'files': [os.path.join('lib', 'libOSMesa.%s' % shlib_ext)] + header_files,\n 'dirs': [os.path.join('include', 'GL', 'internal')],\n }\n\n if self.swr_arches:\n swr_arch_libs = [os.path.join('lib', 'libswr%s.%s' % (a.upper(), shlib_ext)) for a in self.swr_arches]\n custom_paths['files'].extend(swr_arch_libs)\n\n super(EB_Mesa, self).sanity_check_step(custom_paths=custom_paths)\n", "path": "easybuild/easyblocks/m/mesa.py"}]}
| 2,879 | 488 |
gh_patches_debug_15661
|
rasdani/github-patches
|
git_diff
|
PaddlePaddle__PaddleOCR-66
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Converting the detection model to an inference model fails even though 'use_gpu': False is set
Hi, when converting the detection model to an inference model, I have already set 'use_gpu': False in det_mv3_db.yml, but it still reports an error, as shown below:
python tools/export_model.py -c configs/det/det_mv3_db.yml -o Global.checkpoints=./ch_lite/det_mv3_db/best_accuracy Global.save_inference_dir=./inference_model/det_db/
2020-05-19 10:29:59,237-INFO: {'Global': {'algorithm': 'DB', 'use_gpu': False, 'epoch_num': 1200, 'log_smooth_window': 20, 'print_batch_step': 2, 'save_model_dir': './output/det_db/', 'save_epoch_step': 200, 'eval_batch_step': 5000, 'train_batch_size_per_card': 16, 'test_batch_size_per_card': 16, 'image_shape': [3, 640, 640], 'reader_yml': './configs/det/det_db_icdar15_reader.yml', 'pretrain_weights': './pretrain_models/MobileNetV3_large_x0_5_pretrained/', 'checkpoints': './ch_lite/det_mv3_db/best_accuracy', 'save_res_path': './output/det_db/predicts_db.txt', 'save_inference_dir': './inference_model/det_db/'}, 'Architecture': {'function': 'ppocr.modeling.architectures.det_model,DetModel'}, 'Backbone': {'function': 'ppocr.modeling.backbones.det_mobilenet_v3,MobileNetV3', 'scale': 0.5, 'model_name': 'large'}, 'Head': {'function': 'ppocr.modeling.heads.det_db_head,DBHead', 'model_name': 'large', 'k': 50, 'inner_channels': 96, 'out_channels': 2}, 'Loss': {'function': 'ppocr.modeling.losses.det_db_loss,DBLoss', 'balance_loss': True, 'main_loss_type': 'DiceLoss', 'alpha': 5, 'beta': 10, 'ohem_ratio': 3}, 'Optimizer': {'function': 'ppocr.optimizer,AdamDecay', 'base_lr': 0.001, 'beta1': 0.9, 'beta2': 0.999}, 'PostProcess': {'function': 'ppocr.postprocess.db_postprocess,DBPostProcess', 'thresh': 0.3, 'box_thresh': 0.7, 'max_candidates': 1000, 'unclip_ratio': 1.5}, 'TrainReader': {'reader_function': 'ppocr.data.det.dataset_traversal,TrainReader', 'process_function': 'ppocr.data.det.db_process,DBProcessTrain', 'num_workers': 8, 'img_set_dir': './train_data/icdar2015/text_localization/', 'label_file_path': './train_data/icdar2015/text_localization/train_icdar2015_label.txt'}, 'EvalReader': {'reader_function': 'ppocr.data.det.dataset_traversal,EvalTestReader', 'process_function': 'ppocr.data.det.db_process,DBProcessTest', 'img_set_dir': './train_data/icdar2015/text_localization/', 'label_file_path': './train_data/icdar2015/text_localization/test_icdar2015_label.txt', 'test_image_shape': [736, 1280]}, 'TestReader': {'reader_function': 'ppocr.data.det.dataset_traversal,EvalTestReader', 'process_function': 'ppocr.data.det.db_process,DBProcessTest', 'single_img_path': None, 'img_set_dir': './train_data/icdar2015/text_localization/', 'label_file_path': './train_data/icdar2015/text_localization/test_icdar2015_label.txt', 'test_image_shape': [736, 1280], 'do_eval': True}}
2020-05-19 10:29:59,238-ERROR: Config use_gpu cannot be set as true while you are using paddlepaddle cpu version !
Please try:
1. Install paddlepaddle-gpu to run model on GPU
2. Set use_gpu as false in config file to run model on CPU
--- END ISSUE ---
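A stand-in sketch of the guard behind this error message and of the call-site fix; `check_gpu` and its `cuda_available` parameter here are illustrative assumptions and do not reproduce the real implementation in `tools/program.py`.

```python
def check_gpu(use_gpu, cuda_available):
    # Abort only when a GPU run is requested on a CPU-only build.
    if use_gpu and not cuda_available:
        raise SystemExit(
            "Config use_gpu cannot be set as true while you are using "
            "paddlepaddle cpu version!")


config = {'Global': {'use_gpu': False}}
use_gpu = config['Global']['use_gpu']

# Buggy call: hard-codes True, so a CPU-only build always aborts.
# check_gpu(True, cuda_available=False)

# Fixed call: forward the configured flag instead.
check_gpu(use_gpu, cuda_available=False)  # passes, export continues on CPU
```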
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/export_model.py`
Content:
```
1 # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16 from __future__ import division
17 from __future__ import print_function
18
19 import os
20 import sys
21 import time
22 import multiprocessing
23 import numpy as np
24
25
26 def set_paddle_flags(**kwargs):
27 for key, value in kwargs.items():
28 if os.environ.get(key, None) is None:
29 os.environ[key] = str(value)
30
31
32 # NOTE(paddle-dev): All of these flags should be
33 # set before `import paddle`. Otherwise, it would
34 # not take any effect.
35 set_paddle_flags(
36 FLAGS_eager_delete_tensor_gb=0, # enable GC to save memory
37 )
38
39 import program
40 from paddle import fluid
41 from ppocr.utils.utility import initial_logger
42 logger = initial_logger()
43 from ppocr.utils.save_load import init_model
44 from ppocr.utils.character import CharacterOps
45 from ppocr.utils.utility import create_module
46
47
48 def main():
49 config = program.load_config(FLAGS.config)
50 program.merge_config(FLAGS.opt)
51 logger.info(config)
52
53 # check if set use_gpu=True in paddlepaddle cpu version
54 use_gpu = config['Global']['use_gpu']
55 program.check_gpu(True)
56
57 alg = config['Global']['algorithm']
58 assert alg in ['EAST', 'DB', 'Rosetta', 'CRNN', 'STARNet', 'RARE']
59 if alg in ['Rosetta', 'CRNN', 'STARNet', 'RARE']:
60 config['Global']['char_ops'] = CharacterOps(config['Global'])
61
62 place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
63 startup_prog = fluid.Program()
64 eval_program = fluid.Program()
65
66 feeded_var_names, target_vars, fetches_var_name = program.build_export(
67 config, eval_program, startup_prog)
68 eval_program = eval_program.clone(for_test=True)
69 exe = fluid.Executor(place)
70 exe.run(startup_prog)
71
72 init_model(config, eval_program, exe)
73
74 save_inference_dir = config['Global']['save_inference_dir']
75 if not os.path.exists(save_inference_dir):
76 os.makedirs(save_inference_dir)
77 fluid.io.save_inference_model(
78 dirname=save_inference_dir,
79 feeded_var_names=feeded_var_names,
80 main_program=eval_program,
81 target_vars=target_vars,
82 executor=exe,
83 model_filename='model',
84 params_filename='params')
85 print("inference model saved in {}/model and {}/params".format(
86 save_inference_dir, save_inference_dir))
87 print("save success, output_name_list:", fetches_var_name)
88
89
90 if __name__ == '__main__':
91 parser = program.ArgsParser()
92 FLAGS = parser.parse_args()
93 main()
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tools/export_model.py b/tools/export_model.py
--- a/tools/export_model.py
+++ b/tools/export_model.py
@@ -31,7 +31,7 @@
# NOTE(paddle-dev): All of these flags should be
# set before `import paddle`. Otherwise, it would
-# not take any effect.
+# not take any effect.
set_paddle_flags(
FLAGS_eager_delete_tensor_gb=0, # enable GC to save memory
)
@@ -52,7 +52,7 @@
# check if set use_gpu=True in paddlepaddle cpu version
use_gpu = config['Global']['use_gpu']
- program.check_gpu(True)
+ program.check_gpu(use_gpu)
alg = config['Global']['algorithm']
assert alg in ['EAST', 'DB', 'Rosetta', 'CRNN', 'STARNet', 'RARE']
|
{"golden_diff": "diff --git a/tools/export_model.py b/tools/export_model.py\n--- a/tools/export_model.py\n+++ b/tools/export_model.py\n@@ -31,7 +31,7 @@\n \n # NOTE(paddle-dev): All of these flags should be\n # set before `import paddle`. Otherwise, it would\n-# not take any effect. \n+# not take any effect.\n set_paddle_flags(\n FLAGS_eager_delete_tensor_gb=0, # enable GC to save memory\n )\n@@ -52,7 +52,7 @@\n \n # check if set use_gpu=True in paddlepaddle cpu version\n use_gpu = config['Global']['use_gpu']\n- program.check_gpu(True)\n+ program.check_gpu(use_gpu)\n \n alg = config['Global']['algorithm']\n assert alg in ['EAST', 'DB', 'Rosetta', 'CRNN', 'STARNet', 'RARE']\n", "issue": "\u68c0\u6d4b\u6a21\u578b\u8f6cinference\u6a21\u578b\uff0c'use_gpu': False\uff0c\u4f46\u4ecd\u7136\u8f6c\u6362\u5931\u8d25\n\u4f60\u597d\uff0c\u5728\u68c0\u6d4b\u6a21\u578b\u8f6cinference\u6a21\u578b\u65f6\uff0c\u5df2\u4fee\u6539det_mv3_db.yml \u4e2d'use_gpu': False\uff0c\u4f46\u4ecd\u7136\u62a5\u9519\uff0c\u5982\u4e0b\u6240\u793a\uff1a\r\npython tools/export_model.py -c configs/det/det_mv3_db.yml -o Global.checkpoints=./ch_lite/det_mv3_db/best_accuracy Global.save_inference_dir=./inference_model/det_db/\r\n2020-05-19 10:29:59,237-INFO: {'Global': {'algorithm': 'DB', 'use_gpu': False, 'epoch_num': 1200, 'log_smooth_window': 20, 'print_batch_step': 2, 'save_model_dir': './output/det_db/', 'save_epoch_step': 200, 'eval_batch_step': 5000, 'train_batch_size_per_card': 16, 'test_batch_size_per_card': 16, 'image_shape': [3, 640, 640], 'reader_yml': './configs/det/det_db_icdar15_reader.yml', 'pretrain_weights': './pretrain_models/MobileNetV3_large_x0_5_pretrained/', 'checkpoints': './ch_lite/det_mv3_db/best_accuracy', 'save_res_path': './output/det_db/predicts_db.txt', 'save_inference_dir': './inference_model/det_db/'}, 'Architecture': {'function': 'ppocr.modeling.architectures.det_model,DetModel'}, 'Backbone': {'function': 'ppocr.modeling.backbones.det_mobilenet_v3,MobileNetV3', 'scale': 0.5, 'model_name': 'large'}, 'Head': {'function': 'ppocr.modeling.heads.det_db_head,DBHead', 'model_name': 'large', 'k': 50, 'inner_channels': 96, 'out_channels': 2}, 'Loss': {'function': 'ppocr.modeling.losses.det_db_loss,DBLoss', 'balance_loss': True, 'main_loss_type': 'DiceLoss', 'alpha': 5, 'beta': 10, 'ohem_ratio': 3}, 'Optimizer': {'function': 'ppocr.optimizer,AdamDecay', 'base_lr': 0.001, 'beta1': 0.9, 'beta2': 0.999}, 'PostProcess': {'function': 'ppocr.postprocess.db_postprocess,DBPostProcess', 'thresh': 0.3, 'box_thresh': 0.7, 'max_candidates': 1000, 'unclip_ratio': 1.5}, 'TrainReader': {'reader_function': 'ppocr.data.det.dataset_traversal,TrainReader', 'process_function': 'ppocr.data.det.db_process,DBProcessTrain', 'num_workers': 8, 'img_set_dir': './train_data/icdar2015/text_localization/', 'label_file_path': './train_data/icdar2015/text_localization/train_icdar2015_label.txt'}, 'EvalReader': {'reader_function': 'ppocr.data.det.dataset_traversal,EvalTestReader', 'process_function': 'ppocr.data.det.db_process,DBProcessTest', 'img_set_dir': './train_data/icdar2015/text_localization/', 'label_file_path': './train_data/icdar2015/text_localization/test_icdar2015_label.txt', 'test_image_shape': [736, 1280]}, 'TestReader': {'reader_function': 'ppocr.data.det.dataset_traversal,EvalTestReader', 'process_function': 'ppocr.data.det.db_process,DBProcessTest', 'single_img_path': None, 'img_set_dir': './train_data/icdar2015/text_localization/', 'label_file_path': './train_data/icdar2015/text_localization/test_icdar2015_label.txt', 
'test_image_shape': [736, 1280], 'do_eval': True}}\r\n2020-05-19 10:29:59,238-ERROR: Config use_gpu cannot be set as true while you are using paddlepaddle cpu version !\r\nPlease try:\r\n 1. Install paddlepaddle-gpu to run model on GPU\r\n 2. Set use_gpu as false in config file to run model on CPU\n", "before_files": [{"content": "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\nimport multiprocessing\nimport numpy as np\n\n\ndef set_paddle_flags(**kwargs):\n for key, value in kwargs.items():\n if os.environ.get(key, None) is None:\n os.environ[key] = str(value)\n\n\n# NOTE(paddle-dev): All of these flags should be\n# set before `import paddle`. Otherwise, it would\n# not take any effect. \nset_paddle_flags(\n FLAGS_eager_delete_tensor_gb=0, # enable GC to save memory\n)\n\nimport program\nfrom paddle import fluid\nfrom ppocr.utils.utility import initial_logger\nlogger = initial_logger()\nfrom ppocr.utils.save_load import init_model\nfrom ppocr.utils.character import CharacterOps\nfrom ppocr.utils.utility import create_module\n\n\ndef main():\n config = program.load_config(FLAGS.config)\n program.merge_config(FLAGS.opt)\n logger.info(config)\n\n # check if set use_gpu=True in paddlepaddle cpu version\n use_gpu = config['Global']['use_gpu']\n program.check_gpu(True)\n\n alg = config['Global']['algorithm']\n assert alg in ['EAST', 'DB', 'Rosetta', 'CRNN', 'STARNet', 'RARE']\n if alg in ['Rosetta', 'CRNN', 'STARNet', 'RARE']:\n config['Global']['char_ops'] = CharacterOps(config['Global'])\n\n place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()\n startup_prog = fluid.Program()\n eval_program = fluid.Program()\n\n feeded_var_names, target_vars, fetches_var_name = program.build_export(\n config, eval_program, startup_prog)\n eval_program = eval_program.clone(for_test=True)\n exe = fluid.Executor(place)\n exe.run(startup_prog)\n\n init_model(config, eval_program, exe)\n\n save_inference_dir = config['Global']['save_inference_dir']\n if not os.path.exists(save_inference_dir):\n os.makedirs(save_inference_dir)\n fluid.io.save_inference_model(\n dirname=save_inference_dir,\n feeded_var_names=feeded_var_names,\n main_program=eval_program,\n target_vars=target_vars,\n executor=exe,\n model_filename='model',\n params_filename='params')\n print(\"inference model saved in {}/model and {}/params\".format(\n save_inference_dir, save_inference_dir))\n print(\"save success, output_name_list:\", fetches_var_name)\n\n\nif __name__ == '__main__':\n parser = program.ArgsParser()\n FLAGS = parser.parse_args()\n main()\n", "path": "tools/export_model.py"}], "after_files": [{"content": "# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\nimport multiprocessing\nimport numpy as np\n\n\ndef set_paddle_flags(**kwargs):\n for key, value in kwargs.items():\n if os.environ.get(key, None) is None:\n os.environ[key] = str(value)\n\n\n# NOTE(paddle-dev): All of these flags should be\n# set before `import paddle`. Otherwise, it would\n# not take any effect.\nset_paddle_flags(\n FLAGS_eager_delete_tensor_gb=0, # enable GC to save memory\n)\n\nimport program\nfrom paddle import fluid\nfrom ppocr.utils.utility import initial_logger\nlogger = initial_logger()\nfrom ppocr.utils.save_load import init_model\nfrom ppocr.utils.character import CharacterOps\nfrom ppocr.utils.utility import create_module\n\n\ndef main():\n config = program.load_config(FLAGS.config)\n program.merge_config(FLAGS.opt)\n logger.info(config)\n\n # check if set use_gpu=True in paddlepaddle cpu version\n use_gpu = config['Global']['use_gpu']\n program.check_gpu(use_gpu)\n\n alg = config['Global']['algorithm']\n assert alg in ['EAST', 'DB', 'Rosetta', 'CRNN', 'STARNet', 'RARE']\n if alg in ['Rosetta', 'CRNN', 'STARNet', 'RARE']:\n config['Global']['char_ops'] = CharacterOps(config['Global'])\n\n place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()\n startup_prog = fluid.Program()\n eval_program = fluid.Program()\n\n feeded_var_names, target_vars, fetches_var_name = program.build_export(\n config, eval_program, startup_prog)\n eval_program = eval_program.clone(for_test=True)\n exe = fluid.Executor(place)\n exe.run(startup_prog)\n\n init_model(config, eval_program, exe)\n\n save_inference_dir = config['Global']['save_inference_dir']\n if not os.path.exists(save_inference_dir):\n os.makedirs(save_inference_dir)\n fluid.io.save_inference_model(\n dirname=save_inference_dir,\n feeded_var_names=feeded_var_names,\n main_program=eval_program,\n target_vars=target_vars,\n executor=exe,\n model_filename='model',\n params_filename='params')\n print(\"inference model saved in {}/model and {}/params\".format(\n save_inference_dir, save_inference_dir))\n print(\"save success, output_name_list:\", fetches_var_name)\n\n\nif __name__ == '__main__':\n parser = program.ArgsParser()\n FLAGS = parser.parse_args()\n main()\n", "path": "tools/export_model.py"}]}
| 2,125 | 195 |
gh_patches_debug_33751
|
rasdani/github-patches
|
git_diff
|
Parsl__parsl-1083
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
remote_side_bash_executor global logger conflates logs from multiple tasks/sources
`remote_side_bash_executor` configures and uses the global `logging` logger, rather than one scoped to that function. Once it has configured a log file for output, all subsequent global logs from any further `remote_side_bash_executor` invocation in that process, as well as other uses of logging, such as htex `process_worker_pool.py`, end up in earlier configured log files.
This results in `/tmp/bashexec` logs containing a confused assortment of logs from different sources, rather than being focused on a single bash execution.
--- END ISSUE ---
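A minimal stdlib-only sketch of the per-invocation logger pattern that avoids reconfiguring the root logger; the helper name `make_invocation_logger` is an illustrative assumption, and parsl's own `set_file_logger` helper is not used here.

```python
import logging
import time


def make_invocation_logger(logbase="/tmp"):
    t = time.time()
    name = "bashexec.%s" % t                   # unique name per invocation
    logger = logging.getLogger(name)           # scoped logger, not the root
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler("%s/bashexec.%s.log" % (logbase, t))
    handler.setFormatter(logging.Formatter(
        "%(asctime)s %(name)s:%(lineno)d [%(levelname)s] %(message)s"))
    logger.addHandler(handler)
    return logger


logger = make_invocation_logger()
logger.debug("messages from this invocation stay in this invocation's file")
```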
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/app/bash.py`
Content:
```
1 import logging
2 from functools import update_wrapper
3 from inspect import signature, Parameter
4
5 from parsl.app.errors import wrap_error
6 from parsl.app.futures import DataFuture
7 from parsl.app.app import AppBase
8 from parsl.dataflow.dflow import DataFlowKernelLoader
9
10 logger = logging.getLogger(__name__)
11
12
13 def remote_side_bash_executor(func, *args, **kwargs):
14 """Execute the bash app type function and return the command line string.
15
16 This string is reformatted with the *args, and **kwargs
17 from call time.
18 """
19 import os
20 import time
21 import subprocess
22 import logging
23 import parsl.app.errors as pe
24
25 logging.basicConfig(filename='/tmp/bashexec.{0}.log'.format(time.time()), level=logging.DEBUG)
26
27 func_name = func.__name__
28
29 partial_cmdline = None
30
31 # Try to run the func to compose the commandline
32 try:
33 # Execute the func to get the commandline
34 partial_cmdline = func(*args, **kwargs)
35 # Reformat the commandline with current args and kwargs
36 executable = partial_cmdline.format(*args, **kwargs)
37
38 except AttributeError as e:
39 if partial_cmdline is not None:
40 raise pe.AppBadFormatting("App formatting failed for app '{}' with AttributeError: {}".format(func_name, e))
41 else:
42 raise pe.BashAppNoReturn("Bash app '{}' did not return a value, or returned none - with this exception: {}".format(func_name, e), None)
43
44 except IndexError as e:
45 raise pe.AppBadFormatting("App formatting failed for app '{}' with IndexError: {}".format(func_name, e))
46 except Exception as e:
47 logging.error("Caught exception during formatting of app '{}': {}".format(func_name, e))
48 raise e
49
50 logging.debug("Executable: %s", executable)
51
52 # Updating stdout, stderr if values passed at call time.
53
54 def open_std_fd(fdname):
55 # fdname is 'stdout' or 'stderr'
56 stdfspec = kwargs.get(fdname) # spec is str name or tuple (name, mode)
57 if stdfspec is None:
58 return None
59 elif isinstance(stdfspec, str):
60 fname = stdfspec
61 mode = 'a+'
62 elif isinstance(stdfspec, tuple):
63 if len(stdfspec) != 2:
64 raise pe.BadStdStreamFile("std descriptor %s has incorrect tuple length %s" % (fdname, len(stdfspec)), TypeError('Bad Tuple Length'))
65 fname, mode = stdfspec
66 else:
67 raise pe.BadStdStreamFile("std descriptor %s has unexpected type %s" % (fdname, str(type(stdfspec))), TypeError('Bad Tuple Type'))
68
69 try:
70 if os.path.dirname(fname):
71 os.makedirs(os.path.dirname(fname), exist_ok=True)
72 fd = open(fname, mode)
73 except Exception as e:
74 raise pe.BadStdStreamFile(fname, e)
75 return fd
76
77 std_out = open_std_fd('stdout')
78 std_err = open_std_fd('stderr')
79 timeout = kwargs.get('walltime')
80
81 if std_err is not None:
82 print('--> executable follows <--\n{}\n--> end executable <--'.format(executable), file=std_err, flush=True)
83
84 returncode = None
85 try:
86 proc = subprocess.Popen(executable, stdout=std_out, stderr=std_err, shell=True, executable='/bin/bash')
87 proc.wait(timeout=timeout)
88 returncode = proc.returncode
89
90 except subprocess.TimeoutExpired:
91 raise pe.AppTimeout("[{}] App exceeded walltime: {}".format(func_name, timeout))
92
93 except Exception as e:
94 raise pe.AppException("[{}] App caught exception: {}".format(func_name, proc.returncode), e)
95
96 if returncode != 0:
97 raise pe.AppFailure("[{}] App failed with exit code: {}".format(func_name, proc.returncode), proc.returncode)
98
99 # TODO : Add support for globs here
100
101 missing = []
102 for outputfile in kwargs.get('outputs', []):
103 fpath = outputfile
104 if type(outputfile) != str:
105 fpath = outputfile.filepath
106
107 if not os.path.exists(fpath):
108 missing.extend([outputfile])
109
110 if missing:
111 raise pe.MissingOutputs("[{}] Missing outputs".format(func_name), missing)
112
113 return returncode
114
115
116 class BashApp(AppBase):
117
118 def __init__(self, func, data_flow_kernel=None, walltime=60, cache=False, executors='all'):
119 super().__init__(func, data_flow_kernel=data_flow_kernel, walltime=60, executors=executors, cache=cache)
120 self.kwargs = {}
121
122 # We duplicate the extraction of parameter defaults
123 # to self.kwargs to ensure availability at point of
124 # command string format. Refer: #349
125 sig = signature(func)
126
127 for s in sig.parameters:
128 if sig.parameters[s].default != Parameter.empty:
129 self.kwargs[s] = sig.parameters[s].default
130
131 def __call__(self, *args, **kwargs):
132 """Handle the call to a Bash app.
133
134 Args:
135 - Arbitrary
136
137 Kwargs:
138 - Arbitrary
139
140 Returns:
141 If outputs=[...] was a kwarg then:
142 App_fut, [Data_Futures...]
143 else:
144 App_fut
145
146 """
147 # Update kwargs in the app definition with ones passed in at calltime
148 self.kwargs.update(kwargs)
149
150 if self.data_flow_kernel is None:
151 dfk = DataFlowKernelLoader.dfk()
152 else:
153 dfk = self.data_flow_kernel
154
155 app_fut = dfk.submit(wrap_error(update_wrapper(remote_side_bash_executor, self.func)),
156 self.func, *args,
157 executors=self.executors,
158 fn_hash=self.func_hash,
159 cache=self.cache,
160 **self.kwargs)
161
162 out_futs = [DataFuture(app_fut, o, tid=app_fut.tid)
163 for o in kwargs.get('outputs', [])]
164 app_fut._outputs = out_futs
165
166 return app_fut
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsl/app/bash.py b/parsl/app/bash.py
--- a/parsl/app/bash.py
+++ b/parsl/app/bash.py
@@ -1,4 +1,3 @@
-import logging
from functools import update_wrapper
from inspect import signature, Parameter
@@ -7,8 +6,6 @@
from parsl.app.app import AppBase
from parsl.dataflow.dflow import DataFlowKernelLoader
-logger = logging.getLogger(__name__)
-
def remote_side_bash_executor(func, *args, **kwargs):
"""Execute the bash app type function and return the command line string.
@@ -21,8 +18,20 @@
import subprocess
import logging
import parsl.app.errors as pe
+ from parsl import set_file_logger
+
+ logbase = "/tmp"
+ format_string = "%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s] %(message)s"
+
+ # make this name unique per invocation so that each invocation can
+ # log to its own file. It would be better to include the task_id here
+ # but that is awkward to wire through at the moment as apps do not
+ # have access to that execution context.
+ t = time.time()
- logging.basicConfig(filename='/tmp/bashexec.{0}.log'.format(time.time()), level=logging.DEBUG)
+ logname = __name__ + "." + str(t)
+ logger = logging.getLogger(logname)
+ set_file_logger(filename='{0}/bashexec.{1}.log'.format(logbase, t), name=logname, level=logging.DEBUG, format_string=format_string)
func_name = func.__name__
@@ -44,10 +53,10 @@
except IndexError as e:
raise pe.AppBadFormatting("App formatting failed for app '{}' with IndexError: {}".format(func_name, e))
except Exception as e:
- logging.error("Caught exception during formatting of app '{}': {}".format(func_name, e))
+ logger.error("Caught exception during formatting of app '{}': {}".format(func_name, e))
raise e
- logging.debug("Executable: %s", executable)
+ logger.debug("Executable: %s", executable)
# Updating stdout, stderr if values passed at call time.
|
{"golden_diff": "diff --git a/parsl/app/bash.py b/parsl/app/bash.py\n--- a/parsl/app/bash.py\n+++ b/parsl/app/bash.py\n@@ -1,4 +1,3 @@\n-import logging\n from functools import update_wrapper\n from inspect import signature, Parameter\n \n@@ -7,8 +6,6 @@\n from parsl.app.app import AppBase\n from parsl.dataflow.dflow import DataFlowKernelLoader\n \n-logger = logging.getLogger(__name__)\n-\n \n def remote_side_bash_executor(func, *args, **kwargs):\n \"\"\"Execute the bash app type function and return the command line string.\n@@ -21,8 +18,20 @@\n import subprocess\n import logging\n import parsl.app.errors as pe\n+ from parsl import set_file_logger\n+\n+ logbase = \"/tmp\"\n+ format_string = \"%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s] %(message)s\"\n+\n+ # make this name unique per invocation so that each invocation can\n+ # log to its own file. It would be better to include the task_id here\n+ # but that is awkward to wire through at the moment as apps do not\n+ # have access to that execution context.\n+ t = time.time()\n \n- logging.basicConfig(filename='/tmp/bashexec.{0}.log'.format(time.time()), level=logging.DEBUG)\n+ logname = __name__ + \".\" + str(t)\n+ logger = logging.getLogger(logname)\n+ set_file_logger(filename='{0}/bashexec.{1}.log'.format(logbase, t), name=logname, level=logging.DEBUG, format_string=format_string)\n \n func_name = func.__name__\n \n@@ -44,10 +53,10 @@\n except IndexError as e:\n raise pe.AppBadFormatting(\"App formatting failed for app '{}' with IndexError: {}\".format(func_name, e))\n except Exception as e:\n- logging.error(\"Caught exception during formatting of app '{}': {}\".format(func_name, e))\n+ logger.error(\"Caught exception during formatting of app '{}': {}\".format(func_name, e))\n raise e\n \n- logging.debug(\"Executable: %s\", executable)\n+ logger.debug(\"Executable: %s\", executable)\n \n # Updating stdout, stderr if values passed at call time.\n", "issue": "remote_side_bash_executor global logger conflates logs from multiple tasks/sources\n`remote_side_bash_executor` configures and uses the global `logging` logger, rather than one scoped to that function. 
Once it has configured a log file for output, all subsequent global logs from any further `remote_side_bash_executor` invocation in that process, as well as other uses of logging, such as htex `process_worker_pool.py`, end up in earlier configured log files.\r\n\r\nThis results in `/tmp/bashexec` logs containing a confused assortment of logs from different sources, rather than being focuses on a single bash execution.\n", "before_files": [{"content": "import logging\nfrom functools import update_wrapper\nfrom inspect import signature, Parameter\n\nfrom parsl.app.errors import wrap_error\nfrom parsl.app.futures import DataFuture\nfrom parsl.app.app import AppBase\nfrom parsl.dataflow.dflow import DataFlowKernelLoader\n\nlogger = logging.getLogger(__name__)\n\n\ndef remote_side_bash_executor(func, *args, **kwargs):\n \"\"\"Execute the bash app type function and return the command line string.\n\n This string is reformatted with the *args, and **kwargs\n from call time.\n \"\"\"\n import os\n import time\n import subprocess\n import logging\n import parsl.app.errors as pe\n\n logging.basicConfig(filename='/tmp/bashexec.{0}.log'.format(time.time()), level=logging.DEBUG)\n\n func_name = func.__name__\n\n partial_cmdline = None\n\n # Try to run the func to compose the commandline\n try:\n # Execute the func to get the commandline\n partial_cmdline = func(*args, **kwargs)\n # Reformat the commandline with current args and kwargs\n executable = partial_cmdline.format(*args, **kwargs)\n\n except AttributeError as e:\n if partial_cmdline is not None:\n raise pe.AppBadFormatting(\"App formatting failed for app '{}' with AttributeError: {}\".format(func_name, e))\n else:\n raise pe.BashAppNoReturn(\"Bash app '{}' did not return a value, or returned none - with this exception: {}\".format(func_name, e), None)\n\n except IndexError as e:\n raise pe.AppBadFormatting(\"App formatting failed for app '{}' with IndexError: {}\".format(func_name, e))\n except Exception as e:\n logging.error(\"Caught exception during formatting of app '{}': {}\".format(func_name, e))\n raise e\n\n logging.debug(\"Executable: %s\", executable)\n\n # Updating stdout, stderr if values passed at call time.\n\n def open_std_fd(fdname):\n # fdname is 'stdout' or 'stderr'\n stdfspec = kwargs.get(fdname) # spec is str name or tuple (name, mode)\n if stdfspec is None:\n return None\n elif isinstance(stdfspec, str):\n fname = stdfspec\n mode = 'a+'\n elif isinstance(stdfspec, tuple):\n if len(stdfspec) != 2:\n raise pe.BadStdStreamFile(\"std descriptor %s has incorrect tuple length %s\" % (fdname, len(stdfspec)), TypeError('Bad Tuple Length'))\n fname, mode = stdfspec\n else:\n raise pe.BadStdStreamFile(\"std descriptor %s has unexpected type %s\" % (fdname, str(type(stdfspec))), TypeError('Bad Tuple Type'))\n\n try:\n if os.path.dirname(fname):\n os.makedirs(os.path.dirname(fname), exist_ok=True)\n fd = open(fname, mode)\n except Exception as e:\n raise pe.BadStdStreamFile(fname, e)\n return fd\n\n std_out = open_std_fd('stdout')\n std_err = open_std_fd('stderr')\n timeout = kwargs.get('walltime')\n\n if std_err is not None:\n print('--> executable follows <--\\n{}\\n--> end executable <--'.format(executable), file=std_err, flush=True)\n\n returncode = None\n try:\n proc = subprocess.Popen(executable, stdout=std_out, stderr=std_err, shell=True, executable='/bin/bash')\n proc.wait(timeout=timeout)\n returncode = proc.returncode\n\n except subprocess.TimeoutExpired:\n raise pe.AppTimeout(\"[{}] App exceeded walltime: 
{}\".format(func_name, timeout))\n\n except Exception as e:\n raise pe.AppException(\"[{}] App caught exception: {}\".format(func_name, proc.returncode), e)\n\n if returncode != 0:\n raise pe.AppFailure(\"[{}] App failed with exit code: {}\".format(func_name, proc.returncode), proc.returncode)\n\n # TODO : Add support for globs here\n\n missing = []\n for outputfile in kwargs.get('outputs', []):\n fpath = outputfile\n if type(outputfile) != str:\n fpath = outputfile.filepath\n\n if not os.path.exists(fpath):\n missing.extend([outputfile])\n\n if missing:\n raise pe.MissingOutputs(\"[{}] Missing outputs\".format(func_name), missing)\n\n return returncode\n\n\nclass BashApp(AppBase):\n\n def __init__(self, func, data_flow_kernel=None, walltime=60, cache=False, executors='all'):\n super().__init__(func, data_flow_kernel=data_flow_kernel, walltime=60, executors=executors, cache=cache)\n self.kwargs = {}\n\n # We duplicate the extraction of parameter defaults\n # to self.kwargs to ensure availability at point of\n # command string format. Refer: #349\n sig = signature(func)\n\n for s in sig.parameters:\n if sig.parameters[s].default != Parameter.empty:\n self.kwargs[s] = sig.parameters[s].default\n\n def __call__(self, *args, **kwargs):\n \"\"\"Handle the call to a Bash app.\n\n Args:\n - Arbitrary\n\n Kwargs:\n - Arbitrary\n\n Returns:\n If outputs=[...] was a kwarg then:\n App_fut, [Data_Futures...]\n else:\n App_fut\n\n \"\"\"\n # Update kwargs in the app definition with ones passed in at calltime\n self.kwargs.update(kwargs)\n\n if self.data_flow_kernel is None:\n dfk = DataFlowKernelLoader.dfk()\n else:\n dfk = self.data_flow_kernel\n\n app_fut = dfk.submit(wrap_error(update_wrapper(remote_side_bash_executor, self.func)),\n self.func, *args,\n executors=self.executors,\n fn_hash=self.func_hash,\n cache=self.cache,\n **self.kwargs)\n\n out_futs = [DataFuture(app_fut, o, tid=app_fut.tid)\n for o in kwargs.get('outputs', [])]\n app_fut._outputs = out_futs\n\n return app_fut\n", "path": "parsl/app/bash.py"}], "after_files": [{"content": "from functools import update_wrapper\nfrom inspect import signature, Parameter\n\nfrom parsl.app.errors import wrap_error\nfrom parsl.app.futures import DataFuture\nfrom parsl.app.app import AppBase\nfrom parsl.dataflow.dflow import DataFlowKernelLoader\n\n\ndef remote_side_bash_executor(func, *args, **kwargs):\n \"\"\"Execute the bash app type function and return the command line string.\n\n This string is reformatted with the *args, and **kwargs\n from call time.\n \"\"\"\n import os\n import time\n import subprocess\n import logging\n import parsl.app.errors as pe\n from parsl import set_file_logger\n\n logbase = \"/tmp\"\n format_string = \"%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s] %(message)s\"\n\n # make this name unique per invocation so that each invocation can\n # log to its own file. 
It would be better to include the task_id here\n # but that is awkward to wire through at the moment as apps do not\n # have access to that execution context.\n t = time.time()\n\n logname = __name__ + \".\" + str(t)\n logger = logging.getLogger(logname)\n set_file_logger(filename='{0}/bashexec.{1}.log'.format(logbase, t), name=logname, level=logging.DEBUG, format_string=format_string)\n\n func_name = func.__name__\n\n partial_cmdline = None\n\n # Try to run the func to compose the commandline\n try:\n # Execute the func to get the commandline\n partial_cmdline = func(*args, **kwargs)\n # Reformat the commandline with current args and kwargs\n executable = partial_cmdline.format(*args, **kwargs)\n\n except AttributeError as e:\n if partial_cmdline is not None:\n raise pe.AppBadFormatting(\"App formatting failed for app '{}' with AttributeError: {}\".format(func_name, e))\n else:\n raise pe.BashAppNoReturn(\"Bash app '{}' did not return a value, or returned none - with this exception: {}\".format(func_name, e), None)\n\n except IndexError as e:\n raise pe.AppBadFormatting(\"App formatting failed for app '{}' with IndexError: {}\".format(func_name, e))\n except Exception as e:\n logger.error(\"Caught exception during formatting of app '{}': {}\".format(func_name, e))\n raise e\n\n logger.debug(\"Executable: %s\", executable)\n\n # Updating stdout, stderr if values passed at call time.\n\n def open_std_fd(fdname):\n # fdname is 'stdout' or 'stderr'\n stdfspec = kwargs.get(fdname) # spec is str name or tuple (name, mode)\n if stdfspec is None:\n return None\n elif isinstance(stdfspec, str):\n fname = stdfspec\n mode = 'a+'\n elif isinstance(stdfspec, tuple):\n if len(stdfspec) != 2:\n raise pe.BadStdStreamFile(\"std descriptor %s has incorrect tuple length %s\" % (fdname, len(stdfspec)), TypeError('Bad Tuple Length'))\n fname, mode = stdfspec\n else:\n raise pe.BadStdStreamFile(\"std descriptor %s has unexpected type %s\" % (fdname, str(type(stdfspec))), TypeError('Bad Tuple Type'))\n\n try:\n if os.path.dirname(fname):\n os.makedirs(os.path.dirname(fname), exist_ok=True)\n fd = open(fname, mode)\n except Exception as e:\n raise pe.BadStdStreamFile(fname, e)\n return fd\n\n std_out = open_std_fd('stdout')\n std_err = open_std_fd('stderr')\n timeout = kwargs.get('walltime')\n\n if std_err is not None:\n print('--> executable follows <--\\n{}\\n--> end executable <--'.format(executable), file=std_err, flush=True)\n\n returncode = None\n try:\n proc = subprocess.Popen(executable, stdout=std_out, stderr=std_err, shell=True, executable='/bin/bash')\n proc.wait(timeout=timeout)\n returncode = proc.returncode\n\n except subprocess.TimeoutExpired:\n raise pe.AppTimeout(\"[{}] App exceeded walltime: {}\".format(func_name, timeout))\n\n except Exception as e:\n raise pe.AppException(\"[{}] App caught exception: {}\".format(func_name, proc.returncode), e)\n\n if returncode != 0:\n raise pe.AppFailure(\"[{}] App failed with exit code: {}\".format(func_name, proc.returncode), proc.returncode)\n\n # TODO : Add support for globs here\n\n missing = []\n for outputfile in kwargs.get('outputs', []):\n fpath = outputfile\n if type(outputfile) != str:\n fpath = outputfile.filepath\n\n if not os.path.exists(fpath):\n missing.extend([outputfile])\n\n if missing:\n raise pe.MissingOutputs(\"[{}] Missing outputs\".format(func_name), missing)\n\n return returncode\n\n\nclass BashApp(AppBase):\n\n def __init__(self, func, data_flow_kernel=None, walltime=60, cache=False, executors='all'):\n super().__init__(func, 
data_flow_kernel=data_flow_kernel, walltime=60, executors=executors, cache=cache)\n self.kwargs = {}\n\n # We duplicate the extraction of parameter defaults\n # to self.kwargs to ensure availability at point of\n # command string format. Refer: #349\n sig = signature(func)\n\n for s in sig.parameters:\n if sig.parameters[s].default != Parameter.empty:\n self.kwargs[s] = sig.parameters[s].default\n\n def __call__(self, *args, **kwargs):\n \"\"\"Handle the call to a Bash app.\n\n Args:\n - Arbitrary\n\n Kwargs:\n - Arbitrary\n\n Returns:\n If outputs=[...] was a kwarg then:\n App_fut, [Data_Futures...]\n else:\n App_fut\n\n \"\"\"\n # Update kwargs in the app definition with ones passed in at calltime\n self.kwargs.update(kwargs)\n\n if self.data_flow_kernel is None:\n dfk = DataFlowKernelLoader.dfk()\n else:\n dfk = self.data_flow_kernel\n\n app_fut = dfk.submit(wrap_error(update_wrapper(remote_side_bash_executor, self.func)),\n self.func, *args,\n executors=self.executors,\n fn_hash=self.func_hash,\n cache=self.cache,\n **self.kwargs)\n\n out_futs = [DataFuture(app_fut, o, tid=app_fut.tid)\n for o in kwargs.get('outputs', [])]\n app_fut._outputs = out_futs\n\n return app_fut\n", "path": "parsl/app/bash.py"}]}
| 2,114 | 509 |
gh_patches_debug_18875
|
rasdani/github-patches
|
git_diff
|
mampfes__hacs_waste_collection_schedule-1749
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Useless Request of a streetlist from muellmax.de
### I Have A Problem With:
A specific source
### What's Your Problem
We are the developers of müllmax. We can accept automated daily iCal requests, currently more than 500 a day, but it is annoying when useless data load is produced. To select a street name, an empty form field mm_frm_str_name is sent, which is the request for a complete list of street names. This can cause a data load of 100.000 kb or more and is completely useless. Instead of an empty field, the requested street name should be submitted. The second call with the requested street name in form field mm_frm_str_sel is then unnecessary and should be omitted.
### Source (if relevant)
muellmax_de.py
### Logs
```Shell
if self._mm_frm_str_sel is not None:
# show street selection page
args = {
"mm_ses": mm_ses.value,
"xxx": 1,
"mm_frm_str_name": "",
"mm_aus_str_txt_submit": "suchen",
}
r = requests.post(url, data=args)
mm_ses.feed(r.text)
# select street
args = {
"mm_ses": mm_ses.value,
"xxx": 1,
"mm_frm_str_sel": self._mm_frm_str_sel,
"mm_aus_str_sel_submit": "weiter",
}
r = requests.post(url, data=args)
mm_ses.feed(r.text)
```
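For comparison, a minimal sketch of the single-request flow being asked for (assuming the street form accepts the name directly together with the `suchen` submit button, as in the fix proposed below):

```python
# Hypothetical single POST: select the street without first requesting
# the full street list via an empty mm_frm_str_name field.
if self._mm_frm_str_sel is not None:
    args = {
        "mm_ses": mm_ses.value,
        "xxx": 1,
        "mm_frm_str_sel": self._mm_frm_str_sel,
        "mm_aus_str_sel_submit": "suchen",
    }
    r = requests.post(url, data=args)
    mm_ses.feed(r.text)
```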
### Relevant Configuration
```YAML
We do not have hacs_waste_collection_schedule installed.
```
### Checklist Source Error
- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [X] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/muellmax_de.py`
Content:
```
1 from html.parser import HTMLParser
2
3 import requests
4 from waste_collection_schedule import Collection # type: ignore[attr-defined]
5 from waste_collection_schedule.service.ICS import ICS
6 from waste_collection_schedule.service.MuellmaxDe import SERVICE_MAP
7
8 TITLE = "Müllmax"
9 DESCRIPTION = "Source for Müllmax waste collection."
10 URL = "https://www.muellmax.de"
11
12
13 def EXTRA_INFO():
14 return [{"title": s["title"], "url": s["url"]} for s in SERVICE_MAP]
15
16
17 TEST_CASES = {
18 "Rhein-Sieg-Kreis, Alfter": {
19 "service": "Rsa",
20 "mm_frm_ort_sel": "Alfter",
21 "mm_frm_str_sel": "Ahrweg (105-Ende/94-Ende)",
22 },
23 "Münster, Achatiusweg": {"service": "Awm", "mm_frm_str_sel": "Achatiusweg"},
24 }
25
26
27 # Parser for HTML checkbox
28 class InputCheckboxParser(HTMLParser):
29 def __init__(self, startswith):
30 super().__init__()
31 self._startswith = startswith
32 self._value = {}
33
34 @property
35 def value(self):
36 return self._value
37
38 def handle_starttag(self, tag, attrs):
39 if tag == "input":
40 d = dict(attrs)
41 if d.get("name", "").startswith(self._startswith):
42 self._value[d["name"]] = d.get("value")
43
44
45 # Parser for HTML input (hidden) text
46 class InputTextParser(HTMLParser):
47 def __init__(self, **identifiers):
48 super().__init__()
49 self._identifiers = identifiers
50 self._value = None
51
52 @property
53 def value(self):
54 return self._value
55
56 def handle_starttag(self, tag, attrs):
57 if tag == "input":
58 d = dict(attrs)
59 for key, value in self._identifiers.items():
60 if key not in d or d[key] != value:
61 return
62 self._value = d.get("value")
63
64
65 class Source:
66 def __init__(
67 self, service, mm_frm_ort_sel=None, mm_frm_str_sel=None, mm_frm_hnr_sel=None
68 ):
69 self._service = service
70 self._mm_frm_ort_sel = mm_frm_ort_sel
71 self._mm_frm_str_sel = mm_frm_str_sel
72 self._mm_frm_hnr_sel = mm_frm_hnr_sel
73 self._ics = ICS()
74
75 def fetch(self):
76 mm_ses = InputTextParser(name="mm_ses")
77
78 url = f"https://www.muellmax.de/abfallkalender/{self._service.lower()}/res/{self._service}Start.php"
79 r = requests.get(url)
80 mm_ses.feed(r.text)
81
82 # select "Abfuhrtermine", returns ort or an empty street search field
83 args = {"mm_ses": mm_ses.value, "mm_aus_ort.x": 0, "mm_aus_ort.x": 0}
84 r = requests.post(url, data=args)
85 mm_ses.feed(r.text)
86
87 if self._mm_frm_ort_sel is not None:
88 # select city
89 args = {
90 "mm_ses": mm_ses.value,
91 "xxx": 1,
92 "mm_frm_ort_sel": self._mm_frm_ort_sel,
93 "mm_aus_ort_submit": "weiter",
94 }
95 r = requests.post(url, data=args)
96 mm_ses.feed(r.text)
97
98 if self._mm_frm_str_sel is not None:
99 # show street selection page
100 args = {
101 "mm_ses": mm_ses.value,
102 "xxx": 1,
103 "mm_frm_str_name": "",
104 "mm_aus_str_txt_submit": "suchen",
105 }
106 r = requests.post(url, data=args)
107 mm_ses.feed(r.text)
108
109 # select street
110 args = {
111 "mm_ses": mm_ses.value,
112 "xxx": 1,
113 "mm_frm_str_sel": self._mm_frm_str_sel,
114 "mm_aus_str_sel_submit": "weiter",
115 }
116 r = requests.post(url, data=args)
117 mm_ses.feed(r.text)
118
119 if self._mm_frm_hnr_sel is not None:
120 # select house number
121 args = {
122 "mm_ses": mm_ses.value,
123 "xxx": 1,
124 "mm_frm_hnr_sel": self._mm_frm_hnr_sel,
125 "mm_aus_hnr_sel_submit": "weiter",
126 }
127 r = requests.post(url, data=args)
128 mm_ses.feed(r.text)
129
130 # select to get ical
131 args = {"mm_ses": mm_ses.value, "xxx": 1, "mm_ica_auswahl": "iCalendar-Datei"}
132 r = requests.post(url, data=args)
133 mm_ses.feed(r.text)
134
135 mm_frm_fra = InputCheckboxParser(startswith="mm_frm_fra")
136 mm_frm_fra.feed(r.text)
137
138 # get ics file
139 args = {"mm_ses": mm_ses.value, "xxx": 1, "mm_frm_type": "termine"}
140 args.update(mm_frm_fra.value)
141 args.update({"mm_ica_gen": "iCalendar-Datei laden"})
142 r = requests.post(url, data=args)
143 mm_ses.feed(r.text)
144
145 entries = []
146
147 # parse ics file
148 dates = self._ics.convert(r.text)
149
150 entries = []
151 for d in dates:
152 entries.append(Collection(d[0], d[1]))
153 return entries
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/muellmax_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/muellmax_de.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/muellmax_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/muellmax_de.py
@@ -96,22 +96,12 @@
mm_ses.feed(r.text)
if self._mm_frm_str_sel is not None:
- # show street selection page
- args = {
- "mm_ses": mm_ses.value,
- "xxx": 1,
- "mm_frm_str_name": "",
- "mm_aus_str_txt_submit": "suchen",
- }
- r = requests.post(url, data=args)
- mm_ses.feed(r.text)
-
# select street
args = {
"mm_ses": mm_ses.value,
"xxx": 1,
"mm_frm_str_sel": self._mm_frm_str_sel,
- "mm_aus_str_sel_submit": "weiter",
+ "mm_aus_str_sel_submit": "suchen",
}
r = requests.post(url, data=args)
mm_ses.feed(r.text)
|
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/muellmax_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/muellmax_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/muellmax_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/muellmax_de.py\n@@ -96,22 +96,12 @@\n mm_ses.feed(r.text)\n \n if self._mm_frm_str_sel is not None:\n- # show street selection page\n- args = {\n- \"mm_ses\": mm_ses.value,\n- \"xxx\": 1,\n- \"mm_frm_str_name\": \"\",\n- \"mm_aus_str_txt_submit\": \"suchen\",\n- }\n- r = requests.post(url, data=args)\n- mm_ses.feed(r.text)\n-\n # select street\n args = {\n \"mm_ses\": mm_ses.value,\n \"xxx\": 1,\n \"mm_frm_str_sel\": self._mm_frm_str_sel,\n- \"mm_aus_str_sel_submit\": \"weiter\",\n+ \"mm_aus_str_sel_submit\": \"suchen\",\n }\n r = requests.post(url, data=args)\n mm_ses.feed(r.text)\n", "issue": "Useless Request of a streetlist from muellmax.de\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nWe are the developers of m\u00fcllmax. We can accept automated daily ical-requests, currently more than 500 a day. But it is annoying, when useless dataload is produced. To select a street name, an empty formfield mm_frm_str_name is sent, which is the request for a complete list of streetnames. This can cause a dataload of 100.000 kb or more and is comletely useless. Instead of an empty field the requested streetname should be submitted. The second call with the requested streetname in formfield mm_frm_str_sel is unnecessary and should be omitted.\n\n### Source (if relevant)\n\nmuellmax_de.py \n\n### Logs\n\n```Shell\nif self._mm_frm_str_sel is not None:\r\n # show street selection page\r\n args = {\r\n \"mm_ses\": mm_ses.value,\r\n \"xxx\": 1,\r\n \"mm_frm_str_name\": \"\",\r\n \"mm_aus_str_txt_submit\": \"suchen\",\r\n }\r\n r = requests.post(url, data=args)\r\n mm_ses.feed(r.text)\r\n\r\n # select street\r\n args = {\r\n \"mm_ses\": mm_ses.value,\r\n \"xxx\": 1,\r\n \"mm_frm_str_sel\": self._mm_frm_str_sel,\r\n \"mm_aus_str_sel_submit\": \"weiter\",\r\n }\r\n r = requests.post(url, data=args)\r\n mm_ses.feed(r.text)\n```\n\n\n### Relevant Configuration\n\n```YAML\nWe do not have hacs_waste_collection_schedule installed.\n```\n\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "from html.parser import HTMLParser\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS 
import ICS\nfrom waste_collection_schedule.service.MuellmaxDe import SERVICE_MAP\n\nTITLE = \"M\u00fcllmax\"\nDESCRIPTION = \"Source for M\u00fcllmax waste collection.\"\nURL = \"https://www.muellmax.de\"\n\n\ndef EXTRA_INFO():\n return [{\"title\": s[\"title\"], \"url\": s[\"url\"]} for s in SERVICE_MAP]\n\n\nTEST_CASES = {\n \"Rhein-Sieg-Kreis, Alfter\": {\n \"service\": \"Rsa\",\n \"mm_frm_ort_sel\": \"Alfter\",\n \"mm_frm_str_sel\": \"Ahrweg (105-Ende/94-Ende)\",\n },\n \"M\u00fcnster, Achatiusweg\": {\"service\": \"Awm\", \"mm_frm_str_sel\": \"Achatiusweg\"},\n}\n\n\n# Parser for HTML checkbox\nclass InputCheckboxParser(HTMLParser):\n def __init__(self, startswith):\n super().__init__()\n self._startswith = startswith\n self._value = {}\n\n @property\n def value(self):\n return self._value\n\n def handle_starttag(self, tag, attrs):\n if tag == \"input\":\n d = dict(attrs)\n if d.get(\"name\", \"\").startswith(self._startswith):\n self._value[d[\"name\"]] = d.get(\"value\")\n\n\n# Parser for HTML input (hidden) text\nclass InputTextParser(HTMLParser):\n def __init__(self, **identifiers):\n super().__init__()\n self._identifiers = identifiers\n self._value = None\n\n @property\n def value(self):\n return self._value\n\n def handle_starttag(self, tag, attrs):\n if tag == \"input\":\n d = dict(attrs)\n for key, value in self._identifiers.items():\n if key not in d or d[key] != value:\n return\n self._value = d.get(\"value\")\n\n\nclass Source:\n def __init__(\n self, service, mm_frm_ort_sel=None, mm_frm_str_sel=None, mm_frm_hnr_sel=None\n ):\n self._service = service\n self._mm_frm_ort_sel = mm_frm_ort_sel\n self._mm_frm_str_sel = mm_frm_str_sel\n self._mm_frm_hnr_sel = mm_frm_hnr_sel\n self._ics = ICS()\n\n def fetch(self):\n mm_ses = InputTextParser(name=\"mm_ses\")\n\n url = f\"https://www.muellmax.de/abfallkalender/{self._service.lower()}/res/{self._service}Start.php\"\n r = requests.get(url)\n mm_ses.feed(r.text)\n\n # select \"Abfuhrtermine\", returns ort or an empty street search field\n args = {\"mm_ses\": mm_ses.value, \"mm_aus_ort.x\": 0, \"mm_aus_ort.x\": 0}\n r = requests.post(url, data=args)\n mm_ses.feed(r.text)\n\n if self._mm_frm_ort_sel is not None:\n # select city\n args = {\n \"mm_ses\": mm_ses.value,\n \"xxx\": 1,\n \"mm_frm_ort_sel\": self._mm_frm_ort_sel,\n \"mm_aus_ort_submit\": \"weiter\",\n }\n r = requests.post(url, data=args)\n mm_ses.feed(r.text)\n\n if self._mm_frm_str_sel is not None:\n # show street selection page\n args = {\n \"mm_ses\": mm_ses.value,\n \"xxx\": 1,\n \"mm_frm_str_name\": \"\",\n \"mm_aus_str_txt_submit\": \"suchen\",\n }\n r = requests.post(url, data=args)\n mm_ses.feed(r.text)\n\n # select street\n args = {\n \"mm_ses\": mm_ses.value,\n \"xxx\": 1,\n \"mm_frm_str_sel\": self._mm_frm_str_sel,\n \"mm_aus_str_sel_submit\": \"weiter\",\n }\n r = requests.post(url, data=args)\n mm_ses.feed(r.text)\n\n if self._mm_frm_hnr_sel is not None:\n # select house number\n args = {\n \"mm_ses\": mm_ses.value,\n \"xxx\": 1,\n \"mm_frm_hnr_sel\": self._mm_frm_hnr_sel,\n \"mm_aus_hnr_sel_submit\": \"weiter\",\n }\n r = requests.post(url, data=args)\n mm_ses.feed(r.text)\n\n # select to get ical\n args = {\"mm_ses\": mm_ses.value, \"xxx\": 1, \"mm_ica_auswahl\": \"iCalendar-Datei\"}\n r = requests.post(url, data=args)\n mm_ses.feed(r.text)\n\n mm_frm_fra = InputCheckboxParser(startswith=\"mm_frm_fra\")\n mm_frm_fra.feed(r.text)\n\n # get ics file\n args = {\"mm_ses\": mm_ses.value, \"xxx\": 1, \"mm_frm_type\": \"termine\"}\n args.update(mm_frm_fra.value)\n 
args.update({\"mm_ica_gen\": \"iCalendar-Datei laden\"})\n r = requests.post(url, data=args)\n mm_ses.feed(r.text)\n\n entries = []\n\n # parse ics file\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/muellmax_de.py"}], "after_files": [{"content": "from html.parser import HTMLParser\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\nfrom waste_collection_schedule.service.MuellmaxDe import SERVICE_MAP\n\nTITLE = \"M\u00fcllmax\"\nDESCRIPTION = \"Source for M\u00fcllmax waste collection.\"\nURL = \"https://www.muellmax.de\"\n\n\ndef EXTRA_INFO():\n return [{\"title\": s[\"title\"], \"url\": s[\"url\"]} for s in SERVICE_MAP]\n\n\nTEST_CASES = {\n \"Rhein-Sieg-Kreis, Alfter\": {\n \"service\": \"Rsa\",\n \"mm_frm_ort_sel\": \"Alfter\",\n \"mm_frm_str_sel\": \"Ahrweg (105-Ende/94-Ende)\",\n },\n \"M\u00fcnster, Achatiusweg\": {\"service\": \"Awm\", \"mm_frm_str_sel\": \"Achatiusweg\"},\n}\n\n\n# Parser for HTML checkbox\nclass InputCheckboxParser(HTMLParser):\n def __init__(self, startswith):\n super().__init__()\n self._startswith = startswith\n self._value = {}\n\n @property\n def value(self):\n return self._value\n\n def handle_starttag(self, tag, attrs):\n if tag == \"input\":\n d = dict(attrs)\n if d.get(\"name\", \"\").startswith(self._startswith):\n self._value[d[\"name\"]] = d.get(\"value\")\n\n\n# Parser for HTML input (hidden) text\nclass InputTextParser(HTMLParser):\n def __init__(self, **identifiers):\n super().__init__()\n self._identifiers = identifiers\n self._value = None\n\n @property\n def value(self):\n return self._value\n\n def handle_starttag(self, tag, attrs):\n if tag == \"input\":\n d = dict(attrs)\n for key, value in self._identifiers.items():\n if key not in d or d[key] != value:\n return\n self._value = d.get(\"value\")\n\n\nclass Source:\n def __init__(\n self, service, mm_frm_ort_sel=None, mm_frm_str_sel=None, mm_frm_hnr_sel=None\n ):\n self._service = service\n self._mm_frm_ort_sel = mm_frm_ort_sel\n self._mm_frm_str_sel = mm_frm_str_sel\n self._mm_frm_hnr_sel = mm_frm_hnr_sel\n self._ics = ICS()\n\n def fetch(self):\n mm_ses = InputTextParser(name=\"mm_ses\")\n\n url = f\"https://www.muellmax.de/abfallkalender/{self._service.lower()}/res/{self._service}Start.php\"\n r = requests.get(url)\n mm_ses.feed(r.text)\n\n # select \"Abfuhrtermine\", returns ort or an empty street search field\n args = {\"mm_ses\": mm_ses.value, \"mm_aus_ort.x\": 0, \"mm_aus_ort.x\": 0}\n r = requests.post(url, data=args)\n mm_ses.feed(r.text)\n\n if self._mm_frm_ort_sel is not None:\n # select city\n args = {\n \"mm_ses\": mm_ses.value,\n \"xxx\": 1,\n \"mm_frm_ort_sel\": self._mm_frm_ort_sel,\n \"mm_aus_ort_submit\": \"weiter\",\n }\n r = requests.post(url, data=args)\n mm_ses.feed(r.text)\n\n if self._mm_frm_str_sel is not None:\n # select street\n args = {\n \"mm_ses\": mm_ses.value,\n \"xxx\": 1,\n \"mm_frm_str_sel\": self._mm_frm_str_sel,\n \"mm_aus_str_sel_submit\": \"suchen\",\n }\n r = requests.post(url, data=args)\n mm_ses.feed(r.text)\n\n if self._mm_frm_hnr_sel is not None:\n # select house number\n args = {\n \"mm_ses\": mm_ses.value,\n \"xxx\": 1,\n \"mm_frm_hnr_sel\": self._mm_frm_hnr_sel,\n \"mm_aus_hnr_sel_submit\": \"weiter\",\n }\n r = requests.post(url, data=args)\n mm_ses.feed(r.text)\n\n # select to 
get ical\n args = {\"mm_ses\": mm_ses.value, \"xxx\": 1, \"mm_ica_auswahl\": \"iCalendar-Datei\"}\n r = requests.post(url, data=args)\n mm_ses.feed(r.text)\n\n mm_frm_fra = InputCheckboxParser(startswith=\"mm_frm_fra\")\n mm_frm_fra.feed(r.text)\n\n # get ics file\n args = {\"mm_ses\": mm_ses.value, \"xxx\": 1, \"mm_frm_type\": \"termine\"}\n args.update(mm_frm_fra.value)\n args.update({\"mm_ica_gen\": \"iCalendar-Datei laden\"})\n r = requests.post(url, data=args)\n mm_ses.feed(r.text)\n\n entries = []\n\n # parse ics file\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/muellmax_de.py"}]}
| 2,434 | 280 |
gh_patches_debug_13448
|
rasdani/github-patches
|
git_diff
|
huggingface__diffusers-1932
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Error] got an unexpected keyword argument `eta`
### Describe the bug
When I try to run sampling with the DDIM pipeline, an error occurs:
```bash
python utils/ddim.py
0%| | 0/50 [00:00<?, ?it/s]
Traceback (most recent call last):
File "utils/ddim.py", line 10, in <module>
image = ddim(num_inference_steps=50).images[0]
File "/home/sr5/se91.kim/.venv/diffusers/lib/python3.8/site-packages/torch/autograd/grad_mode.py", line 28, in decorate_context
return func(*args, **kwargs)
File "/home/sr5/se91.kim/.venv/diffusers/lib/python3.8/site-packages/diffusers/pipelines/ddim/pipeline_ddim.py", line 129, in __call__
image = self.scheduler.step(
File "/home/sr5/se91.kim/.venv/diffusers/lib/python3.8/site-packages/diffusers/schedulers/scheduling_ddpm.py", line 259, in step
predict_epsilon = deprecate("predict_epsilon", "0.12.0", message, take_from=kwargs)
File "/home/sr5/se91.kim/.venv/diffusers/lib/python3.8/site-packages/diffusers/utils/deprecation_utils.py", line 43, in deprecate
raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")
TypeError: step in /home/sr5/se91.kim/.venv/diffusers/lib/python3.8/site-packages/diffusers/schedulers/scheduling_ddpm.py line 258 got an unexpected keyword argument `eta`
```
Is there anything I can do?
Thanks!
### Reproduction
I used the same code as the typical example:
```python
from diffusers import DDIMPipeline
model_id = "/home/sr5/se91.kim/AMP/Diffusers/models/google/ddpm-cifar10-32"
# load model and scheduler
ddim = DDIMPipeline.from_pretrained(model_id)
# run pipeline in inference (sample random noise and denoise)
image = ddim(num_inference_steps=50).images[0]
# save image
image.save("ddim_generated_image.png")
```
### Logs
_No response_
### System Info
I saw a closed issue (https://github.com/huggingface/diffusers/issues/170), but the diffusers package in my virtual env is already the latest version (0.12.dev):
```bash
$ pip list
Package Version
------------------ ------------
accelerate 0.15.0
certifi 2020.12.5
chardet 4.0.0
diffusers 0.12.0.dev0
filelock 3.0.12
fsspec 2022.11.0
huggingface-hub 0.11.1
idna 2.10
importlib-metadata 3.7.3
numpy 1.20.1
packaging 20.9
Pillow 8.1.2
pip 21.0.1
psutil 5.8.0
pyarrow 10.0.1
pyparsing 2.4.7
PyYAML 5.4.1
regex 2021.3.17
requests 2.25.1
setuptools 54.1.2
torch 1.9.1+cu111
torchaudio 0.9.1
torchvision 0.10.1+cu111
tqdm 4.64.1
typing-extensions 3.7.4.3
urllib3 1.26.4
wheel 0.36.2
zipp 3.4.1
```
I tried two versions of diffusers, 0.12.dev and 0.11.1, and both give the same error message.
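A possible workaround, shown here only as an untested sketch, is to rebuild the scheduler as a `DDIMScheduler` before sampling, since the traceback shows the pipeline stepping through `scheduling_ddpm.py`, whose `step` does not accept `eta`:

```python
from diffusers import DDIMPipeline, DDIMScheduler

model_id = "google/ddpm-cifar10-32"  # or the local checkpoint path used above

ddim = DDIMPipeline.from_pretrained(model_id)
# Swap the checkpoint's DDPM scheduler for a DDIM scheduler built from the
# same config, so the eta argument passed by the pipeline is understood.
ddim.scheduler = DDIMScheduler.from_config(ddim.scheduler.config)
image = ddim(num_inference_steps=50).images[0]
```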
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/diffusers/pipelines/ddim/pipeline_ddim.py`
Content:
```
1 # Copyright 2022 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import List, Optional, Tuple, Union
16
17 import torch
18
19 from ...utils import deprecate, randn_tensor
20 from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
21
22
23 class DDIMPipeline(DiffusionPipeline):
24 r"""
25 This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
26 library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
27
28 Parameters:
29 unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
30 scheduler ([`SchedulerMixin`]):
31 A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
32 [`DDPMScheduler`], or [`DDIMScheduler`].
33 """
34
35 def __init__(self, unet, scheduler):
36 super().__init__()
37 self.register_modules(unet=unet, scheduler=scheduler)
38
39 @torch.no_grad()
40 def __call__(
41 self,
42 batch_size: int = 1,
43 generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
44 eta: float = 0.0,
45 num_inference_steps: int = 50,
46 use_clipped_model_output: Optional[bool] = None,
47 output_type: Optional[str] = "pil",
48 return_dict: bool = True,
49 ) -> Union[ImagePipelineOutput, Tuple]:
50 r"""
51 Args:
52 batch_size (`int`, *optional*, defaults to 1):
53 The number of images to generate.
54 generator (`torch.Generator`, *optional*):
55 One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
56 to make generation deterministic.
57 eta (`float`, *optional*, defaults to 0.0):
58 The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
59 num_inference_steps (`int`, *optional*, defaults to 50):
60 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
61 expense of slower inference.
62 use_clipped_model_output (`bool`, *optional*, defaults to `None`):
63 if `True` or `False`, see documentation for `DDIMScheduler.step`. If `None`, nothing is passed
64 downstream to the scheduler. So use `None` for schedulers which don't support this argument.
65 output_type (`str`, *optional*, defaults to `"pil"`):
66 The output format of the generate image. Choose between
67 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
68 return_dict (`bool`, *optional*, defaults to `True`):
69 Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
70
71 Returns:
72 [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is
73 True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images.
74 """
75
76 if (
77 generator is not None
78 and isinstance(generator, torch.Generator)
79 and generator.device.type != self.device.type
80 and self.device.type != "mps"
81 ):
82 message = (
83 f"The `generator` device is `{generator.device}` and does not match the pipeline "
84 f"device `{self.device}`, so the `generator` will be ignored. "
85 f'Please use `generator=torch.Generator(device="{self.device}")` instead.'
86 )
87 deprecate(
88 "generator.device == 'cpu'",
89 "0.13.0",
90 message,
91 )
92 generator = None
93
94 # Sample gaussian noise to begin loop
95 if isinstance(self.unet.sample_size, int):
96 image_shape = (batch_size, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size)
97 else:
98 image_shape = (batch_size, self.unet.in_channels, *self.unet.sample_size)
99
100 if isinstance(generator, list) and len(generator) != batch_size:
101 raise ValueError(
102 f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
103 f" size of {batch_size}. Make sure the batch size matches the length of the generators."
104 )
105
106 image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
107
108 # set step values
109 self.scheduler.set_timesteps(num_inference_steps)
110
111 for t in self.progress_bar(self.scheduler.timesteps):
112 # 1. predict noise model_output
113 model_output = self.unet(image, t).sample
114
115 # 2. predict previous mean of image x_t-1 and add variance depending on eta
116 # eta corresponds to η in paper and should be between [0, 1]
117 # do x_t -> x_t-1
118 image = self.scheduler.step(
119 model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
120 ).prev_sample
121
122 image = (image / 2 + 0.5).clamp(0, 1)
123 image = image.cpu().permute(0, 2, 3, 1).numpy()
124 if output_type == "pil":
125 image = self.numpy_to_pil(image)
126
127 if not return_dict:
128 return (image,)
129
130 return ImagePipelineOutput(images=image)
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/diffusers/pipelines/ddim/pipeline_ddim.py b/src/diffusers/pipelines/ddim/pipeline_ddim.py
--- a/src/diffusers/pipelines/ddim/pipeline_ddim.py
+++ b/src/diffusers/pipelines/ddim/pipeline_ddim.py
@@ -16,6 +16,7 @@
import torch
+from ...schedulers import DDIMScheduler
from ...utils import deprecate, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
@@ -34,6 +35,10 @@
def __init__(self, unet, scheduler):
super().__init__()
+
+ # make sure scheduler can always be converted to DDIM
+ scheduler = DDIMScheduler.from_config(scheduler.config)
+
self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
|
{"golden_diff": "diff --git a/src/diffusers/pipelines/ddim/pipeline_ddim.py b/src/diffusers/pipelines/ddim/pipeline_ddim.py\n--- a/src/diffusers/pipelines/ddim/pipeline_ddim.py\n+++ b/src/diffusers/pipelines/ddim/pipeline_ddim.py\n@@ -16,6 +16,7 @@\n \n import torch\n \n+from ...schedulers import DDIMScheduler\n from ...utils import deprecate, randn_tensor\n from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput\n \n@@ -34,6 +35,10 @@\n \n def __init__(self, unet, scheduler):\n super().__init__()\n+\n+ # make sure scheduler can always be converted to DDIM\n+ scheduler = DDIMScheduler.from_config(scheduler.config)\n+\n self.register_modules(unet=unet, scheduler=scheduler)\n \n @torch.no_grad()\n", "issue": "[Error] got an unexpected keyword argument `eta`\n### Describe the bug\n\nWhen I try to make a sampling using DDIM pipeline, an error occurs\r\n```bash\r\npython utils/ddim.py\r\n 0%| | 0/50 [00:00<?, ?it/s]\r\nTraceback (most recent call last):\r\n File \"utils/ddim.py\", line 10, in <module>\r\n image = ddim(num_inference_steps=50).images[0]\r\n File \"/home/sr5/se91.kim/.venv/diffusers/lib/python3.8/site-packages/torch/autograd/grad_mode.py\", line 28, in decorate_context\r\n return func(*args, **kwargs)\r\n File \"/home/sr5/se91.kim/.venv/diffusers/lib/python3.8/site-packages/diffusers/pipelines/ddim/pipeline_ddim.py\", line 129, in __call__\r\n image = self.scheduler.step(\r\n File \"/home/sr5/se91.kim/.venv/diffusers/lib/python3.8/site-packages/diffusers/schedulers/scheduling_ddpm.py\", line 259, in step\r\n predict_epsilon = deprecate(\"predict_epsilon\", \"0.12.0\", message, take_from=kwargs)\r\n File \"/home/sr5/se91.kim/.venv/diffusers/lib/python3.8/site-packages/diffusers/utils/deprecation_utils.py\", line 43, in deprecate\r\n raise TypeError(f\"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`\")\r\nTypeError: step in /home/sr5/se91.kim/.venv/diffusers/lib/python3.8/site-packages/diffusers/schedulers/scheduling_ddpm.py line 258 got an unexpected keyword argument `eta`\r\n```\r\n\r\nIs there anything I can do? \r\n\r\nThnaks!\n\n### Reproduction\n\nI used code lines the same as the typical example code:\r\n```python\r\nfrom diffusers import DDIMPipeline\r\n\r\nmodel_id = \"/home/sr5/se91.kim/AMP/Diffusers/models/google/ddpm-cifar10-32\"\r\n\r\n# load model and scheduler\r\nddim = DDIMPipeline.from_pretrained(model_id)\r\n\r\n# run pipeline in inference (sample random noise and denoise)\r\nimage = ddim(num_inference_steps=50).images[0]\r\n\r\n# save image\r\nimage.save(\"ddim_generated_image.png\")\r\n```\n\n### Logs\n\n_No response_\n\n### System Info\n\nI saw a closed issue (https://github.com/huggingface/diffusers/issues/170) but the diffusers in my virtual env. 
is already of the latest version (0.12.dev):\r\n```bash\r\n$ pip list\r\nPackage Version\r\n------------------ ------------\r\naccelerate 0.15.0\r\ncertifi 2020.12.5\r\nchardet 4.0.0\r\ndiffusers 0.12.0.dev0\r\nfilelock 3.0.12\r\nfsspec 2022.11.0\r\nhuggingface-hub 0.11.1\r\nidna 2.10\r\nimportlib-metadata 3.7.3\r\nnumpy 1.20.1\r\npackaging 20.9\r\nPillow 8.1.2\r\npip 21.0.1\r\npsutil 5.8.0\r\npyarrow 10.0.1\r\npyparsing 2.4.7\r\nPyYAML 5.4.1\r\nregex 2021.3.17\r\nrequests 2.25.1\r\nsetuptools 54.1.2\r\ntorch 1.9.1+cu111\r\ntorchaudio 0.9.1\r\ntorchvision 0.10.1+cu111\r\ntqdm 4.64.1\r\ntyping-extensions 3.7.4.3\r\nurllib3 1.26.4\r\nwheel 0.36.2\r\nzipp 3.4.1\r\n```\r\nI tried under two versions of diffusers; 0.12.dev and 0.11.1, and both give the same error message.\n", "before_files": [{"content": "# Copyright 2022 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\n\nfrom ...utils import deprecate, randn_tensor\nfrom ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput\n\n\nclass DDIMPipeline(DiffusionPipeline):\n r\"\"\"\n This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the\n library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)\n\n Parameters:\n unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.\n scheduler ([`SchedulerMixin`]):\n A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of\n [`DDPMScheduler`], or [`DDIMScheduler`].\n \"\"\"\n\n def __init__(self, unet, scheduler):\n super().__init__()\n self.register_modules(unet=unet, scheduler=scheduler)\n\n @torch.no_grad()\n def __call__(\n self,\n batch_size: int = 1,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n eta: float = 0.0,\n num_inference_steps: int = 50,\n use_clipped_model_output: Optional[bool] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n ) -> Union[ImagePipelineOutput, Tuple]:\n r\"\"\"\n Args:\n batch_size (`int`, *optional*, defaults to 1):\n The number of images to generate.\n generator (`torch.Generator`, *optional*):\n One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)\n to make generation deterministic.\n eta (`float`, *optional*, defaults to 0.0):\n The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n use_clipped_model_output (`bool`, *optional*, defaults to `None`):\n if `True` or `False`, see documentation for `DDIMScheduler.step`. If `None`, nothing is passed\n downstream to the scheduler. 
So use `None` for schedulers which don't support this argument.\n output_type (`str`, *optional*, defaults to `\"pil\"`):\n The output format of the generate image. Choose between\n [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.\n\n Returns:\n [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is\n True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images.\n \"\"\"\n\n if (\n generator is not None\n and isinstance(generator, torch.Generator)\n and generator.device.type != self.device.type\n and self.device.type != \"mps\"\n ):\n message = (\n f\"The `generator` device is `{generator.device}` and does not match the pipeline \"\n f\"device `{self.device}`, so the `generator` will be ignored. \"\n f'Please use `generator=torch.Generator(device=\"{self.device}\")` instead.'\n )\n deprecate(\n \"generator.device == 'cpu'\",\n \"0.13.0\",\n message,\n )\n generator = None\n\n # Sample gaussian noise to begin loop\n if isinstance(self.unet.sample_size, int):\n image_shape = (batch_size, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size)\n else:\n image_shape = (batch_size, self.unet.in_channels, *self.unet.sample_size)\n\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. Make sure the batch size matches the length of the generators.\"\n )\n\n image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)\n\n # set step values\n self.scheduler.set_timesteps(num_inference_steps)\n\n for t in self.progress_bar(self.scheduler.timesteps):\n # 1. predict noise model_output\n model_output = self.unet(image, t).sample\n\n # 2. predict previous mean of image x_t-1 and add variance depending on eta\n # eta corresponds to \u03b7 in paper and should be between [0, 1]\n # do x_t -> x_t-1\n image = self.scheduler.step(\n model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator\n ).prev_sample\n\n image = (image / 2 + 0.5).clamp(0, 1)\n image = image.cpu().permute(0, 2, 3, 1).numpy()\n if output_type == \"pil\":\n image = self.numpy_to_pil(image)\n\n if not return_dict:\n return (image,)\n\n return ImagePipelineOutput(images=image)\n", "path": "src/diffusers/pipelines/ddim/pipeline_ddim.py"}], "after_files": [{"content": "# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\n\nfrom ...schedulers import DDIMScheduler\nfrom ...utils import deprecate, randn_tensor\nfrom ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput\n\n\nclass DDIMPipeline(DiffusionPipeline):\n r\"\"\"\n This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the\n library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)\n\n Parameters:\n unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.\n scheduler ([`SchedulerMixin`]):\n A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of\n [`DDPMScheduler`], or [`DDIMScheduler`].\n \"\"\"\n\n def __init__(self, unet, scheduler):\n super().__init__()\n\n # make sure scheduler can always be converted to DDIM\n scheduler = DDIMScheduler.from_config(scheduler.config)\n\n self.register_modules(unet=unet, scheduler=scheduler)\n\n @torch.no_grad()\n def __call__(\n self,\n batch_size: int = 1,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n eta: float = 0.0,\n num_inference_steps: int = 50,\n use_clipped_model_output: Optional[bool] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n ) -> Union[ImagePipelineOutput, Tuple]:\n r\"\"\"\n Args:\n batch_size (`int`, *optional*, defaults to 1):\n The number of images to generate.\n generator (`torch.Generator`, *optional*):\n One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)\n to make generation deterministic.\n eta (`float`, *optional*, defaults to 0.0):\n The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n use_clipped_model_output (`bool`, *optional*, defaults to `None`):\n if `True` or `False`, see documentation for `DDIMScheduler.step`. If `None`, nothing is passed\n downstream to the scheduler. So use `None` for schedulers which don't support this argument.\n output_type (`str`, *optional*, defaults to `\"pil\"`):\n The output format of the generate image. Choose between\n [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.\n\n Returns:\n [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is\n True, otherwise a `tuple. 
When returning a tuple, the first element is a list with the generated images.\n \"\"\"\n\n if (\n generator is not None\n and isinstance(generator, torch.Generator)\n and generator.device.type != self.device.type\n and self.device.type != \"mps\"\n ):\n message = (\n f\"The `generator` device is `{generator.device}` and does not match the pipeline \"\n f\"device `{self.device}`, so the `generator` will be ignored. \"\n f'Please use `generator=torch.Generator(device=\"{self.device}\")` instead.'\n )\n deprecate(\n \"generator.device == 'cpu'\",\n \"0.13.0\",\n message,\n )\n generator = None\n\n # Sample gaussian noise to begin loop\n if isinstance(self.unet.sample_size, int):\n image_shape = (batch_size, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size)\n else:\n image_shape = (batch_size, self.unet.in_channels, *self.unet.sample_size)\n\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. Make sure the batch size matches the length of the generators.\"\n )\n\n image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)\n\n # set step values\n self.scheduler.set_timesteps(num_inference_steps)\n\n for t in self.progress_bar(self.scheduler.timesteps):\n # 1. predict noise model_output\n model_output = self.unet(image, t).sample\n\n # 2. predict previous mean of image x_t-1 and add variance depending on eta\n # eta corresponds to \u03b7 in paper and should be between [0, 1]\n # do x_t -> x_t-1\n image = self.scheduler.step(\n model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator\n ).prev_sample\n\n image = (image / 2 + 0.5).clamp(0, 1)\n image = image.cpu().permute(0, 2, 3, 1).numpy()\n if output_type == \"pil\":\n image = self.numpy_to_pil(image)\n\n if not return_dict:\n return (image,)\n\n return ImagePipelineOutput(images=image)\n", "path": "src/diffusers/pipelines/ddim/pipeline_ddim.py"}]}
| 2,873 | 196 |
gh_patches_debug_23873 | rasdani/github-patches | git_diff | cloudtools__troposphere-186 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invalid json generated with SecurityGroupIngress
Ref: https://s3-us-west-2.amazonaws.com/cloudformation-templates-us-west-2/EC2InstanceWithSecurityGroupSample.template
Invalid format generated:
``` json
"SecurityGroupIngress": [
{
"Properties": {
"CidrIp": "0.0.0.0/0",
"FromPort": "0",
"IpProtocol": "-1",
"ToPort": "65535"
},
"Type": "AWS::EC2::SecurityGroupIngress"
}
]
```
With the above template AWS will complain:
```
Encountered unsupported property Type
```
Correct format:
``` json
"SecurityGroupIngress": [
{
"CidrIp": "0.0.0.0/0",
"FromPort": "0",
"IpProtocol": "-1",
"ToPort": "65535"
}
]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/RedshiftClusterInVpc.py`
Content:
```
1 # Converted from Redshift.template located at:
2 # http://aws.amazon.com/cloudformation/aws-cloudformation-templates/
3
4 from troposphere import Template, Parameter, Ref, Equals
5 from troposphere import If, Output, Join, GetAtt
6 from troposphere.redshift import Cluster, ClusterParameterGroup
7 from troposphere.redshift import AmazonRedshiftParameter, ClusterSubnetGroup
8 from troposphere.ec2 import VPC, Subnet, InternetGateway, VPCGatewayAttachment
9 from troposphere.ec2 import SecurityGroup, SecurityGroupIngress
10
11
12 t = Template()
13
14 t.add_version("2010-09-09")
15
16 t.add_description(
17 "AWS CloudFormation Sample Template: Redshift cluster in a VPC")
18
19 dbname = t.add_parameter(Parameter(
20 "DatabaseName",
21 Description="The name of the first database to be created when the "
22 "redshift cluster is created",
23 Type="String",
24 Default="defaultdb",
25 AllowedPattern="([a-z]|[0-9])+",
26 ))
27
28 clustertype = t.add_parameter(Parameter(
29 "ClusterType",
30 Description="The type of the cluster",
31 Type="String",
32 Default="single-node",
33 AllowedValues=[
34 "single-node",
35 "multi-mode"
36 ],
37 ))
38
39 numberofnodes = t.add_parameter(Parameter(
40 "NumberOfNodes",
41 Description="The number of compute nodes in the redshift cluster. "
42 "When cluster type is specified as: 1) single-node, the NumberOfNodes "
43 "parameter should be specified as 1, 2) multi-node, the NumberOfNodes "
44 "parameter should be greater than 1",
45 Type="Number",
46 Default="1",
47 ))
48
49 nodetype = t.add_parameter(Parameter(
50 "NodeType",
51 Description="The node type to be provisioned for the redshift cluster",
52 Type="String",
53 Default="dw2.large",
54 ))
55
56 masterusername = t.add_parameter(Parameter(
57 "MasterUsername",
58 Description="The user name associated with the master user account for "
59 "the redshift cluster that is being created",
60 Type="String",
61 Default="defaultuser",
62 AllowedPattern="([a-z])([a-z]|[0-9])*",
63 NoEcho=True,
64 ))
65
66 masteruserpassword = t.add_parameter(Parameter(
67 "MasterUserPassword",
68 Description="The password associated with the master user account for the "
69 "redshift cluster that is being created.",
70 Type="String",
71 NoEcho=True,
72 ))
73
74 conditions = {
75 "IsMultiNodeCluster": Equals(
76 Ref("ClusterType"),
77 "multi-mode"
78 ),
79 }
80
81 for k in conditions:
82 t.add_condition(k, conditions[k])
83
84 redshiftcluster = t.add_resource(Cluster(
85 "RedshiftCluster",
86 ClusterType=Ref("ClusterType"),
87 NumberOfNodes=If("IsMultiNodeCluster",
88 Ref("NumberOfNodes"), Ref("AWS::NoValue")),
89 NodeType=Ref("NodeType"),
90 DBName=Ref("DatabaseName"),
91 MasterUsername=Ref("MasterUsername"),
92 MasterUserPassword=Ref("MasterUserPassword"),
93 ClusterParameterGroupName=Ref("RedshiftClusterParameterGroup"),
94 VpcSecurityGroupIds=Ref("SecurityGroup"),
95 ClusterSubnetGroupName=Ref("RedshiftClusterSubnetGroup"),
96 ))
97
98 amazonredshiftparameter1 = AmazonRedshiftParameter(
99 "AmazonRedshiftParameter1",
100 ParameterName="enable_user_activity_logging",
101 ParameterValue="true",
102 )
103
104 redshiftclusterparametergroup = t.add_resource(ClusterParameterGroup(
105 "RedshiftClusterParameterGroup",
106 Description="Cluster parameter group",
107 ParameterGroupFamily="redshift-1.0",
108 Parameters=[amazonredshiftparameter1],
109 ))
110
111 redshiftclustersubnetgroup = t.add_resource(ClusterSubnetGroup(
112 "RedshiftClusterSubnetGroup",
113 Description="Cluster subnet group",
114 SubnetIds=Ref("Subnet"),
115 ))
116
117 vpc = t.add_resource(VPC(
118 "VPC",
119 CidrBlock="10.0.0.0/16",
120 ))
121
122 subnet = t.add_resource(Subnet(
123 "Subnet",
124 CidrBlock="10.0.0.0/24",
125 VpcId=Ref("VPC"),
126 ))
127
128 internetgateway = t.add_resource(InternetGateway(
129 "InternetGateway",
130 ))
131
132 gatewayattachment = t.add_resource(VPCGatewayAttachment(
133 "GatewayAttachment",
134 VpcId=Ref("VPC"),
135 InternetGatewayId=Ref("InternetGateway"),
136 ))
137
138 securitygroupingress1 = SecurityGroupIngress(
139 "SecurityGroupIngress1",
140 CidrIp="10.0.0.0/16",
141 FromPort="80",
142 ToPort="80",
143 IpProtocol="tcp",
144 )
145
146 securitygroup = t.add_resource(SecurityGroup(
147 "SecurityGroup",
148 GroupDescription="Security Group",
149 SecurityGroupIngress=[securitygroupingress1],
150 VpcId=Ref("VPC"),
151 ))
152
153 t.add_output(Output(
154 "ClusterEndpoint",
155 Value=Join(":", [GetAtt(redshiftcluster, "Endpoint.Address"),
156 GetAtt(redshiftcluster, "Endpoint.Port")]),
157 ))
158
159 print(t.to_json())
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
```diff
diff --git a/examples/RedshiftClusterInVpc.py b/examples/RedshiftClusterInVpc.py
--- a/examples/RedshiftClusterInVpc.py
+++ b/examples/RedshiftClusterInVpc.py
@@ -6,7 +6,7 @@
from troposphere.redshift import Cluster, ClusterParameterGroup
from troposphere.redshift import AmazonRedshiftParameter, ClusterSubnetGroup
from troposphere.ec2 import VPC, Subnet, InternetGateway, VPCGatewayAttachment
-from troposphere.ec2 import SecurityGroup, SecurityGroupIngress
+from troposphere.ec2 import SecurityGroup, SecurityGroupRule
t = Template()
@@ -135,18 +135,18 @@
InternetGatewayId=Ref("InternetGateway"),
))
-securitygroupingress1 = SecurityGroupIngress(
- "SecurityGroupIngress1",
- CidrIp="10.0.0.0/16",
- FromPort="80",
- ToPort="80",
- IpProtocol="tcp",
-)
-
securitygroup = t.add_resource(SecurityGroup(
"SecurityGroup",
GroupDescription="Security Group",
- SecurityGroupIngress=[securitygroupingress1],
+ SecurityGroupIngress=[
+ SecurityGroupRule(
+ "SecurityGroupIngress1",
+ CidrIp="10.0.0.0/16",
+ FromPort="80",
+ ToPort="80",
+ IpProtocol="tcp",
+ )
+ ],
VpcId=Ref("VPC"),
))
```
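A rough standalone sketch of the pattern this patch settles on — inline `SecurityGroupRule` property objects rather than `SecurityGroupIngress` resources — assuming a recent `troposphere` release (the resource titles here are illustrative):

```python
from troposphere import Ref, Template
from troposphere.ec2 import SecurityGroup, SecurityGroupRule, VPC

t = Template()
t.add_resource(VPC("VPC", CidrBlock="10.0.0.0/16"))
t.add_resource(SecurityGroup(
    "SecurityGroup",
    GroupDescription="Security Group",
    SecurityGroupIngress=[
        # A property object, not a standalone resource, so no Type/Properties
        # wrapper appears in the generated JSON.
        SecurityGroupRule(
            CidrIp="10.0.0.0/16",
            FromPort="80",
            ToPort="80",
            IpProtocol="tcp",
        ),
    ],
    VpcId=Ref("VPC"),
))
print(t.to_json())
```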
|
{"golden_diff": "diff --git a/examples/RedshiftClusterInVpc.py b/examples/RedshiftClusterInVpc.py\n--- a/examples/RedshiftClusterInVpc.py\n+++ b/examples/RedshiftClusterInVpc.py\n@@ -6,7 +6,7 @@\n from troposphere.redshift import Cluster, ClusterParameterGroup\n from troposphere.redshift import AmazonRedshiftParameter, ClusterSubnetGroup\n from troposphere.ec2 import VPC, Subnet, InternetGateway, VPCGatewayAttachment\n-from troposphere.ec2 import SecurityGroup, SecurityGroupIngress\n+from troposphere.ec2 import SecurityGroup, SecurityGroupRule\n \n \n t = Template()\n@@ -135,18 +135,18 @@\n InternetGatewayId=Ref(\"InternetGateway\"),\n ))\n \n-securitygroupingress1 = SecurityGroupIngress(\n- \"SecurityGroupIngress1\",\n- CidrIp=\"10.0.0.0/16\",\n- FromPort=\"80\",\n- ToPort=\"80\",\n- IpProtocol=\"tcp\",\n-)\n-\n securitygroup = t.add_resource(SecurityGroup(\n \"SecurityGroup\",\n GroupDescription=\"Security Group\",\n- SecurityGroupIngress=[securitygroupingress1],\n+ SecurityGroupIngress=[\n+ SecurityGroupRule(\n+ \"SecurityGroupIngress1\",\n+ CidrIp=\"10.0.0.0/16\",\n+ FromPort=\"80\",\n+ ToPort=\"80\",\n+ IpProtocol=\"tcp\",\n+ )\n+ ],\n VpcId=Ref(\"VPC\"),\n ))\n", "issue": "Invalid json generated with SecurityGroupIngress\nRef: https://s3-us-west-2.amazonaws.com/cloudformation-templates-us-west-2/EC2InstanceWithSecurityGroupSample.template\n\nInvalid format generated:\n\n``` json\n\"SecurityGroupIngress\": [\n {\n \"Properties\": {\n \"CidrIp\": \"0.0.0.0/0\",\n \"FromPort\": \"0\",\n \"IpProtocol\": \"-1\",\n \"ToPort\": \"65535\"\n },\n \"Type\": \"AWS::EC2::SecurityGroupIngress\"\n }\n ]\n```\n\nWith the above template AWS will complain:\n\n```\nEncountered unsupported property Type\n```\n\nCorrect format:\n\n``` json\n\"SecurityGroupIngress\": [\n {\n \"CidrIp\": \"0.0.0.0/0\",\n \"FromPort\": \"0\",\n \"IpProtocol\": \"-1\",\n \"ToPort\": \"65535\"\n }\n ]\n```\n\n", "before_files": [{"content": "# Converted from Redshift.template located at:\n# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/\n\nfrom troposphere import Template, Parameter, Ref, Equals\nfrom troposphere import If, Output, Join, GetAtt\nfrom troposphere.redshift import Cluster, ClusterParameterGroup\nfrom troposphere.redshift import AmazonRedshiftParameter, ClusterSubnetGroup\nfrom troposphere.ec2 import VPC, Subnet, InternetGateway, VPCGatewayAttachment\nfrom troposphere.ec2 import SecurityGroup, SecurityGroupIngress\n\n\nt = Template()\n\nt.add_version(\"2010-09-09\")\n\nt.add_description(\n \"AWS CloudFormation Sample Template: Redshift cluster in a VPC\")\n\ndbname = t.add_parameter(Parameter(\n \"DatabaseName\",\n Description=\"The name of the first database to be created when the \"\n \"redshift cluster is created\",\n Type=\"String\",\n Default=\"defaultdb\",\n AllowedPattern=\"([a-z]|[0-9])+\",\n))\n\nclustertype = t.add_parameter(Parameter(\n \"ClusterType\",\n Description=\"The type of the cluster\",\n Type=\"String\",\n Default=\"single-node\",\n AllowedValues=[\n \"single-node\",\n \"multi-mode\"\n ],\n))\n\nnumberofnodes = t.add_parameter(Parameter(\n \"NumberOfNodes\",\n Description=\"The number of compute nodes in the redshift cluster. 
\"\n \"When cluster type is specified as: 1) single-node, the NumberOfNodes \"\n \"parameter should be specified as 1, 2) multi-node, the NumberOfNodes \"\n \"parameter should be greater than 1\",\n Type=\"Number\",\n Default=\"1\",\n))\n\nnodetype = t.add_parameter(Parameter(\n \"NodeType\",\n Description=\"The node type to be provisioned for the redshift cluster\",\n Type=\"String\",\n Default=\"dw2.large\",\n))\n\nmasterusername = t.add_parameter(Parameter(\n \"MasterUsername\",\n Description=\"The user name associated with the master user account for \"\n \"the redshift cluster that is being created\",\n Type=\"String\",\n Default=\"defaultuser\",\n AllowedPattern=\"([a-z])([a-z]|[0-9])*\",\n NoEcho=True,\n))\n\nmasteruserpassword = t.add_parameter(Parameter(\n \"MasterUserPassword\",\n Description=\"The password associated with the master user account for the \"\n \"redshift cluster that is being created.\",\n Type=\"String\",\n NoEcho=True,\n))\n\nconditions = {\n \"IsMultiNodeCluster\": Equals(\n Ref(\"ClusterType\"),\n \"multi-mode\"\n ),\n}\n\nfor k in conditions:\n t.add_condition(k, conditions[k])\n\nredshiftcluster = t.add_resource(Cluster(\n \"RedshiftCluster\",\n ClusterType=Ref(\"ClusterType\"),\n NumberOfNodes=If(\"IsMultiNodeCluster\",\n Ref(\"NumberOfNodes\"), Ref(\"AWS::NoValue\")),\n NodeType=Ref(\"NodeType\"),\n DBName=Ref(\"DatabaseName\"),\n MasterUsername=Ref(\"MasterUsername\"),\n MasterUserPassword=Ref(\"MasterUserPassword\"),\n ClusterParameterGroupName=Ref(\"RedshiftClusterParameterGroup\"),\n VpcSecurityGroupIds=Ref(\"SecurityGroup\"),\n ClusterSubnetGroupName=Ref(\"RedshiftClusterSubnetGroup\"),\n))\n\namazonredshiftparameter1 = AmazonRedshiftParameter(\n \"AmazonRedshiftParameter1\",\n ParameterName=\"enable_user_activity_logging\",\n ParameterValue=\"true\",\n)\n\nredshiftclusterparametergroup = t.add_resource(ClusterParameterGroup(\n \"RedshiftClusterParameterGroup\",\n Description=\"Cluster parameter group\",\n ParameterGroupFamily=\"redshift-1.0\",\n Parameters=[amazonredshiftparameter1],\n))\n\nredshiftclustersubnetgroup = t.add_resource(ClusterSubnetGroup(\n \"RedshiftClusterSubnetGroup\",\n Description=\"Cluster subnet group\",\n SubnetIds=Ref(\"Subnet\"),\n))\n\nvpc = t.add_resource(VPC(\n \"VPC\",\n CidrBlock=\"10.0.0.0/16\",\n))\n\nsubnet = t.add_resource(Subnet(\n \"Subnet\",\n CidrBlock=\"10.0.0.0/24\",\n VpcId=Ref(\"VPC\"),\n))\n\ninternetgateway = t.add_resource(InternetGateway(\n \"InternetGateway\",\n))\n\ngatewayattachment = t.add_resource(VPCGatewayAttachment(\n \"GatewayAttachment\",\n VpcId=Ref(\"VPC\"),\n InternetGatewayId=Ref(\"InternetGateway\"),\n))\n\nsecuritygroupingress1 = SecurityGroupIngress(\n \"SecurityGroupIngress1\",\n CidrIp=\"10.0.0.0/16\",\n FromPort=\"80\",\n ToPort=\"80\",\n IpProtocol=\"tcp\",\n)\n\nsecuritygroup = t.add_resource(SecurityGroup(\n \"SecurityGroup\",\n GroupDescription=\"Security Group\",\n SecurityGroupIngress=[securitygroupingress1],\n VpcId=Ref(\"VPC\"),\n))\n\nt.add_output(Output(\n \"ClusterEndpoint\",\n Value=Join(\":\", [GetAtt(redshiftcluster, \"Endpoint.Address\"),\n GetAtt(redshiftcluster, \"Endpoint.Port\")]),\n))\n\nprint(t.to_json())\n", "path": "examples/RedshiftClusterInVpc.py"}], "after_files": [{"content": "# Converted from Redshift.template located at:\n# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/\n\nfrom troposphere import Template, Parameter, Ref, Equals\nfrom troposphere import If, Output, Join, GetAtt\nfrom troposphere.redshift import Cluster, 
ClusterParameterGroup\nfrom troposphere.redshift import AmazonRedshiftParameter, ClusterSubnetGroup\nfrom troposphere.ec2 import VPC, Subnet, InternetGateway, VPCGatewayAttachment\nfrom troposphere.ec2 import SecurityGroup, SecurityGroupRule\n\n\nt = Template()\n\nt.add_version(\"2010-09-09\")\n\nt.add_description(\n \"AWS CloudFormation Sample Template: Redshift cluster in a VPC\")\n\ndbname = t.add_parameter(Parameter(\n \"DatabaseName\",\n Description=\"The name of the first database to be created when the \"\n \"redshift cluster is created\",\n Type=\"String\",\n Default=\"defaultdb\",\n AllowedPattern=\"([a-z]|[0-9])+\",\n))\n\nclustertype = t.add_parameter(Parameter(\n \"ClusterType\",\n Description=\"The type of the cluster\",\n Type=\"String\",\n Default=\"single-node\",\n AllowedValues=[\n \"single-node\",\n \"multi-mode\"\n ],\n))\n\nnumberofnodes = t.add_parameter(Parameter(\n \"NumberOfNodes\",\n Description=\"The number of compute nodes in the redshift cluster. \"\n \"When cluster type is specified as: 1) single-node, the NumberOfNodes \"\n \"parameter should be specified as 1, 2) multi-node, the NumberOfNodes \"\n \"parameter should be greater than 1\",\n Type=\"Number\",\n Default=\"1\",\n))\n\nnodetype = t.add_parameter(Parameter(\n \"NodeType\",\n Description=\"The node type to be provisioned for the redshift cluster\",\n Type=\"String\",\n Default=\"dw2.large\",\n))\n\nmasterusername = t.add_parameter(Parameter(\n \"MasterUsername\",\n Description=\"The user name associated with the master user account for \"\n \"the redshift cluster that is being created\",\n Type=\"String\",\n Default=\"defaultuser\",\n AllowedPattern=\"([a-z])([a-z]|[0-9])*\",\n NoEcho=True,\n))\n\nmasteruserpassword = t.add_parameter(Parameter(\n \"MasterUserPassword\",\n Description=\"The password associated with the master user account for the \"\n \"redshift cluster that is being created.\",\n Type=\"String\",\n NoEcho=True,\n))\n\nconditions = {\n \"IsMultiNodeCluster\": Equals(\n Ref(\"ClusterType\"),\n \"multi-mode\"\n ),\n}\n\nfor k in conditions:\n t.add_condition(k, conditions[k])\n\nredshiftcluster = t.add_resource(Cluster(\n \"RedshiftCluster\",\n ClusterType=Ref(\"ClusterType\"),\n NumberOfNodes=If(\"IsMultiNodeCluster\",\n Ref(\"NumberOfNodes\"), Ref(\"AWS::NoValue\")),\n NodeType=Ref(\"NodeType\"),\n DBName=Ref(\"DatabaseName\"),\n MasterUsername=Ref(\"MasterUsername\"),\n MasterUserPassword=Ref(\"MasterUserPassword\"),\n ClusterParameterGroupName=Ref(\"RedshiftClusterParameterGroup\"),\n VpcSecurityGroupIds=Ref(\"SecurityGroup\"),\n ClusterSubnetGroupName=Ref(\"RedshiftClusterSubnetGroup\"),\n))\n\namazonredshiftparameter1 = AmazonRedshiftParameter(\n \"AmazonRedshiftParameter1\",\n ParameterName=\"enable_user_activity_logging\",\n ParameterValue=\"true\",\n)\n\nredshiftclusterparametergroup = t.add_resource(ClusterParameterGroup(\n \"RedshiftClusterParameterGroup\",\n Description=\"Cluster parameter group\",\n ParameterGroupFamily=\"redshift-1.0\",\n Parameters=[amazonredshiftparameter1],\n))\n\nredshiftclustersubnetgroup = t.add_resource(ClusterSubnetGroup(\n \"RedshiftClusterSubnetGroup\",\n Description=\"Cluster subnet group\",\n SubnetIds=Ref(\"Subnet\"),\n))\n\nvpc = t.add_resource(VPC(\n \"VPC\",\n CidrBlock=\"10.0.0.0/16\",\n))\n\nsubnet = t.add_resource(Subnet(\n \"Subnet\",\n CidrBlock=\"10.0.0.0/24\",\n VpcId=Ref(\"VPC\"),\n))\n\ninternetgateway = t.add_resource(InternetGateway(\n \"InternetGateway\",\n))\n\ngatewayattachment = t.add_resource(VPCGatewayAttachment(\n 
\"GatewayAttachment\",\n VpcId=Ref(\"VPC\"),\n InternetGatewayId=Ref(\"InternetGateway\"),\n))\n\nsecuritygroup = t.add_resource(SecurityGroup(\n \"SecurityGroup\",\n GroupDescription=\"Security Group\",\n SecurityGroupIngress=[\n SecurityGroupRule(\n \"SecurityGroupIngress1\",\n CidrIp=\"10.0.0.0/16\",\n FromPort=\"80\",\n ToPort=\"80\",\n IpProtocol=\"tcp\",\n )\n ],\n VpcId=Ref(\"VPC\"),\n))\n\nt.add_output(Output(\n \"ClusterEndpoint\",\n Value=Join(\":\", [GetAtt(redshiftcluster, \"Endpoint.Address\"),\n GetAtt(redshiftcluster, \"Endpoint.Port\")]),\n))\n\nprint(t.to_json())\n", "path": "examples/RedshiftClusterInVpc.py"}]}
| 1,962 | 348 |
gh_patches_debug_30936 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-2994 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Login user after password reset notify events with Anonymous User
## BUG/PROBLEM REPORT
The option **Login user after password reset** allows a user who has just performed a password reset to be logged in automatically once the process is complete.
One of the two events (**UserLoggedInEvent**, **UserInitialLoginInEvent**) is triggered by this process, but with the Anonymous User instead of the user who just performed the password reset.
### What I did:
- Newly created Plone site (no addons)
- **Login user after password reset** selected on */@@security-controlpanel*
- Create a new user
- Request a user password reset
- Follow the generated link
### What I expect to happen:
**UserLoggedInEvent** or **UserInitialLoginInEvent** should be triggered with the newly logged in user.
### What actually happened:
**UserLoggedInEvent** and **UserInitialLoginInEvent** are triggered with **<SpecialUser 'Anonymous User'>**.
### What version of Plone/ Addons I am using:
* Plone 5.2
* No addons
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/browser/login/password_reset.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from AccessControl.SecurityManagement import getSecurityManager
3 from email.header import Header
4 from plone.app.layout.navigation.interfaces import INavigationRoot
5 from plone.memoize import view
6 from plone.registry.interfaces import IRegistry
7 from Products.CMFCore.utils import getToolByName
8 from Products.CMFPlone import PloneMessageFactory as _
9 from Products.CMFPlone.interfaces import IPasswordResetToolView
10 from Products.CMFPlone.interfaces.controlpanel import IMailSchema
11 from Products.CMFPlone.PasswordResetTool import ExpiredRequestError
12 from Products.CMFPlone.PasswordResetTool import InvalidRequestError
13 from Products.CMFPlone.utils import safe_unicode
14 from Products.CMFPlone.utils import safeToInt
15 from Products.Five import BrowserView
16 from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
17 from Products.PlonePAS.events import UserInitialLoginInEvent
18 from Products.PlonePAS.events import UserLoggedInEvent
19 from Products.PluggableAuthService.interfaces.plugins import ICredentialsUpdatePlugin # noqa
20 from Products.statusmessages.interfaces import IStatusMessage
21 from zope.component import getMultiAdapter
22 from zope.component import getUtility
23 from zope.event import notify
24 from zope.i18n import translate
25 from zope.interface import implementer
26 from zope.publisher.interfaces import IPublishTraverse
27
28
29 @implementer(IPasswordResetToolView)
30 class PasswordResetToolView(BrowserView):
31
32 @view.memoize_contextless
33 def portal_state(self):
34 """ return portal_state of plone.app.layout
35 """
36 return getMultiAdapter((self.context, self.request),
37 name=u"plone_portal_state")
38
39 def encode_mail_header(self, text):
40 """ Encodes text into correctly encoded email header """
41 return Header(safe_unicode(text), 'utf-8')
42
43 def encoded_mail_sender(self):
44 """ returns encoded version of Portal name <portal_email> """
45 registry = getUtility(IRegistry)
46 mail_settings = registry.forInterface(IMailSchema, prefix="plone")
47 from_ = mail_settings.email_from_name
48 mail = mail_settings.email_from_address
49 return '"%s" <%s>' % (self.encode_mail_header(from_).encode(), mail)
50
51 def registered_notify_subject(self):
52 portal_name = self.portal_state().portal_title()
53 return translate(
54 _(
55 u'mailtemplate_user_account_info',
56 default=u'User Account Information for ${portal_name}',
57 mapping={'portal_name': safe_unicode(portal_name)},
58 ),
59 context=self.request,
60 )
61
62 def mail_password_subject(self):
63 return translate(
64 _(
65 u'mailtemplate_subject_resetpasswordrequest',
66 default=u'Password reset request',
67 ),
68 context=self.request,
69 )
70
71 def construct_url(self, randomstring):
72 return '%s/passwordreset/%s' % (
73 self.portal_state().navigation_root_url(), randomstring)
74
75 def expiration_timeout(self):
76 pw_tool = getToolByName(self.context, 'portal_password_reset')
77 timeout = int(pw_tool.getExpirationTimeout() or 0)
78 return timeout * 24 # timeout is in days, but templates want in hours.
79
80
81 @implementer(IPublishTraverse)
82 class PasswordResetView(BrowserView):
83 """ """
84
85 invalid = ViewPageTemplateFile('templates/pwreset_invalid.pt')
86 expired = ViewPageTemplateFile('templates/pwreset_expired.pt')
87 finish = ViewPageTemplateFile('templates/pwreset_finish.pt')
88 form = ViewPageTemplateFile('templates/pwreset_form.pt')
89 subpath = None
90
91 def _auto_login(self, userid, password):
92 aclu = getToolByName(self.context, 'acl_users')
93 for name, plugin in aclu.plugins.listPlugins(ICredentialsUpdatePlugin):
94 plugin.updateCredentials(
95 self.request,
96 self.request.response,
97 userid,
98 password
99 )
100 user = getSecurityManager().getUser()
101 login_time = user.getProperty('login_time', None)
102 if login_time is None:
103 notify(UserInitialLoginInEvent(user))
104 else:
105 notify(UserLoggedInEvent(user))
106
107 IStatusMessage(self.request).addStatusMessage(
108 _(
109 'password_reset_successful',
110 default='Password reset successful, '
111 'you are logged in now!',
112 ),
113 'info',
114 )
115 url = INavigationRoot(self.context).absolute_url()
116 self.request.response.redirect(url)
117 return
118
119 def _reset_password(self, pw_tool, randomstring):
120 state = self.getErrors()
121 if state:
122 return self.form()
123 userid = self.request.form.get('userid')
124 password = self.request.form.get('password')
125 try:
126 pw_tool.resetPassword(userid, randomstring, password)
127 except ExpiredRequestError:
128 return self.expired()
129 except InvalidRequestError:
130 return self.invalid()
131 except RuntimeError:
132 return self.invalid()
133 registry = getUtility(IRegistry)
134 if registry.get('plone.autologin_after_password_reset', False):
135 return self._auto_login(userid, password)
136 return self.finish()
137
138 def __call__(self):
139 if self.subpath:
140 # Try traverse subpath first:
141 randomstring = self.subpath[0]
142 else:
143 randomstring = self.request.get('key', None)
144
145 pw_tool = getToolByName(self.context, 'portal_password_reset')
146 if self.request.method == 'POST':
147 return self._reset_password(pw_tool, randomstring)
148 try:
149 pw_tool.verifyKey(randomstring)
150 except InvalidRequestError:
151 return self.invalid()
152 except ExpiredRequestError:
153 return self.expired()
154 return self.form()
155
156 def publishTraverse(self, request, name):
157 if self.subpath is None:
158 self.subpath = []
159 self.subpath.append(name)
160 return self
161
162 def getErrors(self):
163 if self.request.method != 'POST':
164 return
165 password = self.request.form.get('password')
166 password2 = self.request.form.get('password2')
167 userid = self.request.form.get('userid')
168 reg_tool = getToolByName(self.context, 'portal_registration')
169 pw_fail = reg_tool.testPasswordValidity(password, password2)
170 state = {}
171 if pw_fail:
172 state['password'] = pw_fail
173
174 # Determine if we're checking userids or not
175 pw_tool = getToolByName(self.context, 'portal_password_reset')
176 if not pw_tool.checkUser():
177 return state
178
179 if not userid:
180 state['userid'] = _(
181 'This field is required, please provide some information.',
182 )
183 if state:
184 state['status'] = 'failure'
185 state['portal_status_message'] = _(
186 'Please correct the indicated errors.',
187 )
188 return state
189
190 def login_url(self):
191 portal_state = getMultiAdapter((self.context, self.request),
192 name=u"plone_portal_state")
193 return '{0}/login?__ac_name={1}'.format(
194 portal_state.navigation_root_url(),
195 self.request.form.get('userid', ''))
196
197 def expiration_timeout(self):
198 pw_tool = getToolByName(self.context, 'portal_password_reset')
199 timeout = int(pw_tool.getExpirationTimeout() or 0)
200 return timeout * 24 # timeout is in days, but templates want in hours.
201
202
203 class ExplainPWResetToolView(BrowserView):
204 """ """
205
206 def timeout_days(self):
207 return self.context.getExpirationTimeout()
208
209 def user_check(self):
210 return self.context._user_check and 'checked' or None
211
212 @property
213 def stats(self):
214 """Return a dictionary like so:
215 {"open":3, "expired":0}
216 about the number of open and expired reset requests.
217 """
218 # count expired reset requests by creating a list of it
219 bad = len([1 for expiry in self.context._requests.values()
220 if self.context.expired(expiry)])
221 # open reset requests are all requests without the expired ones
222 good = len(self.context._requests) - bad
223 return {"open": good, "expired": bad}
224
225 def __call__(self):
226 if self.request.method == 'POST':
227 timeout_days = safeToInt(self.request.get('timeout_days'), 7)
228 self.context.setExpirationTimeout(timeout_days)
229 self.context._user_check = bool(
230 self.request.get('user_check', False),
231 )
232 return self.index()
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
```diff
diff --git a/Products/CMFPlone/browser/login/password_reset.py b/Products/CMFPlone/browser/login/password_reset.py
--- a/Products/CMFPlone/browser/login/password_reset.py
+++ b/Products/CMFPlone/browser/login/password_reset.py
@@ -10,6 +10,7 @@
from Products.CMFPlone.interfaces.controlpanel import IMailSchema
from Products.CMFPlone.PasswordResetTool import ExpiredRequestError
from Products.CMFPlone.PasswordResetTool import InvalidRequestError
+from Products.CMFPlone.RegistrationTool import get_member_by_login_name
from Products.CMFPlone.utils import safe_unicode
from Products.CMFPlone.utils import safeToInt
from Products.Five import BrowserView
@@ -89,7 +90,8 @@
subpath = None
def _auto_login(self, userid, password):
- aclu = getToolByName(self.context, 'acl_users')
+ context = self.context
+ aclu = getToolByName(context, 'acl_users')
for name, plugin in aclu.plugins.listPlugins(ICredentialsUpdatePlugin):
plugin.updateCredentials(
self.request,
@@ -97,7 +99,16 @@
userid,
password
)
- user = getSecurityManager().getUser()
+
+ member = get_member_by_login_name(context, userid, False)
+
+ if member:
+ user = member.getUser()
+ else:
+ # Fallback in case we cannot find a user
+ # with the given userid
+ user = getSecurityManager().getUser()
+
login_time = user.getProperty('login_time', None)
if login_time is None:
notify(UserInitialLoginInEvent(user))
```
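Stripped of Plone itself, the event plumbing involved here is just `zope.event`; a minimal sketch (assuming only `zope.event` is installed, with stand-in event and user objects) of why whatever object is handed to `notify()` is exactly what subscribers see:

```python
from zope.event import notify, subscribers


class UserLoggedIn(object):
    def __init__(self, user):
        self.user = user


def on_login(event):
    if isinstance(event, UserLoggedIn):
        print("logged in: %s" % event.user)


subscribers.append(on_login)

# Passing the freshly looked-up member (rather than the security manager's
# current Anonymous User) is what makes the subscriber see the right principal.
notify(UserLoggedIn("jane"))  # prints: logged in: jane
```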
|
{"golden_diff": "diff --git a/Products/CMFPlone/browser/login/password_reset.py b/Products/CMFPlone/browser/login/password_reset.py\n--- a/Products/CMFPlone/browser/login/password_reset.py\n+++ b/Products/CMFPlone/browser/login/password_reset.py\n@@ -10,6 +10,7 @@\n from Products.CMFPlone.interfaces.controlpanel import IMailSchema\n from Products.CMFPlone.PasswordResetTool import ExpiredRequestError\n from Products.CMFPlone.PasswordResetTool import InvalidRequestError\n+from Products.CMFPlone.RegistrationTool import get_member_by_login_name\n from Products.CMFPlone.utils import safe_unicode\n from Products.CMFPlone.utils import safeToInt\n from Products.Five import BrowserView\n@@ -89,7 +90,8 @@\n subpath = None\n \n def _auto_login(self, userid, password):\n- aclu = getToolByName(self.context, 'acl_users')\n+ context = self.context\n+ aclu = getToolByName(context, 'acl_users')\n for name, plugin in aclu.plugins.listPlugins(ICredentialsUpdatePlugin):\n plugin.updateCredentials(\n self.request,\n@@ -97,7 +99,16 @@\n userid,\n password\n )\n- user = getSecurityManager().getUser()\n+\n+ member = get_member_by_login_name(context, userid, False)\n+\n+ if member:\n+ user = member.getUser()\n+ else:\n+ # Fallback in case we cannot find a user\n+ # with the given userid\n+ user = getSecurityManager().getUser()\n+\n login_time = user.getProperty('login_time', None)\n if login_time is None:\n notify(UserInitialLoginInEvent(user))\n", "issue": "Login user after password reset notify events with Anonymous User\n## BUG/PROBLEM REPORT\r\n\r\nThe option **Login user after password reset** allows a user that just performed a password reset to automatically log in after the .process is complete.\r\n\r\nOne of the two events (**UserLoggedInEvent**, **UserInitialLoginInEvent**) is triggered by this process, but with the Anonymous User instead of the user that just performed the password request.\r\n\r\n### What I did:\r\n\r\n- Newly created Plone site (no addons)\r\n- **Login user after password reset** selected on */@@security-controlpanel*\r\n- Create a new user\r\n- Request a reset user password\r\n- Follow the generated link\r\n\r\n### What I expect to happen:\r\n\r\n**UserLoggedInEvent** or **UserInitialLoginInEvent** should be triggered with the newly logged in user.\r\n\r\n### What actually happened:\r\n\r\n**UserLoggedInEvent** and **UserInitialLoginInEvent** are triggrered with **<SpecialUser 'Anonymous User'>**.\r\n\r\n### What version of Plone/ Addons I am using:\r\n\r\n* Plone 5.2\r\n* No addons\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom AccessControl.SecurityManagement import getSecurityManager\nfrom email.header import Header\nfrom plone.app.layout.navigation.interfaces import INavigationRoot\nfrom plone.memoize import view\nfrom plone.registry.interfaces import IRegistry\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.interfaces import IPasswordResetToolView\nfrom Products.CMFPlone.interfaces.controlpanel import IMailSchema\nfrom Products.CMFPlone.PasswordResetTool import ExpiredRequestError\nfrom Products.CMFPlone.PasswordResetTool import InvalidRequestError\nfrom Products.CMFPlone.utils import safe_unicode\nfrom Products.CMFPlone.utils import safeToInt\nfrom Products.Five import BrowserView\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom Products.PlonePAS.events import UserInitialLoginInEvent\nfrom Products.PlonePAS.events import UserLoggedInEvent\nfrom 
Products.PluggableAuthService.interfaces.plugins import ICredentialsUpdatePlugin # noqa\nfrom Products.statusmessages.interfaces import IStatusMessage\nfrom zope.component import getMultiAdapter\nfrom zope.component import getUtility\nfrom zope.event import notify\nfrom zope.i18n import translate\nfrom zope.interface import implementer\nfrom zope.publisher.interfaces import IPublishTraverse\n\n\n@implementer(IPasswordResetToolView)\nclass PasswordResetToolView(BrowserView):\n\n @view.memoize_contextless\n def portal_state(self):\n \"\"\" return portal_state of plone.app.layout\n \"\"\"\n return getMultiAdapter((self.context, self.request),\n name=u\"plone_portal_state\")\n\n def encode_mail_header(self, text):\n \"\"\" Encodes text into correctly encoded email header \"\"\"\n return Header(safe_unicode(text), 'utf-8')\n\n def encoded_mail_sender(self):\n \"\"\" returns encoded version of Portal name <portal_email> \"\"\"\n registry = getUtility(IRegistry)\n mail_settings = registry.forInterface(IMailSchema, prefix=\"plone\")\n from_ = mail_settings.email_from_name\n mail = mail_settings.email_from_address\n return '\"%s\" <%s>' % (self.encode_mail_header(from_).encode(), mail)\n\n def registered_notify_subject(self):\n portal_name = self.portal_state().portal_title()\n return translate(\n _(\n u'mailtemplate_user_account_info',\n default=u'User Account Information for ${portal_name}',\n mapping={'portal_name': safe_unicode(portal_name)},\n ),\n context=self.request,\n )\n\n def mail_password_subject(self):\n return translate(\n _(\n u'mailtemplate_subject_resetpasswordrequest',\n default=u'Password reset request',\n ),\n context=self.request,\n )\n\n def construct_url(self, randomstring):\n return '%s/passwordreset/%s' % (\n self.portal_state().navigation_root_url(), randomstring)\n\n def expiration_timeout(self):\n pw_tool = getToolByName(self.context, 'portal_password_reset')\n timeout = int(pw_tool.getExpirationTimeout() or 0)\n return timeout * 24 # timeout is in days, but templates want in hours.\n\n\n@implementer(IPublishTraverse)\nclass PasswordResetView(BrowserView):\n \"\"\" \"\"\"\n\n invalid = ViewPageTemplateFile('templates/pwreset_invalid.pt')\n expired = ViewPageTemplateFile('templates/pwreset_expired.pt')\n finish = ViewPageTemplateFile('templates/pwreset_finish.pt')\n form = ViewPageTemplateFile('templates/pwreset_form.pt')\n subpath = None\n\n def _auto_login(self, userid, password):\n aclu = getToolByName(self.context, 'acl_users')\n for name, plugin in aclu.plugins.listPlugins(ICredentialsUpdatePlugin):\n plugin.updateCredentials(\n self.request,\n self.request.response,\n userid,\n password\n )\n user = getSecurityManager().getUser()\n login_time = user.getProperty('login_time', None)\n if login_time is None:\n notify(UserInitialLoginInEvent(user))\n else:\n notify(UserLoggedInEvent(user))\n\n IStatusMessage(self.request).addStatusMessage(\n _(\n 'password_reset_successful',\n default='Password reset successful, '\n 'you are logged in now!',\n ),\n 'info',\n )\n url = INavigationRoot(self.context).absolute_url()\n self.request.response.redirect(url)\n return\n\n def _reset_password(self, pw_tool, randomstring):\n state = self.getErrors()\n if state:\n return self.form()\n userid = self.request.form.get('userid')\n password = self.request.form.get('password')\n try:\n pw_tool.resetPassword(userid, randomstring, password)\n except ExpiredRequestError:\n return self.expired()\n except InvalidRequestError:\n return self.invalid()\n except RuntimeError:\n return 
self.invalid()\n registry = getUtility(IRegistry)\n if registry.get('plone.autologin_after_password_reset', False):\n return self._auto_login(userid, password)\n return self.finish()\n\n def __call__(self):\n if self.subpath:\n # Try traverse subpath first:\n randomstring = self.subpath[0]\n else:\n randomstring = self.request.get('key', None)\n\n pw_tool = getToolByName(self.context, 'portal_password_reset')\n if self.request.method == 'POST':\n return self._reset_password(pw_tool, randomstring)\n try:\n pw_tool.verifyKey(randomstring)\n except InvalidRequestError:\n return self.invalid()\n except ExpiredRequestError:\n return self.expired()\n return self.form()\n\n def publishTraverse(self, request, name):\n if self.subpath is None:\n self.subpath = []\n self.subpath.append(name)\n return self\n\n def getErrors(self):\n if self.request.method != 'POST':\n return\n password = self.request.form.get('password')\n password2 = self.request.form.get('password2')\n userid = self.request.form.get('userid')\n reg_tool = getToolByName(self.context, 'portal_registration')\n pw_fail = reg_tool.testPasswordValidity(password, password2)\n state = {}\n if pw_fail:\n state['password'] = pw_fail\n\n # Determine if we're checking userids or not\n pw_tool = getToolByName(self.context, 'portal_password_reset')\n if not pw_tool.checkUser():\n return state\n\n if not userid:\n state['userid'] = _(\n 'This field is required, please provide some information.',\n )\n if state:\n state['status'] = 'failure'\n state['portal_status_message'] = _(\n 'Please correct the indicated errors.',\n )\n return state\n\n def login_url(self):\n portal_state = getMultiAdapter((self.context, self.request),\n name=u\"plone_portal_state\")\n return '{0}/login?__ac_name={1}'.format(\n portal_state.navigation_root_url(),\n self.request.form.get('userid', ''))\n\n def expiration_timeout(self):\n pw_tool = getToolByName(self.context, 'portal_password_reset')\n timeout = int(pw_tool.getExpirationTimeout() or 0)\n return timeout * 24 # timeout is in days, but templates want in hours.\n\n\nclass ExplainPWResetToolView(BrowserView):\n \"\"\" \"\"\"\n\n def timeout_days(self):\n return self.context.getExpirationTimeout()\n\n def user_check(self):\n return self.context._user_check and 'checked' or None\n\n @property\n def stats(self):\n \"\"\"Return a dictionary like so:\n {\"open\":3, \"expired\":0}\n about the number of open and expired reset requests.\n \"\"\"\n # count expired reset requests by creating a list of it\n bad = len([1 for expiry in self.context._requests.values()\n if self.context.expired(expiry)])\n # open reset requests are all requests without the expired ones\n good = len(self.context._requests) - bad\n return {\"open\": good, \"expired\": bad}\n\n def __call__(self):\n if self.request.method == 'POST':\n timeout_days = safeToInt(self.request.get('timeout_days'), 7)\n self.context.setExpirationTimeout(timeout_days)\n self.context._user_check = bool(\n self.request.get('user_check', False),\n )\n return self.index()\n", "path": "Products/CMFPlone/browser/login/password_reset.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom AccessControl.SecurityManagement import getSecurityManager\nfrom email.header import Header\nfrom plone.app.layout.navigation.interfaces import INavigationRoot\nfrom plone.memoize import view\nfrom plone.registry.interfaces import IRegistry\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.interfaces import 
IPasswordResetToolView\nfrom Products.CMFPlone.interfaces.controlpanel import IMailSchema\nfrom Products.CMFPlone.PasswordResetTool import ExpiredRequestError\nfrom Products.CMFPlone.PasswordResetTool import InvalidRequestError\nfrom Products.CMFPlone.RegistrationTool import get_member_by_login_name\nfrom Products.CMFPlone.utils import safe_unicode\nfrom Products.CMFPlone.utils import safeToInt\nfrom Products.Five import BrowserView\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom Products.PlonePAS.events import UserInitialLoginInEvent\nfrom Products.PlonePAS.events import UserLoggedInEvent\nfrom Products.PluggableAuthService.interfaces.plugins import ICredentialsUpdatePlugin # noqa\nfrom Products.statusmessages.interfaces import IStatusMessage\nfrom zope.component import getMultiAdapter\nfrom zope.component import getUtility\nfrom zope.event import notify\nfrom zope.i18n import translate\nfrom zope.interface import implementer\nfrom zope.publisher.interfaces import IPublishTraverse\n\n\n@implementer(IPasswordResetToolView)\nclass PasswordResetToolView(BrowserView):\n\n @view.memoize_contextless\n def portal_state(self):\n \"\"\" return portal_state of plone.app.layout\n \"\"\"\n return getMultiAdapter((self.context, self.request),\n name=u\"plone_portal_state\")\n\n def encode_mail_header(self, text):\n \"\"\" Encodes text into correctly encoded email header \"\"\"\n return Header(safe_unicode(text), 'utf-8')\n\n def encoded_mail_sender(self):\n \"\"\" returns encoded version of Portal name <portal_email> \"\"\"\n registry = getUtility(IRegistry)\n mail_settings = registry.forInterface(IMailSchema, prefix=\"plone\")\n from_ = mail_settings.email_from_name\n mail = mail_settings.email_from_address\n return '\"%s\" <%s>' % (self.encode_mail_header(from_).encode(), mail)\n\n def registered_notify_subject(self):\n portal_name = self.portal_state().portal_title()\n return translate(\n _(\n u'mailtemplate_user_account_info',\n default=u'User Account Information for ${portal_name}',\n mapping={'portal_name': safe_unicode(portal_name)},\n ),\n context=self.request,\n )\n\n def mail_password_subject(self):\n return translate(\n _(\n u'mailtemplate_subject_resetpasswordrequest',\n default=u'Password reset request',\n ),\n context=self.request,\n )\n\n def construct_url(self, randomstring):\n return '%s/passwordreset/%s' % (\n self.portal_state().navigation_root_url(), randomstring)\n\n def expiration_timeout(self):\n pw_tool = getToolByName(self.context, 'portal_password_reset')\n timeout = int(pw_tool.getExpirationTimeout() or 0)\n return timeout * 24 # timeout is in days, but templates want in hours.\n\n\n@implementer(IPublishTraverse)\nclass PasswordResetView(BrowserView):\n \"\"\" \"\"\"\n\n invalid = ViewPageTemplateFile('templates/pwreset_invalid.pt')\n expired = ViewPageTemplateFile('templates/pwreset_expired.pt')\n finish = ViewPageTemplateFile('templates/pwreset_finish.pt')\n form = ViewPageTemplateFile('templates/pwreset_form.pt')\n subpath = None\n\n def _auto_login(self, userid, password):\n context = self.context\n aclu = getToolByName(context, 'acl_users')\n for name, plugin in aclu.plugins.listPlugins(ICredentialsUpdatePlugin):\n plugin.updateCredentials(\n self.request,\n self.request.response,\n userid,\n password\n )\n\n member = get_member_by_login_name(context, userid, False)\n\n if member:\n user = member.getUser()\n else:\n # Fallback in case we cannot find a user\n # with the given userid\n user = getSecurityManager().getUser()\n\n login_time = 
user.getProperty('login_time', None)\n if login_time is None:\n notify(UserInitialLoginInEvent(user))\n else:\n notify(UserLoggedInEvent(user))\n\n IStatusMessage(self.request).addStatusMessage(\n _(\n 'password_reset_successful',\n default='Password reset successful, '\n 'you are logged in now!',\n ),\n 'info',\n )\n url = INavigationRoot(self.context).absolute_url()\n self.request.response.redirect(url)\n return\n\n def _reset_password(self, pw_tool, randomstring):\n state = self.getErrors()\n if state:\n return self.form()\n userid = self.request.form.get('userid')\n password = self.request.form.get('password')\n try:\n pw_tool.resetPassword(userid, randomstring, password)\n except ExpiredRequestError:\n return self.expired()\n except InvalidRequestError:\n return self.invalid()\n except RuntimeError:\n return self.invalid()\n registry = getUtility(IRegistry)\n if registry.get('plone.autologin_after_password_reset', False):\n return self._auto_login(userid, password)\n return self.finish()\n\n def __call__(self):\n if self.subpath:\n # Try traverse subpath first:\n randomstring = self.subpath[0]\n else:\n randomstring = self.request.get('key', None)\n\n pw_tool = getToolByName(self.context, 'portal_password_reset')\n if self.request.method == 'POST':\n return self._reset_password(pw_tool, randomstring)\n try:\n pw_tool.verifyKey(randomstring)\n except InvalidRequestError:\n return self.invalid()\n except ExpiredRequestError:\n return self.expired()\n return self.form()\n\n def publishTraverse(self, request, name):\n if self.subpath is None:\n self.subpath = []\n self.subpath.append(name)\n return self\n\n def getErrors(self):\n if self.request.method != 'POST':\n return\n password = self.request.form.get('password')\n password2 = self.request.form.get('password2')\n userid = self.request.form.get('userid')\n reg_tool = getToolByName(self.context, 'portal_registration')\n pw_fail = reg_tool.testPasswordValidity(password, password2)\n state = {}\n if pw_fail:\n state['password'] = pw_fail\n\n # Determine if we're checking userids or not\n pw_tool = getToolByName(self.context, 'portal_password_reset')\n if not pw_tool.checkUser():\n return state\n\n if not userid:\n state['userid'] = _(\n 'This field is required, please provide some information.',\n )\n if state:\n state['status'] = 'failure'\n state['portal_status_message'] = _(\n 'Please correct the indicated errors.',\n )\n return state\n\n def login_url(self):\n portal_state = getMultiAdapter((self.context, self.request),\n name=u\"plone_portal_state\")\n return '{0}/login?__ac_name={1}'.format(\n portal_state.navigation_root_url(),\n self.request.form.get('userid', ''))\n\n def expiration_timeout(self):\n pw_tool = getToolByName(self.context, 'portal_password_reset')\n timeout = int(pw_tool.getExpirationTimeout() or 0)\n return timeout * 24 # timeout is in days, but templates want in hours.\n\n\nclass ExplainPWResetToolView(BrowserView):\n \"\"\" \"\"\"\n\n def timeout_days(self):\n return self.context.getExpirationTimeout()\n\n def user_check(self):\n return self.context._user_check and 'checked' or None\n\n @property\n def stats(self):\n \"\"\"Return a dictionary like so:\n {\"open\":3, \"expired\":0}\n about the number of open and expired reset requests.\n \"\"\"\n # count expired reset requests by creating a list of it\n bad = len([1 for expiry in self.context._requests.values()\n if self.context.expired(expiry)])\n # open reset requests are all requests without the expired ones\n good = len(self.context._requests) - bad\n return 
{\"open\": good, \"expired\": bad}\n\n def __call__(self):\n if self.request.method == 'POST':\n timeout_days = safeToInt(self.request.get('timeout_days'), 7)\n self.context.setExpirationTimeout(timeout_days)\n self.context._user_check = bool(\n self.request.get('user_check', False),\n )\n return self.index()\n", "path": "Products/CMFPlone/browser/login/password_reset.py"}]}
| 2,876 | 376 |
gh_patches_debug_32183 | rasdani/github-patches | git_diff | pre-commit__pre-commit-529 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[RFC] Make the default of `pre-commit autoupdate` use `--tags-only`?
I find `--tags-only` to be much better than the default.
My proposal:
- Make the `--tags-only` behaviour the default behaviour
- Make `--tags-only` a noop argument which produces a warning and does the default
- Add a `--bleeding-edge` which does the current default behaviour
@chriskuehl thoughts?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/main.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import argparse
4 import os
5 import sys
6
7 import pre_commit.constants as C
8 from pre_commit import color
9 from pre_commit import five
10 from pre_commit import git
11 from pre_commit.commands.autoupdate import autoupdate
12 from pre_commit.commands.clean import clean
13 from pre_commit.commands.install_uninstall import install
14 from pre_commit.commands.install_uninstall import install_hooks
15 from pre_commit.commands.install_uninstall import uninstall
16 from pre_commit.commands.run import run
17 from pre_commit.commands.sample_config import sample_config
18 from pre_commit.error_handler import error_handler
19 from pre_commit.logging_handler import add_logging_handler
20 from pre_commit.runner import Runner
21
22
23 # https://github.com/pre-commit/pre-commit/issues/217
24 # On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`
25 # to install packages to the wrong place. We don't want anything to deal with
26 # pyvenv
27 os.environ.pop('__PYVENV_LAUNCHER__', None)
28
29
30 def _add_color_option(parser):
31 parser.add_argument(
32 '--color', default='auto', type=color.use_color,
33 metavar='{' + ','.join(color.COLOR_CHOICES) + '}',
34 help='Whether to use color in output. Defaults to `%(default)s`.',
35 )
36
37
38 def _add_config_option(parser):
39 parser.add_argument(
40 '-c', '--config', default='.pre-commit-config.yaml',
41 help='Path to alternate config file'
42 )
43
44
45 def main(argv=None):
46 argv = argv if argv is not None else sys.argv[1:]
47 argv = [five.to_text(arg) for arg in argv]
48 parser = argparse.ArgumentParser()
49
50 # http://stackoverflow.com/a/8521644/812183
51 parser.add_argument(
52 '-V', '--version',
53 action='version',
54 version='%(prog)s {}'.format(C.VERSION),
55 )
56
57 subparsers = parser.add_subparsers(dest='command')
58
59 install_parser = subparsers.add_parser(
60 'install', help='Install the pre-commit script.',
61 )
62 _add_color_option(install_parser)
63 _add_config_option(install_parser)
64 install_parser.add_argument(
65 '-f', '--overwrite', action='store_true',
66 help='Overwrite existing hooks / remove migration mode.',
67 )
68 install_parser.add_argument(
69 '--install-hooks', action='store_true',
70 help=(
71 'Whether to install hook environments for all environments '
72 'in the config file.'
73 ),
74 )
75 install_parser.add_argument(
76 '-t', '--hook-type', choices=('pre-commit', 'pre-push'),
77 default='pre-commit',
78 )
79 install_parser.add_argument(
80 '--allow-missing-config', action='store_true', default=False,
81 help=(
82 'Whether to allow a missing `pre-config` configuration file '
83 'or exit with a failure code.'
84 ),
85 )
86
87 install_hooks_parser = subparsers.add_parser(
88 'install-hooks',
89 help=(
90 'Install hook environments for all environments in the config '
91 'file. You may find `pre-commit install --install-hooks` more '
92 'useful.'
93 ),
94 )
95 _add_color_option(install_hooks_parser)
96 _add_config_option(install_hooks_parser)
97
98 uninstall_parser = subparsers.add_parser(
99 'uninstall', help='Uninstall the pre-commit script.',
100 )
101 _add_color_option(uninstall_parser)
102 _add_config_option(uninstall_parser)
103 uninstall_parser.add_argument(
104 '-t', '--hook-type', choices=('pre-commit', 'pre-push'),
105 default='pre-commit',
106 )
107
108 clean_parser = subparsers.add_parser(
109 'clean', help='Clean out pre-commit files.',
110 )
111 _add_color_option(clean_parser)
112 _add_config_option(clean_parser)
113 autoupdate_parser = subparsers.add_parser(
114 'autoupdate',
115 help="Auto-update pre-commit config to the latest repos' versions.",
116 )
117 _add_color_option(autoupdate_parser)
118 _add_config_option(autoupdate_parser)
119 autoupdate_parser.add_argument(
120 '--tags-only', action='store_true', help='Update to tags only.',
121 )
122
123 run_parser = subparsers.add_parser('run', help='Run hooks.')
124 _add_color_option(run_parser)
125 _add_config_option(run_parser)
126 run_parser.add_argument('hook', nargs='?', help='A single hook-id to run')
127 run_parser.add_argument(
128 '--no-stash', default=False, action='store_true',
129 help='Use this option to prevent auto stashing of unstaged files.',
130 )
131 run_parser.add_argument(
132 '--verbose', '-v', action='store_true', default=False,
133 )
134 run_parser.add_argument(
135 '--origin', '-o',
136 help="The origin branch's commit_id when using `git push`.",
137 )
138 run_parser.add_argument(
139 '--source', '-s',
140 help="The remote branch's commit_id when using `git push`.",
141 )
142 run_parser.add_argument(
143 '--allow-unstaged-config', default=False, action='store_true',
144 help=(
145 'Allow an unstaged config to be present. Note that this will '
146 'be stashed before parsing unless --no-stash is specified.'
147 ),
148 )
149 run_parser.add_argument(
150 '--hook-stage', choices=('commit', 'push'), default='commit',
151 help='The stage during which the hook is fired e.g. commit or push.',
152 )
153 run_parser.add_argument(
154 '--show-diff-on-failure', action='store_true',
155 help='When hooks fail, run `git diff` directly afterward.',
156 )
157 run_mutex_group = run_parser.add_mutually_exclusive_group(required=False)
158 run_mutex_group.add_argument(
159 '--all-files', '-a', action='store_true', default=False,
160 help='Run on all the files in the repo. Implies --no-stash.',
161 )
162 run_mutex_group.add_argument(
163 '--files', nargs='*', default=[],
164 help='Specific filenames to run hooks on.',
165 )
166
167 sample_config_parser = subparsers.add_parser(
168 'sample-config', help='Produce a sample {} file'.format(C.CONFIG_FILE),
169 )
170 _add_color_option(sample_config_parser)
171 _add_config_option(sample_config_parser)
172
173 help = subparsers.add_parser(
174 'help', help='Show help for a specific command.',
175 )
176 help.add_argument('help_cmd', nargs='?', help='Command to show help for.')
177
178 # Argparse doesn't really provide a way to use a `default` subparser
179 if len(argv) == 0:
180 argv = ['run']
181 args = parser.parse_args(argv)
182 if args.command == 'run':
183 args.files = [
184 os.path.relpath(os.path.abspath(filename), git.get_root())
185 for filename in args.files
186 ]
187
188 if args.command == 'help':
189 if args.help_cmd:
190 parser.parse_args([args.help_cmd, '--help'])
191 else:
192 parser.parse_args(['--help'])
193
194 with error_handler():
195 add_logging_handler(args.color)
196 runner = Runner.create(args.config)
197 git.check_for_cygwin_mismatch()
198
199 if args.command == 'install':
200 return install(
201 runner, overwrite=args.overwrite, hooks=args.install_hooks,
202 hook_type=args.hook_type,
203 skip_on_missing_conf=args.allow_missing_config,
204 )
205 elif args.command == 'install-hooks':
206 return install_hooks(runner)
207 elif args.command == 'uninstall':
208 return uninstall(runner, hook_type=args.hook_type)
209 elif args.command == 'clean':
210 return clean(runner)
211 elif args.command == 'autoupdate':
212 return autoupdate(runner, args.tags_only)
213 elif args.command == 'run':
214 return run(runner, args)
215 elif args.command == 'sample-config':
216 return sample_config()
217 else:
218 raise NotImplementedError(
219 'Command {} not implemented.'.format(args.command)
220 )
221
222 raise AssertionError(
223 'Command {} failed to exit with a returncode'.format(args.command)
224 )
225
226
227 if __name__ == '__main__':
228 exit(main())
229
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/main.py b/pre_commit/main.py
--- a/pre_commit/main.py
+++ b/pre_commit/main.py
@@ -1,6 +1,7 @@
from __future__ import unicode_literals
import argparse
+import logging
import os
import sys
@@ -20,6 +21,8 @@
from pre_commit.runner import Runner
+logger = logging.getLogger('pre_commit')
+
# https://github.com/pre-commit/pre-commit/issues/217
# On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`
# to install packages to the wrong place. We don't want anything to deal with
@@ -117,7 +120,14 @@
_add_color_option(autoupdate_parser)
_add_config_option(autoupdate_parser)
autoupdate_parser.add_argument(
- '--tags-only', action='store_true', help='Update to tags only.',
+ '--tags-only', action='store_true', help='LEGACY: for compatibility',
+ )
+ autoupdate_parser.add_argument(
+ '--bleeding-edge', action='store_true',
+ help=(
+ 'Update to the bleeding edge of `master` instead of the latest '
+ 'tagged version (the default behavior).'
+ ),
)
run_parser = subparsers.add_parser('run', help='Run hooks.')
@@ -209,7 +219,9 @@
elif args.command == 'clean':
return clean(runner)
elif args.command == 'autoupdate':
- return autoupdate(runner, args.tags_only)
+ if args.tags_only:
+ logger.warning('--tags-only is the default')
+ return autoupdate(runner, tags_only=not args.bleeding_edge)
elif args.command == 'run':
return run(runner, args)
elif args.command == 'sample-config':
|
{"golden_diff": "diff --git a/pre_commit/main.py b/pre_commit/main.py\n--- a/pre_commit/main.py\n+++ b/pre_commit/main.py\n@@ -1,6 +1,7 @@\n from __future__ import unicode_literals\n \n import argparse\n+import logging\n import os\n import sys\n \n@@ -20,6 +21,8 @@\n from pre_commit.runner import Runner\n \n \n+logger = logging.getLogger('pre_commit')\n+\n # https://github.com/pre-commit/pre-commit/issues/217\n # On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`\n # to install packages to the wrong place. We don't want anything to deal with\n@@ -117,7 +120,14 @@\n _add_color_option(autoupdate_parser)\n _add_config_option(autoupdate_parser)\n autoupdate_parser.add_argument(\n- '--tags-only', action='store_true', help='Update to tags only.',\n+ '--tags-only', action='store_true', help='LEGACY: for compatibility',\n+ )\n+ autoupdate_parser.add_argument(\n+ '--bleeding-edge', action='store_true',\n+ help=(\n+ 'Update to the bleeding edge of `master` instead of the latest '\n+ 'tagged version (the default behavior).'\n+ ),\n )\n \n run_parser = subparsers.add_parser('run', help='Run hooks.')\n@@ -209,7 +219,9 @@\n elif args.command == 'clean':\n return clean(runner)\n elif args.command == 'autoupdate':\n- return autoupdate(runner, args.tags_only)\n+ if args.tags_only:\n+ logger.warning('--tags-only is the default')\n+ return autoupdate(runner, tags_only=not args.bleeding_edge)\n elif args.command == 'run':\n return run(runner, args)\n elif args.command == 'sample-config':\n", "issue": "[RFC] Make the default of `pre-commit autoupdate` use `--tags-only`?\nI find that `--tags-only` to be much better than the default.\r\n\r\nMy proposal:\r\n\r\n- Make the `--tags-only` behaviour the default behaviour\r\n- Make `--tags-only` a noop argument which produces a warning and does the default\r\n- Add a `--bleeding-edge` which does the current default behaviour\r\n\r\n@chriskuehl thoughts?\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport argparse\nimport os\nimport sys\n\nimport pre_commit.constants as C\nfrom pre_commit import color\nfrom pre_commit import five\nfrom pre_commit import git\nfrom pre_commit.commands.autoupdate import autoupdate\nfrom pre_commit.commands.clean import clean\nfrom pre_commit.commands.install_uninstall import install\nfrom pre_commit.commands.install_uninstall import install_hooks\nfrom pre_commit.commands.install_uninstall import uninstall\nfrom pre_commit.commands.run import run\nfrom pre_commit.commands.sample_config import sample_config\nfrom pre_commit.error_handler import error_handler\nfrom pre_commit.logging_handler import add_logging_handler\nfrom pre_commit.runner import Runner\n\n\n# https://github.com/pre-commit/pre-commit/issues/217\n# On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`\n# to install packages to the wrong place. We don't want anything to deal with\n# pyvenv\nos.environ.pop('__PYVENV_LAUNCHER__', None)\n\n\ndef _add_color_option(parser):\n parser.add_argument(\n '--color', default='auto', type=color.use_color,\n metavar='{' + ','.join(color.COLOR_CHOICES) + '}',\n help='Whether to use color in output. 
Defaults to `%(default)s`.',\n )\n\n\ndef _add_config_option(parser):\n parser.add_argument(\n '-c', '--config', default='.pre-commit-config.yaml',\n help='Path to alternate config file'\n )\n\n\ndef main(argv=None):\n argv = argv if argv is not None else sys.argv[1:]\n argv = [five.to_text(arg) for arg in argv]\n parser = argparse.ArgumentParser()\n\n # http://stackoverflow.com/a/8521644/812183\n parser.add_argument(\n '-V', '--version',\n action='version',\n version='%(prog)s {}'.format(C.VERSION),\n )\n\n subparsers = parser.add_subparsers(dest='command')\n\n install_parser = subparsers.add_parser(\n 'install', help='Install the pre-commit script.',\n )\n _add_color_option(install_parser)\n _add_config_option(install_parser)\n install_parser.add_argument(\n '-f', '--overwrite', action='store_true',\n help='Overwrite existing hooks / remove migration mode.',\n )\n install_parser.add_argument(\n '--install-hooks', action='store_true',\n help=(\n 'Whether to install hook environments for all environments '\n 'in the config file.'\n ),\n )\n install_parser.add_argument(\n '-t', '--hook-type', choices=('pre-commit', 'pre-push'),\n default='pre-commit',\n )\n install_parser.add_argument(\n '--allow-missing-config', action='store_true', default=False,\n help=(\n 'Whether to allow a missing `pre-config` configuration file '\n 'or exit with a failure code.'\n ),\n )\n\n install_hooks_parser = subparsers.add_parser(\n 'install-hooks',\n help=(\n 'Install hook environments for all environments in the config '\n 'file. You may find `pre-commit install --install-hooks` more '\n 'useful.'\n ),\n )\n _add_color_option(install_hooks_parser)\n _add_config_option(install_hooks_parser)\n\n uninstall_parser = subparsers.add_parser(\n 'uninstall', help='Uninstall the pre-commit script.',\n )\n _add_color_option(uninstall_parser)\n _add_config_option(uninstall_parser)\n uninstall_parser.add_argument(\n '-t', '--hook-type', choices=('pre-commit', 'pre-push'),\n default='pre-commit',\n )\n\n clean_parser = subparsers.add_parser(\n 'clean', help='Clean out pre-commit files.',\n )\n _add_color_option(clean_parser)\n _add_config_option(clean_parser)\n autoupdate_parser = subparsers.add_parser(\n 'autoupdate',\n help=\"Auto-update pre-commit config to the latest repos' versions.\",\n )\n _add_color_option(autoupdate_parser)\n _add_config_option(autoupdate_parser)\n autoupdate_parser.add_argument(\n '--tags-only', action='store_true', help='Update to tags only.',\n )\n\n run_parser = subparsers.add_parser('run', help='Run hooks.')\n _add_color_option(run_parser)\n _add_config_option(run_parser)\n run_parser.add_argument('hook', nargs='?', help='A single hook-id to run')\n run_parser.add_argument(\n '--no-stash', default=False, action='store_true',\n help='Use this option to prevent auto stashing of unstaged files.',\n )\n run_parser.add_argument(\n '--verbose', '-v', action='store_true', default=False,\n )\n run_parser.add_argument(\n '--origin', '-o',\n help=\"The origin branch's commit_id when using `git push`.\",\n )\n run_parser.add_argument(\n '--source', '-s',\n help=\"The remote branch's commit_id when using `git push`.\",\n )\n run_parser.add_argument(\n '--allow-unstaged-config', default=False, action='store_true',\n help=(\n 'Allow an unstaged config to be present. Note that this will '\n 'be stashed before parsing unless --no-stash is specified.'\n ),\n )\n run_parser.add_argument(\n '--hook-stage', choices=('commit', 'push'), default='commit',\n help='The stage during which the hook is fired e.g. 
commit or push.',\n )\n run_parser.add_argument(\n '--show-diff-on-failure', action='store_true',\n help='When hooks fail, run `git diff` directly afterward.',\n )\n run_mutex_group = run_parser.add_mutually_exclusive_group(required=False)\n run_mutex_group.add_argument(\n '--all-files', '-a', action='store_true', default=False,\n help='Run on all the files in the repo. Implies --no-stash.',\n )\n run_mutex_group.add_argument(\n '--files', nargs='*', default=[],\n help='Specific filenames to run hooks on.',\n )\n\n sample_config_parser = subparsers.add_parser(\n 'sample-config', help='Produce a sample {} file'.format(C.CONFIG_FILE),\n )\n _add_color_option(sample_config_parser)\n _add_config_option(sample_config_parser)\n\n help = subparsers.add_parser(\n 'help', help='Show help for a specific command.',\n )\n help.add_argument('help_cmd', nargs='?', help='Command to show help for.')\n\n # Argparse doesn't really provide a way to use a `default` subparser\n if len(argv) == 0:\n argv = ['run']\n args = parser.parse_args(argv)\n if args.command == 'run':\n args.files = [\n os.path.relpath(os.path.abspath(filename), git.get_root())\n for filename in args.files\n ]\n\n if args.command == 'help':\n if args.help_cmd:\n parser.parse_args([args.help_cmd, '--help'])\n else:\n parser.parse_args(['--help'])\n\n with error_handler():\n add_logging_handler(args.color)\n runner = Runner.create(args.config)\n git.check_for_cygwin_mismatch()\n\n if args.command == 'install':\n return install(\n runner, overwrite=args.overwrite, hooks=args.install_hooks,\n hook_type=args.hook_type,\n skip_on_missing_conf=args.allow_missing_config,\n )\n elif args.command == 'install-hooks':\n return install_hooks(runner)\n elif args.command == 'uninstall':\n return uninstall(runner, hook_type=args.hook_type)\n elif args.command == 'clean':\n return clean(runner)\n elif args.command == 'autoupdate':\n return autoupdate(runner, args.tags_only)\n elif args.command == 'run':\n return run(runner, args)\n elif args.command == 'sample-config':\n return sample_config()\n else:\n raise NotImplementedError(\n 'Command {} not implemented.'.format(args.command)\n )\n\n raise AssertionError(\n 'Command {} failed to exit with a returncode'.format(args.command)\n )\n\n\nif __name__ == '__main__':\n exit(main())\n", "path": "pre_commit/main.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport argparse\nimport logging\nimport os\nimport sys\n\nimport pre_commit.constants as C\nfrom pre_commit import color\nfrom pre_commit import five\nfrom pre_commit import git\nfrom pre_commit.commands.autoupdate import autoupdate\nfrom pre_commit.commands.clean import clean\nfrom pre_commit.commands.install_uninstall import install\nfrom pre_commit.commands.install_uninstall import install_hooks\nfrom pre_commit.commands.install_uninstall import uninstall\nfrom pre_commit.commands.run import run\nfrom pre_commit.commands.sample_config import sample_config\nfrom pre_commit.error_handler import error_handler\nfrom pre_commit.logging_handler import add_logging_handler\nfrom pre_commit.runner import Runner\n\n\nlogger = logging.getLogger('pre_commit')\n\n# https://github.com/pre-commit/pre-commit/issues/217\n# On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`\n# to install packages to the wrong place. 
We don't want anything to deal with\n# pyvenv\nos.environ.pop('__PYVENV_LAUNCHER__', None)\n\n\ndef _add_color_option(parser):\n parser.add_argument(\n '--color', default='auto', type=color.use_color,\n metavar='{' + ','.join(color.COLOR_CHOICES) + '}',\n help='Whether to use color in output. Defaults to `%(default)s`.',\n )\n\n\ndef _add_config_option(parser):\n parser.add_argument(\n '-c', '--config', default='.pre-commit-config.yaml',\n help='Path to alternate config file'\n )\n\n\ndef main(argv=None):\n argv = argv if argv is not None else sys.argv[1:]\n argv = [five.to_text(arg) for arg in argv]\n parser = argparse.ArgumentParser()\n\n # http://stackoverflow.com/a/8521644/812183\n parser.add_argument(\n '-V', '--version',\n action='version',\n version='%(prog)s {}'.format(C.VERSION),\n )\n\n subparsers = parser.add_subparsers(dest='command')\n\n install_parser = subparsers.add_parser(\n 'install', help='Install the pre-commit script.',\n )\n _add_color_option(install_parser)\n _add_config_option(install_parser)\n install_parser.add_argument(\n '-f', '--overwrite', action='store_true',\n help='Overwrite existing hooks / remove migration mode.',\n )\n install_parser.add_argument(\n '--install-hooks', action='store_true',\n help=(\n 'Whether to install hook environments for all environments '\n 'in the config file.'\n ),\n )\n install_parser.add_argument(\n '-t', '--hook-type', choices=('pre-commit', 'pre-push'),\n default='pre-commit',\n )\n install_parser.add_argument(\n '--allow-missing-config', action='store_true', default=False,\n help=(\n 'Whether to allow a missing `pre-config` configuration file '\n 'or exit with a failure code.'\n ),\n )\n\n install_hooks_parser = subparsers.add_parser(\n 'install-hooks',\n help=(\n 'Install hook environments for all environments in the config '\n 'file. 
You may find `pre-commit install --install-hooks` more '\n 'useful.'\n ),\n )\n _add_color_option(install_hooks_parser)\n _add_config_option(install_hooks_parser)\n\n uninstall_parser = subparsers.add_parser(\n 'uninstall', help='Uninstall the pre-commit script.',\n )\n _add_color_option(uninstall_parser)\n _add_config_option(uninstall_parser)\n uninstall_parser.add_argument(\n '-t', '--hook-type', choices=('pre-commit', 'pre-push'),\n default='pre-commit',\n )\n\n clean_parser = subparsers.add_parser(\n 'clean', help='Clean out pre-commit files.',\n )\n _add_color_option(clean_parser)\n _add_config_option(clean_parser)\n autoupdate_parser = subparsers.add_parser(\n 'autoupdate',\n help=\"Auto-update pre-commit config to the latest repos' versions.\",\n )\n _add_color_option(autoupdate_parser)\n _add_config_option(autoupdate_parser)\n autoupdate_parser.add_argument(\n '--tags-only', action='store_true', help='LEGACY: for compatibility',\n )\n autoupdate_parser.add_argument(\n '--bleeding-edge', action='store_true',\n help=(\n 'Update to the bleeding edge of `master` instead of the latest '\n 'tagged version (the default behavior).'\n ),\n )\n\n run_parser = subparsers.add_parser('run', help='Run hooks.')\n _add_color_option(run_parser)\n _add_config_option(run_parser)\n run_parser.add_argument('hook', nargs='?', help='A single hook-id to run')\n run_parser.add_argument(\n '--no-stash', default=False, action='store_true',\n help='Use this option to prevent auto stashing of unstaged files.',\n )\n run_parser.add_argument(\n '--verbose', '-v', action='store_true', default=False,\n )\n run_parser.add_argument(\n '--origin', '-o',\n help=\"The origin branch's commit_id when using `git push`.\",\n )\n run_parser.add_argument(\n '--source', '-s',\n help=\"The remote branch's commit_id when using `git push`.\",\n )\n run_parser.add_argument(\n '--allow-unstaged-config', default=False, action='store_true',\n help=(\n 'Allow an unstaged config to be present. Note that this will '\n 'be stashed before parsing unless --no-stash is specified.'\n ),\n )\n run_parser.add_argument(\n '--hook-stage', choices=('commit', 'push'), default='commit',\n help='The stage during which the hook is fired e.g. commit or push.',\n )\n run_parser.add_argument(\n '--show-diff-on-failure', action='store_true',\n help='When hooks fail, run `git diff` directly afterward.',\n )\n run_mutex_group = run_parser.add_mutually_exclusive_group(required=False)\n run_mutex_group.add_argument(\n '--all-files', '-a', action='store_true', default=False,\n help='Run on all the files in the repo. 
Implies --no-stash.',\n )\n run_mutex_group.add_argument(\n '--files', nargs='*', default=[],\n help='Specific filenames to run hooks on.',\n )\n\n sample_config_parser = subparsers.add_parser(\n 'sample-config', help='Produce a sample {} file'.format(C.CONFIG_FILE),\n )\n _add_color_option(sample_config_parser)\n _add_config_option(sample_config_parser)\n\n help = subparsers.add_parser(\n 'help', help='Show help for a specific command.',\n )\n help.add_argument('help_cmd', nargs='?', help='Command to show help for.')\n\n # Argparse doesn't really provide a way to use a `default` subparser\n if len(argv) == 0:\n argv = ['run']\n args = parser.parse_args(argv)\n if args.command == 'run':\n args.files = [\n os.path.relpath(os.path.abspath(filename), git.get_root())\n for filename in args.files\n ]\n\n if args.command == 'help':\n if args.help_cmd:\n parser.parse_args([args.help_cmd, '--help'])\n else:\n parser.parse_args(['--help'])\n\n with error_handler():\n add_logging_handler(args.color)\n runner = Runner.create(args.config)\n git.check_for_cygwin_mismatch()\n\n if args.command == 'install':\n return install(\n runner, overwrite=args.overwrite, hooks=args.install_hooks,\n hook_type=args.hook_type,\n skip_on_missing_conf=args.allow_missing_config,\n )\n elif args.command == 'install-hooks':\n return install_hooks(runner)\n elif args.command == 'uninstall':\n return uninstall(runner, hook_type=args.hook_type)\n elif args.command == 'clean':\n return clean(runner)\n elif args.command == 'autoupdate':\n if args.tags_only:\n logger.warning('--tags-only is the default')\n return autoupdate(runner, tags_only=not args.bleeding_edge)\n elif args.command == 'run':\n return run(runner, args)\n elif args.command == 'sample-config':\n return sample_config()\n else:\n raise NotImplementedError(\n 'Command {} not implemented.'.format(args.command)\n )\n\n raise AssertionError(\n 'Command {} failed to exit with a returncode'.format(args.command)\n )\n\n\nif __name__ == '__main__':\n exit(main())\n", "path": "pre_commit/main.py"}]}
| 2,686 | 420 |
gh_patches_debug_1902 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-2777 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
[BUG]: Wrong import in `zero/sharded_optim/_utils.py`
### 🐛 Describe the bug
In issue #2774, thanks to @malfet for pointing out that we should not use `torch._six` to import `inf` and should use `torch` instead; however, there is a small mistake in PR #2775, which uses an invalid `torch.six` module to import `inf`. We should fix this typo.

### Environment
_No response_
--- END ISSUE ---
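For reference, a minimal sketch of the corrected import the issue calls for (an illustration only, not the project's patch; it assumes a PyTorch release recent enough to re-export `inf` at the top level):
```python
# Illustrative only: the constant `inf` should come from `torch` (or `math`),
# not from the non-existent `torch.six` or the removed `torch._six` module.
from math import inf as math_inf

from torch import inf  # re-exported by recent PyTorch releases

assert inf == math_inf == float("inf")  # all denote positive infinity
```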
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `colossalai/zero/sharded_optim/_utils.py`
Content:
```
1 import math
2 from typing import Optional
3
4 import torch
5 import torch.distributed as dist
6 from torch.six import inf
7 from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
8
9 from colossalai.tensor import ColoParameter
10 from colossalai.utils import is_model_parallel_parameter
11
12
13 def flatten(input_):
14 return _flatten_dense_tensors(input_)
15
16
17 def unflatten(flat, tensors):
18 return _unflatten_dense_tensors(flat, tensors)
19
20
21 def count_numel(tensor_list):
22 res = 0
23 for tensor in tensor_list:
24 res += tensor.numel()
25 return res
26
27
28 def calculate_padding(numel, unit_size):
29 remainder = numel % unit_size
30 return unit_size - remainder if remainder else remainder
31
32
33 def shuffle_by_round_robin(tensor_list, num_partitions):
34 partitions = dict()
35
36 for tensor_idx, tensor in enumerate(tensor_list):
37 partition_to_go = tensor_idx % num_partitions
38 if partition_to_go not in partitions:
39 partitions[partition_to_go] = []
40 partitions[partition_to_go].append(dict(tensor=tensor, index=tensor_idx))
41
42 partitions_count = len(partitions)
43 new_tensor_list = []
44 tensor_index_mapping = dict()
45
46 for partition_id in range(partitions_count):
47 partition_tensors = partitions[partition_id]
48 for item in partition_tensors:
49 tensor_index_mapping[item['index']] = len(new_tensor_list)
50 new_tensor_list.append(item['tensor'])
51
52 return new_tensor_list, tensor_index_mapping
53
54
55 # create a flat tensor aligned at the alignment boundary
56 def flatten_dense_tensors_with_padding(tensor_list, unit_size):
57 num_elements = count_numel(tensor_list)
58 padding = calculate_padding(num_elements, unit_size=unit_size)
59
60 if padding > 0:
61 pad_tensor = torch.zeros(padding, device=tensor_list[0].device, dtype=tensor_list[0].dtype)
62 padded_tensor_list = tensor_list + [pad_tensor]
63 else:
64 padded_tensor_list = tensor_list
65
66 return flatten(padded_tensor_list)
67
68
69 def is_nccl_aligned(tensor):
70 return tensor.data_ptr() % 4 == 0
71
72
73 def get_grad_accumulate_object(tensor):
74 """
75 Return the AccumulateGrad of the input tensor
76 """
77
78 # grad_fn reference:
79 # https://discuss.pytorch.org/t/in-the-grad-fn-i-find-a-next-functions-but-i-dont-understand-the-meaning-of-the-attribute/24463
80 # expand_as reference: https://pytorch.org/docs/stable/generated/torch.Tensor.expand.html#torch.Tensor.expand
81 #
82 # `next_functions` will return the backward graph where
83 # the first element is the AccumulateGrad of the leaf nodes.
84 # we want to get the AccumulateGrad of the input tensor instead of the leaf
85 # node in the whole computation graph.
86 # Therefore, we call expand_as to create a dummy graph
87 # where tensor_tmp and tensor indeed point to the same object.
88 # You can check this by print(tensor.data_ptr() == tensor_tmp.data_ptr())
89 tensor_tmp = tensor.expand_as(tensor)
90 grad_acc_obj = tensor_tmp.grad_fn.next_functions[0][0]
91 return grad_acc_obj
92
93
94 def split_half_float_double(tensor_list):
95 dtypes = ["torch.cuda.HalfTensor", "torch.cuda.FloatTensor", "torch.cuda.DoubleTensor", "torch.cuda.BFloat16Tensor"]
96 buckets = []
97 for i, dtype in enumerate(dtypes):
98 bucket = [t for t in tensor_list if t.type() == dtype]
99 if bucket:
100 buckets.append(bucket)
101 return buckets
102
103
104 def reduce_tensor_dp_group(tensor: torch.Tensor,
105 dtype: Optional[torch.dtype] = None,
106 dst_local_rank: Optional[int] = None,
107 dst_global_rank: Optional[int] = None,
108 group: Optional[dist.ProcessGroup] = None):
109 """
110 Reduce the tensor in the data parallel process group
111
112 :param tensor: A tensor object to reduce/all-reduce
113 :param dtype: The data type used in communication
114 :param dst_rank: The source rank for reduce. If dst_rank is None,
115 :param parallel_mode: Communication parallel mode
116 all-reduce will be used instead of reduce. Default is None.
117
118 :type tensor: torch.Tensor
119 :type dtype: torch.dtype, optional
120 :type dst_rank: int, optional
121 :type pg: ProcessGroup, optional
122 """
123 # use the original dtype
124 if dtype is None:
125 dtype = tensor.dtype
126
127 # cast the data to specified dtype for reduce/all-reduce
128 if tensor.dtype != dtype:
129 tensor_to_reduce = tensor.to(dtype)
130 else:
131 tensor_to_reduce = tensor
132
133 world_size = dist.get_world_size(group=group)
134 tensor_to_reduce.div_(world_size)
135
136 # if rank is None, all reduce will be used
137 # else, reduce is used
138 use_all_reduce = dst_local_rank is None
139
140 if use_all_reduce:
141 dist.all_reduce(tensor_to_reduce, group=group)
142 else:
143 dist.reduce(tensor=tensor_to_reduce, dst=dst_global_rank, group=group)
144
145 # recover the original dtype
146 if tensor.dtype != dtype and tensor is not tensor_to_reduce:
147 local_rank = dist.get_rank(group=group)
148 if use_all_reduce or dst_local_rank == local_rank:
149 tensor.copy_(tensor_to_reduce)
150
151 return tensor
152
153
154 def has_inf_or_nan(tensor):
155 try:
156 # if tensor is half, the .float() incurs an additional deep copy, but it's necessary if
157 # Pytorch's .sum() creates a one-element tensor of the same type as tensor
158 # (which is true for some recent version of pytorch).
159 tensor_sum = float(tensor.float().sum())
160 # More efficient version that can be used if .sum() returns a Python scalar
161 # tensor_sum = float(tensor.sum())
162 except RuntimeError as instance:
163 # We want to check if inst is actually an overflow exception.
164 # RuntimeError could come from a different error.
165 # If so, we still want the exception to propagate.
166 if "value cannot be converted" not in instance.args[0]:
167 raise
168 return True
169 else:
170 if tensor_sum == float('inf') or tensor_sum == -float('inf') or tensor_sum != tensor_sum:
171 return True
172 return False
173
174
175 def release_param_grad(tensor_list):
176 for tensor in tensor_list:
177 tensor.grad = None
178
179
180 def calculate_global_norm_from_list(norm_list):
181 """ Compute total from a list of norms
182 """
183 total_norm = 0.0
184 for norm in norm_list:
185 total_norm += norm**2.0
186 return math.sqrt(total_norm)
187
188
189 def compute_norm(gradients, params, dp_group, mp_group, norm_type=2):
190 """Clips gradient norm of an iterable of parameters.
191 This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and
192 added functionality to handle model parallel parameters. Note that
193 the gradients are modified in place.
194 Arguments:
195 parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
196 single Tensor that will have gradients normalized
197 max_norm (float or int): max norm of the gradients
198 norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
199 infinity norm.
200 Returns:
201 Total norm of the parameters (viewed as a single vector).
202 """
203
204 if mp_group is None:
205 mp_rank = 0
206 else:
207 mp_rank = dist.get_rank(mp_group)
208
209 norm_type = float(norm_type)
210 if norm_type == inf:
211 total_norm = max(g.data.abs().max() for g in gradients)
212 total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
213 dist.all_reduce(total_norm_cuda, op=torch.distributed.ReduceOp.MAX, group=dp_group)
214
215 # Take max across all GPUs.
216 if mp_group is not None:
217 dist.all_reduce(tensor=total_norm_cuda, op=torch.distributed.ReduceOp.MAX)
218 total_norm = total_norm_cuda[0].item()
219 else:
220 total_norm = 0.0
221 # if dist.get_rank() == 0:
222 # logger.info(f"Total Norm beginning {total_norm}")
223
224 for g, p in zip(gradients, params):
225 # Pipeline parallelism may replicate parameters. Avoid multi-counting.
226 tp_param_flag = False
227 if is_model_parallel_parameter(p) or (isinstance(p, ColoParameter) and not p.is_replicate()):
228 tp_param_flag = True
229 if tp_param_flag or mp_rank == 0:
230 param_norm = g.data.double().norm(2)
231 total_norm += param_norm.item()**2
232
233 # Sum across all model parallel GPUs.
234 total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
235 torch.distributed.all_reduce(total_norm_cuda, op=torch.distributed.ReduceOp.SUM, group=dp_group)
236
237 if mp_group is not None:
238 dist.all_reduce(tensor=total_norm_cuda, op=torch.distributed.ReduceOp.SUM, group=mp_group)
239
240 total_norm = total_norm_cuda[0].item()**(1. / norm_type)
241
242 if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm:
243 total_norm = -1
244
245 return total_norm
246
247
248 def sync_param(flat_tensor, tensor_list):
249 """
250 Synchronize the flattened tensor and unflattened tensor list. When
251 a list of tensor are flattened with `torch._utils._unflatten_dense_tensors`,
252 a new tensor is created. Thus, the flat tensor and original tensor list do not
253 share the same memory space. This function will update the tensor list so that
254 they point to the same value.
255
256 :param flat_tensor: A flat tensor obtained by calling `torch._utils._unflatten_dense_tensors` on a tensor lsit
257 :param tensor_list: A list of tensors corresponding to the flattened tensor
258 :type flat_tensor: torch.Tensor
259 :type tensor_list: List[torch.Tensor]
260 """
261 updated_params = unflatten(flat_tensor, tensor_list)
262
263 # update the tensor data
264 for p, q in zip(tensor_list, updated_params):
265 p.data = q.data
266
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/colossalai/zero/sharded_optim/_utils.py b/colossalai/zero/sharded_optim/_utils.py
--- a/colossalai/zero/sharded_optim/_utils.py
+++ b/colossalai/zero/sharded_optim/_utils.py
@@ -3,7 +3,7 @@
import torch
import torch.distributed as dist
-from torch.six import inf
+from torch import inf
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from colossalai.tensor import ColoParameter
|
{"golden_diff": "diff --git a/colossalai/zero/sharded_optim/_utils.py b/colossalai/zero/sharded_optim/_utils.py\n--- a/colossalai/zero/sharded_optim/_utils.py\n+++ b/colossalai/zero/sharded_optim/_utils.py\n@@ -3,7 +3,7 @@\n \n import torch\n import torch.distributed as dist\n-from torch.six import inf\n+from torch import inf\n from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors\n \n from colossalai.tensor import ColoParameter\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[BUG]: Wrong import in `zero/sharded_optim/_utils.py`\n### \ud83d\udc1b Describe the bug\n\nIn issue #2774 , thanks @malfet for pointing out that we should not use `torch._six` to import `inf` and use `torch` to import `inf` instead, however, there is a small mistake in PR #2775 use an invalid `torch.six` module to import `inf`. We should fix this typo.\n\n### Environment\n\n_No response_\n", "before_files": [{"content": "import math\nfrom typing import Optional\n\nimport torch\nimport torch.distributed as dist\nfrom torch.six import inf\nfrom torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors\n\nfrom colossalai.tensor import ColoParameter\nfrom colossalai.utils import is_model_parallel_parameter\n\n\ndef flatten(input_):\n return _flatten_dense_tensors(input_)\n\n\ndef unflatten(flat, tensors):\n return _unflatten_dense_tensors(flat, tensors)\n\n\ndef count_numel(tensor_list):\n res = 0\n for tensor in tensor_list:\n res += tensor.numel()\n return res\n\n\ndef calculate_padding(numel, unit_size):\n remainder = numel % unit_size\n return unit_size - remainder if remainder else remainder\n\n\ndef shuffle_by_round_robin(tensor_list, num_partitions):\n partitions = dict()\n\n for tensor_idx, tensor in enumerate(tensor_list):\n partition_to_go = tensor_idx % num_partitions\n if partition_to_go not in partitions:\n partitions[partition_to_go] = []\n partitions[partition_to_go].append(dict(tensor=tensor, index=tensor_idx))\n\n partitions_count = len(partitions)\n new_tensor_list = []\n tensor_index_mapping = dict()\n\n for partition_id in range(partitions_count):\n partition_tensors = partitions[partition_id]\n for item in partition_tensors:\n tensor_index_mapping[item['index']] = len(new_tensor_list)\n new_tensor_list.append(item['tensor'])\n\n return new_tensor_list, tensor_index_mapping\n\n\n# create a flat tensor aligned at the alignment boundary\ndef flatten_dense_tensors_with_padding(tensor_list, unit_size):\n num_elements = count_numel(tensor_list)\n padding = calculate_padding(num_elements, unit_size=unit_size)\n\n if padding > 0:\n pad_tensor = torch.zeros(padding, device=tensor_list[0].device, dtype=tensor_list[0].dtype)\n padded_tensor_list = tensor_list + [pad_tensor]\n else:\n padded_tensor_list = tensor_list\n\n return flatten(padded_tensor_list)\n\n\ndef is_nccl_aligned(tensor):\n return tensor.data_ptr() % 4 == 0\n\n\ndef get_grad_accumulate_object(tensor):\n \"\"\"\n Return the AccumulateGrad of the input tensor\n \"\"\"\n\n # grad_fn reference:\n # https://discuss.pytorch.org/t/in-the-grad-fn-i-find-a-next-functions-but-i-dont-understand-the-meaning-of-the-attribute/24463\n # expand_as reference: https://pytorch.org/docs/stable/generated/torch.Tensor.expand.html#torch.Tensor.expand\n #\n # `next_functions` will return the backward graph where\n # the first element is the AccumulateGrad of the leaf nodes.\n # we want to get the AccumulateGrad of the input tensor instead of the leaf\n # node in the whole 
computation graph.\n # Therefore, we call expand_as to create a dummy graph\n # where tensor_tmp and tensor indeed point to the same object.\n # You can check this by print(tensor.data_ptr() == tensor_tmp.data_ptr())\n tensor_tmp = tensor.expand_as(tensor)\n grad_acc_obj = tensor_tmp.grad_fn.next_functions[0][0]\n return grad_acc_obj\n\n\ndef split_half_float_double(tensor_list):\n dtypes = [\"torch.cuda.HalfTensor\", \"torch.cuda.FloatTensor\", \"torch.cuda.DoubleTensor\", \"torch.cuda.BFloat16Tensor\"]\n buckets = []\n for i, dtype in enumerate(dtypes):\n bucket = [t for t in tensor_list if t.type() == dtype]\n if bucket:\n buckets.append(bucket)\n return buckets\n\n\ndef reduce_tensor_dp_group(tensor: torch.Tensor,\n dtype: Optional[torch.dtype] = None,\n dst_local_rank: Optional[int] = None,\n dst_global_rank: Optional[int] = None,\n group: Optional[dist.ProcessGroup] = None):\n \"\"\"\n Reduce the tensor in the data parallel process group\n\n :param tensor: A tensor object to reduce/all-reduce\n :param dtype: The data type used in communication\n :param dst_rank: The source rank for reduce. If dst_rank is None,\n :param parallel_mode: Communication parallel mode\n all-reduce will be used instead of reduce. Default is None.\n\n :type tensor: torch.Tensor\n :type dtype: torch.dtype, optional\n :type dst_rank: int, optional\n :type pg: ProcessGroup, optional\n \"\"\"\n # use the original dtype\n if dtype is None:\n dtype = tensor.dtype\n\n # cast the data to specified dtype for reduce/all-reduce\n if tensor.dtype != dtype:\n tensor_to_reduce = tensor.to(dtype)\n else:\n tensor_to_reduce = tensor\n\n world_size = dist.get_world_size(group=group)\n tensor_to_reduce.div_(world_size)\n\n # if rank is None, all reduce will be used\n # else, reduce is used\n use_all_reduce = dst_local_rank is None\n\n if use_all_reduce:\n dist.all_reduce(tensor_to_reduce, group=group)\n else:\n dist.reduce(tensor=tensor_to_reduce, dst=dst_global_rank, group=group)\n\n # recover the original dtype\n if tensor.dtype != dtype and tensor is not tensor_to_reduce:\n local_rank = dist.get_rank(group=group)\n if use_all_reduce or dst_local_rank == local_rank:\n tensor.copy_(tensor_to_reduce)\n\n return tensor\n\n\ndef has_inf_or_nan(tensor):\n try:\n # if tensor is half, the .float() incurs an additional deep copy, but it's necessary if\n # Pytorch's .sum() creates a one-element tensor of the same type as tensor\n # (which is true for some recent version of pytorch).\n tensor_sum = float(tensor.float().sum())\n # More efficient version that can be used if .sum() returns a Python scalar\n # tensor_sum = float(tensor.sum())\n except RuntimeError as instance:\n # We want to check if inst is actually an overflow exception.\n # RuntimeError could come from a different error.\n # If so, we still want the exception to propagate.\n if \"value cannot be converted\" not in instance.args[0]:\n raise\n return True\n else:\n if tensor_sum == float('inf') or tensor_sum == -float('inf') or tensor_sum != tensor_sum:\n return True\n return False\n\n\ndef release_param_grad(tensor_list):\n for tensor in tensor_list:\n tensor.grad = None\n\n\ndef calculate_global_norm_from_list(norm_list):\n \"\"\" Compute total from a list of norms\n \"\"\"\n total_norm = 0.0\n for norm in norm_list:\n total_norm += norm**2.0\n return math.sqrt(total_norm)\n\n\ndef compute_norm(gradients, params, dp_group, mp_group, norm_type=2):\n \"\"\"Clips gradient norm of an iterable of parameters.\n This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ 
and\n added functionality to handle model parallel parameters. Note that\n the gradients are modified in place.\n Arguments:\n parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a\n single Tensor that will have gradients normalized\n max_norm (float or int): max norm of the gradients\n norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for\n infinity norm.\n Returns:\n Total norm of the parameters (viewed as a single vector).\n \"\"\"\n\n if mp_group is None:\n mp_rank = 0\n else:\n mp_rank = dist.get_rank(mp_group)\n\n norm_type = float(norm_type)\n if norm_type == inf:\n total_norm = max(g.data.abs().max() for g in gradients)\n total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])\n dist.all_reduce(total_norm_cuda, op=torch.distributed.ReduceOp.MAX, group=dp_group)\n\n # Take max across all GPUs.\n if mp_group is not None:\n dist.all_reduce(tensor=total_norm_cuda, op=torch.distributed.ReduceOp.MAX)\n total_norm = total_norm_cuda[0].item()\n else:\n total_norm = 0.0\n # if dist.get_rank() == 0:\n # logger.info(f\"Total Norm beginning {total_norm}\")\n\n for g, p in zip(gradients, params):\n # Pipeline parallelism may replicate parameters. Avoid multi-counting.\n tp_param_flag = False\n if is_model_parallel_parameter(p) or (isinstance(p, ColoParameter) and not p.is_replicate()):\n tp_param_flag = True\n if tp_param_flag or mp_rank == 0:\n param_norm = g.data.double().norm(2)\n total_norm += param_norm.item()**2\n\n # Sum across all model parallel GPUs.\n total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])\n torch.distributed.all_reduce(total_norm_cuda, op=torch.distributed.ReduceOp.SUM, group=dp_group)\n\n if mp_group is not None:\n dist.all_reduce(tensor=total_norm_cuda, op=torch.distributed.ReduceOp.SUM, group=mp_group)\n\n total_norm = total_norm_cuda[0].item()**(1. / norm_type)\n\n if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm:\n total_norm = -1\n\n return total_norm\n\n\ndef sync_param(flat_tensor, tensor_list):\n \"\"\"\n Synchronize the flattened tensor and unflattened tensor list. When\n a list of tensor are flattened with `torch._utils._unflatten_dense_tensors`,\n a new tensor is created. Thus, the flat tensor and original tensor list do not\n share the same memory space. 
This function will update the tensor list so that\n they point to the same value.\n\n :param flat_tensor: A flat tensor obtained by calling `torch._utils._unflatten_dense_tensors` on a tensor lsit\n :param tensor_list: A list of tensors corresponding to the flattened tensor\n :type flat_tensor: torch.Tensor\n :type tensor_list: List[torch.Tensor]\n \"\"\"\n updated_params = unflatten(flat_tensor, tensor_list)\n\n # update the tensor data\n for p, q in zip(tensor_list, updated_params):\n p.data = q.data\n", "path": "colossalai/zero/sharded_optim/_utils.py"}], "after_files": [{"content": "import math\nfrom typing import Optional\n\nimport torch\nimport torch.distributed as dist\nfrom torch import inf\nfrom torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors\n\nfrom colossalai.tensor import ColoParameter\nfrom colossalai.utils import is_model_parallel_parameter\n\n\ndef flatten(input_):\n return _flatten_dense_tensors(input_)\n\n\ndef unflatten(flat, tensors):\n return _unflatten_dense_tensors(flat, tensors)\n\n\ndef count_numel(tensor_list):\n res = 0\n for tensor in tensor_list:\n res += tensor.numel()\n return res\n\n\ndef calculate_padding(numel, unit_size):\n remainder = numel % unit_size\n return unit_size - remainder if remainder else remainder\n\n\ndef shuffle_by_round_robin(tensor_list, num_partitions):\n partitions = dict()\n\n for tensor_idx, tensor in enumerate(tensor_list):\n partition_to_go = tensor_idx % num_partitions\n if partition_to_go not in partitions:\n partitions[partition_to_go] = []\n partitions[partition_to_go].append(dict(tensor=tensor, index=tensor_idx))\n\n partitions_count = len(partitions)\n new_tensor_list = []\n tensor_index_mapping = dict()\n\n for partition_id in range(partitions_count):\n partition_tensors = partitions[partition_id]\n for item in partition_tensors:\n tensor_index_mapping[item['index']] = len(new_tensor_list)\n new_tensor_list.append(item['tensor'])\n\n return new_tensor_list, tensor_index_mapping\n\n\n# create a flat tensor aligned at the alignment boundary\ndef flatten_dense_tensors_with_padding(tensor_list, unit_size):\n num_elements = count_numel(tensor_list)\n padding = calculate_padding(num_elements, unit_size=unit_size)\n\n if padding > 0:\n pad_tensor = torch.zeros(padding, device=tensor_list[0].device, dtype=tensor_list[0].dtype)\n padded_tensor_list = tensor_list + [pad_tensor]\n else:\n padded_tensor_list = tensor_list\n\n return flatten(padded_tensor_list)\n\n\ndef is_nccl_aligned(tensor):\n return tensor.data_ptr() % 4 == 0\n\n\ndef get_grad_accumulate_object(tensor):\n \"\"\"\n Return the AccumulateGrad of the input tensor\n \"\"\"\n\n # grad_fn reference:\n # https://discuss.pytorch.org/t/in-the-grad-fn-i-find-a-next-functions-but-i-dont-understand-the-meaning-of-the-attribute/24463\n # expand_as reference: https://pytorch.org/docs/stable/generated/torch.Tensor.expand.html#torch.Tensor.expand\n #\n # `next_functions` will return the backward graph where\n # the first element is the AccumulateGrad of the leaf nodes.\n # we want to get the AccumulateGrad of the input tensor instead of the leaf\n # node in the whole computation graph.\n # Therefore, we call expand_as to create a dummy graph\n # where tensor_tmp and tensor indeed point to the same object.\n # You can check this by print(tensor.data_ptr() == tensor_tmp.data_ptr())\n tensor_tmp = tensor.expand_as(tensor)\n grad_acc_obj = tensor_tmp.grad_fn.next_functions[0][0]\n return grad_acc_obj\n\n\ndef split_half_float_double(tensor_list):\n dtypes = 
[\"torch.cuda.HalfTensor\", \"torch.cuda.FloatTensor\", \"torch.cuda.DoubleTensor\", \"torch.cuda.BFloat16Tensor\"]\n buckets = []\n for i, dtype in enumerate(dtypes):\n bucket = [t for t in tensor_list if t.type() == dtype]\n if bucket:\n buckets.append(bucket)\n return buckets\n\n\ndef reduce_tensor_dp_group(tensor: torch.Tensor,\n dtype: Optional[torch.dtype] = None,\n dst_local_rank: Optional[int] = None,\n dst_global_rank: Optional[int] = None,\n group: Optional[dist.ProcessGroup] = None):\n \"\"\"\n Reduce the tensor in the data parallel process group\n\n :param tensor: A tensor object to reduce/all-reduce\n :param dtype: The data type used in communication\n :param dst_rank: The source rank for reduce. If dst_rank is None,\n :param parallel_mode: Communication parallel mode\n all-reduce will be used instead of reduce. Default is None.\n\n :type tensor: torch.Tensor\n :type dtype: torch.dtype, optional\n :type dst_rank: int, optional\n :type pg: ProcessGroup, optional\n \"\"\"\n # use the original dtype\n if dtype is None:\n dtype = tensor.dtype\n\n # cast the data to specified dtype for reduce/all-reduce\n if tensor.dtype != dtype:\n tensor_to_reduce = tensor.to(dtype)\n else:\n tensor_to_reduce = tensor\n\n world_size = dist.get_world_size(group=group)\n tensor_to_reduce.div_(world_size)\n\n # if rank is None, all reduce will be used\n # else, reduce is used\n use_all_reduce = dst_local_rank is None\n\n if use_all_reduce:\n dist.all_reduce(tensor_to_reduce, group=group)\n else:\n dist.reduce(tensor=tensor_to_reduce, dst=dst_global_rank, group=group)\n\n # recover the original dtype\n if tensor.dtype != dtype and tensor is not tensor_to_reduce:\n local_rank = dist.get_rank(group=group)\n if use_all_reduce or dst_local_rank == local_rank:\n tensor.copy_(tensor_to_reduce)\n\n return tensor\n\n\ndef has_inf_or_nan(tensor):\n try:\n # if tensor is half, the .float() incurs an additional deep copy, but it's necessary if\n # Pytorch's .sum() creates a one-element tensor of the same type as tensor\n # (which is true for some recent version of pytorch).\n tensor_sum = float(tensor.float().sum())\n # More efficient version that can be used if .sum() returns a Python scalar\n # tensor_sum = float(tensor.sum())\n except RuntimeError as instance:\n # We want to check if inst is actually an overflow exception.\n # RuntimeError could come from a different error.\n # If so, we still want the exception to propagate.\n if \"value cannot be converted\" not in instance.args[0]:\n raise\n return True\n else:\n if tensor_sum == float('inf') or tensor_sum == -float('inf') or tensor_sum != tensor_sum:\n return True\n return False\n\n\ndef release_param_grad(tensor_list):\n for tensor in tensor_list:\n tensor.grad = None\n\n\ndef calculate_global_norm_from_list(norm_list):\n \"\"\" Compute total from a list of norms\n \"\"\"\n total_norm = 0.0\n for norm in norm_list:\n total_norm += norm**2.0\n return math.sqrt(total_norm)\n\n\ndef compute_norm(gradients, params, dp_group, mp_group, norm_type=2):\n \"\"\"Clips gradient norm of an iterable of parameters.\n This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and\n added functionality to handle model parallel parameters. Note that\n the gradients are modified in place.\n Arguments:\n parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a\n single Tensor that will have gradients normalized\n max_norm (float or int): max norm of the gradients\n norm_type (float or int): type of the used p-norm. 
Can be ``'inf'`` for\n infinity norm.\n Returns:\n Total norm of the parameters (viewed as a single vector).\n \"\"\"\n\n if mp_group is None:\n mp_rank = 0\n else:\n mp_rank = dist.get_rank(mp_group)\n\n norm_type = float(norm_type)\n if norm_type == inf:\n total_norm = max(g.data.abs().max() for g in gradients)\n total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])\n dist.all_reduce(total_norm_cuda, op=torch.distributed.ReduceOp.MAX, group=dp_group)\n\n # Take max across all GPUs.\n if mp_group is not None:\n dist.all_reduce(tensor=total_norm_cuda, op=torch.distributed.ReduceOp.MAX)\n total_norm = total_norm_cuda[0].item()\n else:\n total_norm = 0.0\n # if dist.get_rank() == 0:\n # logger.info(f\"Total Norm beginning {total_norm}\")\n\n for g, p in zip(gradients, params):\n # Pipeline parallelism may replicate parameters. Avoid multi-counting.\n tp_param_flag = False\n if is_model_parallel_parameter(p) or (isinstance(p, ColoParameter) and not p.is_replicate()):\n tp_param_flag = True\n if tp_param_flag or mp_rank == 0:\n param_norm = g.data.double().norm(2)\n total_norm += param_norm.item()**2\n\n # Sum across all model parallel GPUs.\n total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])\n torch.distributed.all_reduce(total_norm_cuda, op=torch.distributed.ReduceOp.SUM, group=dp_group)\n\n if mp_group is not None:\n dist.all_reduce(tensor=total_norm_cuda, op=torch.distributed.ReduceOp.SUM, group=mp_group)\n\n total_norm = total_norm_cuda[0].item()**(1. / norm_type)\n\n if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm:\n total_norm = -1\n\n return total_norm\n\n\ndef sync_param(flat_tensor, tensor_list):\n \"\"\"\n Synchronize the flattened tensor and unflattened tensor list. When\n a list of tensor are flattened with `torch._utils._unflatten_dense_tensors`,\n a new tensor is created. Thus, the flat tensor and original tensor list do not\n share the same memory space. This function will update the tensor list so that\n they point to the same value.\n\n :param flat_tensor: A flat tensor obtained by calling `torch._utils._unflatten_dense_tensors` on a tensor lsit\n :param tensor_list: A list of tensors corresponding to the flattened tensor\n :type flat_tensor: torch.Tensor\n :type tensor_list: List[torch.Tensor]\n \"\"\"\n updated_params = unflatten(flat_tensor, tensor_list)\n\n # update the tensor data\n for p, q in zip(tensor_list, updated_params):\n p.data = q.data\n", "path": "colossalai/zero/sharded_optim/_utils.py"}]}
| 3,328 | 122 |
gh_patches_debug_10059 | rasdani/github-patches | git_diff | scrapy__scrapy-5269 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ItemLoader: support non-TextResponse
At the moment, `ItemLoader(response=response)` fails if `response` is not a `TextResponse` instance.
Passing a binary response can still be useful, though. For example, to allow processors to access the response from their loader context, and hence be able to report the source URL (`response.url`) when reporting input issues.
--- END ISSUE ---
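To make the use case above concrete, a hedged sketch follows (the loader subclass `ProductLoader`, the `price` field, and the helper `parse_price` are hypothetical, not part of Scrapy's API): an input processor reads the response stored in the loader context and reports `response.url` when the scraped value is malformed. With the change proposed here, the same pattern would keep working when the loader is built from a non-text (binary) response.
```python
# Hypothetical sketch: an input processor that uses the response kept in the
# loader context to report where malformed data came from.
import logging

from itemloaders.processors import MapCompose
from scrapy.loader import ItemLoader

logger = logging.getLogger(__name__)


def parse_price(value, loader_context):
    # ItemLoader.__init__ stores the response in the loader context.
    response = loader_context.get("response")
    source = response.url if response is not None else "<no response>"
    try:
        return float(value.strip().lstrip("$"))
    except (AttributeError, ValueError):
        logger.warning("Could not parse price %r scraped from %s", value, source)
        return None


class ProductLoader(ItemLoader):
    default_item_class = dict
    price_in = MapCompose(parse_price)  # input processor for the "price" field
```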
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/loader/__init__.py`
Content:
```
1 """
2 Item Loader
3
4 See documentation in docs/topics/loaders.rst
5 """
6 import itemloaders
7
8 from scrapy.item import Item
9 from scrapy.selector import Selector
10
11
12 class ItemLoader(itemloaders.ItemLoader):
13 """
14 A user-friendly abstraction to populate an :ref:`item <topics-items>` with data
15 by applying :ref:`field processors <topics-loaders-processors>` to scraped data.
16 When instantiated with a ``selector`` or a ``response`` it supports
17 data extraction from web pages using :ref:`selectors <topics-selectors>`.
18
19 :param item: The item instance to populate using subsequent calls to
20 :meth:`~ItemLoader.add_xpath`, :meth:`~ItemLoader.add_css`,
21 or :meth:`~ItemLoader.add_value`.
22 :type item: scrapy.item.Item
23
24 :param selector: The selector to extract data from, when using the
25 :meth:`add_xpath`, :meth:`add_css`, :meth:`replace_xpath`, or
26 :meth:`replace_css` method.
27 :type selector: :class:`~scrapy.selector.Selector` object
28
29 :param response: The response used to construct the selector using the
30 :attr:`default_selector_class`, unless the selector argument is given,
31 in which case this argument is ignored.
32 :type response: :class:`~scrapy.http.Response` object
33
34 If no item is given, one is instantiated automatically using the class in
35 :attr:`default_item_class`.
36
37 The item, selector, response and remaining keyword arguments are
38 assigned to the Loader context (accessible through the :attr:`context` attribute).
39
40 .. attribute:: item
41
42 The item object being parsed by this Item Loader.
43 This is mostly used as a property so, when attempting to override this
44 value, you may want to check out :attr:`default_item_class` first.
45
46 .. attribute:: context
47
48 The currently active :ref:`Context <loaders-context>` of this Item Loader.
49
50 .. attribute:: default_item_class
51
52 An :ref:`item <topics-items>` class (or factory), used to instantiate
53 items when not given in the ``__init__`` method.
54
55 .. attribute:: default_input_processor
56
57 The default input processor to use for those fields which don't specify
58 one.
59
60 .. attribute:: default_output_processor
61
62 The default output processor to use for those fields which don't specify
63 one.
64
65 .. attribute:: default_selector_class
66
67 The class used to construct the :attr:`selector` of this
68 :class:`ItemLoader`, if only a response is given in the ``__init__`` method.
69 If a selector is given in the ``__init__`` method this attribute is ignored.
70 This attribute is sometimes overridden in subclasses.
71
72 .. attribute:: selector
73
74 The :class:`~scrapy.selector.Selector` object to extract data from.
75 It's either the selector given in the ``__init__`` method or one created from
76 the response given in the ``__init__`` method using the
77 :attr:`default_selector_class`. This attribute is meant to be
78 read-only.
79 """
80
81 default_item_class = Item
82 default_selector_class = Selector
83
84 def __init__(self, item=None, selector=None, response=None, parent=None, **context):
85 if selector is None and response is not None:
86 selector = self.default_selector_class(response)
87 context.update(response=response)
88 super().__init__(item=item, selector=selector, parent=parent, **context)
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/loader/__init__.py b/scrapy/loader/__init__.py
--- a/scrapy/loader/__init__.py
+++ b/scrapy/loader/__init__.py
@@ -83,6 +83,9 @@
def __init__(self, item=None, selector=None, response=None, parent=None, **context):
if selector is None and response is not None:
- selector = self.default_selector_class(response)
+ try:
+ selector = self.default_selector_class(response)
+ except AttributeError:
+ selector = None
context.update(response=response)
super().__init__(item=item, selector=selector, parent=parent, **context)
|
{"golden_diff": "diff --git a/scrapy/loader/__init__.py b/scrapy/loader/__init__.py\n--- a/scrapy/loader/__init__.py\n+++ b/scrapy/loader/__init__.py\n@@ -83,6 +83,9 @@\n \n def __init__(self, item=None, selector=None, response=None, parent=None, **context):\n if selector is None and response is not None:\n- selector = self.default_selector_class(response)\n+ try:\n+ selector = self.default_selector_class(response)\n+ except AttributeError:\n+ selector = None\n context.update(response=response)\n super().__init__(item=item, selector=selector, parent=parent, **context)\n", "issue": "ItemLoader: support non-TextResponse\nAt the moment, `ItemLoader(response=response)` fails if `response` is not a `TextResponse` instance.\r\n\r\nPassing a binary response can still be useful, though. For example, to allow processors to access the response from their loader context, and hence be able to report the source URL (`response.url`) when reporting input issues.\n", "before_files": [{"content": "\"\"\"\nItem Loader\n\nSee documentation in docs/topics/loaders.rst\n\"\"\"\nimport itemloaders\n\nfrom scrapy.item import Item\nfrom scrapy.selector import Selector\n\n\nclass ItemLoader(itemloaders.ItemLoader):\n \"\"\"\n A user-friendly abstraction to populate an :ref:`item <topics-items>` with data\n by applying :ref:`field processors <topics-loaders-processors>` to scraped data.\n When instantiated with a ``selector`` or a ``response`` it supports\n data extraction from web pages using :ref:`selectors <topics-selectors>`.\n\n :param item: The item instance to populate using subsequent calls to\n :meth:`~ItemLoader.add_xpath`, :meth:`~ItemLoader.add_css`,\n or :meth:`~ItemLoader.add_value`.\n :type item: scrapy.item.Item\n\n :param selector: The selector to extract data from, when using the\n :meth:`add_xpath`, :meth:`add_css`, :meth:`replace_xpath`, or\n :meth:`replace_css` method.\n :type selector: :class:`~scrapy.selector.Selector` object\n\n :param response: The response used to construct the selector using the\n :attr:`default_selector_class`, unless the selector argument is given,\n in which case this argument is ignored.\n :type response: :class:`~scrapy.http.Response` object\n\n If no item is given, one is instantiated automatically using the class in\n :attr:`default_item_class`.\n\n The item, selector, response and remaining keyword arguments are\n assigned to the Loader context (accessible through the :attr:`context` attribute).\n\n .. attribute:: item\n\n The item object being parsed by this Item Loader.\n This is mostly used as a property so, when attempting to override this\n value, you may want to check out :attr:`default_item_class` first.\n\n .. attribute:: context\n\n The currently active :ref:`Context <loaders-context>` of this Item Loader.\n\n .. attribute:: default_item_class\n\n An :ref:`item <topics-items>` class (or factory), used to instantiate\n items when not given in the ``__init__`` method.\n\n .. attribute:: default_input_processor\n\n The default input processor to use for those fields which don't specify\n one.\n\n .. attribute:: default_output_processor\n\n The default output processor to use for those fields which don't specify\n one.\n\n .. attribute:: default_selector_class\n\n The class used to construct the :attr:`selector` of this\n :class:`ItemLoader`, if only a response is given in the ``__init__`` method.\n If a selector is given in the ``__init__`` method this attribute is ignored.\n This attribute is sometimes overridden in subclasses.\n\n .. 
attribute:: selector\n\n The :class:`~scrapy.selector.Selector` object to extract data from.\n It's either the selector given in the ``__init__`` method or one created from\n the response given in the ``__init__`` method using the\n :attr:`default_selector_class`. This attribute is meant to be\n read-only.\n \"\"\"\n\n default_item_class = Item\n default_selector_class = Selector\n\n def __init__(self, item=None, selector=None, response=None, parent=None, **context):\n if selector is None and response is not None:\n selector = self.default_selector_class(response)\n context.update(response=response)\n super().__init__(item=item, selector=selector, parent=parent, **context)\n", "path": "scrapy/loader/__init__.py"}], "after_files": [{"content": "\"\"\"\nItem Loader\n\nSee documentation in docs/topics/loaders.rst\n\"\"\"\nimport itemloaders\n\nfrom scrapy.item import Item\nfrom scrapy.selector import Selector\n\n\nclass ItemLoader(itemloaders.ItemLoader):\n \"\"\"\n A user-friendly abstraction to populate an :ref:`item <topics-items>` with data\n by applying :ref:`field processors <topics-loaders-processors>` to scraped data.\n When instantiated with a ``selector`` or a ``response`` it supports\n data extraction from web pages using :ref:`selectors <topics-selectors>`.\n\n :param item: The item instance to populate using subsequent calls to\n :meth:`~ItemLoader.add_xpath`, :meth:`~ItemLoader.add_css`,\n or :meth:`~ItemLoader.add_value`.\n :type item: scrapy.item.Item\n\n :param selector: The selector to extract data from, when using the\n :meth:`add_xpath`, :meth:`add_css`, :meth:`replace_xpath`, or\n :meth:`replace_css` method.\n :type selector: :class:`~scrapy.selector.Selector` object\n\n :param response: The response used to construct the selector using the\n :attr:`default_selector_class`, unless the selector argument is given,\n in which case this argument is ignored.\n :type response: :class:`~scrapy.http.Response` object\n\n If no item is given, one is instantiated automatically using the class in\n :attr:`default_item_class`.\n\n The item, selector, response and remaining keyword arguments are\n assigned to the Loader context (accessible through the :attr:`context` attribute).\n\n .. attribute:: item\n\n The item object being parsed by this Item Loader.\n This is mostly used as a property so, when attempting to override this\n value, you may want to check out :attr:`default_item_class` first.\n\n .. attribute:: context\n\n The currently active :ref:`Context <loaders-context>` of this Item Loader.\n\n .. attribute:: default_item_class\n\n An :ref:`item <topics-items>` class (or factory), used to instantiate\n items when not given in the ``__init__`` method.\n\n .. attribute:: default_input_processor\n\n The default input processor to use for those fields which don't specify\n one.\n\n .. attribute:: default_output_processor\n\n The default output processor to use for those fields which don't specify\n one.\n\n .. attribute:: default_selector_class\n\n The class used to construct the :attr:`selector` of this\n :class:`ItemLoader`, if only a response is given in the ``__init__`` method.\n If a selector is given in the ``__init__`` method this attribute is ignored.\n This attribute is sometimes overridden in subclasses.\n\n .. attribute:: selector\n\n The :class:`~scrapy.selector.Selector` object to extract data from.\n It's either the selector given in the ``__init__`` method or one created from\n the response given in the ``__init__`` method using the\n :attr:`default_selector_class`. 
This attribute is meant to be\n read-only.\n \"\"\"\n\n default_item_class = Item\n default_selector_class = Selector\n\n def __init__(self, item=None, selector=None, response=None, parent=None, **context):\n if selector is None and response is not None:\n try:\n selector = self.default_selector_class(response)\n except AttributeError:\n selector = None\n context.update(response=response)\n super().__init__(item=item, selector=selector, parent=parent, **context)\n", "path": "scrapy/loader/__init__.py"}]}
| 1,269 | 146 |
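The record above patches `ItemLoader.__init__` so a non-text response no longer aborts loader construction. A minimal, self-contained sketch of that guard, using hypothetical stand-in classes rather than importing Scrapy (the real `Selector` and `Response` types are assumptions here):

```python
# Hypothetical stand-ins; Scrapy itself is not imported in this sketch.
class Selector:
    """Mimics scrapy.selector.Selector: it needs a text body to build a parse tree."""
    def __init__(self, response):
        self.text = response.text  # raises AttributeError for binary responses


class BinaryResponse:
    """Stand-in for a non-text scrapy.http.Response (e.g. a downloaded PDF)."""
    url = "https://example.com/report.pdf"
    body = b"%PDF-1.7 ..."


def build_selector(response):
    # Mirrors the patched ItemLoader.__init__: fall back to None instead of failing.
    try:
        return Selector(response)
    except AttributeError:
        return None


print(build_selector(BinaryResponse()))  # None; the loader still sees the response in its context
```

With the fallback in place, field processors can still read `response.url` from the loader context even though no selector-based extraction is possible for the binary body.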
gh_patches_debug_19233
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-5692
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`backprop_step` only partially documented
The `backprop_step` function in `chainer/_backprop_utils.py` is documented but misses an explanation for the `func` argument. https://github.com/chainer/chainer/blob/master/chainer/_backprop_utils.py#L73-L89
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/_backprop_utils.py`
Content:
```
1 import six
2
3 import chainer
4
5
6 def _reduce(grad_list):
7 if not grad_list:
8 return None
9 if len(grad_list) >= 2:
10 grad_list[:] = [chainer.functions.add(*grad_list)]
11 return grad_list[0]
12
13
14 def _pure(grad):
15 return [] if grad is None else [grad]
16
17
18 def _pop_or_none(grad_list):
19 return grad_list.pop() if grad_list else None
20
21
22 class GradTable(object):
23
24 """Dict of nodes to references of gradients
25
26 The gradients are stored as references to them in the backprop process. The
27 current implementation uses lists. Keep the lengths of lists <= 1 for the
28 strict accumulation of gradients. Leave them to accumulate gradients
29 lazily.
30
31 Args:
32 load_if_new (bool): read ``grad_var`` of node when the node has not
33 been added.
34
35 """
36
37 def __init__(self, load_if_new=False):
38 self.grads = {}
39 self._load_if_new = load_if_new
40
41 def __setitem__(self, node, grad):
42 assert node is not None
43 self.grads[node] = _pure(grad)
44
45 def get_as_list(self, node):
46 assert node is not None
47 grads = self.grads
48 if node not in grads:
49 if self._load_if_new and node.creator_node is None:
50 node._check_old_style_gradient()
51 # accumulate the gradient only if the node is a leaf
52 grads[node] = _pure(node.grad_var)
53 else:
54 grads[node] = []
55 return grads[node]
56
57 def pop(self, node):
58 if node is None:
59 return None
60 grads = self.grads
61 if node in grads:
62 return _reduce(grads.pop(node))
63 if self._load_if_new:
64 return node.grad_var
65 else:
66 return None
67
68 def assert_no_grads(self):
69 for gx in self.grads.values():
70 assert gx == []
71
72
73 def backprop_step(
74 func, target_input_indexes, grad_outputs, grad_inputs):
75 """Accumulates gradients of a FunctionNode
76
77 This routine is used by :meth:`chainer.Variable.backward` and
78 :func:`chainer.grad`.
79
80 Args:
81 target_input_indexes (tuple of int): Sorted indices of the input
82 variables w.r.t. which the gradients are required. It is
83 guaranteed that this tuple contains at least one element.
84 grad_outputs (tuple of Variable): Gradients w.r.t. the output
85 variables. If the gradient w.r.t. an output variable is not
86 given, the corresponding element is ``None``.
87 grad_inputs (dict): References of radients w.r.t. the input variables.
88
89 """
90 is_debug = chainer.is_debug()
91 if is_debug:
92 assert isinstance(target_input_indexes, tuple)
93 assert target_input_indexes == tuple(sorted(target_input_indexes))
94 assert isinstance(grad_outputs, tuple)
95 if func.backward_accumulate.__code__ \
96 is not chainer.FunctionNode.backward_accumulate.__code__:
97 # backward_accumulate is overridden
98 grad_inputs_tuple = tuple([
99 _pop_or_none(grad_inputs[func.inputs[i]])
100 for i in target_input_indexes
101 ])
102 gxs = func.backward_accumulate(
103 target_input_indexes, grad_outputs, grad_inputs_tuple)
104 else: # otherwise, backward should be overridden
105 gxs = func.backward(
106 target_input_indexes, grad_outputs)
107
108 if is_debug:
109 for gx in gxs:
110 if not (gx is None or isinstance(gx, chainer.Variable)):
111 raise ValueError(func._get_error_message(
112 'type of gradients returned from backward is '
113 'incorrect: '
114 '{} != expected {}'.format(
115 type(gx), chainer.Variable)))
116
117 len_gxs = len(gxs)
118 if len_gxs == len(func.inputs):
119 gxs = tuple([gxs[i] for i in target_input_indexes])
120 elif len_gxs != len(target_input_indexes):
121 msg = 'number of gradients returned from backward is incorrect: '
122 if len(func.inputs) == len(target_input_indexes):
123 msg += (
124 '%s != expected %s' % (len_gxs, len(func.inputs)))
125 else:
126 msg += (
127 '%s != expected %s or %s'
128 % (len_gxs, len(func.inputs), len(target_input_indexes)))
129 raise ValueError(func._get_error_message(msg))
130
131 for i, gx in six.moves.zip(target_input_indexes, gxs):
132 if gx is not None:
133 grad_inputs[func.inputs[i]].append(gx)
134
135 if is_debug:
136 node_x = func.inputs[i]
137 g_input_list = grad_inputs[node_x]
138 if gx.shape != node_x.shape:
139 raise ValueError(func._get_error_message(
140 'shape of gradients returned from backward is '
141 'incorrect: '
142 'input-index={}, actual {} != expected {}'.format(
143 i, gx.shape, node_x.shape)))
144 if gx is not None and g_input_list:
145 g_input = g_input_list[0]
146 if gx.shape != g_input.shape:
147 raise ValueError(func._get_error_message(
148 'shape of gradients returned from backward is '
149 'incorrect: '
150 'input-index={}, actual {} != expected {}'.format(
151 i, gx.shape, g_input.shape)))
152 if gx.dtype != g_input.dtype:
153 raise ValueError(func._get_error_message(
154 'dtype of gradients returned from backward is '
155 'incorrect: '
156 'input-index={}, actual {} != expected {}'.format(
157 i, gx.dtype, g_input.dtype)))
158 del gxs
159
160 if is_debug:
161 # each grad is a list of variables
162 # iter_gxs expands it as a sequence of variables.
163 def iter_gxs(gxs):
164 for gx in gxs:
165 for gx_elem in gx:
166 yield gx_elem
167
168 for gx in iter_gxs(grad_inputs.values()):
169 if chainer.backend._contains_nan(gx.data):
170 raise RuntimeError(
171 'NaN is detected on backward computation of {}'
172 .format(func.label))
173
174 if not func.lazy_grad_sum:
175 for gx in grad_inputs.values():
176 _reduce(gx)
177
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainer/_backprop_utils.py b/chainer/_backprop_utils.py
--- a/chainer/_backprop_utils.py
+++ b/chainer/_backprop_utils.py
@@ -78,13 +78,16 @@
:func:`chainer.grad`.
Args:
- target_input_indexes (tuple of int): Sorted indices of the input
- variables w.r.t. which the gradients are required. It is
- guaranteed that this tuple contains at least one element.
+ func (~chainer.FunctionNode): The function for which gradients are
+ accumulated.
+ target_input_indexes (tuple of int): Sorted indices of the inputs
+ that require gradients. It is guaranteed that this tuple contains
+ at least one element.
grad_outputs (tuple of Variable): Gradients w.r.t. the output
variables. If the gradient w.r.t. an output variable is not
given, the corresponding element is ``None``.
- grad_inputs (dict): References of radients w.r.t. the input variables.
+ grad_inputs (dict): References of the gradients w.r.t. the input
+ variables.
"""
is_debug = chainer.is_debug()
|
{"golden_diff": "diff --git a/chainer/_backprop_utils.py b/chainer/_backprop_utils.py\n--- a/chainer/_backprop_utils.py\n+++ b/chainer/_backprop_utils.py\n@@ -78,13 +78,16 @@\n :func:`chainer.grad`.\n \n Args:\n- target_input_indexes (tuple of int): Sorted indices of the input\n- variables w.r.t. which the gradients are required. It is\n- guaranteed that this tuple contains at least one element.\n+ func (~chainer.FunctionNode): The function for which gradients are\n+ accumulated.\n+ target_input_indexes (tuple of int): Sorted indices of the inputs\n+ that require gradients. It is guaranteed that this tuple contains\n+ at least one element.\n grad_outputs (tuple of Variable): Gradients w.r.t. the output\n variables. If the gradient w.r.t. an output variable is not\n given, the corresponding element is ``None``.\n- grad_inputs (dict): References of radients w.r.t. the input variables.\n+ grad_inputs (dict): References of the gradients w.r.t. the input\n+ variables.\n \n \"\"\"\n is_debug = chainer.is_debug()\n", "issue": "`backprop_step` only partially documented\nThe `backprop_step` function in `chainer/_backprop_utils.py` is documented but misses an explanation for the `func` argument. https://github.com/chainer/chainer/blob/master/chainer/_backprop_utils.py#L73-L89\n", "before_files": [{"content": "import six\n\nimport chainer\n\n\ndef _reduce(grad_list):\n if not grad_list:\n return None\n if len(grad_list) >= 2:\n grad_list[:] = [chainer.functions.add(*grad_list)]\n return grad_list[0]\n\n\ndef _pure(grad):\n return [] if grad is None else [grad]\n\n\ndef _pop_or_none(grad_list):\n return grad_list.pop() if grad_list else None\n\n\nclass GradTable(object):\n\n \"\"\"Dict of nodes to references of gradients\n\n The gradients are stored as references to them in the backprop process. The\n current implementation uses lists. Keep the lengths of lists <= 1 for the\n strict accumulation of gradients. Leave them to accumulate gradients\n lazily.\n\n Args:\n load_if_new (bool): read ``grad_var`` of node when the node has not\n been added.\n\n \"\"\"\n\n def __init__(self, load_if_new=False):\n self.grads = {}\n self._load_if_new = load_if_new\n\n def __setitem__(self, node, grad):\n assert node is not None\n self.grads[node] = _pure(grad)\n\n def get_as_list(self, node):\n assert node is not None\n grads = self.grads\n if node not in grads:\n if self._load_if_new and node.creator_node is None:\n node._check_old_style_gradient()\n # accumulate the gradient only if the node is a leaf\n grads[node] = _pure(node.grad_var)\n else:\n grads[node] = []\n return grads[node]\n\n def pop(self, node):\n if node is None:\n return None\n grads = self.grads\n if node in grads:\n return _reduce(grads.pop(node))\n if self._load_if_new:\n return node.grad_var\n else:\n return None\n\n def assert_no_grads(self):\n for gx in self.grads.values():\n assert gx == []\n\n\ndef backprop_step(\n func, target_input_indexes, grad_outputs, grad_inputs):\n \"\"\"Accumulates gradients of a FunctionNode\n\n This routine is used by :meth:`chainer.Variable.backward` and\n :func:`chainer.grad`.\n\n Args:\n target_input_indexes (tuple of int): Sorted indices of the input\n variables w.r.t. which the gradients are required. It is\n guaranteed that this tuple contains at least one element.\n grad_outputs (tuple of Variable): Gradients w.r.t. the output\n variables. If the gradient w.r.t. an output variable is not\n given, the corresponding element is ``None``.\n grad_inputs (dict): References of radients w.r.t. 
the input variables.\n\n \"\"\"\n is_debug = chainer.is_debug()\n if is_debug:\n assert isinstance(target_input_indexes, tuple)\n assert target_input_indexes == tuple(sorted(target_input_indexes))\n assert isinstance(grad_outputs, tuple)\n if func.backward_accumulate.__code__ \\\n is not chainer.FunctionNode.backward_accumulate.__code__:\n # backward_accumulate is overridden\n grad_inputs_tuple = tuple([\n _pop_or_none(grad_inputs[func.inputs[i]])\n for i in target_input_indexes\n ])\n gxs = func.backward_accumulate(\n target_input_indexes, grad_outputs, grad_inputs_tuple)\n else: # otherwise, backward should be overridden\n gxs = func.backward(\n target_input_indexes, grad_outputs)\n\n if is_debug:\n for gx in gxs:\n if not (gx is None or isinstance(gx, chainer.Variable)):\n raise ValueError(func._get_error_message(\n 'type of gradients returned from backward is '\n 'incorrect: '\n '{} != expected {}'.format(\n type(gx), chainer.Variable)))\n\n len_gxs = len(gxs)\n if len_gxs == len(func.inputs):\n gxs = tuple([gxs[i] for i in target_input_indexes])\n elif len_gxs != len(target_input_indexes):\n msg = 'number of gradients returned from backward is incorrect: '\n if len(func.inputs) == len(target_input_indexes):\n msg += (\n '%s != expected %s' % (len_gxs, len(func.inputs)))\n else:\n msg += (\n '%s != expected %s or %s'\n % (len_gxs, len(func.inputs), len(target_input_indexes)))\n raise ValueError(func._get_error_message(msg))\n\n for i, gx in six.moves.zip(target_input_indexes, gxs):\n if gx is not None:\n grad_inputs[func.inputs[i]].append(gx)\n\n if is_debug:\n node_x = func.inputs[i]\n g_input_list = grad_inputs[node_x]\n if gx.shape != node_x.shape:\n raise ValueError(func._get_error_message(\n 'shape of gradients returned from backward is '\n 'incorrect: '\n 'input-index={}, actual {} != expected {}'.format(\n i, gx.shape, node_x.shape)))\n if gx is not None and g_input_list:\n g_input = g_input_list[0]\n if gx.shape != g_input.shape:\n raise ValueError(func._get_error_message(\n 'shape of gradients returned from backward is '\n 'incorrect: '\n 'input-index={}, actual {} != expected {}'.format(\n i, gx.shape, g_input.shape)))\n if gx.dtype != g_input.dtype:\n raise ValueError(func._get_error_message(\n 'dtype of gradients returned from backward is '\n 'incorrect: '\n 'input-index={}, actual {} != expected {}'.format(\n i, gx.dtype, g_input.dtype)))\n del gxs\n\n if is_debug:\n # each grad is a list of variables\n # iter_gxs expands it as a sequence of variables.\n def iter_gxs(gxs):\n for gx in gxs:\n for gx_elem in gx:\n yield gx_elem\n\n for gx in iter_gxs(grad_inputs.values()):\n if chainer.backend._contains_nan(gx.data):\n raise RuntimeError(\n 'NaN is detected on backward computation of {}'\n .format(func.label))\n\n if not func.lazy_grad_sum:\n for gx in grad_inputs.values():\n _reduce(gx)\n", "path": "chainer/_backprop_utils.py"}], "after_files": [{"content": "import six\n\nimport chainer\n\n\ndef _reduce(grad_list):\n if not grad_list:\n return None\n if len(grad_list) >= 2:\n grad_list[:] = [chainer.functions.add(*grad_list)]\n return grad_list[0]\n\n\ndef _pure(grad):\n return [] if grad is None else [grad]\n\n\ndef _pop_or_none(grad_list):\n return grad_list.pop() if grad_list else None\n\n\nclass GradTable(object):\n\n \"\"\"Dict of nodes to references of gradients\n\n The gradients are stored as references to them in the backprop process. The\n current implementation uses lists. Keep the lengths of lists <= 1 for the\n strict accumulation of gradients. 
Leave them to accumulate gradients\n lazily.\n\n Args:\n load_if_new (bool): read ``grad_var`` of node when the node has not\n been added.\n\n \"\"\"\n\n def __init__(self, load_if_new=False):\n self.grads = {}\n self._load_if_new = load_if_new\n\n def __setitem__(self, node, grad):\n assert node is not None\n self.grads[node] = _pure(grad)\n\n def get_as_list(self, node):\n assert node is not None\n grads = self.grads\n if node not in grads:\n if self._load_if_new and node.creator_node is None:\n node._check_old_style_gradient()\n # accumulate the gradient only if the node is a leaf\n grads[node] = _pure(node.grad_var)\n else:\n grads[node] = []\n return grads[node]\n\n def pop(self, node):\n if node is None:\n return None\n grads = self.grads\n if node in grads:\n return _reduce(grads.pop(node))\n if self._load_if_new:\n return node.grad_var\n else:\n return None\n\n def assert_no_grads(self):\n for gx in self.grads.values():\n assert gx == []\n\n\ndef backprop_step(\n func, target_input_indexes, grad_outputs, grad_inputs):\n \"\"\"Accumulates gradients of a FunctionNode\n\n This routine is used by :meth:`chainer.Variable.backward` and\n :func:`chainer.grad`.\n\n Args:\n func (~chainer.FunctionNode): The function for which gradients are\n accumulated.\n target_input_indexes (tuple of int): Sorted indices of the inputs\n that require gradients. It is guaranteed that this tuple contains\n at least one element.\n grad_outputs (tuple of Variable): Gradients w.r.t. the output\n variables. If the gradient w.r.t. an output variable is not\n given, the corresponding element is ``None``.\n grad_inputs (dict): References of the gradients w.r.t. the input\n variables.\n\n \"\"\"\n is_debug = chainer.is_debug()\n if is_debug:\n assert isinstance(target_input_indexes, tuple)\n assert target_input_indexes == tuple(sorted(target_input_indexes))\n assert isinstance(grad_outputs, tuple)\n if func.backward_accumulate.__code__ \\\n is not chainer.FunctionNode.backward_accumulate.__code__:\n # backward_accumulate is overridden\n grad_inputs_tuple = tuple([\n _pop_or_none(grad_inputs[func.inputs[i]])\n for i in target_input_indexes\n ])\n gxs = func.backward_accumulate(\n target_input_indexes, grad_outputs, grad_inputs_tuple)\n else: # otherwise, backward should be overridden\n gxs = func.backward(\n target_input_indexes, grad_outputs)\n\n if is_debug:\n for gx in gxs:\n if not (gx is None or isinstance(gx, chainer.Variable)):\n raise ValueError(func._get_error_message(\n 'type of gradients returned from backward is '\n 'incorrect: '\n '{} != expected {}'.format(\n type(gx), chainer.Variable)))\n\n len_gxs = len(gxs)\n if len_gxs == len(func.inputs):\n gxs = tuple([gxs[i] for i in target_input_indexes])\n elif len_gxs != len(target_input_indexes):\n msg = 'number of gradients returned from backward is incorrect: '\n if len(func.inputs) == len(target_input_indexes):\n msg += (\n '%s != expected %s' % (len_gxs, len(func.inputs)))\n else:\n msg += (\n '%s != expected %s or %s'\n % (len_gxs, len(func.inputs), len(target_input_indexes)))\n raise ValueError(func._get_error_message(msg))\n\n for i, gx in six.moves.zip(target_input_indexes, gxs):\n if gx is not None:\n grad_inputs[func.inputs[i]].append(gx)\n\n if is_debug:\n node_x = func.inputs[i]\n g_input_list = grad_inputs[node_x]\n if gx.shape != node_x.shape:\n raise ValueError(func._get_error_message(\n 'shape of gradients returned from backward is '\n 'incorrect: '\n 'input-index={}, actual {} != expected {}'.format(\n i, gx.shape, node_x.shape)))\n if gx 
is not None and g_input_list:\n g_input = g_input_list[0]\n if gx.shape != g_input.shape:\n raise ValueError(func._get_error_message(\n 'shape of gradients returned from backward is '\n 'incorrect: '\n 'input-index={}, actual {} != expected {}'.format(\n i, gx.shape, g_input.shape)))\n if gx.dtype != g_input.dtype:\n raise ValueError(func._get_error_message(\n 'dtype of gradients returned from backward is '\n 'incorrect: '\n 'input-index={}, actual {} != expected {}'.format(\n i, gx.dtype, g_input.dtype)))\n del gxs\n\n if is_debug:\n # each grad is a list of variables\n # iter_gxs expands it as a sequence of variables.\n def iter_gxs(gxs):\n for gx in gxs:\n for gx_elem in gx:\n yield gx_elem\n\n for gx in iter_gxs(grad_inputs.values()):\n if chainer.backend._contains_nan(gx.data):\n raise RuntimeError(\n 'NaN is detected on backward computation of {}'\n .format(func.label))\n\n if not func.lazy_grad_sum:\n for gx in grad_inputs.values():\n _reduce(gx)\n", "path": "chainer/_backprop_utils.py"}]}
| 2,083 | 264 |
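The patch above is documentation-only, but the function it documents is easier to follow once the gradient-list bookkeeping is spelled out. A toy, pure-Python sketch of the `_pure`/`_reduce` helpers that `backprop_step` relies on, with `chainer.functions.add` swapped for the built-in `sum` so no Chainer install is needed (that substitution is an assumption for illustration only):

```python
# chainer.functions.add is replaced by plain float addition in this sketch.
def _pure(grad):
    return [] if grad is None else [grad]


def _reduce(grad_list):
    if not grad_list:
        return None
    if len(grad_list) >= 2:
        grad_list[:] = [sum(grad_list)]  # accumulate lazily collected gradients once
    return grad_list[0]


grads = _pure(1.0)      # gradient arriving from one downstream branch
grads.append(2.5)       # backprop_step appends per-branch gradients like this
print(_reduce(grads))   # -> 3.5, summed only at reduction time
```

Keeping the per-input gradients as short lists is what lets `backprop_step` append contributions from several output branches and defer the actual summation until `_reduce` runs.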
gh_patches_debug_5555
|
rasdani/github-patches
|
git_diff
|
getredash__redash-4638
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
error running query : ** '>' is not supported between instance of NoneType and 'int'
Issue Summary:
Database = Oracle 12c
`select count(*) from table `
throwing the following error
`error running query : ** '>' is not supported between instance of NoneType and 'int'`
Redash v9.0.0-alpha(dev)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/query_runner/oracle.py`
Content:
```
1 import logging
2
3 from redash.utils import json_dumps, json_loads
4 from redash.query_runner import *
5
6 try:
7 import cx_Oracle
8
9 TYPES_MAP = {
10 cx_Oracle.DATETIME: TYPE_DATETIME,
11 cx_Oracle.CLOB: TYPE_STRING,
12 cx_Oracle.LOB: TYPE_STRING,
13 cx_Oracle.FIXED_CHAR: TYPE_STRING,
14 cx_Oracle.FIXED_NCHAR: TYPE_STRING,
15 cx_Oracle.INTERVAL: TYPE_DATETIME,
16 cx_Oracle.LONG_STRING: TYPE_STRING,
17 cx_Oracle.NATIVE_FLOAT: TYPE_FLOAT,
18 cx_Oracle.NCHAR: TYPE_STRING,
19 cx_Oracle.NUMBER: TYPE_FLOAT,
20 cx_Oracle.ROWID: TYPE_INTEGER,
21 cx_Oracle.STRING: TYPE_STRING,
22 cx_Oracle.TIMESTAMP: TYPE_DATETIME,
23 }
24
25 ENABLED = True
26 except ImportError:
27 ENABLED = False
28
29 logger = logging.getLogger(__name__)
30
31
32 class Oracle(BaseSQLQueryRunner):
33 noop_query = "SELECT 1 FROM dual"
34
35 @classmethod
36 def get_col_type(cls, col_type, scale):
37 if col_type == cx_Oracle.NUMBER:
38 return TYPE_FLOAT if scale > 0 else TYPE_INTEGER
39 else:
40 return TYPES_MAP.get(col_type, None)
41
42 @classmethod
43 def enabled(cls):
44 return ENABLED
45
46 @classmethod
47 def configuration_schema(cls):
48 return {
49 "type": "object",
50 "properties": {
51 "user": {"type": "string"},
52 "password": {"type": "string"},
53 "host": {"type": "string"},
54 "port": {"type": "number"},
55 "servicename": {"type": "string", "title": "DSN Service Name"},
56 },
57 "required": ["servicename", "user", "password", "host", "port"],
58 "secret": ["password"],
59 }
60
61 @classmethod
62 def type(cls):
63 return "oracle"
64
65 def __init__(self, configuration):
66 super(Oracle, self).__init__(configuration)
67
68 dsn = cx_Oracle.makedsn(
69 self.configuration["host"],
70 self.configuration["port"],
71 service_name=self.configuration["servicename"],
72 )
73
74 self.connection_string = "{}/{}@{}".format(
75 self.configuration["user"], self.configuration["password"], dsn
76 )
77
78 def _get_tables(self, schema):
79 query = """
80 SELECT
81 all_tab_cols.OWNER,
82 all_tab_cols.TABLE_NAME,
83 all_tab_cols.COLUMN_NAME
84 FROM all_tab_cols
85 WHERE all_tab_cols.OWNER NOT IN('SYS','SYSTEM','ORDSYS','CTXSYS','WMSYS','MDSYS','ORDDATA','XDB','OUTLN','DMSYS','DSSYS','EXFSYS','LBACSYS','TSMSYS')
86 """
87
88 results, error = self.run_query(query, None)
89
90 if error is not None:
91 raise Exception("Failed getting schema.")
92
93 results = json_loads(results)
94
95 for row in results["rows"]:
96 if row["OWNER"] != None:
97 table_name = "{}.{}".format(row["OWNER"], row["TABLE_NAME"])
98 else:
99 table_name = row["TABLE_NAME"]
100
101 if table_name not in schema:
102 schema[table_name] = {"name": table_name, "columns": []}
103
104 schema[table_name]["columns"].append(row["COLUMN_NAME"])
105
106 return list(schema.values())
107
108 @classmethod
109 def _convert_number(cls, value):
110 try:
111 return int(value)
112 except:
113 return value
114
115 @classmethod
116 def output_handler(cls, cursor, name, default_type, length, precision, scale):
117 if default_type in (cx_Oracle.CLOB, cx_Oracle.LOB):
118 return cursor.var(cx_Oracle.LONG_STRING, 80000, cursor.arraysize)
119
120 if default_type in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):
121 return cursor.var(str, length, cursor.arraysize)
122
123 if default_type == cx_Oracle.NUMBER:
124 if scale <= 0:
125 return cursor.var(
126 cx_Oracle.STRING,
127 255,
128 outconverter=Oracle._convert_number,
129 arraysize=cursor.arraysize,
130 )
131
132 def run_query(self, query, user):
133 connection = cx_Oracle.connect(self.connection_string)
134 connection.outputtypehandler = Oracle.output_handler
135
136 cursor = connection.cursor()
137
138 try:
139 cursor.execute(query)
140 rows_count = cursor.rowcount
141 if cursor.description is not None:
142 columns = self.fetch_columns(
143 [
144 (i[0], Oracle.get_col_type(i[1], i[5]))
145 for i in cursor.description
146 ]
147 )
148 rows = [
149 dict(zip((column["name"] for column in columns), row))
150 for row in cursor
151 ]
152 data = {"columns": columns, "rows": rows}
153 error = None
154 json_data = json_dumps(data)
155 else:
156 columns = [{"name": "Row(s) Affected", "type": "TYPE_INTEGER"}]
157 rows = [{"Row(s) Affected": rows_count}]
158 data = {"columns": columns, "rows": rows}
159 json_data = json_dumps(data)
160 connection.commit()
161 except cx_Oracle.DatabaseError as err:
162 error = "Query failed. {}.".format(str(err))
163 json_data = None
164 except KeyboardInterrupt:
165 connection.cancel()
166 error = "Query cancelled by user."
167 json_data = None
168 finally:
169 connection.close()
170
171 return json_data, error
172
173
174 register(Oracle)
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/redash/query_runner/oracle.py b/redash/query_runner/oracle.py
--- a/redash/query_runner/oracle.py
+++ b/redash/query_runner/oracle.py
@@ -35,7 +35,11 @@
@classmethod
def get_col_type(cls, col_type, scale):
if col_type == cx_Oracle.NUMBER:
- return TYPE_FLOAT if scale > 0 else TYPE_INTEGER
+ if scale is None:
+ return TYPE_INTEGER
+ if scale > 0:
+ return TYPE_FLOAT
+ return TYPE_INTEGER
else:
return TYPES_MAP.get(col_type, None)
|
{"golden_diff": "diff --git a/redash/query_runner/oracle.py b/redash/query_runner/oracle.py\n--- a/redash/query_runner/oracle.py\n+++ b/redash/query_runner/oracle.py\n@@ -35,7 +35,11 @@\n @classmethod\n def get_col_type(cls, col_type, scale):\n if col_type == cx_Oracle.NUMBER:\n- return TYPE_FLOAT if scale > 0 else TYPE_INTEGER\n+ if scale is None:\n+ return TYPE_INTEGER\n+ if scale > 0:\n+ return TYPE_FLOAT\n+ return TYPE_INTEGER\n else:\n return TYPES_MAP.get(col_type, None)\n", "issue": "error running query : ** '>' is not supported between instance of NoneType and 'int'\nIssue Summary:\r\nDatabase = Oracle 12c\r\n\r\n`select count(*) from table `\r\n\r\nthrowing the following error\r\n\r\n`error running query : ** '>' is not supported between instance of NoneType and 'int'`\r\n\r\nRedash v9.0.0-alpha(dev)\r\n\r\n\r\n\n", "before_files": [{"content": "import logging\n\nfrom redash.utils import json_dumps, json_loads\nfrom redash.query_runner import *\n\ntry:\n import cx_Oracle\n\n TYPES_MAP = {\n cx_Oracle.DATETIME: TYPE_DATETIME,\n cx_Oracle.CLOB: TYPE_STRING,\n cx_Oracle.LOB: TYPE_STRING,\n cx_Oracle.FIXED_CHAR: TYPE_STRING,\n cx_Oracle.FIXED_NCHAR: TYPE_STRING,\n cx_Oracle.INTERVAL: TYPE_DATETIME,\n cx_Oracle.LONG_STRING: TYPE_STRING,\n cx_Oracle.NATIVE_FLOAT: TYPE_FLOAT,\n cx_Oracle.NCHAR: TYPE_STRING,\n cx_Oracle.NUMBER: TYPE_FLOAT,\n cx_Oracle.ROWID: TYPE_INTEGER,\n cx_Oracle.STRING: TYPE_STRING,\n cx_Oracle.TIMESTAMP: TYPE_DATETIME,\n }\n\n ENABLED = True\nexcept ImportError:\n ENABLED = False\n\nlogger = logging.getLogger(__name__)\n\n\nclass Oracle(BaseSQLQueryRunner):\n noop_query = \"SELECT 1 FROM dual\"\n\n @classmethod\n def get_col_type(cls, col_type, scale):\n if col_type == cx_Oracle.NUMBER:\n return TYPE_FLOAT if scale > 0 else TYPE_INTEGER\n else:\n return TYPES_MAP.get(col_type, None)\n\n @classmethod\n def enabled(cls):\n return ENABLED\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n \"user\": {\"type\": \"string\"},\n \"password\": {\"type\": \"string\"},\n \"host\": {\"type\": \"string\"},\n \"port\": {\"type\": \"number\"},\n \"servicename\": {\"type\": \"string\", \"title\": \"DSN Service Name\"},\n },\n \"required\": [\"servicename\", \"user\", \"password\", \"host\", \"port\"],\n \"secret\": [\"password\"],\n }\n\n @classmethod\n def type(cls):\n return \"oracle\"\n\n def __init__(self, configuration):\n super(Oracle, self).__init__(configuration)\n\n dsn = cx_Oracle.makedsn(\n self.configuration[\"host\"],\n self.configuration[\"port\"],\n service_name=self.configuration[\"servicename\"],\n )\n\n self.connection_string = \"{}/{}@{}\".format(\n self.configuration[\"user\"], self.configuration[\"password\"], dsn\n )\n\n def _get_tables(self, schema):\n query = \"\"\"\n SELECT\n all_tab_cols.OWNER,\n all_tab_cols.TABLE_NAME,\n all_tab_cols.COLUMN_NAME\n FROM all_tab_cols\n WHERE all_tab_cols.OWNER NOT IN('SYS','SYSTEM','ORDSYS','CTXSYS','WMSYS','MDSYS','ORDDATA','XDB','OUTLN','DMSYS','DSSYS','EXFSYS','LBACSYS','TSMSYS')\n \"\"\"\n\n results, error = self.run_query(query, None)\n\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results = json_loads(results)\n\n for row in results[\"rows\"]:\n if row[\"OWNER\"] != None:\n table_name = \"{}.{}\".format(row[\"OWNER\"], row[\"TABLE_NAME\"])\n else:\n table_name = row[\"TABLE_NAME\"]\n\n if table_name not in schema:\n schema[table_name] = {\"name\": table_name, \"columns\": []}\n\n 
schema[table_name][\"columns\"].append(row[\"COLUMN_NAME\"])\n\n return list(schema.values())\n\n @classmethod\n def _convert_number(cls, value):\n try:\n return int(value)\n except:\n return value\n\n @classmethod\n def output_handler(cls, cursor, name, default_type, length, precision, scale):\n if default_type in (cx_Oracle.CLOB, cx_Oracle.LOB):\n return cursor.var(cx_Oracle.LONG_STRING, 80000, cursor.arraysize)\n\n if default_type in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):\n return cursor.var(str, length, cursor.arraysize)\n\n if default_type == cx_Oracle.NUMBER:\n if scale <= 0:\n return cursor.var(\n cx_Oracle.STRING,\n 255,\n outconverter=Oracle._convert_number,\n arraysize=cursor.arraysize,\n )\n\n def run_query(self, query, user):\n connection = cx_Oracle.connect(self.connection_string)\n connection.outputtypehandler = Oracle.output_handler\n\n cursor = connection.cursor()\n\n try:\n cursor.execute(query)\n rows_count = cursor.rowcount\n if cursor.description is not None:\n columns = self.fetch_columns(\n [\n (i[0], Oracle.get_col_type(i[1], i[5]))\n for i in cursor.description\n ]\n )\n rows = [\n dict(zip((column[\"name\"] for column in columns), row))\n for row in cursor\n ]\n data = {\"columns\": columns, \"rows\": rows}\n error = None\n json_data = json_dumps(data)\n else:\n columns = [{\"name\": \"Row(s) Affected\", \"type\": \"TYPE_INTEGER\"}]\n rows = [{\"Row(s) Affected\": rows_count}]\n data = {\"columns\": columns, \"rows\": rows}\n json_data = json_dumps(data)\n connection.commit()\n except cx_Oracle.DatabaseError as err:\n error = \"Query failed. {}.\".format(str(err))\n json_data = None\n except KeyboardInterrupt:\n connection.cancel()\n error = \"Query cancelled by user.\"\n json_data = None\n finally:\n connection.close()\n\n return json_data, error\n\n\nregister(Oracle)\n", "path": "redash/query_runner/oracle.py"}], "after_files": [{"content": "import logging\n\nfrom redash.utils import json_dumps, json_loads\nfrom redash.query_runner import *\n\ntry:\n import cx_Oracle\n\n TYPES_MAP = {\n cx_Oracle.DATETIME: TYPE_DATETIME,\n cx_Oracle.CLOB: TYPE_STRING,\n cx_Oracle.LOB: TYPE_STRING,\n cx_Oracle.FIXED_CHAR: TYPE_STRING,\n cx_Oracle.FIXED_NCHAR: TYPE_STRING,\n cx_Oracle.INTERVAL: TYPE_DATETIME,\n cx_Oracle.LONG_STRING: TYPE_STRING,\n cx_Oracle.NATIVE_FLOAT: TYPE_FLOAT,\n cx_Oracle.NCHAR: TYPE_STRING,\n cx_Oracle.NUMBER: TYPE_FLOAT,\n cx_Oracle.ROWID: TYPE_INTEGER,\n cx_Oracle.STRING: TYPE_STRING,\n cx_Oracle.TIMESTAMP: TYPE_DATETIME,\n }\n\n ENABLED = True\nexcept ImportError:\n ENABLED = False\n\nlogger = logging.getLogger(__name__)\n\n\nclass Oracle(BaseSQLQueryRunner):\n noop_query = \"SELECT 1 FROM dual\"\n\n @classmethod\n def get_col_type(cls, col_type, scale):\n if col_type == cx_Oracle.NUMBER:\n if scale is None:\n return TYPE_INTEGER\n if scale > 0:\n return TYPE_FLOAT\n return TYPE_INTEGER\n else:\n return TYPES_MAP.get(col_type, None)\n\n @classmethod\n def enabled(cls):\n return ENABLED\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n \"user\": {\"type\": \"string\"},\n \"password\": {\"type\": \"string\"},\n \"host\": {\"type\": \"string\"},\n \"port\": {\"type\": \"number\"},\n \"servicename\": {\"type\": \"string\", \"title\": \"DSN Service Name\"},\n },\n \"required\": [\"servicename\", \"user\", \"password\", \"host\", \"port\"],\n \"secret\": [\"password\"],\n }\n\n @classmethod\n def type(cls):\n return \"oracle\"\n\n def __init__(self, configuration):\n super(Oracle, 
self).__init__(configuration)\n\n dsn = cx_Oracle.makedsn(\n self.configuration[\"host\"],\n self.configuration[\"port\"],\n service_name=self.configuration[\"servicename\"],\n )\n\n self.connection_string = \"{}/{}@{}\".format(\n self.configuration[\"user\"], self.configuration[\"password\"], dsn\n )\n\n def _get_tables(self, schema):\n query = \"\"\"\n SELECT\n all_tab_cols.OWNER,\n all_tab_cols.TABLE_NAME,\n all_tab_cols.COLUMN_NAME\n FROM all_tab_cols\n WHERE all_tab_cols.OWNER NOT IN('SYS','SYSTEM','ORDSYS','CTXSYS','WMSYS','MDSYS','ORDDATA','XDB','OUTLN','DMSYS','DSSYS','EXFSYS','LBACSYS','TSMSYS')\n \"\"\"\n\n results, error = self.run_query(query, None)\n\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results = json_loads(results)\n\n for row in results[\"rows\"]:\n if row[\"OWNER\"] != None:\n table_name = \"{}.{}\".format(row[\"OWNER\"], row[\"TABLE_NAME\"])\n else:\n table_name = row[\"TABLE_NAME\"]\n\n if table_name not in schema:\n schema[table_name] = {\"name\": table_name, \"columns\": []}\n\n schema[table_name][\"columns\"].append(row[\"COLUMN_NAME\"])\n\n return list(schema.values())\n\n @classmethod\n def _convert_number(cls, value):\n try:\n return int(value)\n except:\n return value\n\n @classmethod\n def output_handler(cls, cursor, name, default_type, length, precision, scale):\n if default_type in (cx_Oracle.CLOB, cx_Oracle.LOB):\n return cursor.var(cx_Oracle.LONG_STRING, 80000, cursor.arraysize)\n\n if default_type in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):\n return cursor.var(str, length, cursor.arraysize)\n\n if default_type == cx_Oracle.NUMBER:\n if scale <= 0:\n return cursor.var(\n cx_Oracle.STRING,\n 255,\n outconverter=Oracle._convert_number,\n arraysize=cursor.arraysize,\n )\n\n def run_query(self, query, user):\n connection = cx_Oracle.connect(self.connection_string)\n connection.outputtypehandler = Oracle.output_handler\n\n cursor = connection.cursor()\n\n try:\n cursor.execute(query)\n rows_count = cursor.rowcount\n if cursor.description is not None:\n columns = self.fetch_columns(\n [\n (i[0], Oracle.get_col_type(i[1], i[5]))\n for i in cursor.description\n ]\n )\n rows = [\n dict(zip((column[\"name\"] for column in columns), row))\n for row in cursor\n ]\n data = {\"columns\": columns, \"rows\": rows}\n error = None\n json_data = json_dumps(data)\n else:\n columns = [{\"name\": \"Row(s) Affected\", \"type\": \"TYPE_INTEGER\"}]\n rows = [{\"Row(s) Affected\": rows_count}]\n data = {\"columns\": columns, \"rows\": rows}\n json_data = json_dumps(data)\n connection.commit()\n except cx_Oracle.DatabaseError as err:\n error = \"Query failed. {}.\".format(str(err))\n json_data = None\n except KeyboardInterrupt:\n connection.cancel()\n error = \"Query cancelled by user.\"\n json_data = None\n finally:\n connection.close()\n\n return json_data, error\n\n\nregister(Oracle)\n", "path": "redash/query_runner/oracle.py"}]}
| 1,982 | 140 |
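The failing comparison in the record above is easy to reproduce without Oracle or cx_Oracle: in Python 3, `None > 0` raises exactly the TypeError quoted in the issue, and `cursor.description` reports `scale=None` for expressions such as `COUNT(*)`. A minimal sketch of the patched branch, with the redash `TYPE_*` constants replaced by plain strings as an assumption:

```python
# Simplified stand-ins for redash's TYPE_* constants.
TYPE_INTEGER, TYPE_FLOAT = "integer", "float"


def get_col_type_fixed(scale):
    # Check for None before the numeric comparison; the old `scale > 0`
    # raised "'>' not supported between instances of 'NoneType' and 'int'".
    if scale is None:
        return TYPE_INTEGER
    return TYPE_FLOAT if scale > 0 else TYPE_INTEGER


print(get_col_type_fixed(None))  # "integer"  (previously a TypeError)
print(get_col_type_fixed(2))     # "float"
print(get_col_type_fixed(0))     # "integer"
```

Treating a missing scale as an integer column is the conservative choice: aggregate results like `COUNT(*)` come back as whole numbers, and columns with an explicit positive scale still map to floats.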
gh_patches_debug_41424
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-291
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Login screen should redirect somewhere appropriate if visited while already logged in
Rather confusing, visiting the login screen while already logged in presents the user with a login form again. It should redirect them to the dashboard, I'd suggest.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `runtests.py`
Content:
```
1 #!/usr/bin/env python
2 import sys
3 import os
4 import shutil
5
6 from django.conf import settings, global_settings
7 from django.core.management import execute_from_command_line
8
9 WAGTAIL_ROOT = os.path.dirname(__file__)
10 STATIC_ROOT = os.path.join(WAGTAIL_ROOT, 'test-static')
11 MEDIA_ROOT = os.path.join(WAGTAIL_ROOT, 'test-media')
12
13 if not settings.configured:
14
15 try:
16 import elasticutils
17 has_elasticsearch = True
18 except ImportError:
19 has_elasticsearch = False
20
21 WAGTAILSEARCH_BACKENDS = {
22 'default': {
23 'BACKEND': 'wagtail.wagtailsearch.backends.db.DBSearch',
24 }
25 }
26 if has_elasticsearch:
27 WAGTAILSEARCH_BACKENDS['elasticsearch'] = {
28 'BACKEND': 'wagtail.wagtailsearch.backends.elasticsearch.ElasticSearch',
29 'TIMEOUT': 10,
30 'max_retries': 1,
31 }
32
33 settings.configure(
34 DATABASES={
35 'default': {
36 'ENGINE': os.environ.get('DATABASE_ENGINE', 'django.db.backends.postgresql_psycopg2'),
37 'NAME': 'wagtaildemo',
38 'USER': os.environ.get('DATABASE_USER', 'postgres'),
39 }
40 },
41 ROOT_URLCONF='wagtail.tests.urls',
42 STATIC_URL='/static/',
43 STATIC_ROOT=STATIC_ROOT,
44 MEDIA_ROOT=MEDIA_ROOT,
45 USE_TZ=True,
46 STATICFILES_FINDERS=(
47 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
48 'compressor.finders.CompressorFinder',
49 ),
50 TEMPLATE_CONTEXT_PROCESSORS=global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
51 'django.core.context_processors.request',
52 ),
53 MIDDLEWARE_CLASSES=(
54 'django.middleware.common.CommonMiddleware',
55 'django.contrib.sessions.middleware.SessionMiddleware',
56 'django.middleware.csrf.CsrfViewMiddleware',
57 'django.contrib.auth.middleware.AuthenticationMiddleware',
58 'django.contrib.messages.middleware.MessageMiddleware',
59 'django.middleware.clickjacking.XFrameOptionsMiddleware',
60
61 'wagtail.wagtailcore.middleware.SiteMiddleware',
62
63 'wagtail.wagtailredirects.middleware.RedirectMiddleware',
64 ),
65 INSTALLED_APPS=[
66 'django.contrib.contenttypes',
67 'django.contrib.sessions',
68 'django.contrib.auth',
69 'django.contrib.messages',
70 'django.contrib.staticfiles',
71 'django.contrib.admin',
72
73 'taggit',
74 'south',
75 'compressor',
76
77 'wagtail.wagtailcore',
78 'wagtail.wagtailadmin',
79 'wagtail.wagtaildocs',
80 'wagtail.wagtailsnippets',
81 'wagtail.wagtailusers',
82 'wagtail.wagtailimages',
83 'wagtail.wagtailembeds',
84 'wagtail.wagtailsearch',
85 'wagtail.wagtailredirects',
86 'wagtail.wagtailforms',
87 'wagtail.tests',
88 ],
89
90 # Using DatabaseCache to make sure that the cache is cleared between tests.
91 # This prevents false-positives in some wagtail core tests where we are
92 # changing the 'wagtail_root_paths' key which may cause future tests to fail.
93 CACHES = {
94 'default': {
95 'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
96 'LOCATION': 'cache',
97 }
98 },
99 PASSWORD_HASHERS=(
100 'django.contrib.auth.hashers.MD5PasswordHasher', # don't use the intentionally slow default password hasher
101 ),
102 COMPRESS_ENABLED=False, # disable compression so that we can run tests on the content of the compress tag
103 WAGTAILSEARCH_BACKENDS=WAGTAILSEARCH_BACKENDS,
104 WAGTAIL_SITE_NAME='Test Site',
105 LOGIN_REDIRECT_URL='wagtailadmin_home',
106 )
107
108
109 def runtests():
110 argv = sys.argv[:1] + ['test'] + sys.argv[1:]
111 try:
112 execute_from_command_line(argv)
113 finally:
114 shutil.rmtree(STATIC_ROOT, ignore_errors=True)
115 shutil.rmtree(MEDIA_ROOT, ignore_errors=True)
116
117
118 if __name__ == '__main__':
119 runtests()
120
```
Path: `wagtail/wagtailadmin/views/account.py`
Content:
```
1 from django.conf import settings
2 from django.shortcuts import render, redirect
3 from django.contrib import messages
4 from django.contrib.auth.forms import SetPasswordForm
5 from django.contrib.auth.decorators import permission_required
6 from django.contrib.auth.views import logout as auth_logout
7 from django.utils.translation import ugettext as _
8
9 @permission_required('wagtailadmin.access_admin')
10 def account(request):
11 return render(request, 'wagtailadmin/account/account.html', {
12 'show_change_password': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True) and request.user.has_usable_password(),
13 })
14
15
16 @permission_required('wagtailadmin.access_admin')
17 def change_password(request):
18 can_change_password = request.user.has_usable_password()
19
20 if can_change_password:
21 if request.POST:
22 form = SetPasswordForm(request.user, request.POST)
23
24 if form.is_valid():
25 form.save()
26
27 messages.success(request, _("Your password has been changed successfully!"))
28 return redirect('wagtailadmin_account')
29 else:
30 form = SetPasswordForm(request.user)
31 else:
32 form = None
33
34 return render(request, 'wagtailadmin/account/change_password.html', {
35 'form': form,
36 'can_change_password': can_change_password,
37 })
38
39
40 def logout(request):
41 response = auth_logout(request, next_page = 'wagtailadmin_login')
42
43 # By default, logging out will generate a fresh sessionid cookie. We want to use the
44 # absence of sessionid as an indication that front-end pages are being viewed by a
45 # non-logged-in user and are therefore cacheable, so we forcibly delete the cookie here.
46 response.delete_cookie(settings.SESSION_COOKIE_NAME,
47 domain=settings.SESSION_COOKIE_DOMAIN,
48 path=settings.SESSION_COOKIE_PATH)
49
50 # HACK: pretend that the session hasn't been modified, so that SessionMiddleware
51 # won't override the above and write a new cookie.
52 request.session.modified = False
53
54 return response
55
```
Path: `wagtail/wagtailadmin/urls.py`
Content:
```
1 from django.conf.urls import url
2 from django.conf import settings
3
4 from wagtail.wagtailadmin.forms import LoginForm, PasswordResetForm
5 from wagtail.wagtailadmin.views import account, chooser, home, pages, tags, userbar
6 from wagtail.wagtailadmin import hooks
7
8 urlpatterns = [
9 url(
10 r'^login/$', 'django.contrib.auth.views.login', {
11 'template_name': 'wagtailadmin/login.html',
12 'authentication_form': LoginForm,
13 'extra_context': {'show_password_reset': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True)},
14 }, name='wagtailadmin_login'
15 ),
16
17 # Password reset
18 url(
19 r'^password_reset/$', 'django.contrib.auth.views.password_reset', {
20 'template_name': 'wagtailadmin/account/password_reset/form.html',
21 'email_template_name': 'wagtailadmin/account/password_reset/email.txt',
22 'subject_template_name': 'wagtailadmin/account/password_reset/email_subject.txt',
23 'password_reset_form': PasswordResetForm,
24 }, name='password_reset'
25 ),
26 url(
27 r'^password_reset/done/$', 'django.contrib.auth.views.password_reset_done', {
28 'template_name': 'wagtailadmin/account/password_reset/done.html'
29 }, name='password_reset_done'
30 ),
31 url(
32 r'^password_reset/confirm/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
33 'django.contrib.auth.views.password_reset_confirm',
34 {'template_name': 'wagtailadmin/account/password_reset/confirm.html'},
35 name='password_reset_confirm',
36 ),
37 url(
38 r'^password_reset/complete/$', 'django.contrib.auth.views.password_reset_complete',
39 {'template_name': 'wagtailadmin/account/password_reset/complete.html'},
40 name='password_reset_complete'
41 ),
42 ]
43
44 urlpatterns += [
45 url(r'^$', home.home, name='wagtailadmin_home'),
46
47 url(r'^failwhale/$', home.error_test, name='wagtailadmin_error_test'),
48
49 url(r'^pages/$', pages.index, name='wagtailadmin_explore_root'),
50 url(r'^pages/(\d+)/$', pages.index, name='wagtailadmin_explore'),
51
52 url(r'^pages/new/(\w+)/(\w+)/(\d+)/$', pages.create, name='wagtailadmin_pages_create'),
53 url(r'^pages/new/(\w+)/(\w+)/(\d+)/preview/$', pages.preview_on_create, name='wagtailadmin_pages_preview_on_create'),
54 url(r'^pages/usage/(\w+)/(\w+)/$', pages.content_type_use, name='wagtailadmin_pages_type_use'),
55
56 url(r'^pages/(\d+)/edit/$', pages.edit, name='wagtailadmin_pages_edit'),
57 url(r'^pages/(\d+)/edit/preview/$', pages.preview_on_edit, name='wagtailadmin_pages_preview_on_edit'),
58
59 url(r'^pages/preview_placeholder/$', pages.preview_placeholder, name='wagtailadmin_pages_preview_placeholder'),
60
61 url(r'^pages/(\d+)/view_draft/$', pages.view_draft, name='wagtailadmin_pages_view_draft'),
62 url(r'^pages/(\d+)/add_subpage/$', pages.add_subpage, name='wagtailadmin_pages_add_subpage'),
63 url(r'^pages/(\d+)/delete/$', pages.delete, name='wagtailadmin_pages_delete'),
64 url(r'^pages/(\d+)/unpublish/$', pages.unpublish, name='wagtailadmin_pages_unpublish'),
65
66 url(r'^pages/search/$', pages.search, name='wagtailadmin_pages_search'),
67
68 url(r'^pages/(\d+)/move/$', pages.move_choose_destination, name='wagtailadmin_pages_move'),
69 url(r'^pages/(\d+)/move/(\d+)/$', pages.move_choose_destination, name='wagtailadmin_pages_move_choose_destination'),
70 url(r'^pages/(\d+)/move/(\d+)/confirm/$', pages.move_confirm, name='wagtailadmin_pages_move_confirm'),
71 url(r'^pages/(\d+)/set_position/$', pages.set_page_position, name='wagtailadmin_pages_set_page_position'),
72
73 url(r'^pages/moderation/(\d+)/approve/$', pages.approve_moderation, name='wagtailadmin_pages_approve_moderation'),
74 url(r'^pages/moderation/(\d+)/reject/$', pages.reject_moderation, name='wagtailadmin_pages_reject_moderation'),
75 url(r'^pages/moderation/(\d+)/preview/$', pages.preview_for_moderation, name='wagtailadmin_pages_preview_for_moderation'),
76
77 url(r'^choose-page/$', chooser.browse, name='wagtailadmin_choose_page'),
78 url(r'^choose-page/(\d+)/$', chooser.browse, name='wagtailadmin_choose_page_child'),
79 url(r'^choose-external-link/$', chooser.external_link, name='wagtailadmin_choose_page_external_link'),
80 url(r'^choose-email-link/$', chooser.email_link, name='wagtailadmin_choose_page_email_link'),
81
82 url(r'^tag-autocomplete/$', tags.autocomplete, name='wagtailadmin_tag_autocomplete'),
83
84 url(r'^account/$', account.account, name='wagtailadmin_account'),
85 url(r'^account/change_password/$', account.change_password, name='wagtailadmin_account_change_password'),
86 url(r'^logout/$', account.logout, name='wagtailadmin_logout'),
87
88 url(r'^userbar/(\d+)/$', userbar.for_frontend, name='wagtailadmin_userbar_frontend'),
89 url(r'^userbar/moderation/(\d+)/$', userbar.for_moderation, name='wagtailadmin_userbar_moderation'),
90 ]
91
92
93 # Import additional urlpatterns from any apps that define a register_admin_urls hook
94 for fn in hooks.get_hooks('register_admin_urls'):
95 urls = fn()
96 if urls:
97 urlpatterns += urls
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/runtests.py b/runtests.py
--- a/runtests.py
+++ b/runtests.py
@@ -103,6 +103,7 @@
WAGTAILSEARCH_BACKENDS=WAGTAILSEARCH_BACKENDS,
WAGTAIL_SITE_NAME='Test Site',
LOGIN_REDIRECT_URL='wagtailadmin_home',
+ LOGIN_URL='wagtailadmin_login',
)
diff --git a/wagtail/wagtailadmin/urls.py b/wagtail/wagtailadmin/urls.py
--- a/wagtail/wagtailadmin/urls.py
+++ b/wagtail/wagtailadmin/urls.py
@@ -5,15 +5,8 @@
from wagtail.wagtailadmin.views import account, chooser, home, pages, tags, userbar
from wagtail.wagtailadmin import hooks
-urlpatterns = [
- url(
- r'^login/$', 'django.contrib.auth.views.login', {
- 'template_name': 'wagtailadmin/login.html',
- 'authentication_form': LoginForm,
- 'extra_context': {'show_password_reset': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True)},
- }, name='wagtailadmin_login'
- ),
+urlpatterns = [
# Password reset
url(
r'^password_reset/$', 'django.contrib.auth.views.password_reset', {
@@ -81,6 +74,7 @@
url(r'^tag-autocomplete/$', tags.autocomplete, name='wagtailadmin_tag_autocomplete'),
+ url(r'^login/$', account.login, name='wagtailadmin_login'),
url(r'^account/$', account.account, name='wagtailadmin_account'),
url(r'^account/change_password/$', account.change_password, name='wagtailadmin_account_change_password'),
url(r'^logout/$', account.logout, name='wagtailadmin_logout'),
@@ -90,6 +84,13 @@
]
+# This is here to make sure that 'django.contrib.auth.views.login' is reversed correctly
+# It must be placed after 'wagtailadmin_login' to prevent this from being used
+urlpatterns += [
+ url(r'^login/$', 'django.contrib.auth.views.login'),
+]
+
+
# Import additional urlpatterns from any apps that define a register_admin_urls hook
for fn in hooks.get_hooks('register_admin_urls'):
urls = fn()
diff --git a/wagtail/wagtailadmin/views/account.py b/wagtail/wagtailadmin/views/account.py
--- a/wagtail/wagtailadmin/views/account.py
+++ b/wagtail/wagtailadmin/views/account.py
@@ -3,8 +3,13 @@
from django.contrib import messages
from django.contrib.auth.forms import SetPasswordForm
from django.contrib.auth.decorators import permission_required
-from django.contrib.auth.views import logout as auth_logout
+from django.contrib.auth.views import logout as auth_logout, login as auth_login
from django.utils.translation import ugettext as _
+from django.views.decorators.debug import sensitive_post_parameters
+from django.views.decorators.cache import never_cache
+
+from wagtail.wagtailadmin import forms
+
@permission_required('wagtailadmin.access_admin')
def account(request):
@@ -37,6 +42,21 @@
})
+@sensitive_post_parameters()
+@never_cache
+def login(request):
+ if request.user.is_authenticated():
+ return redirect('wagtailadmin_home')
+ else:
+ return auth_login(request,
+ template_name='wagtailadmin/login.html',
+ authentication_form=forms.LoginForm,
+ extra_context={
+ 'show_password_reset': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True),
+ },
+ )
+
+
def logout(request):
response = auth_logout(request, next_page = 'wagtailadmin_login')
|
{"golden_diff": "diff --git a/runtests.py b/runtests.py\n--- a/runtests.py\n+++ b/runtests.py\n@@ -103,6 +103,7 @@\n WAGTAILSEARCH_BACKENDS=WAGTAILSEARCH_BACKENDS,\n WAGTAIL_SITE_NAME='Test Site',\n LOGIN_REDIRECT_URL='wagtailadmin_home',\n+ LOGIN_URL='wagtailadmin_login',\n )\n \n \ndiff --git a/wagtail/wagtailadmin/urls.py b/wagtail/wagtailadmin/urls.py\n--- a/wagtail/wagtailadmin/urls.py\n+++ b/wagtail/wagtailadmin/urls.py\n@@ -5,15 +5,8 @@\n from wagtail.wagtailadmin.views import account, chooser, home, pages, tags, userbar\n from wagtail.wagtailadmin import hooks\n \n-urlpatterns = [\n- url(\n- r'^login/$', 'django.contrib.auth.views.login', {\n- 'template_name': 'wagtailadmin/login.html',\n- 'authentication_form': LoginForm,\n- 'extra_context': {'show_password_reset': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True)},\n- }, name='wagtailadmin_login'\n- ),\n \n+urlpatterns = [\n # Password reset\n url(\n r'^password_reset/$', 'django.contrib.auth.views.password_reset', {\n@@ -81,6 +74,7 @@\n \n url(r'^tag-autocomplete/$', tags.autocomplete, name='wagtailadmin_tag_autocomplete'),\n \n+ url(r'^login/$', account.login, name='wagtailadmin_login'),\n url(r'^account/$', account.account, name='wagtailadmin_account'),\n url(r'^account/change_password/$', account.change_password, name='wagtailadmin_account_change_password'),\n url(r'^logout/$', account.logout, name='wagtailadmin_logout'),\n@@ -90,6 +84,13 @@\n ]\n \n \n+# This is here to make sure that 'django.contrib.auth.views.login' is reversed correctly\n+# It must be placed after 'wagtailadmin_login' to prevent this from being used\n+urlpatterns += [\n+ url(r'^login/$', 'django.contrib.auth.views.login'),\n+]\n+\n+\n # Import additional urlpatterns from any apps that define a register_admin_urls hook\n for fn in hooks.get_hooks('register_admin_urls'):\n urls = fn()\ndiff --git a/wagtail/wagtailadmin/views/account.py b/wagtail/wagtailadmin/views/account.py\n--- a/wagtail/wagtailadmin/views/account.py\n+++ b/wagtail/wagtailadmin/views/account.py\n@@ -3,8 +3,13 @@\n from django.contrib import messages\n from django.contrib.auth.forms import SetPasswordForm\n from django.contrib.auth.decorators import permission_required\n-from django.contrib.auth.views import logout as auth_logout\n+from django.contrib.auth.views import logout as auth_logout, login as auth_login\n from django.utils.translation import ugettext as _ \n+from django.views.decorators.debug import sensitive_post_parameters\n+from django.views.decorators.cache import never_cache\n+\n+from wagtail.wagtailadmin import forms\n+\n \n @permission_required('wagtailadmin.access_admin')\n def account(request):\n@@ -37,6 +42,21 @@\n })\n \n \n+@sensitive_post_parameters()\n+@never_cache\n+def login(request):\n+ if request.user.is_authenticated():\n+ return redirect('wagtailadmin_home')\n+ else:\n+ return auth_login(request,\n+ template_name='wagtailadmin/login.html',\n+ authentication_form=forms.LoginForm,\n+ extra_context={\n+ 'show_password_reset': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True),\n+ },\n+ )\n+\n+\n def logout(request):\n response = auth_logout(request, next_page = 'wagtailadmin_login')\n", "issue": "Login screen should redirect somewhere appropriate if visited while already logged in\nRather confusing, visiting the login screen while already logged in presents the user with a login form again. 
It should redirect them to the dashboard, I'd suggest.\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport sys\nimport os\nimport shutil\n\nfrom django.conf import settings, global_settings\nfrom django.core.management import execute_from_command_line\n\nWAGTAIL_ROOT = os.path.dirname(__file__)\nSTATIC_ROOT = os.path.join(WAGTAIL_ROOT, 'test-static')\nMEDIA_ROOT = os.path.join(WAGTAIL_ROOT, 'test-media')\n\nif not settings.configured:\n\n try:\n import elasticutils\n has_elasticsearch = True\n except ImportError:\n has_elasticsearch = False\n\n WAGTAILSEARCH_BACKENDS = {\n 'default': {\n 'BACKEND': 'wagtail.wagtailsearch.backends.db.DBSearch',\n }\n }\n if has_elasticsearch:\n WAGTAILSEARCH_BACKENDS['elasticsearch'] = {\n 'BACKEND': 'wagtail.wagtailsearch.backends.elasticsearch.ElasticSearch',\n 'TIMEOUT': 10,\n 'max_retries': 1,\n }\n\n settings.configure(\n DATABASES={\n 'default': {\n 'ENGINE': os.environ.get('DATABASE_ENGINE', 'django.db.backends.postgresql_psycopg2'),\n 'NAME': 'wagtaildemo',\n 'USER': os.environ.get('DATABASE_USER', 'postgres'),\n }\n },\n ROOT_URLCONF='wagtail.tests.urls',\n STATIC_URL='/static/',\n STATIC_ROOT=STATIC_ROOT,\n MEDIA_ROOT=MEDIA_ROOT,\n USE_TZ=True,\n STATICFILES_FINDERS=(\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n ),\n TEMPLATE_CONTEXT_PROCESSORS=global_settings.TEMPLATE_CONTEXT_PROCESSORS + (\n 'django.core.context_processors.request',\n ),\n MIDDLEWARE_CLASSES=(\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n\n 'wagtail.wagtailcore.middleware.SiteMiddleware',\n\n 'wagtail.wagtailredirects.middleware.RedirectMiddleware',\n ),\n INSTALLED_APPS=[\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n\n 'taggit',\n 'south',\n 'compressor',\n\n 'wagtail.wagtailcore',\n 'wagtail.wagtailadmin',\n 'wagtail.wagtaildocs',\n 'wagtail.wagtailsnippets',\n 'wagtail.wagtailusers',\n 'wagtail.wagtailimages',\n 'wagtail.wagtailembeds',\n 'wagtail.wagtailsearch',\n 'wagtail.wagtailredirects',\n 'wagtail.wagtailforms',\n 'wagtail.tests',\n ],\n\n # Using DatabaseCache to make sure that the cache is cleared between tests.\n # This prevents false-positives in some wagtail core tests where we are\n # changing the 'wagtail_root_paths' key which may cause future tests to fail.\n CACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.db.DatabaseCache',\n 'LOCATION': 'cache',\n }\n },\n PASSWORD_HASHERS=(\n 'django.contrib.auth.hashers.MD5PasswordHasher', # don't use the intentionally slow default password hasher\n ),\n COMPRESS_ENABLED=False, # disable compression so that we can run tests on the content of the compress tag\n WAGTAILSEARCH_BACKENDS=WAGTAILSEARCH_BACKENDS,\n WAGTAIL_SITE_NAME='Test Site',\n LOGIN_REDIRECT_URL='wagtailadmin_home',\n )\n\n\ndef runtests():\n argv = sys.argv[:1] + ['test'] + sys.argv[1:]\n try:\n execute_from_command_line(argv)\n finally:\n shutil.rmtree(STATIC_ROOT, ignore_errors=True)\n shutil.rmtree(MEDIA_ROOT, ignore_errors=True)\n\n\nif __name__ == '__main__':\n runtests()\n", "path": "runtests.py"}, {"content": "from django.conf import settings\nfrom 
django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth.forms import SetPasswordForm\nfrom django.contrib.auth.decorators import permission_required\nfrom django.contrib.auth.views import logout as auth_logout\nfrom django.utils.translation import ugettext as _ \n\n@permission_required('wagtailadmin.access_admin')\ndef account(request):\n return render(request, 'wagtailadmin/account/account.html', {\n 'show_change_password': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True) and request.user.has_usable_password(),\n })\n\n\n@permission_required('wagtailadmin.access_admin')\ndef change_password(request):\n can_change_password = request.user.has_usable_password()\n\n if can_change_password:\n if request.POST:\n form = SetPasswordForm(request.user, request.POST)\n\n if form.is_valid():\n form.save()\n\n messages.success(request, _(\"Your password has been changed successfully!\"))\n return redirect('wagtailadmin_account')\n else:\n form = SetPasswordForm(request.user)\n else:\n form = None\n\n return render(request, 'wagtailadmin/account/change_password.html', {\n 'form': form,\n 'can_change_password': can_change_password,\n })\n\n\ndef logout(request):\n response = auth_logout(request, next_page = 'wagtailadmin_login')\n\n # By default, logging out will generate a fresh sessionid cookie. We want to use the\n # absence of sessionid as an indication that front-end pages are being viewed by a\n # non-logged-in user and are therefore cacheable, so we forcibly delete the cookie here.\n response.delete_cookie(settings.SESSION_COOKIE_NAME,\n domain=settings.SESSION_COOKIE_DOMAIN,\n path=settings.SESSION_COOKIE_PATH)\n\n # HACK: pretend that the session hasn't been modified, so that SessionMiddleware\n # won't override the above and write a new cookie.\n request.session.modified = False\n\n return response\n", "path": "wagtail/wagtailadmin/views/account.py"}, {"content": "from django.conf.urls import url\nfrom django.conf import settings\n\nfrom wagtail.wagtailadmin.forms import LoginForm, PasswordResetForm\nfrom wagtail.wagtailadmin.views import account, chooser, home, pages, tags, userbar\nfrom wagtail.wagtailadmin import hooks\n\nurlpatterns = [\n url(\n r'^login/$', 'django.contrib.auth.views.login', {\n 'template_name': 'wagtailadmin/login.html',\n 'authentication_form': LoginForm,\n 'extra_context': {'show_password_reset': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True)},\n }, name='wagtailadmin_login'\n ),\n\n # Password reset\n url(\n r'^password_reset/$', 'django.contrib.auth.views.password_reset', {\n 'template_name': 'wagtailadmin/account/password_reset/form.html',\n 'email_template_name': 'wagtailadmin/account/password_reset/email.txt',\n 'subject_template_name': 'wagtailadmin/account/password_reset/email_subject.txt',\n 'password_reset_form': PasswordResetForm,\n }, name='password_reset'\n ),\n url(\n r'^password_reset/done/$', 'django.contrib.auth.views.password_reset_done', {\n 'template_name': 'wagtailadmin/account/password_reset/done.html'\n }, name='password_reset_done'\n ),\n url(\n r'^password_reset/confirm/(?P<uidb64>[0-9A-Za-z_\\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',\n 'django.contrib.auth.views.password_reset_confirm',\n {'template_name': 'wagtailadmin/account/password_reset/confirm.html'},\n name='password_reset_confirm',\n ),\n url(\n r'^password_reset/complete/$', 'django.contrib.auth.views.password_reset_complete',\n {'template_name': 
'wagtailadmin/account/password_reset/complete.html'},\n name='password_reset_complete'\n ),\n]\n\nurlpatterns += [\n url(r'^$', home.home, name='wagtailadmin_home'),\n\n url(r'^failwhale/$', home.error_test, name='wagtailadmin_error_test'),\n\n url(r'^pages/$', pages.index, name='wagtailadmin_explore_root'),\n url(r'^pages/(\\d+)/$', pages.index, name='wagtailadmin_explore'),\n\n url(r'^pages/new/(\\w+)/(\\w+)/(\\d+)/$', pages.create, name='wagtailadmin_pages_create'),\n url(r'^pages/new/(\\w+)/(\\w+)/(\\d+)/preview/$', pages.preview_on_create, name='wagtailadmin_pages_preview_on_create'),\n url(r'^pages/usage/(\\w+)/(\\w+)/$', pages.content_type_use, name='wagtailadmin_pages_type_use'),\n\n url(r'^pages/(\\d+)/edit/$', pages.edit, name='wagtailadmin_pages_edit'),\n url(r'^pages/(\\d+)/edit/preview/$', pages.preview_on_edit, name='wagtailadmin_pages_preview_on_edit'),\n\n url(r'^pages/preview_placeholder/$', pages.preview_placeholder, name='wagtailadmin_pages_preview_placeholder'),\n\n url(r'^pages/(\\d+)/view_draft/$', pages.view_draft, name='wagtailadmin_pages_view_draft'),\n url(r'^pages/(\\d+)/add_subpage/$', pages.add_subpage, name='wagtailadmin_pages_add_subpage'),\n url(r'^pages/(\\d+)/delete/$', pages.delete, name='wagtailadmin_pages_delete'),\n url(r'^pages/(\\d+)/unpublish/$', pages.unpublish, name='wagtailadmin_pages_unpublish'),\n\n url(r'^pages/search/$', pages.search, name='wagtailadmin_pages_search'),\n\n url(r'^pages/(\\d+)/move/$', pages.move_choose_destination, name='wagtailadmin_pages_move'),\n url(r'^pages/(\\d+)/move/(\\d+)/$', pages.move_choose_destination, name='wagtailadmin_pages_move_choose_destination'),\n url(r'^pages/(\\d+)/move/(\\d+)/confirm/$', pages.move_confirm, name='wagtailadmin_pages_move_confirm'),\n url(r'^pages/(\\d+)/set_position/$', pages.set_page_position, name='wagtailadmin_pages_set_page_position'),\n\n url(r'^pages/moderation/(\\d+)/approve/$', pages.approve_moderation, name='wagtailadmin_pages_approve_moderation'),\n url(r'^pages/moderation/(\\d+)/reject/$', pages.reject_moderation, name='wagtailadmin_pages_reject_moderation'),\n url(r'^pages/moderation/(\\d+)/preview/$', pages.preview_for_moderation, name='wagtailadmin_pages_preview_for_moderation'),\n\n url(r'^choose-page/$', chooser.browse, name='wagtailadmin_choose_page'),\n url(r'^choose-page/(\\d+)/$', chooser.browse, name='wagtailadmin_choose_page_child'),\n url(r'^choose-external-link/$', chooser.external_link, name='wagtailadmin_choose_page_external_link'),\n url(r'^choose-email-link/$', chooser.email_link, name='wagtailadmin_choose_page_email_link'),\n\n url(r'^tag-autocomplete/$', tags.autocomplete, name='wagtailadmin_tag_autocomplete'),\n\n url(r'^account/$', account.account, name='wagtailadmin_account'),\n url(r'^account/change_password/$', account.change_password, name='wagtailadmin_account_change_password'),\n url(r'^logout/$', account.logout, name='wagtailadmin_logout'),\n\n url(r'^userbar/(\\d+)/$', userbar.for_frontend, name='wagtailadmin_userbar_frontend'),\n url(r'^userbar/moderation/(\\d+)/$', userbar.for_moderation, name='wagtailadmin_userbar_moderation'),\n]\n\n\n# Import additional urlpatterns from any apps that define a register_admin_urls hook\nfor fn in hooks.get_hooks('register_admin_urls'):\n urls = fn()\n if urls:\n urlpatterns += urls\n", "path": "wagtail/wagtailadmin/urls.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport sys\nimport os\nimport shutil\n\nfrom django.conf import settings, global_settings\nfrom django.core.management import 
execute_from_command_line\n\nWAGTAIL_ROOT = os.path.dirname(__file__)\nSTATIC_ROOT = os.path.join(WAGTAIL_ROOT, 'test-static')\nMEDIA_ROOT = os.path.join(WAGTAIL_ROOT, 'test-media')\n\nif not settings.configured:\n\n try:\n import elasticutils\n has_elasticsearch = True\n except ImportError:\n has_elasticsearch = False\n\n WAGTAILSEARCH_BACKENDS = {\n 'default': {\n 'BACKEND': 'wagtail.wagtailsearch.backends.db.DBSearch',\n }\n }\n if has_elasticsearch:\n WAGTAILSEARCH_BACKENDS['elasticsearch'] = {\n 'BACKEND': 'wagtail.wagtailsearch.backends.elasticsearch.ElasticSearch',\n 'TIMEOUT': 10,\n 'max_retries': 1,\n }\n\n settings.configure(\n DATABASES={\n 'default': {\n 'ENGINE': os.environ.get('DATABASE_ENGINE', 'django.db.backends.postgresql_psycopg2'),\n 'NAME': 'wagtaildemo',\n 'USER': os.environ.get('DATABASE_USER', 'postgres'),\n }\n },\n ROOT_URLCONF='wagtail.tests.urls',\n STATIC_URL='/static/',\n STATIC_ROOT=STATIC_ROOT,\n MEDIA_ROOT=MEDIA_ROOT,\n USE_TZ=True,\n STATICFILES_FINDERS=(\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n ),\n TEMPLATE_CONTEXT_PROCESSORS=global_settings.TEMPLATE_CONTEXT_PROCESSORS + (\n 'django.core.context_processors.request',\n ),\n MIDDLEWARE_CLASSES=(\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n\n 'wagtail.wagtailcore.middleware.SiteMiddleware',\n\n 'wagtail.wagtailredirects.middleware.RedirectMiddleware',\n ),\n INSTALLED_APPS=[\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n\n 'taggit',\n 'south',\n 'compressor',\n\n 'wagtail.wagtailcore',\n 'wagtail.wagtailadmin',\n 'wagtail.wagtaildocs',\n 'wagtail.wagtailsnippets',\n 'wagtail.wagtailusers',\n 'wagtail.wagtailimages',\n 'wagtail.wagtailembeds',\n 'wagtail.wagtailsearch',\n 'wagtail.wagtailredirects',\n 'wagtail.wagtailforms',\n 'wagtail.tests',\n ],\n\n # Using DatabaseCache to make sure that the cache is cleared between tests.\n # This prevents false-positives in some wagtail core tests where we are\n # changing the 'wagtail_root_paths' key which may cause future tests to fail.\n CACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.db.DatabaseCache',\n 'LOCATION': 'cache',\n }\n },\n PASSWORD_HASHERS=(\n 'django.contrib.auth.hashers.MD5PasswordHasher', # don't use the intentionally slow default password hasher\n ),\n COMPRESS_ENABLED=False, # disable compression so that we can run tests on the content of the compress tag\n WAGTAILSEARCH_BACKENDS=WAGTAILSEARCH_BACKENDS,\n WAGTAIL_SITE_NAME='Test Site',\n LOGIN_REDIRECT_URL='wagtailadmin_home',\n LOGIN_URL='wagtailadmin_login',\n )\n\n\ndef runtests():\n argv = sys.argv[:1] + ['test'] + sys.argv[1:]\n try:\n execute_from_command_line(argv)\n finally:\n shutil.rmtree(STATIC_ROOT, ignore_errors=True)\n shutil.rmtree(MEDIA_ROOT, ignore_errors=True)\n\n\nif __name__ == '__main__':\n runtests()\n", "path": "runtests.py"}, {"content": "from django.conf import settings\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth.forms import SetPasswordForm\nfrom django.contrib.auth.decorators import permission_required\nfrom 
django.contrib.auth.views import logout as auth_logout, login as auth_login\nfrom django.utils.translation import ugettext as _ \nfrom django.views.decorators.debug import sensitive_post_parameters\nfrom django.views.decorators.cache import never_cache\n\nfrom wagtail.wagtailadmin import forms\n\n\n@permission_required('wagtailadmin.access_admin')\ndef account(request):\n return render(request, 'wagtailadmin/account/account.html', {\n 'show_change_password': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True) and request.user.has_usable_password(),\n })\n\n\n@permission_required('wagtailadmin.access_admin')\ndef change_password(request):\n can_change_password = request.user.has_usable_password()\n\n if can_change_password:\n if request.POST:\n form = SetPasswordForm(request.user, request.POST)\n\n if form.is_valid():\n form.save()\n\n messages.success(request, _(\"Your password has been changed successfully!\"))\n return redirect('wagtailadmin_account')\n else:\n form = SetPasswordForm(request.user)\n else:\n form = None\n\n return render(request, 'wagtailadmin/account/change_password.html', {\n 'form': form,\n 'can_change_password': can_change_password,\n })\n\n\n@sensitive_post_parameters()\n@never_cache\ndef login(request):\n if request.user.is_authenticated():\n return redirect('wagtailadmin_home')\n else:\n return auth_login(request,\n template_name='wagtailadmin/login.html',\n authentication_form=forms.LoginForm,\n extra_context={\n 'show_password_reset': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True),\n },\n )\n\n\ndef logout(request):\n response = auth_logout(request, next_page = 'wagtailadmin_login')\n\n # By default, logging out will generate a fresh sessionid cookie. We want to use the\n # absence of sessionid as an indication that front-end pages are being viewed by a\n # non-logged-in user and are therefore cacheable, so we forcibly delete the cookie here.\n response.delete_cookie(settings.SESSION_COOKIE_NAME,\n domain=settings.SESSION_COOKIE_DOMAIN,\n path=settings.SESSION_COOKIE_PATH)\n\n # HACK: pretend that the session hasn't been modified, so that SessionMiddleware\n # won't override the above and write a new cookie.\n request.session.modified = False\n\n return response\n", "path": "wagtail/wagtailadmin/views/account.py"}, {"content": "from django.conf.urls import url\nfrom django.conf import settings\n\nfrom wagtail.wagtailadmin.forms import LoginForm, PasswordResetForm\nfrom wagtail.wagtailadmin.views import account, chooser, home, pages, tags, userbar\nfrom wagtail.wagtailadmin import hooks\n\n\nurlpatterns = [\n # Password reset\n url(\n r'^password_reset/$', 'django.contrib.auth.views.password_reset', {\n 'template_name': 'wagtailadmin/account/password_reset/form.html',\n 'email_template_name': 'wagtailadmin/account/password_reset/email.txt',\n 'subject_template_name': 'wagtailadmin/account/password_reset/email_subject.txt',\n 'password_reset_form': PasswordResetForm,\n }, name='password_reset'\n ),\n url(\n r'^password_reset/done/$', 'django.contrib.auth.views.password_reset_done', {\n 'template_name': 'wagtailadmin/account/password_reset/done.html'\n }, name='password_reset_done'\n ),\n url(\n r'^password_reset/confirm/(?P<uidb64>[0-9A-Za-z_\\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',\n 'django.contrib.auth.views.password_reset_confirm',\n {'template_name': 'wagtailadmin/account/password_reset/confirm.html'},\n name='password_reset_confirm',\n ),\n url(\n r'^password_reset/complete/$', 
'django.contrib.auth.views.password_reset_complete',\n {'template_name': 'wagtailadmin/account/password_reset/complete.html'},\n name='password_reset_complete'\n ),\n]\n\nurlpatterns += [\n url(r'^$', home.home, name='wagtailadmin_home'),\n\n url(r'^failwhale/$', home.error_test, name='wagtailadmin_error_test'),\n\n url(r'^pages/$', pages.index, name='wagtailadmin_explore_root'),\n url(r'^pages/(\\d+)/$', pages.index, name='wagtailadmin_explore'),\n\n url(r'^pages/new/(\\w+)/(\\w+)/(\\d+)/$', pages.create, name='wagtailadmin_pages_create'),\n url(r'^pages/new/(\\w+)/(\\w+)/(\\d+)/preview/$', pages.preview_on_create, name='wagtailadmin_pages_preview_on_create'),\n url(r'^pages/usage/(\\w+)/(\\w+)/$', pages.content_type_use, name='wagtailadmin_pages_type_use'),\n\n url(r'^pages/(\\d+)/edit/$', pages.edit, name='wagtailadmin_pages_edit'),\n url(r'^pages/(\\d+)/edit/preview/$', pages.preview_on_edit, name='wagtailadmin_pages_preview_on_edit'),\n\n url(r'^pages/preview_placeholder/$', pages.preview_placeholder, name='wagtailadmin_pages_preview_placeholder'),\n\n url(r'^pages/(\\d+)/view_draft/$', pages.view_draft, name='wagtailadmin_pages_view_draft'),\n url(r'^pages/(\\d+)/add_subpage/$', pages.add_subpage, name='wagtailadmin_pages_add_subpage'),\n url(r'^pages/(\\d+)/delete/$', pages.delete, name='wagtailadmin_pages_delete'),\n url(r'^pages/(\\d+)/unpublish/$', pages.unpublish, name='wagtailadmin_pages_unpublish'),\n\n url(r'^pages/search/$', pages.search, name='wagtailadmin_pages_search'),\n\n url(r'^pages/(\\d+)/move/$', pages.move_choose_destination, name='wagtailadmin_pages_move'),\n url(r'^pages/(\\d+)/move/(\\d+)/$', pages.move_choose_destination, name='wagtailadmin_pages_move_choose_destination'),\n url(r'^pages/(\\d+)/move/(\\d+)/confirm/$', pages.move_confirm, name='wagtailadmin_pages_move_confirm'),\n url(r'^pages/(\\d+)/set_position/$', pages.set_page_position, name='wagtailadmin_pages_set_page_position'),\n\n url(r'^pages/moderation/(\\d+)/approve/$', pages.approve_moderation, name='wagtailadmin_pages_approve_moderation'),\n url(r'^pages/moderation/(\\d+)/reject/$', pages.reject_moderation, name='wagtailadmin_pages_reject_moderation'),\n url(r'^pages/moderation/(\\d+)/preview/$', pages.preview_for_moderation, name='wagtailadmin_pages_preview_for_moderation'),\n\n url(r'^choose-page/$', chooser.browse, name='wagtailadmin_choose_page'),\n url(r'^choose-page/(\\d+)/$', chooser.browse, name='wagtailadmin_choose_page_child'),\n url(r'^choose-external-link/$', chooser.external_link, name='wagtailadmin_choose_page_external_link'),\n url(r'^choose-email-link/$', chooser.email_link, name='wagtailadmin_choose_page_email_link'),\n\n url(r'^tag-autocomplete/$', tags.autocomplete, name='wagtailadmin_tag_autocomplete'),\n\n url(r'^login/$', account.login, name='wagtailadmin_login'),\n url(r'^account/$', account.account, name='wagtailadmin_account'),\n url(r'^account/change_password/$', account.change_password, name='wagtailadmin_account_change_password'),\n url(r'^logout/$', account.logout, name='wagtailadmin_logout'),\n\n url(r'^userbar/(\\d+)/$', userbar.for_frontend, name='wagtailadmin_userbar_frontend'),\n url(r'^userbar/moderation/(\\d+)/$', userbar.for_moderation, name='wagtailadmin_userbar_moderation'),\n]\n\n\n# This is here to make sure that 'django.contrib.auth.views.login' is reversed correctly\n# It must be placed after 'wagtailadmin_login' to prevent this from being used\nurlpatterns += [\n url(r'^login/$', 'django.contrib.auth.views.login'),\n]\n\n\n# Import additional urlpatterns 
from any apps that define a register_admin_urls hook\nfor fn in hooks.get_hooks('register_admin_urls'):\n urls = fn()\n if urls:\n urlpatterns += urls\n", "path": "wagtail/wagtailadmin/urls.py"}]}
| 3,484 | 824 |
gh_patches_debug_27556
|
rasdani/github-patches
|
git_diff
|
sopel-irc__sopel-1851
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Safety throws error with positives
<!-- Before reporting a bug, please search both open *and closed* issues to
see if it has already been reported. If you can, try to reproduce the problem
on an unmodified copy of the `master` branch first, as sometimes bugs are found
and fixed without a report. If the problem is unreported and persists in
`master`, please help us fix it quickly by filling out as much of this
information as you can. Thanks! -->
### Description
When Safety is enabled with a valid personal (free) API key, the bot throws an error whenever it sees a new URL in a safety-enabled channel.
### Reproduction steps
1. Set `safety` to `on` or `strict`
2. Send new message containing a URL
### Expected behavior
The URL should be checked (and, if malicious, flagged) without the bot raising an unhandled error.
### Logs
From the bot
```
<Ad> https://www.cnn.com/2020/04/21/uk/uk-death-toll-coronavirus-statistics-gbr-intl/index.html
<^quark^> [ UK coronavirus: Death toll in England and Wales 41% higher than government figures - CNN ] - www.cnn.com
<^quark^> Unexpected error ('positives') from Ad at 2020-04-21 21:47:29.752477. Message was: https://www.cnn.com/2020/04/21/uk/uk-death-toll-coronavirus-statistics-gbr-intl/index.html
```
From logs
```
[2020-04-21 21:47:29,752] sopel.bot ERROR - Unexpected error ('positives') from Ad at 2020-04-21 21:47:29.752477. Message was: https://www.cnn.com/2020/04/21/uk/uk-death-toll-coronavirus-statistics-gbr-intl/index.html
Traceback (most recent call last):
File "/home/sopel/.local/lib/python3.8/site-packages/sopel-7.0.0-py3.8.egg/sopel/bot.py", line 590, in call
exit_code = func(sopel, trigger)
File "/home/sopel/.local/lib/python3.8/site-packages/sopel-7.0.0-py3.8.egg/sopel/modules/safety.py", line 166, in url_handler
data = {'positives': result['positives'],
KeyError: 'positives'
```
### Environment
- Sopel `.version`: 7.0.1
- Sopel installed via: git
- Python version: 3.8
- Operating system: alpine
- IRCd `/version`: UnrealIRCd-5.0.3.1
- Relevant plugins: safety
### Notes
--- END ISSUE ---
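For orientation: the traceback above boils down to indexing a VirusTotal JSON payload that carries no `positives` key, which the module's own TODO later attributes to reports for scans that have not completed yet. A minimal sketch of the defensive access pattern that avoids the `KeyError`, using an illustrative (not verbatim) queued-style payload:

```python
# Sketch: guard against VirusTotal report payloads that carry no scan results yet.
# The sample payload below is illustrative, not a verbatim API response.
queued_style_payload = {"response_code": -2, "verbose_msg": "Scan request successfully queued"}

positives = queued_style_payload.get("positives", 0)  # 0 instead of KeyError
total = queued_style_payload.get("total", 0)

has_report = all(k in queued_style_payload for k in ("positives", "total"))
print(positives, total, has_report)  # -> 0 0 False
```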
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sopel/modules/safety.py`
Content:
```
1 # coding=utf-8
2 """
3 safety.py - Alerts about malicious URLs
4 Copyright © 2014, Elad Alfassa, <[email protected]>
5 Licensed under the Eiffel Forum License 2.
6
7 This module uses virustotal.com
8 """
9 from __future__ import unicode_literals, absolute_import, print_function, division
10
11 import logging
12 import os.path
13 import re
14 import sys
15 import threading
16 import time
17
18 import requests
19
20 from sopel.config.types import StaticSection, ValidatedAttribute, ListAttribute
21 from sopel.formatting import color, bold
22 from sopel.module import OP
23 import sopel.tools
24
25 try:
26 # This is done separately from the below version if/else because JSONDecodeError
27 # didn't appear until Python 3.5, but Sopel claims support for 3.3+
28 # Redo this whole block of nonsense when dropping py2/old py3 support
29 from json import JSONDecodeError as InvalidJSONResponse
30 except ImportError:
31 InvalidJSONResponse = ValueError
32
33 if sys.version_info.major > 2:
34 unicode = str
35 from urllib.request import urlretrieve
36 from urllib.parse import urlparse
37 else:
38 from urllib import urlretrieve
39 from urlparse import urlparse
40
41
42 LOGGER = logging.getLogger(__name__)
43
44 vt_base_api_url = 'https://www.virustotal.com/vtapi/v2/url/'
45 malware_domains = set()
46 known_good = []
47 cache_limit = 512
48
49
50 class SafetySection(StaticSection):
51 enabled_by_default = ValidatedAttribute('enabled_by_default', bool, default=True)
52 """Whether to enable URL safety in all channels where it isn't explicitly disabled."""
53 known_good = ListAttribute('known_good')
54 """List of "known good" domains to ignore."""
55 vt_api_key = ValidatedAttribute('vt_api_key')
56 """Optional VirusTotal API key (improves malicious URL detection)."""
57
58
59 def configure(config):
60 """
61 | name | example | purpose |
62 | ---- | ------- | ------- |
63 | enabled\\_by\\_default | True | Enable URL safety in all channels where it isn't explicitly disabled. |
64 | known\\_good | sopel.chat,dftba.net | List of "known good" domains to ignore. |
65 | vt\\_api\\_key | 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef | Optional VirusTotal API key to improve malicious URL detection |
66 """
67 config.define_section('safety', SafetySection)
68 config.safety.configure_setting(
69 'enabled_by_default',
70 "Enable URL safety in channels that don't specifically disable it?",
71 )
72 config.safety.configure_setting(
73 'known_good',
74 'Enter any domains to whitelist',
75 )
76 config.safety.configure_setting(
77 'vt_api_key',
78 "Optionally, enter a VirusTotal API key to improve malicious URL "
79 "protection.\nOtherwise, only the Malwarebytes DB will be used."
80 )
81
82
83 def setup(bot):
84 bot.config.define_section('safety', SafetySection)
85
86 if 'safety_cache' not in bot.memory:
87 bot.memory['safety_cache'] = sopel.tools.SopelMemory()
88 if 'safety_cache_lock' not in bot.memory:
89 bot.memory['safety_cache_lock'] = threading.Lock()
90 for item in bot.config.safety.known_good:
91 known_good.append(re.compile(item, re.I))
92
93 loc = os.path.join(bot.config.homedir, 'malwaredomains.txt')
94 if os.path.isfile(loc):
95 if os.path.getmtime(loc) < time.time() - 24 * 60 * 60 * 7:
96 # File exists but older than one week — update it
97 _download_malwaredomains_db(loc)
98 else:
99 _download_malwaredomains_db(loc)
100 with open(loc, 'r') as f:
101 for line in f:
102 clean_line = unicode(line).strip().lower()
103 if clean_line != '':
104 malware_domains.add(clean_line)
105
106
107 def shutdown(bot):
108 bot.memory.pop('safety_cache', None)
109 bot.memory.pop('safety_cache_lock', None)
110
111
112 def _download_malwaredomains_db(path):
113 url = 'https://mirror1.malwaredomains.com/files/justdomains'
114 LOGGER.info('Downloading malwaredomains db from %s', url)
115 urlretrieve(url, path)
116
117
118 @sopel.module.rule(r'(?u).*(https?://\S+).*')
119 @sopel.module.priority('high')
120 def url_handler(bot, trigger):
121 """Checks for malicious URLs"""
122 check = True # Enable URL checking
123 strict = False # Strict mode: kick on malicious URL
124 positives = 0 # Number of engines saying it's malicious
125 total = 0 # Number of total engines
126 use_vt = True # Use VirusTotal
127 check = bot.config.safety.enabled_by_default
128 if check is None:
129 # If not set, assume default
130 check = True
131 # DB overrides config:
132 setting = bot.db.get_channel_value(trigger.sender, 'safety')
133 if setting is not None:
134 if setting == 'off':
135 return # Not checking
136 elif setting in ['on', 'strict', 'local', 'local strict']:
137 check = True
138 if setting == 'strict' or setting == 'local strict':
139 strict = True
140 if setting == 'local' or setting == 'local strict':
141 use_vt = False
142
143 if not check:
144 return # Not overridden by DB, configured default off
145
146 try:
147 netloc = urlparse(trigger.group(1)).netloc
148 except ValueError:
149 return # Invalid IPv6 URL
150
151 if any(regex.search(netloc) for regex in known_good):
152 return # Whitelisted
153
154 apikey = bot.config.safety.vt_api_key
155 try:
156 if apikey is not None and use_vt:
157 payload = {'resource': unicode(trigger),
158 'apikey': apikey,
159 'scan': '1'}
160
161 if trigger not in bot.memory['safety_cache']:
162 r = requests.post(vt_base_api_url + 'report', data=payload)
163 r.raise_for_status()
164 result = r.json()
165 fetched = time.time()
166 data = {'positives': result['positives'],
167 'total': result['total'],
168 'fetched': fetched}
169 bot.memory['safety_cache'][trigger] = data
170 if len(bot.memory['safety_cache']) >= (2 * cache_limit):
171 _clean_cache(bot)
172 else:
173 print('using cache')
174 result = bot.memory['safety_cache'][trigger]
175 positives = result['positives']
176 total = result['total']
177 except requests.exceptions.RequestException:
178 # Ignoring exceptions with VT so MalwareDomains will always work
179 LOGGER.debug('[VirusTotal] Error obtaining response.', exc_info=True)
180 except InvalidJSONResponse:
181 # Ignoring exceptions with VT so MalwareDomains will always work
182 LOGGER.debug('[VirusTotal] Malformed response (invalid JSON).', exc_info=True)
183
184 if unicode(netloc).lower() in malware_domains:
185 # malwaredomains is more trustworthy than some VT engines
186 # therefore it gets a weight of 10 engines when calculating confidence
187 positives += 10
188 total += 10
189
190 if positives > 1:
191 # Possibly malicious URL detected!
192 confidence = '{}%'.format(round((positives / total) * 100))
193 msg = 'link posted by %s is possibly malicious ' % bold(trigger.nick)
194 msg += '(confidence %s - %s/%s)' % (confidence, positives, total)
195 bot.say('[' + bold(color('WARNING', 'red')) + '] ' + msg)
196 if strict:
197 bot.kick(trigger.nick, trigger.sender, 'Posted a malicious link')
198
199
200 @sopel.module.commands('safety')
201 def toggle_safety(bot, trigger):
202 """Set safety setting for channel"""
203 if not trigger.admin and bot.channels[trigger.sender].privileges[trigger.nick] < OP:
204 bot.reply('Only channel operators can change safety settings')
205 return
206 allowed_states = ['strict', 'on', 'off', 'local', 'local strict']
207 if not trigger.group(2) or trigger.group(2).lower() not in allowed_states:
208 options = ' / '.join(allowed_states)
209 bot.reply('Available options: %s' % options)
210 return
211
212 channel = trigger.sender.lower()
213 bot.db.set_channel_value(channel, 'safety', trigger.group(2).lower())
214 bot.reply('Safety is now set to "%s" on this channel' % trigger.group(2))
215
216
217 # Clean the cache every day
218 # Code above also calls this if there are too many cache entries
219 @sopel.module.interval(24 * 60 * 60)
220 def _clean_cache(bot):
221 """Cleans up old entries in URL safety cache."""
222 if bot.memory['safety_cache_lock'].acquire(False):
223 LOGGER.info('Starting safety cache cleanup...')
224 try:
225 # clean up by age first
226 cutoff = time.time() - (7 * 24 * 60 * 60) # 7 days ago
227 old_keys = []
228 for key, data in sopel.tools.iteritems(bot.memory['safety_cache']):
229 if data['fetched'] <= cutoff:
230 old_keys.append(key)
231 for key in old_keys:
232 bot.memory['safety_cache'].pop(key, None)
233
234 # clean up more values if the cache is still too big
235 overage = len(bot.memory['safety_cache']) - cache_limit
236 if overage > 0:
237 extra_keys = sorted(
238 (data.fetched, key)
239 for (key, data)
240 in bot.memory['safety_cache'].items())[:overage]
241 for (_, key) in extra_keys:
242 bot.memory['safety_cache'].pop(key, None)
243 finally:
244 # No matter what errors happen (or not), release the lock
245 bot.memory['safety_cache_lock'].release()
246
247 LOGGER.info('Safety cache cleanup finished.')
248 else:
249 LOGGER.info(
250 'Skipping safety cache cleanup: Cache is locked, '
251 'cleanup already running.')
252
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sopel/modules/safety.py b/sopel/modules/safety.py
--- a/sopel/modules/safety.py
+++ b/sopel/modules/safety.py
@@ -163,17 +163,20 @@
r.raise_for_status()
result = r.json()
fetched = time.time()
- data = {'positives': result['positives'],
- 'total': result['total'],
- 'fetched': fetched}
- bot.memory['safety_cache'][trigger] = data
- if len(bot.memory['safety_cache']) >= (2 * cache_limit):
- _clean_cache(bot)
+ if all(k in result for k in ['positives', 'total']):
+ # cache result only if it contains a scan report
+ # TODO: handle checking back for results from queued scans
+ data = {'positives': result['positives'],
+ 'total': result['total'],
+ 'fetched': fetched}
+ bot.memory['safety_cache'][trigger] = data
+ if len(bot.memory['safety_cache']) >= (2 * cache_limit):
+ _clean_cache(bot)
else:
print('using cache')
result = bot.memory['safety_cache'][trigger]
- positives = result['positives']
- total = result['total']
+ positives = result.get('positives', 0)
+ total = result.get('total', 0)
except requests.exceptions.RequestException:
# Ignoring exceptions with VT so MalwareDomains will always work
LOGGER.debug('[VirusTotal] Error obtaining response.', exc_info=True)
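Note the shape of the fix above: a result is written to the cache only when it actually contains both `positives` and `total`, and reads fall back to 0 via `.get()`. Caching an incomplete (queued) report would otherwise mark the URL as effectively clean for the cache's lifetime. A tiny standalone illustration of that guard (names here are made up for the example):

```python
# Illustration of the guard used in the patch: only complete reports enter the cache.
cache = {}

def record(url, result):
    if all(k in result for k in ("positives", "total")):
        cache[url] = {"positives": result["positives"], "total": result["total"]}
    # Incomplete payloads (e.g. queued scans) are not cached, so the URL
    # will simply be re-queried the next time it is seen.

record("http://example.com/a", {"positives": 3, "total": 70})
record("http://example.com/b", {"verbose_msg": "queued"})
print(sorted(cache))  # -> ['http://example.com/a']
```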
|
{"golden_diff": "diff --git a/sopel/modules/safety.py b/sopel/modules/safety.py\n--- a/sopel/modules/safety.py\n+++ b/sopel/modules/safety.py\n@@ -163,17 +163,20 @@\n r.raise_for_status()\n result = r.json()\n fetched = time.time()\n- data = {'positives': result['positives'],\n- 'total': result['total'],\n- 'fetched': fetched}\n- bot.memory['safety_cache'][trigger] = data\n- if len(bot.memory['safety_cache']) >= (2 * cache_limit):\n- _clean_cache(bot)\n+ if all(k in result for k in ['positives', 'total']):\n+ # cache result only if it contains a scan report\n+ # TODO: handle checking back for results from queued scans\n+ data = {'positives': result['positives'],\n+ 'total': result['total'],\n+ 'fetched': fetched}\n+ bot.memory['safety_cache'][trigger] = data\n+ if len(bot.memory['safety_cache']) >= (2 * cache_limit):\n+ _clean_cache(bot)\n else:\n print('using cache')\n result = bot.memory['safety_cache'][trigger]\n- positives = result['positives']\n- total = result['total']\n+ positives = result.get('positives', 0)\n+ total = result.get('total', 0)\n except requests.exceptions.RequestException:\n # Ignoring exceptions with VT so MalwareDomains will always work\n LOGGER.debug('[VirusTotal] Error obtaining response.', exc_info=True)\n", "issue": "Safety throws error with positives\n<!-- Before reporting a bug, please search both open *and closed* issues to\r\nsee if it has already been reported. If you can, try to reproduce the problem\r\non an unmodified copy of the `master` branch first, as sometimes bugs are found\r\nand fixed without a report. If the problem is unreported and persists in\r\n`master`, please help us fix it quickly by filling out as much of this\r\ninformation as you can. Thanks! -->\r\n\r\n### Description\r\nWhen Safety is enabled with a valid personal free API key whenever a new URL is seen by the bot in a safety enabled channel, it throws an error.\r\n\r\n### Reproduction steps\r\n1. Set `safety` to `on` or `strict`\r\n2. Send new message containing a URL\r\n\r\n### Expected behavior\r\nWhat did you expect to happen instead?\r\n\r\n### Logs\r\nFrom the bot\r\n```\r\n<Ad> https://www.cnn.com/2020/04/21/uk/uk-death-toll-coronavirus-statistics-gbr-intl/index.html\r\n<^quark^> [ UK coronavirus: Death toll in England and Wales 41% higher than government figures - CNN ] - www.cnn.com\r\n<^quark^> Unexpected error ('positives') from Ad at 2020-04-21 21:47:29.752477. Message was: https://www.cnn.com/2020/04/21/uk/uk-death-toll-coronavirus-statistics-gbr-intl/index.html\r\n```\r\n\r\nFrom logs\r\n```\r\n[2020-04-21 21:47:29,752] sopel.bot ERROR - Unexpected error ('positives') from Ad at 2020-04-21 21:47:29.752477. 
Message was: https://www.cnn.com/2020/04/21/uk/uk-death-toll-coronavirus-statistics-gbr-intl/index.html\r\nTraceback (most recent call last):\r\n File \"/home/sopel/.local/lib/python3.8/site-packages/sopel-7.0.0-py3.8.egg/sopel/bot.py\", line 590, in call\r\n exit_code = func(sopel, trigger)\r\n File \"/home/sopel/.local/lib/python3.8/site-packages/sopel-7.0.0-py3.8.egg/sopel/modules/safety.py\", line 166, in url_handler\r\n data = {'positives': result['positives'],\r\nKeyError: 'positives'\r\n\r\n```\r\n\r\n### Environment\r\n- Sopel `.version`: 7.0.1\r\n- Sopel installed via: git \r\n- Python version: 3.8\r\n- Operating system: alpine\r\n- IRCd `/version`: UnrealIRCd-5.0.3.1\r\n- Relevant plugins: safety\r\n\r\n### Notes\r\n\r\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nsafety.py - Alerts about malicious URLs\nCopyright \u00a9 2014, Elad Alfassa, <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nThis module uses virustotal.com\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport logging\nimport os.path\nimport re\nimport sys\nimport threading\nimport time\n\nimport requests\n\nfrom sopel.config.types import StaticSection, ValidatedAttribute, ListAttribute\nfrom sopel.formatting import color, bold\nfrom sopel.module import OP\nimport sopel.tools\n\ntry:\n # This is done separately from the below version if/else because JSONDecodeError\n # didn't appear until Python 3.5, but Sopel claims support for 3.3+\n # Redo this whole block of nonsense when dropping py2/old py3 support\n from json import JSONDecodeError as InvalidJSONResponse\nexcept ImportError:\n InvalidJSONResponse = ValueError\n\nif sys.version_info.major > 2:\n unicode = str\n from urllib.request import urlretrieve\n from urllib.parse import urlparse\nelse:\n from urllib import urlretrieve\n from urlparse import urlparse\n\n\nLOGGER = logging.getLogger(__name__)\n\nvt_base_api_url = 'https://www.virustotal.com/vtapi/v2/url/'\nmalware_domains = set()\nknown_good = []\ncache_limit = 512\n\n\nclass SafetySection(StaticSection):\n enabled_by_default = ValidatedAttribute('enabled_by_default', bool, default=True)\n \"\"\"Whether to enable URL safety in all channels where it isn't explicitly disabled.\"\"\"\n known_good = ListAttribute('known_good')\n \"\"\"List of \"known good\" domains to ignore.\"\"\"\n vt_api_key = ValidatedAttribute('vt_api_key')\n \"\"\"Optional VirusTotal API key (improves malicious URL detection).\"\"\"\n\n\ndef configure(config):\n \"\"\"\n | name | example | purpose |\n | ---- | ------- | ------- |\n | enabled\\\\_by\\\\_default | True | Enable URL safety in all channels where it isn't explicitly disabled. |\n | known\\\\_good | sopel.chat,dftba.net | List of \"known good\" domains to ignore. 
|\n | vt\\\\_api\\\\_key | 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef | Optional VirusTotal API key to improve malicious URL detection |\n \"\"\"\n config.define_section('safety', SafetySection)\n config.safety.configure_setting(\n 'enabled_by_default',\n \"Enable URL safety in channels that don't specifically disable it?\",\n )\n config.safety.configure_setting(\n 'known_good',\n 'Enter any domains to whitelist',\n )\n config.safety.configure_setting(\n 'vt_api_key',\n \"Optionally, enter a VirusTotal API key to improve malicious URL \"\n \"protection.\\nOtherwise, only the Malwarebytes DB will be used.\"\n )\n\n\ndef setup(bot):\n bot.config.define_section('safety', SafetySection)\n\n if 'safety_cache' not in bot.memory:\n bot.memory['safety_cache'] = sopel.tools.SopelMemory()\n if 'safety_cache_lock' not in bot.memory:\n bot.memory['safety_cache_lock'] = threading.Lock()\n for item in bot.config.safety.known_good:\n known_good.append(re.compile(item, re.I))\n\n loc = os.path.join(bot.config.homedir, 'malwaredomains.txt')\n if os.path.isfile(loc):\n if os.path.getmtime(loc) < time.time() - 24 * 60 * 60 * 7:\n # File exists but older than one week \u2014 update it\n _download_malwaredomains_db(loc)\n else:\n _download_malwaredomains_db(loc)\n with open(loc, 'r') as f:\n for line in f:\n clean_line = unicode(line).strip().lower()\n if clean_line != '':\n malware_domains.add(clean_line)\n\n\ndef shutdown(bot):\n bot.memory.pop('safety_cache', None)\n bot.memory.pop('safety_cache_lock', None)\n\n\ndef _download_malwaredomains_db(path):\n url = 'https://mirror1.malwaredomains.com/files/justdomains'\n LOGGER.info('Downloading malwaredomains db from %s', url)\n urlretrieve(url, path)\n\n\[email protected](r'(?u).*(https?://\\S+).*')\[email protected]('high')\ndef url_handler(bot, trigger):\n \"\"\"Checks for malicious URLs\"\"\"\n check = True # Enable URL checking\n strict = False # Strict mode: kick on malicious URL\n positives = 0 # Number of engines saying it's malicious\n total = 0 # Number of total engines\n use_vt = True # Use VirusTotal\n check = bot.config.safety.enabled_by_default\n if check is None:\n # If not set, assume default\n check = True\n # DB overrides config:\n setting = bot.db.get_channel_value(trigger.sender, 'safety')\n if setting is not None:\n if setting == 'off':\n return # Not checking\n elif setting in ['on', 'strict', 'local', 'local strict']:\n check = True\n if setting == 'strict' or setting == 'local strict':\n strict = True\n if setting == 'local' or setting == 'local strict':\n use_vt = False\n\n if not check:\n return # Not overridden by DB, configured default off\n\n try:\n netloc = urlparse(trigger.group(1)).netloc\n except ValueError:\n return # Invalid IPv6 URL\n\n if any(regex.search(netloc) for regex in known_good):\n return # Whitelisted\n\n apikey = bot.config.safety.vt_api_key\n try:\n if apikey is not None and use_vt:\n payload = {'resource': unicode(trigger),\n 'apikey': apikey,\n 'scan': '1'}\n\n if trigger not in bot.memory['safety_cache']:\n r = requests.post(vt_base_api_url + 'report', data=payload)\n r.raise_for_status()\n result = r.json()\n fetched = time.time()\n data = {'positives': result['positives'],\n 'total': result['total'],\n 'fetched': fetched}\n bot.memory['safety_cache'][trigger] = data\n if len(bot.memory['safety_cache']) >= (2 * cache_limit):\n _clean_cache(bot)\n else:\n print('using cache')\n result = bot.memory['safety_cache'][trigger]\n positives = result['positives']\n total = result['total']\n except 
requests.exceptions.RequestException:\n # Ignoring exceptions with VT so MalwareDomains will always work\n LOGGER.debug('[VirusTotal] Error obtaining response.', exc_info=True)\n except InvalidJSONResponse:\n # Ignoring exceptions with VT so MalwareDomains will always work\n LOGGER.debug('[VirusTotal] Malformed response (invalid JSON).', exc_info=True)\n\n if unicode(netloc).lower() in malware_domains:\n # malwaredomains is more trustworthy than some VT engines\n # therefore it gets a weight of 10 engines when calculating confidence\n positives += 10\n total += 10\n\n if positives > 1:\n # Possibly malicious URL detected!\n confidence = '{}%'.format(round((positives / total) * 100))\n msg = 'link posted by %s is possibly malicious ' % bold(trigger.nick)\n msg += '(confidence %s - %s/%s)' % (confidence, positives, total)\n bot.say('[' + bold(color('WARNING', 'red')) + '] ' + msg)\n if strict:\n bot.kick(trigger.nick, trigger.sender, 'Posted a malicious link')\n\n\[email protected]('safety')\ndef toggle_safety(bot, trigger):\n \"\"\"Set safety setting for channel\"\"\"\n if not trigger.admin and bot.channels[trigger.sender].privileges[trigger.nick] < OP:\n bot.reply('Only channel operators can change safety settings')\n return\n allowed_states = ['strict', 'on', 'off', 'local', 'local strict']\n if not trigger.group(2) or trigger.group(2).lower() not in allowed_states:\n options = ' / '.join(allowed_states)\n bot.reply('Available options: %s' % options)\n return\n\n channel = trigger.sender.lower()\n bot.db.set_channel_value(channel, 'safety', trigger.group(2).lower())\n bot.reply('Safety is now set to \"%s\" on this channel' % trigger.group(2))\n\n\n# Clean the cache every day\n# Code above also calls this if there are too many cache entries\[email protected](24 * 60 * 60)\ndef _clean_cache(bot):\n \"\"\"Cleans up old entries in URL safety cache.\"\"\"\n if bot.memory['safety_cache_lock'].acquire(False):\n LOGGER.info('Starting safety cache cleanup...')\n try:\n # clean up by age first\n cutoff = time.time() - (7 * 24 * 60 * 60) # 7 days ago\n old_keys = []\n for key, data in sopel.tools.iteritems(bot.memory['safety_cache']):\n if data['fetched'] <= cutoff:\n old_keys.append(key)\n for key in old_keys:\n bot.memory['safety_cache'].pop(key, None)\n\n # clean up more values if the cache is still too big\n overage = len(bot.memory['safety_cache']) - cache_limit\n if overage > 0:\n extra_keys = sorted(\n (data.fetched, key)\n for (key, data)\n in bot.memory['safety_cache'].items())[:overage]\n for (_, key) in extra_keys:\n bot.memory['safety_cache'].pop(key, None)\n finally:\n # No matter what errors happen (or not), release the lock\n bot.memory['safety_cache_lock'].release()\n\n LOGGER.info('Safety cache cleanup finished.')\n else:\n LOGGER.info(\n 'Skipping safety cache cleanup: Cache is locked, '\n 'cleanup already running.')\n", "path": "sopel/modules/safety.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"\nsafety.py - Alerts about malicious URLs\nCopyright \u00a9 2014, Elad Alfassa, <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nThis module uses virustotal.com\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport logging\nimport os.path\nimport re\nimport sys\nimport threading\nimport time\n\nimport requests\n\nfrom sopel.config.types import StaticSection, ValidatedAttribute, ListAttribute\nfrom sopel.formatting import color, bold\nfrom sopel.module import OP\nimport sopel.tools\n\ntry:\n # This is done 
separately from the below version if/else because JSONDecodeError\n # didn't appear until Python 3.5, but Sopel claims support for 3.3+\n # Redo this whole block of nonsense when dropping py2/old py3 support\n from json import JSONDecodeError as InvalidJSONResponse\nexcept ImportError:\n InvalidJSONResponse = ValueError\n\nif sys.version_info.major > 2:\n unicode = str\n from urllib.request import urlretrieve\n from urllib.parse import urlparse\nelse:\n from urllib import urlretrieve\n from urlparse import urlparse\n\n\nLOGGER = logging.getLogger(__name__)\n\nvt_base_api_url = 'https://www.virustotal.com/vtapi/v2/url/'\nmalware_domains = set()\nknown_good = []\ncache_limit = 512\n\n\nclass SafetySection(StaticSection):\n enabled_by_default = ValidatedAttribute('enabled_by_default', bool, default=True)\n \"\"\"Whether to enable URL safety in all channels where it isn't explicitly disabled.\"\"\"\n known_good = ListAttribute('known_good')\n \"\"\"List of \"known good\" domains to ignore.\"\"\"\n vt_api_key = ValidatedAttribute('vt_api_key')\n \"\"\"Optional VirusTotal API key (improves malicious URL detection).\"\"\"\n\n\ndef configure(config):\n \"\"\"\n | name | example | purpose |\n | ---- | ------- | ------- |\n | enabled\\\\_by\\\\_default | True | Enable URL safety in all channels where it isn't explicitly disabled. |\n | known\\\\_good | sopel.chat,dftba.net | List of \"known good\" domains to ignore. |\n | vt\\\\_api\\\\_key | 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef | Optional VirusTotal API key to improve malicious URL detection |\n \"\"\"\n config.define_section('safety', SafetySection)\n config.safety.configure_setting(\n 'enabled_by_default',\n \"Enable URL safety in channels that don't specifically disable it?\",\n )\n config.safety.configure_setting(\n 'known_good',\n 'Enter any domains to whitelist',\n )\n config.safety.configure_setting(\n 'vt_api_key',\n \"Optionally, enter a VirusTotal API key to improve malicious URL \"\n \"protection.\\nOtherwise, only the Malwarebytes DB will be used.\"\n )\n\n\ndef setup(bot):\n bot.config.define_section('safety', SafetySection)\n\n if 'safety_cache' not in bot.memory:\n bot.memory['safety_cache'] = sopel.tools.SopelMemory()\n if 'safety_cache_lock' not in bot.memory:\n bot.memory['safety_cache_lock'] = threading.Lock()\n for item in bot.config.safety.known_good:\n known_good.append(re.compile(item, re.I))\n\n loc = os.path.join(bot.config.homedir, 'malwaredomains.txt')\n if os.path.isfile(loc):\n if os.path.getmtime(loc) < time.time() - 24 * 60 * 60 * 7:\n # File exists but older than one week \u2014 update it\n _download_malwaredomains_db(loc)\n else:\n _download_malwaredomains_db(loc)\n with open(loc, 'r') as f:\n for line in f:\n clean_line = unicode(line).strip().lower()\n if clean_line != '':\n malware_domains.add(clean_line)\n\n\ndef shutdown(bot):\n bot.memory.pop('safety_cache', None)\n bot.memory.pop('safety_cache_lock', None)\n\n\ndef _download_malwaredomains_db(path):\n url = 'https://mirror1.malwaredomains.com/files/justdomains'\n LOGGER.info('Downloading malwaredomains db from %s', url)\n urlretrieve(url, path)\n\n\[email protected](r'(?u).*(https?://\\S+).*')\[email protected]('high')\ndef url_handler(bot, trigger):\n \"\"\"Checks for malicious URLs\"\"\"\n check = True # Enable URL checking\n strict = False # Strict mode: kick on malicious URL\n positives = 0 # Number of engines saying it's malicious\n total = 0 # Number of total engines\n use_vt = True # Use VirusTotal\n check = 
bot.config.safety.enabled_by_default\n if check is None:\n # If not set, assume default\n check = True\n # DB overrides config:\n setting = bot.db.get_channel_value(trigger.sender, 'safety')\n if setting is not None:\n if setting == 'off':\n return # Not checking\n elif setting in ['on', 'strict', 'local', 'local strict']:\n check = True\n if setting == 'strict' or setting == 'local strict':\n strict = True\n if setting == 'local' or setting == 'local strict':\n use_vt = False\n\n if not check:\n return # Not overridden by DB, configured default off\n\n try:\n netloc = urlparse(trigger.group(1)).netloc\n except ValueError:\n return # Invalid IPv6 URL\n\n if any(regex.search(netloc) for regex in known_good):\n return # Whitelisted\n\n apikey = bot.config.safety.vt_api_key\n try:\n if apikey is not None and use_vt:\n payload = {'resource': unicode(trigger),\n 'apikey': apikey,\n 'scan': '1'}\n\n if trigger not in bot.memory['safety_cache']:\n r = requests.post(vt_base_api_url + 'report', data=payload)\n r.raise_for_status()\n result = r.json()\n fetched = time.time()\n if all(k in result for k in ['positives', 'total']):\n # cache result only if it contains a scan report\n # TODO: handle checking back for results from queued scans\n data = {'positives': result['positives'],\n 'total': result['total'],\n 'fetched': fetched}\n bot.memory['safety_cache'][trigger] = data\n if len(bot.memory['safety_cache']) >= (2 * cache_limit):\n _clean_cache(bot)\n else:\n print('using cache')\n result = bot.memory['safety_cache'][trigger]\n positives = result.get('positives', 0)\n total = result.get('total', 0)\n except requests.exceptions.RequestException:\n # Ignoring exceptions with VT so MalwareDomains will always work\n LOGGER.debug('[VirusTotal] Error obtaining response.', exc_info=True)\n except InvalidJSONResponse:\n # Ignoring exceptions with VT so MalwareDomains will always work\n LOGGER.debug('[VirusTotal] Malformed response (invalid JSON).', exc_info=True)\n\n if unicode(netloc).lower() in malware_domains:\n # malwaredomains is more trustworthy than some VT engines\n # therefore it gets a weight of 10 engines when calculating confidence\n positives += 10\n total += 10\n\n if positives > 1:\n # Possibly malicious URL detected!\n confidence = '{}%'.format(round((positives / total) * 100))\n msg = 'link posted by %s is possibly malicious ' % bold(trigger.nick)\n msg += '(confidence %s - %s/%s)' % (confidence, positives, total)\n bot.say('[' + bold(color('WARNING', 'red')) + '] ' + msg)\n if strict:\n bot.kick(trigger.nick, trigger.sender, 'Posted a malicious link')\n\n\[email protected]('safety')\ndef toggle_safety(bot, trigger):\n \"\"\"Set safety setting for channel\"\"\"\n if not trigger.admin and bot.channels[trigger.sender].privileges[trigger.nick] < OP:\n bot.reply('Only channel operators can change safety settings')\n return\n allowed_states = ['strict', 'on', 'off', 'local', 'local strict']\n if not trigger.group(2) or trigger.group(2).lower() not in allowed_states:\n options = ' / '.join(allowed_states)\n bot.reply('Available options: %s' % options)\n return\n\n channel = trigger.sender.lower()\n bot.db.set_channel_value(channel, 'safety', trigger.group(2).lower())\n bot.reply('Safety is now set to \"%s\" on this channel' % trigger.group(2))\n\n\n# Clean the cache every day\n# Code above also calls this if there are too many cache entries\[email protected](24 * 60 * 60)\ndef _clean_cache(bot):\n \"\"\"Cleans up old entries in URL safety cache.\"\"\"\n if 
bot.memory['safety_cache_lock'].acquire(False):\n LOGGER.info('Starting safety cache cleanup...')\n try:\n # clean up by age first\n cutoff = time.time() - (7 * 24 * 60 * 60) # 7 days ago\n old_keys = []\n for key, data in sopel.tools.iteritems(bot.memory['safety_cache']):\n if data['fetched'] <= cutoff:\n old_keys.append(key)\n for key in old_keys:\n bot.memory['safety_cache'].pop(key, None)\n\n # clean up more values if the cache is still too big\n overage = len(bot.memory['safety_cache']) - cache_limit\n if overage > 0:\n extra_keys = sorted(\n (data.fetched, key)\n for (key, data)\n in bot.memory['safety_cache'].items())[:overage]\n for (_, key) in extra_keys:\n bot.memory['safety_cache'].pop(key, None)\n finally:\n # No matter what errors happen (or not), release the lock\n bot.memory['safety_cache_lock'].release()\n\n LOGGER.info('Safety cache cleanup finished.')\n else:\n LOGGER.info(\n 'Skipping safety cache cleanup: Cache is locked, '\n 'cleanup already running.')\n", "path": "sopel/modules/safety.py"}]}
| 3,850 | 357 |
gh_patches_debug_23646
|
rasdani/github-patches
|
git_diff
|
nipy__nipype-2066
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ENH: ICA_AROMA: make current working directory default output directory
Changes proposed in this pull request:
- Make the default output directory the current working directory so you don't have to specify an `out_dir`, which I believe makes ICA_AROMA more Node-friendly.
--- END ISSUE ---
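The request is purely about interface ergonomics: let `out_dir` fall back to the directory the node runs in so callers need not set it. One common way to express that in a nipype `CommandLine` interface is to keep the trait marked `genfile=True` (as the file below already does) and have `_gen_filename` return the current working directory. The snippet is a sketch of that idea with placeholder names, not the exact patch that was merged:

```python
import os

from nipype.interfaces.base import CommandLine, CommandLineInputSpec, Directory


class ExampleInputSpec(CommandLineInputSpec):
    # genfile=True makes nipype ask _gen_filename() for a value
    # when the user leaves out_dir unset.
    out_dir = Directory(argstr='-o %s', genfile=True,
                        desc='output directory (defaults to the run directory)')


class ExampleTool(CommandLine):
    """Toy interface; 'echo' stands in for the real command."""
    _cmd = 'echo'
    input_spec = ExampleInputSpec

    def _gen_filename(self, name):
        if name == 'out_dir':
            return os.getcwd()  # fall back to the current working directory
        return None


if __name__ == '__main__':
    print(ExampleTool().cmdline)  # e.g. "echo -o /path/to/current/dir"
```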
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nipype/interfaces/fsl/ICA_AROMA.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
3 # vi: set ft=python sts=4 ts=4 sw=4 et:
4 """This commandline module provides classes for interfacing with the
5 `ICA-AROMA.py<https://github.com/rhr-pruim/ICA-AROMA>`_ command line tool.
6 Change directory to provide relative paths for doctests
7 >>> import os
8 >>> filepath = os.path.dirname(os.path.realpath(__file__))
9 >>> datadir = os.path.realpath(os.path.join(filepath,
10 ... '../../testing/data'))
11 >>> os.chdir(datadir)
12 """
13
14 from __future__ import print_function, division, unicode_literals, absolute_import
15 from ..base import (TraitedSpec, CommandLineInputSpec, CommandLine,
16 File, Directory, traits)
17 import os
18
19
20 class ICA_AROMAInputSpec(CommandLineInputSpec):
21 feat_dir = Directory(exists=True, mandatory=True,
22 argstr='-feat %s',
23 xor=['in_file', 'mat_file', 'fnirt_warp_file', 'motion_parameters'],
24 desc='If a feat directory exists and temporal filtering '
25 'has not been run yet, ICA_AROMA can use the files in '
26 'this directory.')
27 in_file = File(exists=True, mandatory=True,
28 argstr='-i %s', xor=['feat_dir'],
29 desc='volume to be denoised')
30 out_dir = Directory('out', genfile=True,
31 argstr='-o %s',
32 desc='output directory')
33 mask = File(exists=True, argstr='-m %s', xor=['feat_dir'],
34 desc='path/name volume mask')
35 dim = traits.Int(argstr='-dim %d',
36 desc='Dimensionality reduction when running '
37 'MELODIC (defualt is automatic estimation)')
38 TR = traits.Float(argstr='-tr %.3f',
39 desc='TR in seconds. If this is not specified '
40 'the TR will be extracted from the '
41 'header of the fMRI nifti file.')
42 melodic_dir = Directory(exists=True, argstr='-meldir %s',
43 desc='path to MELODIC directory if MELODIC has already been run')
44 mat_file = File(exists=True, argstr='-affmat %s', xor=['feat_dir'],
45 desc='path/name of the mat-file describing the '
46 'affine registration (e.g. FSL FLIRT) of the '
47 'functional data to structural space (.mat file)')
48 fnirt_warp_file = File(exists=True, argstr='-warp %s', xor=['feat_dir'],
49 desc='File name of the warp-file describing '
50 'the non-linear registration (e.g. FSL FNIRT) '
51 'of the structural data to MNI152 space (.nii.gz)')
52 motion_parameters = File(exists=True, mandatory=True,
53 argstr='-mc %s', xor=['feat_dir'],
54 desc='motion parameters file')
55 denoise_type = traits.Enum('nonaggr', 'aggr', 'both', 'no', usedefault=True,
56 mandatory=True, argstr='-den %s',
57 desc='Type of denoising strategy:\n'
58 '-none: only classification, no denoising\n'
59 '-nonaggr (default): non-aggresssive denoising, i.e. partial component regression\n'
60 '-aggr: aggressive denoising, i.e. full component regression\n'
61 '-both: both aggressive and non-aggressive denoising (two outputs)')
62
63 class ICA_AROMAOutputSpec(TraitedSpec):
64 aggr_denoised_file = File(exists=True,
65 desc='if generated: aggressively denoised volume')
66 nonaggr_denoised_file = File(exists=True,
67 desc='if generated: non aggressively denoised volume' )
68 out_dir = Directory(exists=True,
69 desc='directory contains (in addition to the denoised files): '
70 'melodic.ica + classified_motion_components + '
71 'classification_overview + feature_scores + melodic_ic_mni)')
72
73 class ICA_AROMA(CommandLine):
74 """
75 Interface for the ICA_AROMA.py script.
76
77 ICA-AROMA (i.e. 'ICA-based Automatic Removal Of Motion Artifacts') concerns
78 a data-driven method to identify and remove motion-related independent
79 components from fMRI data. To that end it exploits a small, but robust
80 set of theoretically motivated features, preventing the need for classifier
81 re-training and therefore providing direct and easy applicability.
82
83 See link for further documentation: https://github.com/rhr-pruim/ICA-AROMA
84
85 Example
86 -------
87
88 >>> from nipype.interfaces.fsl import ICA_AROMA
89 >>> from nipype.testing import example_data
90 >>> AROMA_obj = ICA_AROMA.ICA_AROMA()
91 >>> AROMA_obj.inputs.in_file = 'functional.nii'
92 >>> AROMA_obj.inputs.mat_file = 'func_to_struct.mat'
93 >>> AROMA_obj.inputs.fnirt_warp_file = 'warpfield.nii'
94 >>> AROMA_obj.inputs.motion_parameters = 'fsl_mcflirt_movpar.txt'
95 >>> AROMA_obj.inputs.mask = 'mask.nii.gz'
96 >>> AROMA_obj.inputs.denoise_type = 'both'
97 >>> AROMA_obj.inputs.out_dir = 'ICA_testout'
98 >>> AROMA_obj.cmdline # doctest: +ALLOW_UNICODE
99 'ICA_AROMA.py -den both -warp warpfield.nii -i functional.nii -m mask.nii.gz -affmat func_to_struct.mat -mc fsl_mcflirt_movpar.txt -o ICA_testout'
100 """
101 _cmd = 'ICA_AROMA.py'
102 input_spec = ICA_AROMAInputSpec
103 output_spec = ICA_AROMAOutputSpec
104
105 def _list_outputs(self):
106 outputs = self.output_spec().get()
107 if isdefined(self.inputs.out_dir):
108 outputs['out_dir'] = os.path.abspath(self.inputs.out_dir)
109 else:
110 outputs['out_dir'] = self._gen_filename('out_dir')
111 out_dir = outputs['out_dir']
112
113 if self.inputs.denoise_type in ('aggr', 'both'):
114 outputs['aggr_denoised_file'] = os.path.join(out_dir, 'denoised_func_data_aggr.nii.gz')
115 if self.inputs.denoise_type in ('nonaggr', 'both'):
116 outputs['nonaggr_denoised_file'] = os.path.join(out_dir, 'denoised_func_data_nonaggr.nii.gz')
117 return outputs
118
119 def _gen_filename(self, name):
120 if name == 'out_dir':
121 return os.getcwd()
122
123
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nipype/interfaces/fsl/ICA_AROMA.py b/nipype/interfaces/fsl/ICA_AROMA.py
--- a/nipype/interfaces/fsl/ICA_AROMA.py
+++ b/nipype/interfaces/fsl/ICA_AROMA.py
@@ -13,7 +13,7 @@
from __future__ import print_function, division, unicode_literals, absolute_import
from ..base import (TraitedSpec, CommandLineInputSpec, CommandLine,
- File, Directory, traits)
+ File, Directory, traits, isdefined)
import os
@@ -109,7 +109,7 @@
else:
outputs['out_dir'] = self._gen_filename('out_dir')
out_dir = outputs['out_dir']
-
+
if self.inputs.denoise_type in ('aggr', 'both'):
outputs['aggr_denoised_file'] = os.path.join(out_dir, 'denoised_func_data_aggr.nii.gz')
if self.inputs.denoise_type in ('nonaggr', 'both'):
@@ -119,5 +119,3 @@
def _gen_filename(self, name):
if name == 'out_dir':
return os.getcwd()
-
-
|
{"golden_diff": "diff --git a/nipype/interfaces/fsl/ICA_AROMA.py b/nipype/interfaces/fsl/ICA_AROMA.py\n--- a/nipype/interfaces/fsl/ICA_AROMA.py\n+++ b/nipype/interfaces/fsl/ICA_AROMA.py\n@@ -13,7 +13,7 @@\n \n from __future__ import print_function, division, unicode_literals, absolute_import\n from ..base import (TraitedSpec, CommandLineInputSpec, CommandLine,\n- File, Directory, traits)\n+ File, Directory, traits, isdefined)\n import os\n \n \n@@ -109,7 +109,7 @@\n else:\n outputs['out_dir'] = self._gen_filename('out_dir')\n out_dir = outputs['out_dir']\n- \n+\n if self.inputs.denoise_type in ('aggr', 'both'):\n outputs['aggr_denoised_file'] = os.path.join(out_dir, 'denoised_func_data_aggr.nii.gz')\n if self.inputs.denoise_type in ('nonaggr', 'both'):\n@@ -119,5 +119,3 @@\n def _gen_filename(self, name):\n if name == 'out_dir':\n return os.getcwd()\n-\n-\n", "issue": "ENH: ICA_AROMA: make current working directory default output directory\nChanges proposed in this pull request\r\n- Make the default output directory the current working directory so you don't have to specify an out_dir, which I believe makes ICA_AROMA more Node friendly.\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"This commandline module provides classes for interfacing with the\n`ICA-AROMA.py<https://github.com/rhr-pruim/ICA-AROMA>`_ command line tool.\n Change directory to provide relative paths for doctests\n >>> import os\n >>> filepath = os.path.dirname(os.path.realpath(__file__))\n >>> datadir = os.path.realpath(os.path.join(filepath,\n ... '../../testing/data'))\n >>> os.chdir(datadir)\n\"\"\"\n\nfrom __future__ import print_function, division, unicode_literals, absolute_import\nfrom ..base import (TraitedSpec, CommandLineInputSpec, CommandLine,\n File, Directory, traits)\nimport os\n\n\nclass ICA_AROMAInputSpec(CommandLineInputSpec):\n feat_dir = Directory(exists=True, mandatory=True,\n argstr='-feat %s',\n xor=['in_file', 'mat_file', 'fnirt_warp_file', 'motion_parameters'],\n desc='If a feat directory exists and temporal filtering '\n 'has not been run yet, ICA_AROMA can use the files in '\n 'this directory.')\n in_file = File(exists=True, mandatory=True,\n argstr='-i %s', xor=['feat_dir'],\n desc='volume to be denoised')\n out_dir = Directory('out', genfile=True,\n argstr='-o %s',\n desc='output directory')\n mask = File(exists=True, argstr='-m %s', xor=['feat_dir'],\n desc='path/name volume mask')\n dim = traits.Int(argstr='-dim %d',\n desc='Dimensionality reduction when running '\n 'MELODIC (defualt is automatic estimation)')\n TR = traits.Float(argstr='-tr %.3f',\n desc='TR in seconds. If this is not specified '\n 'the TR will be extracted from the '\n 'header of the fMRI nifti file.')\n melodic_dir = Directory(exists=True, argstr='-meldir %s',\n desc='path to MELODIC directory if MELODIC has already been run')\n mat_file = File(exists=True, argstr='-affmat %s', xor=['feat_dir'],\n desc='path/name of the mat-file describing the '\n 'affine registration (e.g. FSL FLIRT) of the '\n 'functional data to structural space (.mat file)')\n fnirt_warp_file = File(exists=True, argstr='-warp %s', xor=['feat_dir'],\n desc='File name of the warp-file describing '\n 'the non-linear registration (e.g. 
FSL FNIRT) '\n 'of the structural data to MNI152 space (.nii.gz)')\n motion_parameters = File(exists=True, mandatory=True,\n argstr='-mc %s', xor=['feat_dir'],\n desc='motion parameters file')\n denoise_type = traits.Enum('nonaggr', 'aggr', 'both', 'no', usedefault=True,\n mandatory=True, argstr='-den %s',\n desc='Type of denoising strategy:\\n'\n '-none: only classification, no denoising\\n'\n '-nonaggr (default): non-aggresssive denoising, i.e. partial component regression\\n'\n '-aggr: aggressive denoising, i.e. full component regression\\n'\n '-both: both aggressive and non-aggressive denoising (two outputs)')\n\nclass ICA_AROMAOutputSpec(TraitedSpec):\n aggr_denoised_file = File(exists=True,\n desc='if generated: aggressively denoised volume')\n nonaggr_denoised_file = File(exists=True,\n desc='if generated: non aggressively denoised volume' )\n out_dir = Directory(exists=True,\n desc='directory contains (in addition to the denoised files): '\n 'melodic.ica + classified_motion_components + '\n 'classification_overview + feature_scores + melodic_ic_mni)')\n\nclass ICA_AROMA(CommandLine):\n \"\"\"\n Interface for the ICA_AROMA.py script.\n\n ICA-AROMA (i.e. 'ICA-based Automatic Removal Of Motion Artifacts') concerns\n a data-driven method to identify and remove motion-related independent\n components from fMRI data. To that end it exploits a small, but robust\n set of theoretically motivated features, preventing the need for classifier\n re-training and therefore providing direct and easy applicability.\n\n See link for further documentation: https://github.com/rhr-pruim/ICA-AROMA\n\n Example\n -------\n\n >>> from nipype.interfaces.fsl import ICA_AROMA\n >>> from nipype.testing import example_data\n >>> AROMA_obj = ICA_AROMA.ICA_AROMA()\n >>> AROMA_obj.inputs.in_file = 'functional.nii'\n >>> AROMA_obj.inputs.mat_file = 'func_to_struct.mat'\n >>> AROMA_obj.inputs.fnirt_warp_file = 'warpfield.nii'\n >>> AROMA_obj.inputs.motion_parameters = 'fsl_mcflirt_movpar.txt'\n >>> AROMA_obj.inputs.mask = 'mask.nii.gz'\n >>> AROMA_obj.inputs.denoise_type = 'both'\n >>> AROMA_obj.inputs.out_dir = 'ICA_testout'\n >>> AROMA_obj.cmdline # doctest: +ALLOW_UNICODE\n 'ICA_AROMA.py -den both -warp warpfield.nii -i functional.nii -m mask.nii.gz -affmat func_to_struct.mat -mc fsl_mcflirt_movpar.txt -o ICA_testout'\n \"\"\"\n _cmd = 'ICA_AROMA.py'\n input_spec = ICA_AROMAInputSpec\n output_spec = ICA_AROMAOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n if isdefined(self.inputs.out_dir):\n outputs['out_dir'] = os.path.abspath(self.inputs.out_dir)\n else:\n outputs['out_dir'] = self._gen_filename('out_dir')\n out_dir = outputs['out_dir']\n \n if self.inputs.denoise_type in ('aggr', 'both'):\n outputs['aggr_denoised_file'] = os.path.join(out_dir, 'denoised_func_data_aggr.nii.gz')\n if self.inputs.denoise_type in ('nonaggr', 'both'):\n outputs['nonaggr_denoised_file'] = os.path.join(out_dir, 'denoised_func_data_nonaggr.nii.gz')\n return outputs\n\n def _gen_filename(self, name):\n if name == 'out_dir':\n return os.getcwd()\n\n \n", "path": "nipype/interfaces/fsl/ICA_AROMA.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"This commandline module provides classes for interfacing with the\n`ICA-AROMA.py<https://github.com/rhr-pruim/ICA-AROMA>`_ command line tool.\n Change directory to provide relative paths for doctests\n >>> import os\n >>> filepath = 
os.path.dirname(os.path.realpath(__file__))\n >>> datadir = os.path.realpath(os.path.join(filepath,\n ... '../../testing/data'))\n >>> os.chdir(datadir)\n\"\"\"\n\nfrom __future__ import print_function, division, unicode_literals, absolute_import\nfrom ..base import (TraitedSpec, CommandLineInputSpec, CommandLine,\n File, Directory, traits, isdefined)\nimport os\n\n\nclass ICA_AROMAInputSpec(CommandLineInputSpec):\n feat_dir = Directory(exists=True, mandatory=True,\n argstr='-feat %s',\n xor=['in_file', 'mat_file', 'fnirt_warp_file', 'motion_parameters'],\n desc='If a feat directory exists and temporal filtering '\n 'has not been run yet, ICA_AROMA can use the files in '\n 'this directory.')\n in_file = File(exists=True, mandatory=True,\n argstr='-i %s', xor=['feat_dir'],\n desc='volume to be denoised')\n out_dir = Directory('out', genfile=True,\n argstr='-o %s',\n desc='output directory')\n mask = File(exists=True, argstr='-m %s', xor=['feat_dir'],\n desc='path/name volume mask')\n dim = traits.Int(argstr='-dim %d',\n desc='Dimensionality reduction when running '\n 'MELODIC (defualt is automatic estimation)')\n TR = traits.Float(argstr='-tr %.3f',\n desc='TR in seconds. If this is not specified '\n 'the TR will be extracted from the '\n 'header of the fMRI nifti file.')\n melodic_dir = Directory(exists=True, argstr='-meldir %s',\n desc='path to MELODIC directory if MELODIC has already been run')\n mat_file = File(exists=True, argstr='-affmat %s', xor=['feat_dir'],\n desc='path/name of the mat-file describing the '\n 'affine registration (e.g. FSL FLIRT) of the '\n 'functional data to structural space (.mat file)')\n fnirt_warp_file = File(exists=True, argstr='-warp %s', xor=['feat_dir'],\n desc='File name of the warp-file describing '\n 'the non-linear registration (e.g. FSL FNIRT) '\n 'of the structural data to MNI152 space (.nii.gz)')\n motion_parameters = File(exists=True, mandatory=True,\n argstr='-mc %s', xor=['feat_dir'],\n desc='motion parameters file')\n denoise_type = traits.Enum('nonaggr', 'aggr', 'both', 'no', usedefault=True,\n mandatory=True, argstr='-den %s',\n desc='Type of denoising strategy:\\n'\n '-none: only classification, no denoising\\n'\n '-nonaggr (default): non-aggresssive denoising, i.e. partial component regression\\n'\n '-aggr: aggressive denoising, i.e. full component regression\\n'\n '-both: both aggressive and non-aggressive denoising (two outputs)')\n\nclass ICA_AROMAOutputSpec(TraitedSpec):\n aggr_denoised_file = File(exists=True,\n desc='if generated: aggressively denoised volume')\n nonaggr_denoised_file = File(exists=True,\n desc='if generated: non aggressively denoised volume' )\n out_dir = Directory(exists=True,\n desc='directory contains (in addition to the denoised files): '\n 'melodic.ica + classified_motion_components + '\n 'classification_overview + feature_scores + melodic_ic_mni)')\n\nclass ICA_AROMA(CommandLine):\n \"\"\"\n Interface for the ICA_AROMA.py script.\n\n ICA-AROMA (i.e. 'ICA-based Automatic Removal Of Motion Artifacts') concerns\n a data-driven method to identify and remove motion-related independent\n components from fMRI data. 
To that end it exploits a small, but robust\n set of theoretically motivated features, preventing the need for classifier\n re-training and therefore providing direct and easy applicability.\n\n See link for further documentation: https://github.com/rhr-pruim/ICA-AROMA\n\n Example\n -------\n\n >>> from nipype.interfaces.fsl import ICA_AROMA\n >>> from nipype.testing import example_data\n >>> AROMA_obj = ICA_AROMA.ICA_AROMA()\n >>> AROMA_obj.inputs.in_file = 'functional.nii'\n >>> AROMA_obj.inputs.mat_file = 'func_to_struct.mat'\n >>> AROMA_obj.inputs.fnirt_warp_file = 'warpfield.nii'\n >>> AROMA_obj.inputs.motion_parameters = 'fsl_mcflirt_movpar.txt'\n >>> AROMA_obj.inputs.mask = 'mask.nii.gz'\n >>> AROMA_obj.inputs.denoise_type = 'both'\n >>> AROMA_obj.inputs.out_dir = 'ICA_testout'\n >>> AROMA_obj.cmdline # doctest: +ALLOW_UNICODE\n 'ICA_AROMA.py -den both -warp warpfield.nii -i functional.nii -m mask.nii.gz -affmat func_to_struct.mat -mc fsl_mcflirt_movpar.txt -o ICA_testout'\n \"\"\"\n _cmd = 'ICA_AROMA.py'\n input_spec = ICA_AROMAInputSpec\n output_spec = ICA_AROMAOutputSpec\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n if isdefined(self.inputs.out_dir):\n outputs['out_dir'] = os.path.abspath(self.inputs.out_dir)\n else:\n outputs['out_dir'] = self._gen_filename('out_dir')\n out_dir = outputs['out_dir']\n\n if self.inputs.denoise_type in ('aggr', 'both'):\n outputs['aggr_denoised_file'] = os.path.join(out_dir, 'denoised_func_data_aggr.nii.gz')\n if self.inputs.denoise_type in ('nonaggr', 'both'):\n outputs['nonaggr_denoised_file'] = os.path.join(out_dir, 'denoised_func_data_nonaggr.nii.gz')\n return outputs\n\n def _gen_filename(self, name):\n if name == 'out_dir':\n return os.getcwd()\n", "path": "nipype/interfaces/fsl/ICA_AROMA.py"}]}
| 2,064 | 270 |
gh_patches_debug_18556
|
rasdani/github-patches
|
git_diff
|
beetbox__beets-2762
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ImageMagick applies maxwidth to longest edge instead of to width
The [current invocation](https://github.com/beetbox/beets/blob/2120cf68c61649c22c14f20d83bd28d758720557/beets/util/artresizer.py#L96-L100) of ImageMagick applies the ```maxwidth``` parameter of ```fetchart``` to an image's longer edge instead of to its width. Possible solution suggested by Adrian [on the forum](https://discourse.beets.io/t/fetchart-taming-cover-art-resolution/206/12?u=dorade).
--- END ISSUE ---
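For illustration, a minimal sketch of the width-only invocation, assuming ImageMagick's `convert` is on the PATH (file names are placeholders). It mirrors the `im_resize` helper shown below but uses the `'{0}x>'` geometry from the fix further down, so only the width is constrained and the aspect ratio is preserved.

```python
import subprocess


def im_resize_width_only(path_in, path_out, maxwidth):
    """Shrink `path_in` to at most `maxwidth` pixels wide, keeping the aspect ratio."""
    # 'WIDTHx>' resizes on the width alone; the trailing '>' means "only shrink",
    # so images already narrower than maxwidth are left untouched.
    subprocess.check_call([
        "convert", path_in,
        "-resize", "{0}x>".format(maxwidth),
        path_out,
    ])


# im_resize_width_only("cover.jpg", "cover_small.jpg", 1200)
```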
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `beets/util/artresizer.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This file is part of beets.
3 # Copyright 2016, Fabrice Laporte
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining
6 # a copy of this software and associated documentation files (the
7 # "Software"), to deal in the Software without restriction, including
8 # without limitation the rights to use, copy, modify, merge, publish,
9 # distribute, sublicense, and/or sell copies of the Software, and to
10 # permit persons to whom the Software is furnished to do so, subject to
11 # the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be
14 # included in all copies or substantial portions of the Software.
15
16 """Abstraction layer to resize images using PIL, ImageMagick, or a
17 public resizing proxy if neither is available.
18 """
19 from __future__ import division, absolute_import, print_function
20
21 import subprocess
22 import os
23 import re
24 from tempfile import NamedTemporaryFile
25 from six.moves.urllib.parse import urlencode
26 from beets import logging
27 from beets import util
28 import six
29
30 # Resizing methods
31 PIL = 1
32 IMAGEMAGICK = 2
33 WEBPROXY = 3
34
35 if util.SNI_SUPPORTED:
36 PROXY_URL = 'https://images.weserv.nl/'
37 else:
38 PROXY_URL = 'http://images.weserv.nl/'
39
40 log = logging.getLogger('beets')
41
42
43 def resize_url(url, maxwidth):
44 """Return a proxied image URL that resizes the original image to
45 maxwidth (preserving aspect ratio).
46 """
47 return '{0}?{1}'.format(PROXY_URL, urlencode({
48 'url': url.replace('http://', ''),
49 'w': maxwidth,
50 }))
51
52
53 def temp_file_for(path):
54 """Return an unused filename with the same extension as the
55 specified path.
56 """
57 ext = os.path.splitext(path)[1]
58 with NamedTemporaryFile(suffix=util.py3_path(ext), delete=False) as f:
59 return util.bytestring_path(f.name)
60
61
62 def pil_resize(maxwidth, path_in, path_out=None):
63 """Resize using Python Imaging Library (PIL). Return the output path
64 of resized image.
65 """
66 path_out = path_out or temp_file_for(path_in)
67 from PIL import Image
68 log.debug(u'artresizer: PIL resizing {0} to {1}',
69 util.displayable_path(path_in), util.displayable_path(path_out))
70
71 try:
72 im = Image.open(util.syspath(path_in))
73 size = maxwidth, maxwidth
74 im.thumbnail(size, Image.ANTIALIAS)
75 im.save(path_out)
76 return path_out
77 except IOError:
78 log.error(u"PIL cannot create thumbnail for '{0}'",
79 util.displayable_path(path_in))
80 return path_in
81
82
83 def im_resize(maxwidth, path_in, path_out=None):
84 """Resize using ImageMagick's ``convert`` tool.
85 Return the output path of resized image.
86 """
87 path_out = path_out or temp_file_for(path_in)
88 log.debug(u'artresizer: ImageMagick resizing {0} to {1}',
89 util.displayable_path(path_in), util.displayable_path(path_out))
90
91 # "-resize widthxheight>" shrinks images with dimension(s) larger
92 # than the corresponding width and/or height dimension(s). The >
93 # "only shrink" flag is prefixed by ^ escape char for Windows
94 # compatibility.
95 try:
96 util.command_output([
97 'convert', util.syspath(path_in, prefix=False),
98 '-resize', '{0}x^>'.format(maxwidth),
99 util.syspath(path_out, prefix=False),
100 ])
101 except subprocess.CalledProcessError:
102 log.warning(u'artresizer: IM convert failed for {0}',
103 util.displayable_path(path_in))
104 return path_in
105 return path_out
106
107
108 BACKEND_FUNCS = {
109 PIL: pil_resize,
110 IMAGEMAGICK: im_resize,
111 }
112
113
114 def pil_getsize(path_in):
115 from PIL import Image
116 try:
117 im = Image.open(util.syspath(path_in))
118 return im.size
119 except IOError as exc:
120 log.error(u"PIL could not read file {}: {}",
121 util.displayable_path(path_in), exc)
122
123
124 def im_getsize(path_in):
125 cmd = ['identify', '-format', '%w %h',
126 util.syspath(path_in, prefix=False)]
127 try:
128 out = util.command_output(cmd)
129 except subprocess.CalledProcessError as exc:
130 log.warning(u'ImageMagick size query failed')
131 log.debug(
132 u'`convert` exited with (status {}) when '
133 u'getting size with command {}:\n{}',
134 exc.returncode, cmd, exc.output.strip()
135 )
136 return
137 try:
138 return tuple(map(int, out.split(b' ')))
139 except IndexError:
140 log.warning(u'Could not understand IM output: {0!r}', out)
141
142
143 BACKEND_GET_SIZE = {
144 PIL: pil_getsize,
145 IMAGEMAGICK: im_getsize,
146 }
147
148
149 class Shareable(type):
150 """A pseudo-singleton metaclass that allows both shared and
151 non-shared instances. The ``MyClass.shared`` property holds a
152 lazily-created shared instance of ``MyClass`` while calling
153 ``MyClass()`` to construct a new object works as usual.
154 """
155 def __init__(self, name, bases, dict):
156 super(Shareable, self).__init__(name, bases, dict)
157 self._instance = None
158
159 @property
160 def shared(self):
161 if self._instance is None:
162 self._instance = self()
163 return self._instance
164
165
166 class ArtResizer(six.with_metaclass(Shareable, object)):
167 """A singleton class that performs image resizes.
168 """
169
170 def __init__(self):
171 """Create a resizer object with an inferred method.
172 """
173 self.method = self._check_method()
174 log.debug(u"artresizer: method is {0}", self.method)
175 self.can_compare = self._can_compare()
176
177 def resize(self, maxwidth, path_in, path_out=None):
178 """Manipulate an image file according to the method, returning a
179 new path. For PIL or IMAGEMAGIC methods, resizes the image to a
180 temporary file. For WEBPROXY, returns `path_in` unmodified.
181 """
182 if self.local:
183 func = BACKEND_FUNCS[self.method[0]]
184 return func(maxwidth, path_in, path_out)
185 else:
186 return path_in
187
188 def proxy_url(self, maxwidth, url):
189 """Modifies an image URL according the method, returning a new
190 URL. For WEBPROXY, a URL on the proxy server is returned.
191 Otherwise, the URL is returned unmodified.
192 """
193 if self.local:
194 return url
195 else:
196 return resize_url(url, maxwidth)
197
198 @property
199 def local(self):
200 """A boolean indicating whether the resizing method is performed
201 locally (i.e., PIL or ImageMagick).
202 """
203 return self.method[0] in BACKEND_FUNCS
204
205 def get_size(self, path_in):
206 """Return the size of an image file as an int couple (width, height)
207 in pixels.
208
209 Only available locally
210 """
211 if self.local:
212 func = BACKEND_GET_SIZE[self.method[0]]
213 return func(path_in)
214
215 def _can_compare(self):
216 """A boolean indicating whether image comparison is available"""
217
218 return self.method[0] == IMAGEMAGICK and self.method[1] > (6, 8, 7)
219
220 @staticmethod
221 def _check_method():
222 """Return a tuple indicating an available method and its version."""
223 version = get_im_version()
224 if version:
225 return IMAGEMAGICK, version
226
227 version = get_pil_version()
228 if version:
229 return PIL, version
230
231 return WEBPROXY, (0)
232
233
234 def get_im_version():
235 """Return Image Magick version or None if it is unavailable
236 Try invoking ImageMagick's "convert".
237 """
238 try:
239 out = util.command_output(['convert', '--version'])
240
241 if b'imagemagick' in out.lower():
242 pattern = br".+ (\d+)\.(\d+)\.(\d+).*"
243 match = re.search(pattern, out)
244 if match:
245 return (int(match.group(1)),
246 int(match.group(2)),
247 int(match.group(3)))
248 return (0,)
249
250 except (subprocess.CalledProcessError, OSError) as exc:
251 log.debug(u'ImageMagick check `convert --version` failed: {}', exc)
252 return None
253
254
255 def get_pil_version():
256 """Return Image Magick version or None if it is unavailable
257 Try importing PIL."""
258 try:
259 __import__('PIL', fromlist=[str('Image')])
260 return (0,)
261 except ImportError:
262 return None
263
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/beets/util/artresizer.py b/beets/util/artresizer.py
--- a/beets/util/artresizer.py
+++ b/beets/util/artresizer.py
@@ -88,14 +88,13 @@
log.debug(u'artresizer: ImageMagick resizing {0} to {1}',
util.displayable_path(path_in), util.displayable_path(path_out))
- # "-resize widthxheight>" shrinks images with dimension(s) larger
- # than the corresponding width and/or height dimension(s). The >
- # "only shrink" flag is prefixed by ^ escape char for Windows
- # compatibility.
+ # "-resize WIDTHx>" shrinks images with the width larger
+ # than the given width while maintaining the aspect ratio
+ # with regards to the height.
try:
util.command_output([
'convert', util.syspath(path_in, prefix=False),
- '-resize', '{0}x^>'.format(maxwidth),
+ '-resize', '{0}x>'.format(maxwidth),
util.syspath(path_out, prefix=False),
])
except subprocess.CalledProcessError:
|
{"golden_diff": "diff --git a/beets/util/artresizer.py b/beets/util/artresizer.py\n--- a/beets/util/artresizer.py\n+++ b/beets/util/artresizer.py\n@@ -88,14 +88,13 @@\n log.debug(u'artresizer: ImageMagick resizing {0} to {1}',\n util.displayable_path(path_in), util.displayable_path(path_out))\n \n- # \"-resize widthxheight>\" shrinks images with dimension(s) larger\n- # than the corresponding width and/or height dimension(s). The >\n- # \"only shrink\" flag is prefixed by ^ escape char for Windows\n- # compatibility.\n+ # \"-resize WIDTHx>\" shrinks images with the width larger\n+ # than the given width while maintaining the aspect ratio\n+ # with regards to the height.\n try:\n util.command_output([\n 'convert', util.syspath(path_in, prefix=False),\n- '-resize', '{0}x^>'.format(maxwidth),\n+ '-resize', '{0}x>'.format(maxwidth),\n util.syspath(path_out, prefix=False),\n ])\n except subprocess.CalledProcessError:\n", "issue": "ImageMagick applies maxwidth to longest edge instead of to width\nThe [current invocation](https://github.com/beetbox/beets/blob/2120cf68c61649c22c14f20d83bd28d758720557/beets/util/artresizer.py#L96-L100) of ImageMagick applies the ```maxwidth``` parameter of ```fetchart``` to an image's longer edge instead of to its width. Possible solution suggested by Adrian [on the forum](https://discourse.beets.io/t/fetchart-taming-cover-art-resolution/206/12?u=dorade).\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Fabrice Laporte\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Abstraction layer to resize images using PIL, ImageMagick, or a\npublic resizing proxy if neither is available.\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport subprocess\nimport os\nimport re\nfrom tempfile import NamedTemporaryFile\nfrom six.moves.urllib.parse import urlencode\nfrom beets import logging\nfrom beets import util\nimport six\n\n# Resizing methods\nPIL = 1\nIMAGEMAGICK = 2\nWEBPROXY = 3\n\nif util.SNI_SUPPORTED:\n PROXY_URL = 'https://images.weserv.nl/'\nelse:\n PROXY_URL = 'http://images.weserv.nl/'\n\nlog = logging.getLogger('beets')\n\n\ndef resize_url(url, maxwidth):\n \"\"\"Return a proxied image URL that resizes the original image to\n maxwidth (preserving aspect ratio).\n \"\"\"\n return '{0}?{1}'.format(PROXY_URL, urlencode({\n 'url': url.replace('http://', ''),\n 'w': maxwidth,\n }))\n\n\ndef temp_file_for(path):\n \"\"\"Return an unused filename with the same extension as the\n specified path.\n \"\"\"\n ext = os.path.splitext(path)[1]\n with NamedTemporaryFile(suffix=util.py3_path(ext), delete=False) as f:\n return util.bytestring_path(f.name)\n\n\ndef pil_resize(maxwidth, path_in, path_out=None):\n \"\"\"Resize using Python Imaging Library (PIL). 
Return the output path\n of resized image.\n \"\"\"\n path_out = path_out or temp_file_for(path_in)\n from PIL import Image\n log.debug(u'artresizer: PIL resizing {0} to {1}',\n util.displayable_path(path_in), util.displayable_path(path_out))\n\n try:\n im = Image.open(util.syspath(path_in))\n size = maxwidth, maxwidth\n im.thumbnail(size, Image.ANTIALIAS)\n im.save(path_out)\n return path_out\n except IOError:\n log.error(u\"PIL cannot create thumbnail for '{0}'\",\n util.displayable_path(path_in))\n return path_in\n\n\ndef im_resize(maxwidth, path_in, path_out=None):\n \"\"\"Resize using ImageMagick's ``convert`` tool.\n Return the output path of resized image.\n \"\"\"\n path_out = path_out or temp_file_for(path_in)\n log.debug(u'artresizer: ImageMagick resizing {0} to {1}',\n util.displayable_path(path_in), util.displayable_path(path_out))\n\n # \"-resize widthxheight>\" shrinks images with dimension(s) larger\n # than the corresponding width and/or height dimension(s). The >\n # \"only shrink\" flag is prefixed by ^ escape char for Windows\n # compatibility.\n try:\n util.command_output([\n 'convert', util.syspath(path_in, prefix=False),\n '-resize', '{0}x^>'.format(maxwidth),\n util.syspath(path_out, prefix=False),\n ])\n except subprocess.CalledProcessError:\n log.warning(u'artresizer: IM convert failed for {0}',\n util.displayable_path(path_in))\n return path_in\n return path_out\n\n\nBACKEND_FUNCS = {\n PIL: pil_resize,\n IMAGEMAGICK: im_resize,\n}\n\n\ndef pil_getsize(path_in):\n from PIL import Image\n try:\n im = Image.open(util.syspath(path_in))\n return im.size\n except IOError as exc:\n log.error(u\"PIL could not read file {}: {}\",\n util.displayable_path(path_in), exc)\n\n\ndef im_getsize(path_in):\n cmd = ['identify', '-format', '%w %h',\n util.syspath(path_in, prefix=False)]\n try:\n out = util.command_output(cmd)\n except subprocess.CalledProcessError as exc:\n log.warning(u'ImageMagick size query failed')\n log.debug(\n u'`convert` exited with (status {}) when '\n u'getting size with command {}:\\n{}',\n exc.returncode, cmd, exc.output.strip()\n )\n return\n try:\n return tuple(map(int, out.split(b' ')))\n except IndexError:\n log.warning(u'Could not understand IM output: {0!r}', out)\n\n\nBACKEND_GET_SIZE = {\n PIL: pil_getsize,\n IMAGEMAGICK: im_getsize,\n}\n\n\nclass Shareable(type):\n \"\"\"A pseudo-singleton metaclass that allows both shared and\n non-shared instances. The ``MyClass.shared`` property holds a\n lazily-created shared instance of ``MyClass`` while calling\n ``MyClass()`` to construct a new object works as usual.\n \"\"\"\n def __init__(self, name, bases, dict):\n super(Shareable, self).__init__(name, bases, dict)\n self._instance = None\n\n @property\n def shared(self):\n if self._instance is None:\n self._instance = self()\n return self._instance\n\n\nclass ArtResizer(six.with_metaclass(Shareable, object)):\n \"\"\"A singleton class that performs image resizes.\n \"\"\"\n\n def __init__(self):\n \"\"\"Create a resizer object with an inferred method.\n \"\"\"\n self.method = self._check_method()\n log.debug(u\"artresizer: method is {0}\", self.method)\n self.can_compare = self._can_compare()\n\n def resize(self, maxwidth, path_in, path_out=None):\n \"\"\"Manipulate an image file according to the method, returning a\n new path. For PIL or IMAGEMAGIC methods, resizes the image to a\n temporary file. 
For WEBPROXY, returns `path_in` unmodified.\n \"\"\"\n if self.local:\n func = BACKEND_FUNCS[self.method[0]]\n return func(maxwidth, path_in, path_out)\n else:\n return path_in\n\n def proxy_url(self, maxwidth, url):\n \"\"\"Modifies an image URL according the method, returning a new\n URL. For WEBPROXY, a URL on the proxy server is returned.\n Otherwise, the URL is returned unmodified.\n \"\"\"\n if self.local:\n return url\n else:\n return resize_url(url, maxwidth)\n\n @property\n def local(self):\n \"\"\"A boolean indicating whether the resizing method is performed\n locally (i.e., PIL or ImageMagick).\n \"\"\"\n return self.method[0] in BACKEND_FUNCS\n\n def get_size(self, path_in):\n \"\"\"Return the size of an image file as an int couple (width, height)\n in pixels.\n\n Only available locally\n \"\"\"\n if self.local:\n func = BACKEND_GET_SIZE[self.method[0]]\n return func(path_in)\n\n def _can_compare(self):\n \"\"\"A boolean indicating whether image comparison is available\"\"\"\n\n return self.method[0] == IMAGEMAGICK and self.method[1] > (6, 8, 7)\n\n @staticmethod\n def _check_method():\n \"\"\"Return a tuple indicating an available method and its version.\"\"\"\n version = get_im_version()\n if version:\n return IMAGEMAGICK, version\n\n version = get_pil_version()\n if version:\n return PIL, version\n\n return WEBPROXY, (0)\n\n\ndef get_im_version():\n \"\"\"Return Image Magick version or None if it is unavailable\n Try invoking ImageMagick's \"convert\".\n \"\"\"\n try:\n out = util.command_output(['convert', '--version'])\n\n if b'imagemagick' in out.lower():\n pattern = br\".+ (\\d+)\\.(\\d+)\\.(\\d+).*\"\n match = re.search(pattern, out)\n if match:\n return (int(match.group(1)),\n int(match.group(2)),\n int(match.group(3)))\n return (0,)\n\n except (subprocess.CalledProcessError, OSError) as exc:\n log.debug(u'ImageMagick check `convert --version` failed: {}', exc)\n return None\n\n\ndef get_pil_version():\n \"\"\"Return Image Magick version or None if it is unavailable\n Try importing PIL.\"\"\"\n try:\n __import__('PIL', fromlist=[str('Image')])\n return (0,)\n except ImportError:\n return None\n", "path": "beets/util/artresizer.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Fabrice Laporte\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Abstraction layer to resize images using PIL, ImageMagick, or a\npublic resizing proxy if neither is available.\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport subprocess\nimport os\nimport re\nfrom tempfile import NamedTemporaryFile\nfrom six.moves.urllib.parse import urlencode\nfrom beets import logging\nfrom beets import util\nimport six\n\n# Resizing methods\nPIL = 1\nIMAGEMAGICK = 2\nWEBPROXY = 3\n\nif util.SNI_SUPPORTED:\n PROXY_URL = 'https://images.weserv.nl/'\nelse:\n PROXY_URL = 'http://images.weserv.nl/'\n\nlog = logging.getLogger('beets')\n\n\ndef resize_url(url, maxwidth):\n 
\"\"\"Return a proxied image URL that resizes the original image to\n maxwidth (preserving aspect ratio).\n \"\"\"\n return '{0}?{1}'.format(PROXY_URL, urlencode({\n 'url': url.replace('http://', ''),\n 'w': maxwidth,\n }))\n\n\ndef temp_file_for(path):\n \"\"\"Return an unused filename with the same extension as the\n specified path.\n \"\"\"\n ext = os.path.splitext(path)[1]\n with NamedTemporaryFile(suffix=util.py3_path(ext), delete=False) as f:\n return util.bytestring_path(f.name)\n\n\ndef pil_resize(maxwidth, path_in, path_out=None):\n \"\"\"Resize using Python Imaging Library (PIL). Return the output path\n of resized image.\n \"\"\"\n path_out = path_out or temp_file_for(path_in)\n from PIL import Image\n log.debug(u'artresizer: PIL resizing {0} to {1}',\n util.displayable_path(path_in), util.displayable_path(path_out))\n\n try:\n im = Image.open(util.syspath(path_in))\n size = maxwidth, maxwidth\n im.thumbnail(size, Image.ANTIALIAS)\n im.save(path_out)\n return path_out\n except IOError:\n log.error(u\"PIL cannot create thumbnail for '{0}'\",\n util.displayable_path(path_in))\n return path_in\n\n\ndef im_resize(maxwidth, path_in, path_out=None):\n \"\"\"Resize using ImageMagick's ``convert`` tool.\n Return the output path of resized image.\n \"\"\"\n path_out = path_out or temp_file_for(path_in)\n log.debug(u'artresizer: ImageMagick resizing {0} to {1}',\n util.displayable_path(path_in), util.displayable_path(path_out))\n\n # \"-resize WIDTHx>\" shrinks images with the width larger\n # than the given width while maintaining the aspect ratio\n # with regards to the height.\n try:\n util.command_output([\n 'convert', util.syspath(path_in, prefix=False),\n '-resize', '{0}x>'.format(maxwidth),\n util.syspath(path_out, prefix=False),\n ])\n except subprocess.CalledProcessError:\n log.warning(u'artresizer: IM convert failed for {0}',\n util.displayable_path(path_in))\n return path_in\n return path_out\n\n\nBACKEND_FUNCS = {\n PIL: pil_resize,\n IMAGEMAGICK: im_resize,\n}\n\n\ndef pil_getsize(path_in):\n from PIL import Image\n try:\n im = Image.open(util.syspath(path_in))\n return im.size\n except IOError as exc:\n log.error(u\"PIL could not read file {}: {}\",\n util.displayable_path(path_in), exc)\n\n\ndef im_getsize(path_in):\n cmd = ['identify', '-format', '%w %h',\n util.syspath(path_in, prefix=False)]\n try:\n out = util.command_output(cmd)\n except subprocess.CalledProcessError as exc:\n log.warning(u'ImageMagick size query failed')\n log.debug(\n u'`convert` exited with (status {}) when '\n u'getting size with command {}:\\n{}',\n exc.returncode, cmd, exc.output.strip()\n )\n return\n try:\n return tuple(map(int, out.split(b' ')))\n except IndexError:\n log.warning(u'Could not understand IM output: {0!r}', out)\n\n\nBACKEND_GET_SIZE = {\n PIL: pil_getsize,\n IMAGEMAGICK: im_getsize,\n}\n\n\nclass Shareable(type):\n \"\"\"A pseudo-singleton metaclass that allows both shared and\n non-shared instances. 
The ``MyClass.shared`` property holds a\n lazily-created shared instance of ``MyClass`` while calling\n ``MyClass()`` to construct a new object works as usual.\n \"\"\"\n def __init__(self, name, bases, dict):\n super(Shareable, self).__init__(name, bases, dict)\n self._instance = None\n\n @property\n def shared(self):\n if self._instance is None:\n self._instance = self()\n return self._instance\n\n\nclass ArtResizer(six.with_metaclass(Shareable, object)):\n \"\"\"A singleton class that performs image resizes.\n \"\"\"\n\n def __init__(self):\n \"\"\"Create a resizer object with an inferred method.\n \"\"\"\n self.method = self._check_method()\n log.debug(u\"artresizer: method is {0}\", self.method)\n self.can_compare = self._can_compare()\n\n def resize(self, maxwidth, path_in, path_out=None):\n \"\"\"Manipulate an image file according to the method, returning a\n new path. For PIL or IMAGEMAGIC methods, resizes the image to a\n temporary file. For WEBPROXY, returns `path_in` unmodified.\n \"\"\"\n if self.local:\n func = BACKEND_FUNCS[self.method[0]]\n return func(maxwidth, path_in, path_out)\n else:\n return path_in\n\n def proxy_url(self, maxwidth, url):\n \"\"\"Modifies an image URL according the method, returning a new\n URL. For WEBPROXY, a URL on the proxy server is returned.\n Otherwise, the URL is returned unmodified.\n \"\"\"\n if self.local:\n return url\n else:\n return resize_url(url, maxwidth)\n\n @property\n def local(self):\n \"\"\"A boolean indicating whether the resizing method is performed\n locally (i.e., PIL or ImageMagick).\n \"\"\"\n return self.method[0] in BACKEND_FUNCS\n\n def get_size(self, path_in):\n \"\"\"Return the size of an image file as an int couple (width, height)\n in pixels.\n\n Only available locally\n \"\"\"\n if self.local:\n func = BACKEND_GET_SIZE[self.method[0]]\n return func(path_in)\n\n def _can_compare(self):\n \"\"\"A boolean indicating whether image comparison is available\"\"\"\n\n return self.method[0] == IMAGEMAGICK and self.method[1] > (6, 8, 7)\n\n @staticmethod\n def _check_method():\n \"\"\"Return a tuple indicating an available method and its version.\"\"\"\n version = get_im_version()\n if version:\n return IMAGEMAGICK, version\n\n version = get_pil_version()\n if version:\n return PIL, version\n\n return WEBPROXY, (0)\n\n\ndef get_im_version():\n \"\"\"Return Image Magick version or None if it is unavailable\n Try invoking ImageMagick's \"convert\".\n \"\"\"\n try:\n out = util.command_output(['convert', '--version'])\n\n if b'imagemagick' in out.lower():\n pattern = br\".+ (\\d+)\\.(\\d+)\\.(\\d+).*\"\n match = re.search(pattern, out)\n if match:\n return (int(match.group(1)),\n int(match.group(2)),\n int(match.group(3)))\n return (0,)\n\n except (subprocess.CalledProcessError, OSError) as exc:\n log.debug(u'ImageMagick check `convert --version` failed: {}', exc)\n return None\n\n\ndef get_pil_version():\n \"\"\"Return Image Magick version or None if it is unavailable\n Try importing PIL.\"\"\"\n try:\n __import__('PIL', fromlist=[str('Image')])\n return (0,)\n except ImportError:\n return None\n", "path": "beets/util/artresizer.py"}]}
| 3,055 | 253 |
gh_patches_debug_9737
|
rasdani/github-patches
|
git_diff
|
unionai-oss__pandera-1190
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove incorrect(?) warning for `register_check_method()` in docs
#### Location of the documentation
https://pandera.readthedocs.io/en/latest/reference/generated/pandera.extensions.html
#### Documentation problem
It's documented for `register_check_method()` that
> **Warning**
> This is the legacy method for registering check methods. Use the `register_check()` decorator instead.
I can't see any reference to `register_check()` in the docs or the repo, so I assume this is an outdated warning and `register_check_method()` is in fact the de facto function for this kind of stuff.
Might I be missing something? Maybe this warning is supposed to refer to another function.
#### Suggested fix for documentation
Remove warning as it's seemingly not warranted (at least in this moment).
--- END ISSUE ---
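For context, a minimal sketch of how `register_check_method()` is typically used, assuming a pandera version that exposes the `pandera.extensions` module (the check name `is_between`, the statistics, and the column are illustrative). The registered check becomes available as `pa.Check.is_between(...)` through `Check.REGISTERED_CUSTOM_CHECKS`, as the implementation below shows.

```python
import pandas as pd
import pandera as pa
import pandera.extensions as extensions


@extensions.register_check_method(statistics=["min_value", "max_value"])
def is_between(pandas_obj, *, min_value, max_value):
    # Vectorized check: returns a boolean Series/DataFrame, True where the value passes.
    return (min_value <= pandas_obj) & (pandas_obj <= max_value)


schema = pa.DataFrameSchema(
    {"col": pa.Column(int, pa.Check.is_between(min_value=1, max_value=10))}
)
print(schema.validate(pd.DataFrame({"col": [1, 5, 10]})))
```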
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandera/api/extensions.py`
Content:
```
1 """Extensions module."""
2
3 import inspect
4 import warnings
5 from enum import Enum
6 from functools import partial, wraps
7 from inspect import signature
8 from typing import Callable, List, Optional, Tuple, Type, Union
9
10 import pandas as pd
11 import typing_inspect
12
13 from pandera.api.checks import Check
14 from pandera.api.hypotheses import Hypothesis
15 from pandera.strategies.base_strategies import STRATEGY_DISPATCHER
16
17
18 class BuiltinCheckRegistrationError(Exception):
19 """
20 Exception raised when registering a built-in check implementation but the
21 default check function implementation hasn't been registered with
22 :py:meth:`~flytekit.core.base.BaseCheck.register_builtin_check_fn`.
23 """
24
25
26 # pylint: disable=too-many-locals
27 def register_builtin_check(
28 fn=None,
29 strategy: Optional[Callable] = None,
30 _check_cls: Type = Check,
31 **outer_kwargs,
32 ):
33 """Register a check method to the Check namespace.
34
35 This is the primary way for extending the Check api to define additional
36 built-in checks.
37 """
38
39 if fn is None:
40 return partial(
41 register_builtin_check,
42 strategy=strategy,
43 _check_cls=_check_cls,
44 **outer_kwargs,
45 )
46
47 name = fn.__name__
48
49 # see if the check function is already registered
50 check_fn = _check_cls.CHECK_FUNCTION_REGISTRY.get(name)
51 fn_sig = signature(fn)
52
53 # register the check strategy for this particular check, identified
54 # by the check `name`, and the data type of the check function. This
55 # supports Union types. Also assume that the data type of the data
56 # object to validate is the first argument.
57 data_type = [*fn_sig.parameters.values()][0].annotation
58
59 if typing_inspect.get_origin(data_type) is Tuple:
60 data_type, *_ = typing_inspect.get_args(data_type)
61
62 if typing_inspect.get_origin(data_type) is Union:
63 data_types = typing_inspect.get_args(data_type)
64 else:
65 data_types = (data_type,)
66
67 if strategy is not None:
68 for dt in data_types:
69 STRATEGY_DISPATCHER[(name, dt)] = strategy
70
71 if check_fn is None: # pragma: no cover
72 raise BuiltinCheckRegistrationError(
73 f"Check '{name}' doesn't have a base check implementation. "
74 f"You need to create a stub method in the {_check_cls} class and "
75 "then register a base check function implementation with the "
76 f"{_check_cls}.register_builtin_check_fn method.\n"
77 "See the `pandera.api.base.builtin_checks` and "
78 "`pandera.backends.pandas.builtin_checks` modules as an example."
79 )
80
81 check_fn.register(fn) # type: ignore
82
83 return fn
84
85
86 def register_builtin_hypothesis(**kwargs):
87 """Register a new hypothesis."""
88 return partial(
89 register_builtin_check,
90 _check_cls=Hypothesis,
91 **kwargs,
92 )
93
94
95 # --------------------------------
96 # CUSTOM CHECK REGISTRATION METHOD
97 # --------------------------------
98 #
99 # The `register_check_method` decorator is the legacy method for registering
100 # custom checks and will slated for deprecation after merging the core
101 # internals overhaul.
102
103
104 class CheckType(Enum):
105 """Check types for registered check methods."""
106
107 VECTORIZED = 1 #: Check applied to a Series or DataFrame
108 ELEMENT_WISE = 2 #: Check applied to an element of a Series or DataFrame
109 GROUPBY = 3 #: Check applied to dictionary of Series or DataFrames.
110
111
112 def register_check_statistics(statistics_args):
113 """Decorator to set statistics based on Check method."""
114
115 def register_check_statistics_decorator(class_method):
116 @wraps(class_method)
117 def _wrapper(cls, *args, **kwargs):
118 args = list(args)
119 arg_names = inspect.getfullargspec(class_method).args[1:]
120 if not arg_names:
121 arg_names = statistics_args
122 args_dict = {**dict(zip(arg_names, args)), **kwargs}
123 check = class_method(cls, *args, **kwargs)
124 check.statistics = {
125 stat: args_dict.get(stat) for stat in statistics_args
126 }
127 check.statistics_args = statistics_args
128 return check
129
130 return _wrapper
131
132 return register_check_statistics_decorator
133
134
135 def register_check_method(
136 check_fn=None,
137 *,
138 statistics: Optional[List[str]] = None,
139 supported_types: Union[type, Tuple, List] = (pd.DataFrame, pd.Series),
140 check_type: Union[CheckType, str] = "vectorized",
141 strategy=None,
142 ):
143 """Registers a function as a :class:`~pandera.api.checks.Check` method.
144
145 See the :ref:`user guide<extensions>` for more details.
146
147 .. warning::
148
149 This is the legacy method for registering check methods. Use the
150 :py:func:`~pandera.api.extensions.register_check` decorator instead.
151
152 :param check_fn: check function to register. The function should take one
153 positional argument for the object to validate and additional
154 keyword-only arguments for the check statistics.
155 :param statistics: list of keyword-only arguments in the ``check_fn``,
156 which serve as the statistics needed to serialize/de-serialize the
157 check and generate data if a ``strategy`` function is provided.
158 :param supported_types: the pandas type(s) supported by the check function.
159 Valid values are ``pd.DataFrame``, ``pd.Series``, or a list/tuple of
160 ``(pa.DataFrame, pa.Series)`` if both types are supported.
161 :param check_type: the expected input of the check function. Valid values
162 are :class:`~pandera.extensions.CheckType` enums or
163 ``{"vectorized", "element_wise", "groupby"}``. The input signature of
164 ``check_fn`` is determined by this argument:
165
166 - if ``vectorized``, the first positional argument of ``check_fn``
167 should be one of the ``supported_types``.
168 - if ``element_wise``, the first positional argument of ``check_fn``
169 should be a single scalar element in the pandas Series or DataFrame.
170 - if ``groupby``, the first positional argument of ``check_fn`` should
171 be a dictionary mapping group names to subsets of the Series or
172 DataFrame.
173
174 :param strategy: data-generation strategy associated with the check
175 function.
176 :return: register check function wrapper.
177 """
178
179 # pylint: disable=import-outside-toplevel
180 from pandera.strategies.pandas_strategies import register_check_strategy
181
182 if statistics is None:
183 statistics = []
184
185 if isinstance(check_type, str):
186 check_type = CheckType[check_type.upper()]
187
188 msg = (
189 "{} is not a valid input type for check_fn. You must specify one of "
190 "pandas.DataFrame, pandas.Series, or a tuple of both."
191 )
192 if isinstance(supported_types, list):
193 supported_types = tuple(supported_types)
194 elif not isinstance(supported_types, tuple):
195 supported_types = (supported_types,)
196
197 for supported_type in supported_types: # type: ignore
198 if supported_type not in {pd.DataFrame, pd.Series}:
199 raise TypeError(msg.format(supported_type))
200
201 if check_type is CheckType.ELEMENT_WISE and set(supported_types) != {
202 pd.DataFrame,
203 pd.Series,
204 }: # type: ignore
205 raise ValueError(
206 "Element-wise checks should support DataFrame and Series "
207 "validation. Use the default setting for the 'supported_types' "
208 "argument."
209 )
210
211 if check_fn is None:
212 return partial(
213 register_check_method,
214 statistics=statistics,
215 supported_types=supported_types,
216 check_type=check_type,
217 strategy=strategy,
218 )
219 else:
220 sig = signature(check_fn)
221 for statistic in statistics:
222 if statistic not in sig.parameters:
223 raise TypeError(
224 f"statistic '{statistic}' is not part of "
225 f"{check_fn.__name__}'s signature."
226 )
227
228 def register_check_wrapper(check_fn: Callable):
229 """Register a function as a :class:`~pandera.api.checks.Check` method."""
230
231 if hasattr(Check, check_fn.__name__):
232 raise ValueError(
233 f"method with name '{check_fn.__name__}' already defined. "
234 "Check methods must have a unique method name."
235 )
236
237 @wraps(check_fn)
238 def check_fn_wrapper(validate_obj, **kwargs):
239 """Wrapper for check_fn to validate inputs."""
240 return check_fn(validate_obj, **kwargs)
241
242 def validate_check_kwargs(check_kwargs):
243 msg = (
244 f"'{check_fn.__name__} has check_type={check_type}. "
245 "Providing the following arguments will have no effect: "
246 "{}. Remove these arguments to avoid this warning."
247 )
248
249 no_effect_args = {
250 CheckType.ELEMENT_WISE: ["element_wise", "groupby", "groups"],
251 CheckType.VECTORIZED: ["element_wise", "groupby", "groups"],
252 CheckType.GROUPBY: ["element_wise"],
253 }[check_type]
254
255 if any(arg in check_kwargs for arg in no_effect_args):
256 warnings.warn(msg.format(no_effect_args))
257 for arg in no_effect_args:
258 check_kwargs.pop(arg, None)
259
260 if check_type is CheckType.ELEMENT_WISE:
261 check_kwargs["element_wise"] = True
262
263 return check_kwargs
264
265 @register_check_statistics(statistics)
266 def check_method(cls, *args, **kwargs):
267 """Wrapper function that serves as the Check method."""
268 stats, check_kwargs = {}, {}
269
270 if args:
271 stats = dict(zip(statistics, args))
272
273 for k, v in kwargs.items():
274 if k in statistics:
275 stats[k] = v
276 else:
277 check_kwargs[k] = v
278
279 return cls(
280 partial(check_fn_wrapper, **stats),
281 name=check_fn.__name__,
282 **validate_check_kwargs(check_kwargs),
283 )
284
285 if strategy is not None:
286 check_method = register_check_strategy(strategy)(check_method)
287
288 Check.REGISTERED_CUSTOM_CHECKS[check_fn.__name__] = partial(
289 check_method, Check
290 )
291
292 return register_check_wrapper(check_fn)
293
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pandera/api/extensions.py b/pandera/api/extensions.py
--- a/pandera/api/extensions.py
+++ b/pandera/api/extensions.py
@@ -144,11 +144,6 @@
See the :ref:`user guide<extensions>` for more details.
- .. warning::
-
- This is the legacy method for registering check methods. Use the
- :py:func:`~pandera.api.extensions.register_check` decorator instead.
-
:param check_fn: check function to register. The function should take one
positional argument for the object to validate and additional
keyword-only arguments for the check statistics.
|
{"golden_diff": "diff --git a/pandera/api/extensions.py b/pandera/api/extensions.py\n--- a/pandera/api/extensions.py\n+++ b/pandera/api/extensions.py\n@@ -144,11 +144,6 @@\n \n See the :ref:`user guide<extensions>` for more details.\n \n- .. warning::\n-\n- This is the legacy method for registering check methods. Use the\n- :py:func:`~pandera.api.extensions.register_check` decorator instead.\n-\n :param check_fn: check function to register. The function should take one\n positional argument for the object to validate and additional\n keyword-only arguments for the check statistics.\n", "issue": "Remove incorrect(?) warning for `register_check_method()` in docs\n#### Location of the documentation\r\n\r\nhttps://pandera.readthedocs.io/en/latest/reference/generated/pandera.extensions.html\r\n\r\n#### Documentation problem\r\n\r\nIt's documented for `register_check_method()` that\r\n\r\n> **Warning**\r\n> This is the legacy method for registering check methods. Use the `register_check()` decorator instead.\r\n\r\nI can't see any reference to `register_check()` in the docs or the repo, so I assume this is an out-dated warning and `register_check_method()` is infact the defacto function for this kind of stuff. \r\n\r\nMight I be missing something? Maybe this warning is supposed to refer to another function.\r\n\r\n#### Suggested fix for documentation\r\n\r\nRemove warning as it's seemingly not warranted (at least in this moment).\n", "before_files": [{"content": "\"\"\"Extensions module.\"\"\"\n\nimport inspect\nimport warnings\nfrom enum import Enum\nfrom functools import partial, wraps\nfrom inspect import signature\nfrom typing import Callable, List, Optional, Tuple, Type, Union\n\nimport pandas as pd\nimport typing_inspect\n\nfrom pandera.api.checks import Check\nfrom pandera.api.hypotheses import Hypothesis\nfrom pandera.strategies.base_strategies import STRATEGY_DISPATCHER\n\n\nclass BuiltinCheckRegistrationError(Exception):\n \"\"\"\n Exception raised when registering a built-in check implementation but the\n default check function implementation hasn't been registered with\n :py:meth:`~flytekit.core.base.BaseCheck.register_builtin_check_fn`.\n \"\"\"\n\n\n# pylint: disable=too-many-locals\ndef register_builtin_check(\n fn=None,\n strategy: Optional[Callable] = None,\n _check_cls: Type = Check,\n **outer_kwargs,\n):\n \"\"\"Register a check method to the Check namespace.\n\n This is the primary way for extending the Check api to define additional\n built-in checks.\n \"\"\"\n\n if fn is None:\n return partial(\n register_builtin_check,\n strategy=strategy,\n _check_cls=_check_cls,\n **outer_kwargs,\n )\n\n name = fn.__name__\n\n # see if the check function is already registered\n check_fn = _check_cls.CHECK_FUNCTION_REGISTRY.get(name)\n fn_sig = signature(fn)\n\n # register the check strategy for this particular check, identified\n # by the check `name`, and the data type of the check function. This\n # supports Union types. 
Also assume that the data type of the data\n # object to validate is the first argument.\n data_type = [*fn_sig.parameters.values()][0].annotation\n\n if typing_inspect.get_origin(data_type) is Tuple:\n data_type, *_ = typing_inspect.get_args(data_type)\n\n if typing_inspect.get_origin(data_type) is Union:\n data_types = typing_inspect.get_args(data_type)\n else:\n data_types = (data_type,)\n\n if strategy is not None:\n for dt in data_types:\n STRATEGY_DISPATCHER[(name, dt)] = strategy\n\n if check_fn is None: # pragma: no cover\n raise BuiltinCheckRegistrationError(\n f\"Check '{name}' doesn't have a base check implementation. \"\n f\"You need to create a stub method in the {_check_cls} class and \"\n \"then register a base check function implementation with the \"\n f\"{_check_cls}.register_builtin_check_fn method.\\n\"\n \"See the `pandera.api.base.builtin_checks` and \"\n \"`pandera.backends.pandas.builtin_checks` modules as an example.\"\n )\n\n check_fn.register(fn) # type: ignore\n\n return fn\n\n\ndef register_builtin_hypothesis(**kwargs):\n \"\"\"Register a new hypothesis.\"\"\"\n return partial(\n register_builtin_check,\n _check_cls=Hypothesis,\n **kwargs,\n )\n\n\n# --------------------------------\n# CUSTOM CHECK REGISTRATION METHOD\n# --------------------------------\n#\n# The `register_check_method` decorator is the legacy method for registering\n# custom checks and will slated for deprecation after merging the core\n# internals overhaul.\n\n\nclass CheckType(Enum):\n \"\"\"Check types for registered check methods.\"\"\"\n\n VECTORIZED = 1 #: Check applied to a Series or DataFrame\n ELEMENT_WISE = 2 #: Check applied to an element of a Series or DataFrame\n GROUPBY = 3 #: Check applied to dictionary of Series or DataFrames.\n\n\ndef register_check_statistics(statistics_args):\n \"\"\"Decorator to set statistics based on Check method.\"\"\"\n\n def register_check_statistics_decorator(class_method):\n @wraps(class_method)\n def _wrapper(cls, *args, **kwargs):\n args = list(args)\n arg_names = inspect.getfullargspec(class_method).args[1:]\n if not arg_names:\n arg_names = statistics_args\n args_dict = {**dict(zip(arg_names, args)), **kwargs}\n check = class_method(cls, *args, **kwargs)\n check.statistics = {\n stat: args_dict.get(stat) for stat in statistics_args\n }\n check.statistics_args = statistics_args\n return check\n\n return _wrapper\n\n return register_check_statistics_decorator\n\n\ndef register_check_method(\n check_fn=None,\n *,\n statistics: Optional[List[str]] = None,\n supported_types: Union[type, Tuple, List] = (pd.DataFrame, pd.Series),\n check_type: Union[CheckType, str] = \"vectorized\",\n strategy=None,\n):\n \"\"\"Registers a function as a :class:`~pandera.api.checks.Check` method.\n\n See the :ref:`user guide<extensions>` for more details.\n\n .. warning::\n\n This is the legacy method for registering check methods. Use the\n :py:func:`~pandera.api.extensions.register_check` decorator instead.\n\n :param check_fn: check function to register. 
The function should take one\n positional argument for the object to validate and additional\n keyword-only arguments for the check statistics.\n :param statistics: list of keyword-only arguments in the ``check_fn``,\n which serve as the statistics needed to serialize/de-serialize the\n check and generate data if a ``strategy`` function is provided.\n :param supported_types: the pandas type(s) supported by the check function.\n Valid values are ``pd.DataFrame``, ``pd.Series``, or a list/tuple of\n ``(pa.DataFrame, pa.Series)`` if both types are supported.\n :param check_type: the expected input of the check function. Valid values\n are :class:`~pandera.extensions.CheckType` enums or\n ``{\"vectorized\", \"element_wise\", \"groupby\"}``. The input signature of\n ``check_fn`` is determined by this argument:\n\n - if ``vectorized``, the first positional argument of ``check_fn``\n should be one of the ``supported_types``.\n - if ``element_wise``, the first positional argument of ``check_fn``\n should be a single scalar element in the pandas Series or DataFrame.\n - if ``groupby``, the first positional argument of ``check_fn`` should\n be a dictionary mapping group names to subsets of the Series or\n DataFrame.\n\n :param strategy: data-generation strategy associated with the check\n function.\n :return: register check function wrapper.\n \"\"\"\n\n # pylint: disable=import-outside-toplevel\n from pandera.strategies.pandas_strategies import register_check_strategy\n\n if statistics is None:\n statistics = []\n\n if isinstance(check_type, str):\n check_type = CheckType[check_type.upper()]\n\n msg = (\n \"{} is not a valid input type for check_fn. You must specify one of \"\n \"pandas.DataFrame, pandas.Series, or a tuple of both.\"\n )\n if isinstance(supported_types, list):\n supported_types = tuple(supported_types)\n elif not isinstance(supported_types, tuple):\n supported_types = (supported_types,)\n\n for supported_type in supported_types: # type: ignore\n if supported_type not in {pd.DataFrame, pd.Series}:\n raise TypeError(msg.format(supported_type))\n\n if check_type is CheckType.ELEMENT_WISE and set(supported_types) != {\n pd.DataFrame,\n pd.Series,\n }: # type: ignore\n raise ValueError(\n \"Element-wise checks should support DataFrame and Series \"\n \"validation. Use the default setting for the 'supported_types' \"\n \"argument.\"\n )\n\n if check_fn is None:\n return partial(\n register_check_method,\n statistics=statistics,\n supported_types=supported_types,\n check_type=check_type,\n strategy=strategy,\n )\n else:\n sig = signature(check_fn)\n for statistic in statistics:\n if statistic not in sig.parameters:\n raise TypeError(\n f\"statistic '{statistic}' is not part of \"\n f\"{check_fn.__name__}'s signature.\"\n )\n\n def register_check_wrapper(check_fn: Callable):\n \"\"\"Register a function as a :class:`~pandera.api.checks.Check` method.\"\"\"\n\n if hasattr(Check, check_fn.__name__):\n raise ValueError(\n f\"method with name '{check_fn.__name__}' already defined. \"\n \"Check methods must have a unique method name.\"\n )\n\n @wraps(check_fn)\n def check_fn_wrapper(validate_obj, **kwargs):\n \"\"\"Wrapper for check_fn to validate inputs.\"\"\"\n return check_fn(validate_obj, **kwargs)\n\n def validate_check_kwargs(check_kwargs):\n msg = (\n f\"'{check_fn.__name__} has check_type={check_type}. \"\n \"Providing the following arguments will have no effect: \"\n \"{}. 
Remove these arguments to avoid this warning.\"\n )\n\n no_effect_args = {\n CheckType.ELEMENT_WISE: [\"element_wise\", \"groupby\", \"groups\"],\n CheckType.VECTORIZED: [\"element_wise\", \"groupby\", \"groups\"],\n CheckType.GROUPBY: [\"element_wise\"],\n }[check_type]\n\n if any(arg in check_kwargs for arg in no_effect_args):\n warnings.warn(msg.format(no_effect_args))\n for arg in no_effect_args:\n check_kwargs.pop(arg, None)\n\n if check_type is CheckType.ELEMENT_WISE:\n check_kwargs[\"element_wise\"] = True\n\n return check_kwargs\n\n @register_check_statistics(statistics)\n def check_method(cls, *args, **kwargs):\n \"\"\"Wrapper function that serves as the Check method.\"\"\"\n stats, check_kwargs = {}, {}\n\n if args:\n stats = dict(zip(statistics, args))\n\n for k, v in kwargs.items():\n if k in statistics:\n stats[k] = v\n else:\n check_kwargs[k] = v\n\n return cls(\n partial(check_fn_wrapper, **stats),\n name=check_fn.__name__,\n **validate_check_kwargs(check_kwargs),\n )\n\n if strategy is not None:\n check_method = register_check_strategy(strategy)(check_method)\n\n Check.REGISTERED_CUSTOM_CHECKS[check_fn.__name__] = partial(\n check_method, Check\n )\n\n return register_check_wrapper(check_fn)\n", "path": "pandera/api/extensions.py"}], "after_files": [{"content": "\"\"\"Extensions module.\"\"\"\n\nimport inspect\nimport warnings\nfrom enum import Enum\nfrom functools import partial, wraps\nfrom inspect import signature\nfrom typing import Callable, List, Optional, Tuple, Type, Union\n\nimport pandas as pd\nimport typing_inspect\n\nfrom pandera.api.checks import Check\nfrom pandera.api.hypotheses import Hypothesis\nfrom pandera.strategies.base_strategies import STRATEGY_DISPATCHER\n\n\nclass BuiltinCheckRegistrationError(Exception):\n \"\"\"\n Exception raised when registering a built-in check implementation but the\n default check function implementation hasn't been registered with\n :py:meth:`~flytekit.core.base.BaseCheck.register_builtin_check_fn`.\n \"\"\"\n\n\n# pylint: disable=too-many-locals\ndef register_builtin_check(\n fn=None,\n strategy: Optional[Callable] = None,\n _check_cls: Type = Check,\n **outer_kwargs,\n):\n \"\"\"Register a check method to the Check namespace.\n\n This is the primary way for extending the Check api to define additional\n built-in checks.\n \"\"\"\n\n if fn is None:\n return partial(\n register_builtin_check,\n strategy=strategy,\n _check_cls=_check_cls,\n **outer_kwargs,\n )\n\n name = fn.__name__\n\n # see if the check function is already registered\n check_fn = _check_cls.CHECK_FUNCTION_REGISTRY.get(name)\n fn_sig = signature(fn)\n\n # register the check strategy for this particular check, identified\n # by the check `name`, and the data type of the check function. This\n # supports Union types. Also assume that the data type of the data\n # object to validate is the first argument.\n data_type = [*fn_sig.parameters.values()][0].annotation\n\n if typing_inspect.get_origin(data_type) is Tuple:\n data_type, *_ = typing_inspect.get_args(data_type)\n\n if typing_inspect.get_origin(data_type) is Union:\n data_types = typing_inspect.get_args(data_type)\n else:\n data_types = (data_type,)\n\n if strategy is not None:\n for dt in data_types:\n STRATEGY_DISPATCHER[(name, dt)] = strategy\n\n if check_fn is None: # pragma: no cover\n raise BuiltinCheckRegistrationError(\n f\"Check '{name}' doesn't have a base check implementation. 
\"\n f\"You need to create a stub method in the {_check_cls} class and \"\n \"then register a base check function implementation with the \"\n f\"{_check_cls}.register_builtin_check_fn method.\\n\"\n \"See the `pandera.api.base.builtin_checks` and \"\n \"`pandera.backends.pandas.builtin_checks` modules as an example.\"\n )\n\n check_fn.register(fn) # type: ignore\n\n return fn\n\n\ndef register_builtin_hypothesis(**kwargs):\n \"\"\"Register a new hypothesis.\"\"\"\n return partial(\n register_builtin_check,\n _check_cls=Hypothesis,\n **kwargs,\n )\n\n\n# --------------------------------\n# CUSTOM CHECK REGISTRATION METHOD\n# --------------------------------\n#\n# The `register_check_method` decorator is the legacy method for registering\n# custom checks and will slated for deprecation after merging the core\n# internals overhaul.\n\n\nclass CheckType(Enum):\n \"\"\"Check types for registered check methods.\"\"\"\n\n VECTORIZED = 1 #: Check applied to a Series or DataFrame\n ELEMENT_WISE = 2 #: Check applied to an element of a Series or DataFrame\n GROUPBY = 3 #: Check applied to dictionary of Series or DataFrames.\n\n\ndef register_check_statistics(statistics_args):\n \"\"\"Decorator to set statistics based on Check method.\"\"\"\n\n def register_check_statistics_decorator(class_method):\n @wraps(class_method)\n def _wrapper(cls, *args, **kwargs):\n args = list(args)\n arg_names = inspect.getfullargspec(class_method).args[1:]\n if not arg_names:\n arg_names = statistics_args\n args_dict = {**dict(zip(arg_names, args)), **kwargs}\n check = class_method(cls, *args, **kwargs)\n check.statistics = {\n stat: args_dict.get(stat) for stat in statistics_args\n }\n check.statistics_args = statistics_args\n return check\n\n return _wrapper\n\n return register_check_statistics_decorator\n\n\ndef register_check_method(\n check_fn=None,\n *,\n statistics: Optional[List[str]] = None,\n supported_types: Union[type, Tuple, List] = (pd.DataFrame, pd.Series),\n check_type: Union[CheckType, str] = \"vectorized\",\n strategy=None,\n):\n \"\"\"Registers a function as a :class:`~pandera.api.checks.Check` method.\n\n See the :ref:`user guide<extensions>` for more details.\n\n :param check_fn: check function to register. The function should take one\n positional argument for the object to validate and additional\n keyword-only arguments for the check statistics.\n :param statistics: list of keyword-only arguments in the ``check_fn``,\n which serve as the statistics needed to serialize/de-serialize the\n check and generate data if a ``strategy`` function is provided.\n :param supported_types: the pandas type(s) supported by the check function.\n Valid values are ``pd.DataFrame``, ``pd.Series``, or a list/tuple of\n ``(pa.DataFrame, pa.Series)`` if both types are supported.\n :param check_type: the expected input of the check function. Valid values\n are :class:`~pandera.extensions.CheckType` enums or\n ``{\"vectorized\", \"element_wise\", \"groupby\"}``. 
The input signature of\n ``check_fn`` is determined by this argument:\n\n - if ``vectorized``, the first positional argument of ``check_fn``\n should be one of the ``supported_types``.\n - if ``element_wise``, the first positional argument of ``check_fn``\n should be a single scalar element in the pandas Series or DataFrame.\n - if ``groupby``, the first positional argument of ``check_fn`` should\n be a dictionary mapping group names to subsets of the Series or\n DataFrame.\n\n :param strategy: data-generation strategy associated with the check\n function.\n :return: register check function wrapper.\n \"\"\"\n\n # pylint: disable=import-outside-toplevel\n from pandera.strategies.pandas_strategies import register_check_strategy\n\n if statistics is None:\n statistics = []\n\n if isinstance(check_type, str):\n check_type = CheckType[check_type.upper()]\n\n msg = (\n \"{} is not a valid input type for check_fn. You must specify one of \"\n \"pandas.DataFrame, pandas.Series, or a tuple of both.\"\n )\n if isinstance(supported_types, list):\n supported_types = tuple(supported_types)\n elif not isinstance(supported_types, tuple):\n supported_types = (supported_types,)\n\n for supported_type in supported_types: # type: ignore\n if supported_type not in {pd.DataFrame, pd.Series}:\n raise TypeError(msg.format(supported_type))\n\n if check_type is CheckType.ELEMENT_WISE and set(supported_types) != {\n pd.DataFrame,\n pd.Series,\n }: # type: ignore\n raise ValueError(\n \"Element-wise checks should support DataFrame and Series \"\n \"validation. Use the default setting for the 'supported_types' \"\n \"argument.\"\n )\n\n if check_fn is None:\n return partial(\n register_check_method,\n statistics=statistics,\n supported_types=supported_types,\n check_type=check_type,\n strategy=strategy,\n )\n else:\n sig = signature(check_fn)\n for statistic in statistics:\n if statistic not in sig.parameters:\n raise TypeError(\n f\"statistic '{statistic}' is not part of \"\n f\"{check_fn.__name__}'s signature.\"\n )\n\n def register_check_wrapper(check_fn: Callable):\n \"\"\"Register a function as a :class:`~pandera.api.checks.Check` method.\"\"\"\n\n if hasattr(Check, check_fn.__name__):\n raise ValueError(\n f\"method with name '{check_fn.__name__}' already defined. \"\n \"Check methods must have a unique method name.\"\n )\n\n @wraps(check_fn)\n def check_fn_wrapper(validate_obj, **kwargs):\n \"\"\"Wrapper for check_fn to validate inputs.\"\"\"\n return check_fn(validate_obj, **kwargs)\n\n def validate_check_kwargs(check_kwargs):\n msg = (\n f\"'{check_fn.__name__} has check_type={check_type}. \"\n \"Providing the following arguments will have no effect: \"\n \"{}. 
Remove these arguments to avoid this warning.\"\n )\n\n no_effect_args = {\n CheckType.ELEMENT_WISE: [\"element_wise\", \"groupby\", \"groups\"],\n CheckType.VECTORIZED: [\"element_wise\", \"groupby\", \"groups\"],\n CheckType.GROUPBY: [\"element_wise\"],\n }[check_type]\n\n if any(arg in check_kwargs for arg in no_effect_args):\n warnings.warn(msg.format(no_effect_args))\n for arg in no_effect_args:\n check_kwargs.pop(arg, None)\n\n if check_type is CheckType.ELEMENT_WISE:\n check_kwargs[\"element_wise\"] = True\n\n return check_kwargs\n\n @register_check_statistics(statistics)\n def check_method(cls, *args, **kwargs):\n \"\"\"Wrapper function that serves as the Check method.\"\"\"\n stats, check_kwargs = {}, {}\n\n if args:\n stats = dict(zip(statistics, args))\n\n for k, v in kwargs.items():\n if k in statistics:\n stats[k] = v\n else:\n check_kwargs[k] = v\n\n return cls(\n partial(check_fn_wrapper, **stats),\n name=check_fn.__name__,\n **validate_check_kwargs(check_kwargs),\n )\n\n if strategy is not None:\n check_method = register_check_strategy(strategy)(check_method)\n\n Check.REGISTERED_CUSTOM_CHECKS[check_fn.__name__] = partial(\n check_method, Check\n )\n\n return register_check_wrapper(check_fn)\n", "path": "pandera/api/extensions.py"}]}
| 3,416 | 145 |
gh_patches_debug_18288
|
rasdani/github-patches
|
git_diff
|
deepset-ai__haystack-6735
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Weights and score normalization for DocumentJoiner with reciprocal rank fusion - 2.x
Complete details in #5551.
Implemented for 1.x by @robpasternak in #5704.
We should port this improvement to 2.x.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `haystack/components/joiners/document_joiner.py`
Content:
```
1 import itertools
2 import logging
3 from collections import defaultdict
4 from math import inf
5 from typing import List, Optional
6 from haystack.core.component.types import Variadic
7
8 from haystack import component, Document
9
10
11 logger = logging.getLogger(__name__)
12
13
14 @component
15 class DocumentJoiner:
16 """
17 A component that joins input lists of Documents from multiple connections and outputs them as one list.
18
19 The component allows multiple join modes:
20 * concatenate: Combine Documents from multiple components. Discards duplicate Documents.
21 Documents get their scores from the last component in the pipeline that assigns scores.
22 This join mode doesn't influence Document scores.
23 * merge: Merge scores of duplicate Documents coming from multiple components.
24 Optionally, you can assign a weight to the scores and set the top_k limit for this join mode.
25 You can also use this join mode to rerank retrieved Documents.
26 * reciprocal_rank_fusion: Combine Documents into a single list based on their ranking received from multiple components.
27
28 Example usage in a hybrid retrieval pipeline:
29 ```python
30 document_store = InMemoryDocumentStore()
31 p = Pipeline()
32 p.add_component(instance=InMemoryBM25Retriever(document_store=document_store), name="bm25_retriever")
33 p.add_component(
34 instance=SentenceTransformersTextEmbedder(model="sentence-transformers/all-MiniLM-L6-v2"),
35 name="text_embedder",
36 )
37 p.add_component(instance=InMemoryEmbeddingRetriever(document_store=document_store), name="embedding_retriever")
38 p.add_component(instance=DocumentJoiner(), name="joiner")
39 p.connect("bm25_retriever", "joiner")
40 p.connect("embedding_retriever", "joiner")
41 p.connect("text_embedder", "embedding_retriever")
42 query = "What is the capital of France?"
43 p.run(data={"bm25_retriever": {"query": query},
44 "text_embedder": {"text": query}})
45 ```
46 """
47
48 def __init__(
49 self,
50 join_mode: str = "concatenate",
51 weights: Optional[List[float]] = None,
52 top_k: Optional[int] = None,
53 sort_by_score: bool = True,
54 ):
55 """
56 Initialize the DocumentJoiner.
57
58 :param join_mode: Specifies the join mode to use. Available modes: `concatenate` to combine Documents from multiple Retrievers, `merge` to aggregate the scores of
59 individual Documents, `reciprocal_rank_fusion` to apply rank-based scoring.
60 :param weights: A component-wise list (the length of the list must be equal to the number of input components) of weights for
61 adjusting Document scores when using the `merge` join_mode. By default, equal weight is given
62 to each Retriever score. This param is not compatible with the `concatenate` join_mode.
63 :param top_k: The maximum number of Documents to be returned as output. By default, returns all Documents.
64 :param sort_by_score: Whether the output list of Documents should be sorted by Document scores in descending order.
65 By default, the output is sorted.
66 Documents without score are handled as if their score was -infinity.
67 """
68 if join_mode not in ["concatenate", "merge", "reciprocal_rank_fusion"]:
69 raise ValueError(f"DocumentJoiner component does not support '{join_mode}' join_mode.")
70 self.join_mode = join_mode
71 self.weights = [float(i) / sum(weights) for i in weights] if weights else None
72 self.top_k = top_k
73 self.sort_by_score = sort_by_score
74
75 @component.output_types(documents=List[Document])
76 def run(self, documents: Variadic[List[Document]]):
77 """
78 Run the DocumentJoiner. This method joins the input lists of Documents into one output list based on the join_mode specified during initialization.
79
80 :param documents: An arbitrary number of lists of Documents to join.
81 """
82 output_documents = []
83 if self.join_mode == "concatenate":
84 output_documents = self._concatenate(documents)
85 elif self.join_mode == "merge":
86 output_documents = self._merge(documents)
87 elif self.join_mode == "reciprocal_rank_fusion":
88 output_documents = self._reciprocal_rank_fusion(documents)
89
90 if self.sort_by_score:
91 output_documents = sorted(
92 output_documents, key=lambda doc: doc.score if doc.score is not None else -inf, reverse=True
93 )
94 if any(doc.score is None for doc in output_documents):
95 logger.info(
96 "Some of the Documents DocumentJoiner got have score=None. It was configured to sort Documents by "
97 "score, so those with score=None were sorted as if they had a score of -infinity."
98 )
99
100 if self.top_k:
101 output_documents = output_documents[: self.top_k]
102 return {"documents": output_documents}
103
104 def _concatenate(self, document_lists):
105 """
106 Concatenate multiple lists of Documents and return only the Document with the highest score for duplicate Documents.
107 """
108 output = []
109 docs_per_id = defaultdict(list)
110 for doc in itertools.chain.from_iterable(document_lists):
111 docs_per_id[doc.id].append(doc)
112 for docs in docs_per_id.values():
113 doc_with_best_score = max(docs, key=lambda doc: doc.score if doc.score else -inf)
114 output.append(doc_with_best_score)
115 return output
116
117 def _merge(self, document_lists):
118 """
119 Merge multiple lists of Documents and calculate a weighted sum of the scores of duplicate Documents.
120 """
121 scores_map = defaultdict(int)
122 documents_map = {}
123 weights = self.weights if self.weights else [1 / len(document_lists)] * len(document_lists)
124
125 for documents, weight in zip(document_lists, weights):
126 for doc in documents:
127 scores_map[doc.id] += (doc.score if doc.score else 0) * weight
128 documents_map[doc.id] = doc
129
130 for doc in documents_map.values():
131 doc.score = scores_map[doc.id]
132
133 return documents_map.values()
134
135 def _reciprocal_rank_fusion(self, document_lists):
136 """
137 Merge multiple lists of Documents and assign scores based on reciprocal rank fusion.
138 The constant k is set to 61 (60 was suggested by the original paper,
139 plus 1 as python lists are 0-based and the paper used 1-based ranking).
140 """
141 k = 61
142
143 scores_map = defaultdict(int)
144 documents_map = {}
145 for documents in document_lists:
146 for rank, doc in enumerate(documents):
147 scores_map[doc.id] += 1 / (k + rank)
148 documents_map[doc.id] = doc
149
150 for doc in documents_map.values():
151 doc.score = scores_map[doc.id]
152
153 return documents_map.values()
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/haystack/components/joiners/document_joiner.py b/haystack/components/joiners/document_joiner.py
--- a/haystack/components/joiners/document_joiner.py
+++ b/haystack/components/joiners/document_joiner.py
@@ -142,11 +142,19 @@
scores_map = defaultdict(int)
documents_map = {}
- for documents in document_lists:
+ weights = self.weights if self.weights else [1 / len(document_lists)] * len(document_lists)
+
+ # Calculate weighted reciprocal rank fusion score
+ for documents, weight in zip(document_lists, weights):
for rank, doc in enumerate(documents):
- scores_map[doc.id] += 1 / (k + rank)
+ scores_map[doc.id] += (weight * len(document_lists)) / (k + rank)
documents_map[doc.id] = doc
+ # Normalize scores. Note: len(results) / k is the maximum possible score,
+ # achieved by being ranked first in all doc lists with non-zero weight.
+ for id in scores_map:
+ scores_map[id] /= len(document_lists) / k
+
for doc in documents_map.values():
doc.score = scores_map[doc.id]
|
{"golden_diff": "diff --git a/haystack/components/joiners/document_joiner.py b/haystack/components/joiners/document_joiner.py\n--- a/haystack/components/joiners/document_joiner.py\n+++ b/haystack/components/joiners/document_joiner.py\n@@ -142,11 +142,19 @@\n \n scores_map = defaultdict(int)\n documents_map = {}\n- for documents in document_lists:\n+ weights = self.weights if self.weights else [1 / len(document_lists)] * len(document_lists)\n+\n+ # Calculate weighted reciprocal rank fusion score\n+ for documents, weight in zip(document_lists, weights):\n for rank, doc in enumerate(documents):\n- scores_map[doc.id] += 1 / (k + rank)\n+ scores_map[doc.id] += (weight * len(document_lists)) / (k + rank)\n documents_map[doc.id] = doc\n \n+ # Normalize scores. Note: len(results) / k is the maximum possible score,\n+ # achieved by being ranked first in all doc lists with non-zero weight.\n+ for id in scores_map:\n+ scores_map[id] /= len(document_lists) / k\n+\n for doc in documents_map.values():\n doc.score = scores_map[doc.id]\n", "issue": "Weights and score normalization for DocumentJoiner with reciprocal rank fusion - 2.x\nComplete details in #5551.\r\nImplemented for 1.x by @robpasternak in #5704.\r\n\r\nWe should port this improvement to 2.x.\n", "before_files": [{"content": "import itertools\nimport logging\nfrom collections import defaultdict\nfrom math import inf\nfrom typing import List, Optional\nfrom haystack.core.component.types import Variadic\n\nfrom haystack import component, Document\n\n\nlogger = logging.getLogger(__name__)\n\n\n@component\nclass DocumentJoiner:\n \"\"\"\n A component that joins input lists of Documents from multiple connections and outputs them as one list.\n\n The component allows multiple join modes:\n * concatenate: Combine Documents from multiple components. Discards duplicate Documents.\n Documents get their scores from the last component in the pipeline that assigns scores.\n This join mode doesn't influence Document scores.\n * merge: Merge scores of duplicate Documents coming from multiple components.\n Optionally, you can assign a weight to the scores and set the top_k limit for this join mode.\n You can also use this join mode to rerank retrieved Documents.\n * reciprocal_rank_fusion: Combine Documents into a single list based on their ranking received from multiple components.\n\n Example usage in a hybrid retrieval pipeline:\n ```python\n document_store = InMemoryDocumentStore()\n p = Pipeline()\n p.add_component(instance=InMemoryBM25Retriever(document_store=document_store), name=\"bm25_retriever\")\n p.add_component(\n instance=SentenceTransformersTextEmbedder(model=\"sentence-transformers/all-MiniLM-L6-v2\"),\n name=\"text_embedder\",\n )\n p.add_component(instance=InMemoryEmbeddingRetriever(document_store=document_store), name=\"embedding_retriever\")\n p.add_component(instance=DocumentJoiner(), name=\"joiner\")\n p.connect(\"bm25_retriever\", \"joiner\")\n p.connect(\"embedding_retriever\", \"joiner\")\n p.connect(\"text_embedder\", \"embedding_retriever\")\n query = \"What is the capital of France?\"\n p.run(data={\"bm25_retriever\": {\"query\": query},\n \"text_embedder\": {\"text\": query}})\n ```\n \"\"\"\n\n def __init__(\n self,\n join_mode: str = \"concatenate\",\n weights: Optional[List[float]] = None,\n top_k: Optional[int] = None,\n sort_by_score: bool = True,\n ):\n \"\"\"\n Initialize the DocumentJoiner.\n\n :param join_mode: Specifies the join mode to use. 
Available modes: `concatenate` to combine Documents from multiple Retrievers, `merge` to aggregate the scores of\n individual Documents, `reciprocal_rank_fusion` to apply rank-based scoring.\n :param weights: A component-wise list (the length of the list must be equal to the number of input components) of weights for\n adjusting Document scores when using the `merge` join_mode. By default, equal weight is given\n to each Retriever score. This param is not compatible with the `concatenate` join_mode.\n :param top_k: The maximum number of Documents to be returned as output. By default, returns all Documents.\n :param sort_by_score: Whether the output list of Documents should be sorted by Document scores in descending order.\n By default, the output is sorted.\n Documents without score are handled as if their score was -infinity.\n \"\"\"\n if join_mode not in [\"concatenate\", \"merge\", \"reciprocal_rank_fusion\"]:\n raise ValueError(f\"DocumentJoiner component does not support '{join_mode}' join_mode.\")\n self.join_mode = join_mode\n self.weights = [float(i) / sum(weights) for i in weights] if weights else None\n self.top_k = top_k\n self.sort_by_score = sort_by_score\n\n @component.output_types(documents=List[Document])\n def run(self, documents: Variadic[List[Document]]):\n \"\"\"\n Run the DocumentJoiner. This method joins the input lists of Documents into one output list based on the join_mode specified during initialization.\n\n :param documents: An arbitrary number of lists of Documents to join.\n \"\"\"\n output_documents = []\n if self.join_mode == \"concatenate\":\n output_documents = self._concatenate(documents)\n elif self.join_mode == \"merge\":\n output_documents = self._merge(documents)\n elif self.join_mode == \"reciprocal_rank_fusion\":\n output_documents = self._reciprocal_rank_fusion(documents)\n\n if self.sort_by_score:\n output_documents = sorted(\n output_documents, key=lambda doc: doc.score if doc.score is not None else -inf, reverse=True\n )\n if any(doc.score is None for doc in output_documents):\n logger.info(\n \"Some of the Documents DocumentJoiner got have score=None. 
It was configured to sort Documents by \"\n \"score, so those with score=None were sorted as if they had a score of -infinity.\"\n )\n\n if self.top_k:\n output_documents = output_documents[: self.top_k]\n return {\"documents\": output_documents}\n\n def _concatenate(self, document_lists):\n \"\"\"\n Concatenate multiple lists of Documents and return only the Document with the highest score for duplicate Documents.\n \"\"\"\n output = []\n docs_per_id = defaultdict(list)\n for doc in itertools.chain.from_iterable(document_lists):\n docs_per_id[doc.id].append(doc)\n for docs in docs_per_id.values():\n doc_with_best_score = max(docs, key=lambda doc: doc.score if doc.score else -inf)\n output.append(doc_with_best_score)\n return output\n\n def _merge(self, document_lists):\n \"\"\"\n Merge multiple lists of Documents and calculate a weighted sum of the scores of duplicate Documents.\n \"\"\"\n scores_map = defaultdict(int)\n documents_map = {}\n weights = self.weights if self.weights else [1 / len(document_lists)] * len(document_lists)\n\n for documents, weight in zip(document_lists, weights):\n for doc in documents:\n scores_map[doc.id] += (doc.score if doc.score else 0) * weight\n documents_map[doc.id] = doc\n\n for doc in documents_map.values():\n doc.score = scores_map[doc.id]\n\n return documents_map.values()\n\n def _reciprocal_rank_fusion(self, document_lists):\n \"\"\"\n Merge multiple lists of Documents and assign scores based on reciprocal rank fusion.\n The constant k is set to 61 (60 was suggested by the original paper,\n plus 1 as python lists are 0-based and the paper used 1-based ranking).\n \"\"\"\n k = 61\n\n scores_map = defaultdict(int)\n documents_map = {}\n for documents in document_lists:\n for rank, doc in enumerate(documents):\n scores_map[doc.id] += 1 / (k + rank)\n documents_map[doc.id] = doc\n\n for doc in documents_map.values():\n doc.score = scores_map[doc.id]\n\n return documents_map.values()\n", "path": "haystack/components/joiners/document_joiner.py"}], "after_files": [{"content": "import itertools\nimport logging\nfrom collections import defaultdict\nfrom math import inf\nfrom typing import List, Optional\nfrom haystack.core.component.types import Variadic\n\nfrom haystack import component, Document\n\n\nlogger = logging.getLogger(__name__)\n\n\n@component\nclass DocumentJoiner:\n \"\"\"\n A component that joins input lists of Documents from multiple connections and outputs them as one list.\n\n The component allows multiple join modes:\n * concatenate: Combine Documents from multiple components. 
Discards duplicate Documents.\n Documents get their scores from the last component in the pipeline that assigns scores.\n This join mode doesn't influence Document scores.\n * merge: Merge scores of duplicate Documents coming from multiple components.\n Optionally, you can assign a weight to the scores and set the top_k limit for this join mode.\n You can also use this join mode to rerank retrieved Documents.\n * reciprocal_rank_fusion: Combine Documents into a single list based on their ranking received from multiple components.\n\n Example usage in a hybrid retrieval pipeline:\n ```python\n document_store = InMemoryDocumentStore()\n p = Pipeline()\n p.add_component(instance=InMemoryBM25Retriever(document_store=document_store), name=\"bm25_retriever\")\n p.add_component(\n instance=SentenceTransformersTextEmbedder(model=\"sentence-transformers/all-MiniLM-L6-v2\"),\n name=\"text_embedder\",\n )\n p.add_component(instance=InMemoryEmbeddingRetriever(document_store=document_store), name=\"embedding_retriever\")\n p.add_component(instance=DocumentJoiner(), name=\"joiner\")\n p.connect(\"bm25_retriever\", \"joiner\")\n p.connect(\"embedding_retriever\", \"joiner\")\n p.connect(\"text_embedder\", \"embedding_retriever\")\n query = \"What is the capital of France?\"\n p.run(data={\"bm25_retriever\": {\"query\": query},\n \"text_embedder\": {\"text\": query}})\n ```\n \"\"\"\n\n def __init__(\n self,\n join_mode: str = \"concatenate\",\n weights: Optional[List[float]] = None,\n top_k: Optional[int] = None,\n sort_by_score: bool = True,\n ):\n \"\"\"\n Initialize the DocumentJoiner.\n\n :param join_mode: Specifies the join mode to use. Available modes: `concatenate` to combine Documents from multiple Retrievers, `merge` to aggregate the scores of\n individual Documents, `reciprocal_rank_fusion` to apply rank-based scoring.\n :param weights: A component-wise list (the length of the list must be equal to the number of input components) of weights for\n adjusting Document scores when using the `merge` join_mode. By default, equal weight is given\n to each Retriever score. This param is not compatible with the `concatenate` join_mode.\n :param top_k: The maximum number of Documents to be returned as output. By default, returns all Documents.\n :param sort_by_score: Whether the output list of Documents should be sorted by Document scores in descending order.\n By default, the output is sorted.\n Documents without score are handled as if their score was -infinity.\n \"\"\"\n if join_mode not in [\"concatenate\", \"merge\", \"reciprocal_rank_fusion\"]:\n raise ValueError(f\"DocumentJoiner component does not support '{join_mode}' join_mode.\")\n self.join_mode = join_mode\n self.weights = [float(i) / sum(weights) for i in weights] if weights else None\n self.top_k = top_k\n self.sort_by_score = sort_by_score\n\n @component.output_types(documents=List[Document])\n def run(self, documents: Variadic[List[Document]]):\n \"\"\"\n Run the DocumentJoiner. 
This method joins the input lists of Documents into one output list based on the join_mode specified during initialization.\n\n :param documents: An arbitrary number of lists of Documents to join.\n \"\"\"\n output_documents = []\n if self.join_mode == \"concatenate\":\n output_documents = self._concatenate(documents)\n elif self.join_mode == \"merge\":\n output_documents = self._merge(documents)\n elif self.join_mode == \"reciprocal_rank_fusion\":\n output_documents = self._reciprocal_rank_fusion(documents)\n\n if self.sort_by_score:\n output_documents = sorted(\n output_documents, key=lambda doc: doc.score if doc.score is not None else -inf, reverse=True\n )\n if any(doc.score is None for doc in output_documents):\n logger.info(\n \"Some of the Documents DocumentJoiner got have score=None. It was configured to sort Documents by \"\n \"score, so those with score=None were sorted as if they had a score of -infinity.\"\n )\n\n if self.top_k:\n output_documents = output_documents[: self.top_k]\n return {\"documents\": output_documents}\n\n def _concatenate(self, document_lists):\n \"\"\"\n Concatenate multiple lists of Documents and return only the Document with the highest score for duplicate Documents.\n \"\"\"\n output = []\n docs_per_id = defaultdict(list)\n for doc in itertools.chain.from_iterable(document_lists):\n docs_per_id[doc.id].append(doc)\n for docs in docs_per_id.values():\n doc_with_best_score = max(docs, key=lambda doc: doc.score if doc.score else -inf)\n output.append(doc_with_best_score)\n return output\n\n def _merge(self, document_lists):\n \"\"\"\n Merge multiple lists of Documents and calculate a weighted sum of the scores of duplicate Documents.\n \"\"\"\n scores_map = defaultdict(int)\n documents_map = {}\n weights = self.weights if self.weights else [1 / len(document_lists)] * len(document_lists)\n\n for documents, weight in zip(document_lists, weights):\n for doc in documents:\n scores_map[doc.id] += (doc.score if doc.score else 0) * weight\n documents_map[doc.id] = doc\n\n for doc in documents_map.values():\n doc.score = scores_map[doc.id]\n\n return documents_map.values()\n\n def _reciprocal_rank_fusion(self, document_lists):\n \"\"\"\n Merge multiple lists of Documents and assign scores based on reciprocal rank fusion.\n The constant k is set to 61 (60 was suggested by the original paper,\n plus 1 as python lists are 0-based and the paper used 1-based ranking).\n \"\"\"\n k = 61\n\n scores_map = defaultdict(int)\n documents_map = {}\n weights = self.weights if self.weights else [1 / len(document_lists)] * len(document_lists)\n\n # Calculate weighted reciprocal rank fusion score\n for documents, weight in zip(document_lists, weights):\n for rank, doc in enumerate(documents):\n scores_map[doc.id] += (weight * len(document_lists)) / (k + rank)\n documents_map[doc.id] = doc\n\n # Normalize scores. Note: len(results) / k is the maximum possible score,\n # achieved by being ranked first in all doc lists with non-zero weight.\n for id in scores_map:\n scores_map[id] /= len(document_lists) / k\n\n for doc in documents_map.values():\n doc.score = scores_map[doc.id]\n\n return documents_map.values()\n", "path": "haystack/components/joiners/document_joiner.py"}]}
| 2,133 | 275 |
gh_patches_debug_41991
|
rasdani/github-patches
|
git_diff
|
litestar-org__litestar-1428
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlite/handlers/base.py`
Content:
```
1 from __future__ import annotations
2
3 from copy import copy
4 from inspect import Signature
5 from typing import TYPE_CHECKING, Any, Generic, Mapping, Sequence, TypeVar, cast
6
7 from starlite._signature.field import SignatureField
8 from starlite.exceptions import ImproperlyConfiguredException
9 from starlite.types import Dependencies, Empty, EmptyType, ExceptionHandlersMap, Guard, Middleware, TypeEncodersMap
10 from starlite.utils import AsyncCallable, Ref, get_name, normalize_path
11 from starlite.utils.helpers import unwrap_partial
12
13 __all__ = ("BaseRouteHandler",)
14
15
16 if TYPE_CHECKING:
17 from typing_extensions import Self
18
19 from starlite._signature.models import SignatureModel
20 from starlite.connection import ASGIConnection
21 from starlite.controller import Controller
22 from starlite.di import Provide
23 from starlite.params import ParameterKwarg
24 from starlite.router import Router
25 from starlite.types import AnyCallable, AsyncAnyCallable, ExceptionHandler
26 from starlite.types.composite_types import MaybePartial
27
28 T = TypeVar("T", bound="BaseRouteHandler")
29
30
31 class BaseRouteHandler(Generic[T]):
32 """Base route handler.
33
34 Serves as a subclass for all route handlers
35 """
36
37 fn: Ref[MaybePartial[AnyCallable]]
38 signature: Signature
39
40 __slots__ = (
41 "_resolved_dependencies",
42 "_resolved_guards",
43 "_resolved_layered_parameters",
44 "_resolved_signature_namespace",
45 "_resolved_type_encoders",
46 "dependencies",
47 "exception_handlers",
48 "fn",
49 "guards",
50 "middleware",
51 "name",
52 "opt",
53 "owner",
54 "paths",
55 "signature",
56 "signature_model",
57 "signature_namespace",
58 "type_encoders",
59 )
60
61 def __init__(
62 self,
63 path: str | Sequence[str] | None = None,
64 *,
65 dependencies: Dependencies | None = None,
66 exception_handlers: ExceptionHandlersMap | None = None,
67 guards: Sequence[Guard] | None = None,
68 middleware: Sequence[Middleware] | None = None,
69 name: str | None = None,
70 opt: Mapping[str, Any] | None = None,
71 signature_namespace: Mapping[str, Any] | None = None,
72 type_encoders: TypeEncodersMap | None = None,
73 **kwargs: Any,
74 ) -> None:
75 """Initialize ``HTTPRouteHandler``.
76
77 Args:
78 path: A path fragment for the route handler function or a sequence of path fragments. If not given defaults
79 to ``/``
80 dependencies: A string keyed mapping of dependency :class:`Provider <.di.Provide>` instances.
81 exception_handlers: A mapping of status codes and/or exception types to handler functions.
82 guards: A sequence of :class:`Guard <.types.Guard>` callables.
83 middleware: A sequence of :class:`Middleware <.types.Middleware>`.
84 name: A string identifying the route handler.
85 opt: A string keyed mapping of arbitrary values that can be accessed in :class:`Guards <.types.Guard>` or
86 wherever you have access to :class:`Request <.connection.Request>` or
87 :class:`ASGI Scope <.types.Scope>`.
88 signature_namespace: A mapping of names to types for use in forward reference resolution during signature modelling.
89 type_encoders: A mapping of types to callables that transform them into types supported for serialization.
90 **kwargs: Any additional kwarg - will be set in the opt dictionary.
91 """
92 self._resolved_dependencies: dict[str, Provide] | EmptyType = Empty
93 self._resolved_guards: list[Guard] | EmptyType = Empty
94 self._resolved_layered_parameters: dict[str, SignatureField] | EmptyType = Empty
95 self._resolved_signature_namespace: dict[str, Any] | EmptyType = Empty
96 self._resolved_type_encoders: TypeEncodersMap | EmptyType = Empty
97
98 self.dependencies = dependencies
99 self.exception_handlers = exception_handlers
100 self.guards = guards
101 self.middleware = middleware
102 self.name = name
103 self.opt = dict(opt or {})
104 self.owner: Controller | Router | None = None
105 self.signature_model: type[SignatureModel] | None = None
106 self.signature_namespace = signature_namespace or {}
107 self.paths = (
108 {normalize_path(p) for p in path}
109 if path and isinstance(path, list)
110 else {normalize_path(path or "/")} # type: ignore
111 )
112 self.opt.update(**kwargs)
113 self.type_encoders = type_encoders
114
115 def __call__(self, fn: AsyncAnyCallable) -> Self:
116 """Replace a function with itself."""
117 self.fn = Ref["MaybePartial[AsyncAnyCallable]"](fn)
118 self.signature = Signature.from_callable(fn)
119 self._validate_handler_function()
120 return self
121
122 @property
123 def handler_name(self) -> str:
124 """Get the name of the handler function.
125
126 Raises:
127 ImproperlyConfiguredException: if handler fn is not set.
128
129 Returns:
130 Name of the handler function
131 """
132 fn = getattr(self, "fn", None)
133 if not fn:
134 raise ImproperlyConfiguredException("cannot access handler name before setting the handler function")
135 return get_name(unwrap_partial(self.fn.value))
136
137 @property
138 def dependency_name_set(self) -> set[str]:
139 """Set of all dependency names provided in the handler's ownership layers."""
140 layered_dependencies = (layer.dependencies or {} for layer in self.ownership_layers)
141 return {name for layer in layered_dependencies for name in layer}
142
143 @property
144 def ownership_layers(self) -> list[T | Controller | Router]:
145 """Return the handler layers from the app down to the route handler.
146
147 ``app -> ... -> route handler``
148 """
149 layers = []
150
151 cur: Any = self
152 while cur:
153 layers.append(cur)
154 cur = cur.owner
155
156 return list(reversed(layers))
157
158 def resolve_type_encoders(self) -> TypeEncodersMap:
159 """Return a merged type_encoders mapping.
160
161 This method is memoized so the computation occurs only once.
162
163 Returns:
164 A dict of type encoders
165 """
166 if self._resolved_type_encoders is Empty:
167 self._resolved_type_encoders = {}
168
169 for layer in self.ownership_layers:
170 if type_encoders := getattr(layer, "type_encoders", None):
171 self._resolved_type_encoders.update(type_encoders)
172 return cast("TypeEncodersMap", self._resolved_type_encoders)
173
174 def resolve_layered_parameters(self) -> dict[str, SignatureField]:
175 """Return all parameters declared above the handler."""
176 if self._resolved_layered_parameters is Empty:
177 parameter_kwargs: dict[str, ParameterKwarg] = {}
178
179 for layer in self.ownership_layers:
180 parameter_kwargs.update(getattr(layer, "parameters", {}) or {})
181
182 self._resolved_layered_parameters = {
183 key: SignatureField.create(
184 name=key, field_type=parameter.value_type, default_value=parameter.default, kwarg_model=parameter
185 )
186 for key, parameter in parameter_kwargs.items()
187 }
188
189 return cast("dict[str, SignatureField]", self._resolved_layered_parameters)
190
191 def resolve_guards(self) -> list[Guard]:
192 """Return all guards in the handlers scope, starting from highest to current layer."""
193 if self._resolved_guards is Empty:
194 self._resolved_guards = []
195
196 for layer in self.ownership_layers:
197 self._resolved_guards.extend(layer.guards or [])
198
199 self._resolved_guards = cast("list[Guard]", [AsyncCallable(guard) for guard in self._resolved_guards])
200
201 return self._resolved_guards # type:ignore
202
203 def resolve_dependencies(self) -> dict[str, Provide]:
204 """Return all dependencies correlating to handler function's kwargs that exist in the handler's scope."""
205 if self._resolved_dependencies is Empty:
206 self._resolved_dependencies = {}
207
208 for layer in self.ownership_layers:
209 for key, value in (layer.dependencies or {}).items():
210 self._validate_dependency_is_unique(
211 dependencies=self._resolved_dependencies, key=key, provider=value
212 )
213 self._resolved_dependencies[key] = value
214
215 return cast("dict[str, Provide]", self._resolved_dependencies)
216
217 def resolve_middleware(self) -> list[Middleware]:
218 """Build the middleware stack for the RouteHandler and return it.
219
220 The middlewares are added from top to bottom (``app -> router -> controller -> route handler``) and then
221 reversed.
222 """
223 resolved_middleware: list[Middleware] = []
224 for layer in self.ownership_layers:
225 resolved_middleware.extend(layer.middleware or [])
226 return list(reversed(resolved_middleware))
227
228 def resolve_exception_handlers(self) -> ExceptionHandlersMap:
229 """Resolve the exception_handlers by starting from the route handler and moving up.
230
231 This method is memoized so the computation occurs only once.
232 """
233 resolved_exception_handlers: dict[int | type[Exception], ExceptionHandler] = {}
234 for layer in self.ownership_layers:
235 resolved_exception_handlers.update(layer.exception_handlers or {})
236 return resolved_exception_handlers
237
238 def resolve_opts(self) -> None:
239 """Build the route handler opt dictionary by going from top to bottom.
240
241 When merging keys from multiple layers, if the same key is defined by multiple layers, the value from the
242 layer closest to the response handler will take precedence.
243 """
244
245 opt: dict[str, Any] = {}
246 for layer in self.ownership_layers:
247 opt.update(layer.opt or {})
248
249 self.opt = opt
250
251 def resolve_signature_namespace(self) -> dict[str, Any]:
252 """Build the route handler signature namespace dictionary by going from top to bottom.
253
254 When merging keys from multiple layers, if the same key is defined by multiple layers, the value from the
255 layer closest to the response handler will take precedence.
256 """
257 if self._resolved_layered_parameters is Empty:
258 ns: dict[str, Any] = {}
259 for layer in self.ownership_layers:
260 ns.update(layer.signature_namespace)
261
262 self._resolved_signature_namespace = ns
263 return cast("dict[str, Any]", self._resolved_signature_namespace)
264
265 async def authorize_connection(self, connection: "ASGIConnection") -> None:
266 """Ensure the connection is authorized by running all the route guards in scope."""
267 for guard in self.resolve_guards():
268 await guard(connection, copy(self)) # type: ignore
269
270 @staticmethod
271 def _validate_dependency_is_unique(dependencies: dict[str, Provide], key: str, provider: Provide) -> None:
272 """Validate that a given provider has not been already defined under a different key."""
273 for dependency_key, value in dependencies.items():
274 if provider == value:
275 raise ImproperlyConfiguredException(
276 f"Provider for key {key} is already defined under the different key {dependency_key}. "
277 f"If you wish to override a provider, it must have the same key."
278 )
279
280 def _validate_handler_function(self) -> None:
281 """Validate the route handler function once set by inspecting its return annotations."""
282 if not getattr(self, "fn", None):
283 raise ImproperlyConfiguredException("Cannot call _validate_handler_function without first setting self.fn")
284
285 def __str__(self) -> str:
286 """Return a unique identifier for the route handler.
287
288 Returns:
289 A string
290 """
291 target = unwrap_partial(self.fn.value)
292 if not hasattr(target, "__qualname__"):
293 target = type(target)
294 return f"{target.__module__}.{target.__qualname__}"
295
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/starlite/handlers/base.py b/starlite/handlers/base.py
--- a/starlite/handlers/base.py
+++ b/starlite/handlers/base.py
@@ -22,7 +22,7 @@
from starlite.di import Provide
from starlite.params import ParameterKwarg
from starlite.router import Router
- from starlite.types import AnyCallable, AsyncAnyCallable, ExceptionHandler
+ from starlite.types import AsyncAnyCallable, ExceptionHandler
from starlite.types.composite_types import MaybePartial
T = TypeVar("T", bound="BaseRouteHandler")
@@ -34,10 +34,10 @@
Serves as a subclass for all route handlers
"""
- fn: Ref[MaybePartial[AnyCallable]]
signature: Signature
__slots__ = (
+ "_fn",
"_resolved_dependencies",
"_resolved_guards",
"_resolved_layered_parameters",
@@ -45,7 +45,6 @@
"_resolved_type_encoders",
"dependencies",
"exception_handlers",
- "fn",
"guards",
"middleware",
"name",
@@ -114,11 +113,25 @@
def __call__(self, fn: AsyncAnyCallable) -> Self:
"""Replace a function with itself."""
- self.fn = Ref["MaybePartial[AsyncAnyCallable]"](fn)
+ self._fn = Ref["MaybePartial[AsyncAnyCallable]"](fn)
self.signature = Signature.from_callable(fn)
self._validate_handler_function()
return self
+ @property
+ def fn(self) -> Ref[MaybePartial[AsyncAnyCallable]]:
+ """Get the handler function.
+
+ Raises:
+ ImproperlyConfiguredException: if handler fn is not set.
+
+ Returns:
+ Handler function
+ """
+ if not hasattr(self, "_fn"):
+ raise ImproperlyConfiguredException("Handler has not decorated a function")
+ return self._fn
+
@property
def handler_name(self) -> str:
"""Get the name of the handler function.
@@ -129,9 +142,6 @@
Returns:
Name of the handler function
"""
- fn = getattr(self, "fn", None)
- if not fn:
- raise ImproperlyConfiguredException("cannot access handler name before setting the handler function")
return get_name(unwrap_partial(self.fn.value))
@property
@@ -279,8 +289,6 @@
def _validate_handler_function(self) -> None:
"""Validate the route handler function once set by inspecting its return annotations."""
- if not getattr(self, "fn", None):
- raise ImproperlyConfiguredException("Cannot call _validate_handler_function without first setting self.fn")
def __str__(self) -> str:
"""Return a unique identifier for the route handler.
@@ -288,6 +296,7 @@
Returns:
A string
"""
+ target: type[AsyncAnyCallable] | AsyncAnyCallable
target = unwrap_partial(self.fn.value)
if not hasattr(target, "__qualname__"):
target = type(target)
|
{"golden_diff": "diff --git a/starlite/handlers/base.py b/starlite/handlers/base.py\n--- a/starlite/handlers/base.py\n+++ b/starlite/handlers/base.py\n@@ -22,7 +22,7 @@\n from starlite.di import Provide\n from starlite.params import ParameterKwarg\n from starlite.router import Router\n- from starlite.types import AnyCallable, AsyncAnyCallable, ExceptionHandler\n+ from starlite.types import AsyncAnyCallable, ExceptionHandler\n from starlite.types.composite_types import MaybePartial\n \n T = TypeVar(\"T\", bound=\"BaseRouteHandler\")\n@@ -34,10 +34,10 @@\n Serves as a subclass for all route handlers\n \"\"\"\n \n- fn: Ref[MaybePartial[AnyCallable]]\n signature: Signature\n \n __slots__ = (\n+ \"_fn\",\n \"_resolved_dependencies\",\n \"_resolved_guards\",\n \"_resolved_layered_parameters\",\n@@ -45,7 +45,6 @@\n \"_resolved_type_encoders\",\n \"dependencies\",\n \"exception_handlers\",\n- \"fn\",\n \"guards\",\n \"middleware\",\n \"name\",\n@@ -114,11 +113,25 @@\n \n def __call__(self, fn: AsyncAnyCallable) -> Self:\n \"\"\"Replace a function with itself.\"\"\"\n- self.fn = Ref[\"MaybePartial[AsyncAnyCallable]\"](fn)\n+ self._fn = Ref[\"MaybePartial[AsyncAnyCallable]\"](fn)\n self.signature = Signature.from_callable(fn)\n self._validate_handler_function()\n return self\n \n+ @property\n+ def fn(self) -> Ref[MaybePartial[AsyncAnyCallable]]:\n+ \"\"\"Get the handler function.\n+\n+ Raises:\n+ ImproperlyConfiguredException: if handler fn is not set.\n+\n+ Returns:\n+ Handler function\n+ \"\"\"\n+ if not hasattr(self, \"_fn\"):\n+ raise ImproperlyConfiguredException(\"Handler has not decorated a function\")\n+ return self._fn\n+\n @property\n def handler_name(self) -> str:\n \"\"\"Get the name of the handler function.\n@@ -129,9 +142,6 @@\n Returns:\n Name of the handler function\n \"\"\"\n- fn = getattr(self, \"fn\", None)\n- if not fn:\n- raise ImproperlyConfiguredException(\"cannot access handler name before setting the handler function\")\n return get_name(unwrap_partial(self.fn.value))\n \n @property\n@@ -279,8 +289,6 @@\n \n def _validate_handler_function(self) -> None:\n \"\"\"Validate the route handler function once set by inspecting its return annotations.\"\"\"\n- if not getattr(self, \"fn\", None):\n- raise ImproperlyConfiguredException(\"Cannot call _validate_handler_function without first setting self.fn\")\n \n def __str__(self) -> str:\n \"\"\"Return a unique identifier for the route handler.\n@@ -288,6 +296,7 @@\n Returns:\n A string\n \"\"\"\n+ target: type[AsyncAnyCallable] | AsyncAnyCallable\n target = unwrap_partial(self.fn.value)\n if not hasattr(target, \"__qualname__\"):\n target = type(target)\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). 
I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom copy import copy\nfrom inspect import Signature\nfrom typing import TYPE_CHECKING, Any, Generic, Mapping, Sequence, TypeVar, cast\n\nfrom starlite._signature.field import SignatureField\nfrom starlite.exceptions import ImproperlyConfiguredException\nfrom starlite.types import Dependencies, Empty, EmptyType, ExceptionHandlersMap, Guard, Middleware, TypeEncodersMap\nfrom starlite.utils import AsyncCallable, Ref, get_name, normalize_path\nfrom starlite.utils.helpers import unwrap_partial\n\n__all__ = (\"BaseRouteHandler\",)\n\n\nif TYPE_CHECKING:\n from typing_extensions import Self\n\n from starlite._signature.models import SignatureModel\n from starlite.connection import ASGIConnection\n from starlite.controller import Controller\n from starlite.di import Provide\n from starlite.params import ParameterKwarg\n from starlite.router import Router\n from starlite.types import AnyCallable, AsyncAnyCallable, ExceptionHandler\n from starlite.types.composite_types import MaybePartial\n\nT = TypeVar(\"T\", bound=\"BaseRouteHandler\")\n\n\nclass BaseRouteHandler(Generic[T]):\n \"\"\"Base route handler.\n\n Serves as a subclass for all route handlers\n \"\"\"\n\n fn: Ref[MaybePartial[AnyCallable]]\n signature: Signature\n\n __slots__ = (\n \"_resolved_dependencies\",\n \"_resolved_guards\",\n \"_resolved_layered_parameters\",\n \"_resolved_signature_namespace\",\n \"_resolved_type_encoders\",\n \"dependencies\",\n \"exception_handlers\",\n \"fn\",\n \"guards\",\n \"middleware\",\n \"name\",\n \"opt\",\n \"owner\",\n \"paths\",\n \"signature\",\n \"signature_model\",\n \"signature_namespace\",\n \"type_encoders\",\n )\n\n def __init__(\n self,\n path: str | Sequence[str] | None = None,\n *,\n dependencies: Dependencies | None = None,\n exception_handlers: ExceptionHandlersMap | None = None,\n guards: Sequence[Guard] | None = None,\n middleware: Sequence[Middleware] | None = None,\n name: str | None = None,\n opt: Mapping[str, Any] | None = None,\n signature_namespace: Mapping[str, Any] | None = None,\n type_encoders: TypeEncodersMap | None = None,\n **kwargs: Any,\n ) -> None:\n \"\"\"Initialize ``HTTPRouteHandler``.\n\n Args:\n path: A path fragment for the route handler function or a sequence of path fragments. 
If not given defaults\n to ``/``\n dependencies: A string keyed mapping of dependency :class:`Provider <.di.Provide>` instances.\n exception_handlers: A mapping of status codes and/or exception types to handler functions.\n guards: A sequence of :class:`Guard <.types.Guard>` callables.\n middleware: A sequence of :class:`Middleware <.types.Middleware>`.\n name: A string identifying the route handler.\n opt: A string keyed mapping of arbitrary values that can be accessed in :class:`Guards <.types.Guard>` or\n wherever you have access to :class:`Request <.connection.Request>` or\n :class:`ASGI Scope <.types.Scope>`.\n signature_namespace: A mapping of names to types for use in forward reference resolution during signature modelling.\n type_encoders: A mapping of types to callables that transform them into types supported for serialization.\n **kwargs: Any additional kwarg - will be set in the opt dictionary.\n \"\"\"\n self._resolved_dependencies: dict[str, Provide] | EmptyType = Empty\n self._resolved_guards: list[Guard] | EmptyType = Empty\n self._resolved_layered_parameters: dict[str, SignatureField] | EmptyType = Empty\n self._resolved_signature_namespace: dict[str, Any] | EmptyType = Empty\n self._resolved_type_encoders: TypeEncodersMap | EmptyType = Empty\n\n self.dependencies = dependencies\n self.exception_handlers = exception_handlers\n self.guards = guards\n self.middleware = middleware\n self.name = name\n self.opt = dict(opt or {})\n self.owner: Controller | Router | None = None\n self.signature_model: type[SignatureModel] | None = None\n self.signature_namespace = signature_namespace or {}\n self.paths = (\n {normalize_path(p) for p in path}\n if path and isinstance(path, list)\n else {normalize_path(path or \"/\")} # type: ignore\n )\n self.opt.update(**kwargs)\n self.type_encoders = type_encoders\n\n def __call__(self, fn: AsyncAnyCallable) -> Self:\n \"\"\"Replace a function with itself.\"\"\"\n self.fn = Ref[\"MaybePartial[AsyncAnyCallable]\"](fn)\n self.signature = Signature.from_callable(fn)\n self._validate_handler_function()\n return self\n\n @property\n def handler_name(self) -> str:\n \"\"\"Get the name of the handler function.\n\n Raises:\n ImproperlyConfiguredException: if handler fn is not set.\n\n Returns:\n Name of the handler function\n \"\"\"\n fn = getattr(self, \"fn\", None)\n if not fn:\n raise ImproperlyConfiguredException(\"cannot access handler name before setting the handler function\")\n return get_name(unwrap_partial(self.fn.value))\n\n @property\n def dependency_name_set(self) -> set[str]:\n \"\"\"Set of all dependency names provided in the handler's ownership layers.\"\"\"\n layered_dependencies = (layer.dependencies or {} for layer in self.ownership_layers)\n return {name for layer in layered_dependencies for name in layer}\n\n @property\n def ownership_layers(self) -> list[T | Controller | Router]:\n \"\"\"Return the handler layers from the app down to the route handler.\n\n ``app -> ... 
-> route handler``\n \"\"\"\n layers = []\n\n cur: Any = self\n while cur:\n layers.append(cur)\n cur = cur.owner\n\n return list(reversed(layers))\n\n def resolve_type_encoders(self) -> TypeEncodersMap:\n \"\"\"Return a merged type_encoders mapping.\n\n This method is memoized so the computation occurs only once.\n\n Returns:\n A dict of type encoders\n \"\"\"\n if self._resolved_type_encoders is Empty:\n self._resolved_type_encoders = {}\n\n for layer in self.ownership_layers:\n if type_encoders := getattr(layer, \"type_encoders\", None):\n self._resolved_type_encoders.update(type_encoders)\n return cast(\"TypeEncodersMap\", self._resolved_type_encoders)\n\n def resolve_layered_parameters(self) -> dict[str, SignatureField]:\n \"\"\"Return all parameters declared above the handler.\"\"\"\n if self._resolved_layered_parameters is Empty:\n parameter_kwargs: dict[str, ParameterKwarg] = {}\n\n for layer in self.ownership_layers:\n parameter_kwargs.update(getattr(layer, \"parameters\", {}) or {})\n\n self._resolved_layered_parameters = {\n key: SignatureField.create(\n name=key, field_type=parameter.value_type, default_value=parameter.default, kwarg_model=parameter\n )\n for key, parameter in parameter_kwargs.items()\n }\n\n return cast(\"dict[str, SignatureField]\", self._resolved_layered_parameters)\n\n def resolve_guards(self) -> list[Guard]:\n \"\"\"Return all guards in the handlers scope, starting from highest to current layer.\"\"\"\n if self._resolved_guards is Empty:\n self._resolved_guards = []\n\n for layer in self.ownership_layers:\n self._resolved_guards.extend(layer.guards or [])\n\n self._resolved_guards = cast(\"list[Guard]\", [AsyncCallable(guard) for guard in self._resolved_guards])\n\n return self._resolved_guards # type:ignore\n\n def resolve_dependencies(self) -> dict[str, Provide]:\n \"\"\"Return all dependencies correlating to handler function's kwargs that exist in the handler's scope.\"\"\"\n if self._resolved_dependencies is Empty:\n self._resolved_dependencies = {}\n\n for layer in self.ownership_layers:\n for key, value in (layer.dependencies or {}).items():\n self._validate_dependency_is_unique(\n dependencies=self._resolved_dependencies, key=key, provider=value\n )\n self._resolved_dependencies[key] = value\n\n return cast(\"dict[str, Provide]\", self._resolved_dependencies)\n\n def resolve_middleware(self) -> list[Middleware]:\n \"\"\"Build the middleware stack for the RouteHandler and return it.\n\n The middlewares are added from top to bottom (``app -> router -> controller -> route handler``) and then\n reversed.\n \"\"\"\n resolved_middleware: list[Middleware] = []\n for layer in self.ownership_layers:\n resolved_middleware.extend(layer.middleware or [])\n return list(reversed(resolved_middleware))\n\n def resolve_exception_handlers(self) -> ExceptionHandlersMap:\n \"\"\"Resolve the exception_handlers by starting from the route handler and moving up.\n\n This method is memoized so the computation occurs only once.\n \"\"\"\n resolved_exception_handlers: dict[int | type[Exception], ExceptionHandler] = {}\n for layer in self.ownership_layers:\n resolved_exception_handlers.update(layer.exception_handlers or {})\n return resolved_exception_handlers\n\n def resolve_opts(self) -> None:\n \"\"\"Build the route handler opt dictionary by going from top to bottom.\n\n When merging keys from multiple layers, if the same key is defined by multiple layers, the value from the\n layer closest to the response handler will take precedence.\n \"\"\"\n\n opt: dict[str, Any] = 
{}\n for layer in self.ownership_layers:\n opt.update(layer.opt or {})\n\n self.opt = opt\n\n def resolve_signature_namespace(self) -> dict[str, Any]:\n \"\"\"Build the route handler signature namespace dictionary by going from top to bottom.\n\n When merging keys from multiple layers, if the same key is defined by multiple layers, the value from the\n layer closest to the response handler will take precedence.\n \"\"\"\n if self._resolved_layered_parameters is Empty:\n ns: dict[str, Any] = {}\n for layer in self.ownership_layers:\n ns.update(layer.signature_namespace)\n\n self._resolved_signature_namespace = ns\n return cast(\"dict[str, Any]\", self._resolved_signature_namespace)\n\n async def authorize_connection(self, connection: \"ASGIConnection\") -> None:\n \"\"\"Ensure the connection is authorized by running all the route guards in scope.\"\"\"\n for guard in self.resolve_guards():\n await guard(connection, copy(self)) # type: ignore\n\n @staticmethod\n def _validate_dependency_is_unique(dependencies: dict[str, Provide], key: str, provider: Provide) -> None:\n \"\"\"Validate that a given provider has not been already defined under a different key.\"\"\"\n for dependency_key, value in dependencies.items():\n if provider == value:\n raise ImproperlyConfiguredException(\n f\"Provider for key {key} is already defined under the different key {dependency_key}. \"\n f\"If you wish to override a provider, it must have the same key.\"\n )\n\n def _validate_handler_function(self) -> None:\n \"\"\"Validate the route handler function once set by inspecting its return annotations.\"\"\"\n if not getattr(self, \"fn\", None):\n raise ImproperlyConfiguredException(\"Cannot call _validate_handler_function without first setting self.fn\")\n\n def __str__(self) -> str:\n \"\"\"Return a unique identifier for the route handler.\n\n Returns:\n A string\n \"\"\"\n target = unwrap_partial(self.fn.value)\n if not hasattr(target, \"__qualname__\"):\n target = type(target)\n return f\"{target.__module__}.{target.__qualname__}\"\n", "path": "starlite/handlers/base.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom copy import copy\nfrom inspect import Signature\nfrom typing import TYPE_CHECKING, Any, Generic, Mapping, Sequence, TypeVar, cast\n\nfrom starlite._signature.field import SignatureField\nfrom starlite.exceptions import ImproperlyConfiguredException\nfrom starlite.types import Dependencies, Empty, EmptyType, ExceptionHandlersMap, Guard, Middleware, TypeEncodersMap\nfrom starlite.utils import AsyncCallable, Ref, get_name, normalize_path\nfrom starlite.utils.helpers import unwrap_partial\n\n__all__ = (\"BaseRouteHandler\",)\n\n\nif TYPE_CHECKING:\n from typing_extensions import Self\n\n from starlite._signature.models import SignatureModel\n from starlite.connection import ASGIConnection\n from starlite.controller import Controller\n from starlite.di import Provide\n from starlite.params import ParameterKwarg\n from starlite.router import Router\n from starlite.types import AsyncAnyCallable, ExceptionHandler\n from starlite.types.composite_types import MaybePartial\n\nT = TypeVar(\"T\", bound=\"BaseRouteHandler\")\n\n\nclass BaseRouteHandler(Generic[T]):\n \"\"\"Base route handler.\n\n Serves as a subclass for all route handlers\n \"\"\"\n\n signature: Signature\n\n __slots__ = (\n \"_fn\",\n \"_resolved_dependencies\",\n \"_resolved_guards\",\n \"_resolved_layered_parameters\",\n \"_resolved_signature_namespace\",\n \"_resolved_type_encoders\",\n \"dependencies\",\n 
\"exception_handlers\",\n \"guards\",\n \"middleware\",\n \"name\",\n \"opt\",\n \"owner\",\n \"paths\",\n \"signature\",\n \"signature_model\",\n \"signature_namespace\",\n \"type_encoders\",\n )\n\n def __init__(\n self,\n path: str | Sequence[str] | None = None,\n *,\n dependencies: Dependencies | None = None,\n exception_handlers: ExceptionHandlersMap | None = None,\n guards: Sequence[Guard] | None = None,\n middleware: Sequence[Middleware] | None = None,\n name: str | None = None,\n opt: Mapping[str, Any] | None = None,\n signature_namespace: Mapping[str, Any] | None = None,\n type_encoders: TypeEncodersMap | None = None,\n **kwargs: Any,\n ) -> None:\n \"\"\"Initialize ``HTTPRouteHandler``.\n\n Args:\n path: A path fragment for the route handler function or a sequence of path fragments. If not given defaults\n to ``/``\n dependencies: A string keyed mapping of dependency :class:`Provider <.di.Provide>` instances.\n exception_handlers: A mapping of status codes and/or exception types to handler functions.\n guards: A sequence of :class:`Guard <.types.Guard>` callables.\n middleware: A sequence of :class:`Middleware <.types.Middleware>`.\n name: A string identifying the route handler.\n opt: A string keyed mapping of arbitrary values that can be accessed in :class:`Guards <.types.Guard>` or\n wherever you have access to :class:`Request <.connection.Request>` or\n :class:`ASGI Scope <.types.Scope>`.\n signature_namespace: A mapping of names to types for use in forward reference resolution during signature modelling.\n type_encoders: A mapping of types to callables that transform them into types supported for serialization.\n **kwargs: Any additional kwarg - will be set in the opt dictionary.\n \"\"\"\n self._resolved_dependencies: dict[str, Provide] | EmptyType = Empty\n self._resolved_guards: list[Guard] | EmptyType = Empty\n self._resolved_layered_parameters: dict[str, SignatureField] | EmptyType = Empty\n self._resolved_signature_namespace: dict[str, Any] | EmptyType = Empty\n self._resolved_type_encoders: TypeEncodersMap | EmptyType = Empty\n\n self.dependencies = dependencies\n self.exception_handlers = exception_handlers\n self.guards = guards\n self.middleware = middleware\n self.name = name\n self.opt = dict(opt or {})\n self.owner: Controller | Router | None = None\n self.signature_model: type[SignatureModel] | None = None\n self.signature_namespace = signature_namespace or {}\n self.paths = (\n {normalize_path(p) for p in path}\n if path and isinstance(path, list)\n else {normalize_path(path or \"/\")} # type: ignore\n )\n self.opt.update(**kwargs)\n self.type_encoders = type_encoders\n\n def __call__(self, fn: AsyncAnyCallable) -> Self:\n \"\"\"Replace a function with itself.\"\"\"\n self._fn = Ref[\"MaybePartial[AsyncAnyCallable]\"](fn)\n self.signature = Signature.from_callable(fn)\n self._validate_handler_function()\n return self\n\n @property\n def fn(self) -> Ref[MaybePartial[AsyncAnyCallable]]:\n \"\"\"Get the handler function.\n\n Raises:\n ImproperlyConfiguredException: if handler fn is not set.\n\n Returns:\n Handler function\n \"\"\"\n if not hasattr(self, \"_fn\"):\n raise ImproperlyConfiguredException(\"Handler has not decorated a function\")\n return self._fn\n\n @property\n def handler_name(self) -> str:\n \"\"\"Get the name of the handler function.\n\n Raises:\n ImproperlyConfiguredException: if handler fn is not set.\n\n Returns:\n Name of the handler function\n \"\"\"\n return get_name(unwrap_partial(self.fn.value))\n\n @property\n def 
dependency_name_set(self) -> set[str]:\n \"\"\"Set of all dependency names provided in the handler's ownership layers.\"\"\"\n layered_dependencies = (layer.dependencies or {} for layer in self.ownership_layers)\n return {name for layer in layered_dependencies for name in layer}\n\n @property\n def ownership_layers(self) -> list[T | Controller | Router]:\n \"\"\"Return the handler layers from the app down to the route handler.\n\n ``app -> ... -> route handler``\n \"\"\"\n layers = []\n\n cur: Any = self\n while cur:\n layers.append(cur)\n cur = cur.owner\n\n return list(reversed(layers))\n\n def resolve_type_encoders(self) -> TypeEncodersMap:\n \"\"\"Return a merged type_encoders mapping.\n\n This method is memoized so the computation occurs only once.\n\n Returns:\n A dict of type encoders\n \"\"\"\n if self._resolved_type_encoders is Empty:\n self._resolved_type_encoders = {}\n\n for layer in self.ownership_layers:\n if type_encoders := getattr(layer, \"type_encoders\", None):\n self._resolved_type_encoders.update(type_encoders)\n return cast(\"TypeEncodersMap\", self._resolved_type_encoders)\n\n def resolve_layered_parameters(self) -> dict[str, SignatureField]:\n \"\"\"Return all parameters declared above the handler.\"\"\"\n if self._resolved_layered_parameters is Empty:\n parameter_kwargs: dict[str, ParameterKwarg] = {}\n\n for layer in self.ownership_layers:\n parameter_kwargs.update(getattr(layer, \"parameters\", {}) or {})\n\n self._resolved_layered_parameters = {\n key: SignatureField.create(\n name=key, field_type=parameter.value_type, default_value=parameter.default, kwarg_model=parameter\n )\n for key, parameter in parameter_kwargs.items()\n }\n\n return cast(\"dict[str, SignatureField]\", self._resolved_layered_parameters)\n\n def resolve_guards(self) -> list[Guard]:\n \"\"\"Return all guards in the handlers scope, starting from highest to current layer.\"\"\"\n if self._resolved_guards is Empty:\n self._resolved_guards = []\n\n for layer in self.ownership_layers:\n self._resolved_guards.extend(layer.guards or [])\n\n self._resolved_guards = cast(\"list[Guard]\", [AsyncCallable(guard) for guard in self._resolved_guards])\n\n return self._resolved_guards # type:ignore\n\n def resolve_dependencies(self) -> dict[str, Provide]:\n \"\"\"Return all dependencies correlating to handler function's kwargs that exist in the handler's scope.\"\"\"\n if self._resolved_dependencies is Empty:\n self._resolved_dependencies = {}\n\n for layer in self.ownership_layers:\n for key, value in (layer.dependencies or {}).items():\n self._validate_dependency_is_unique(\n dependencies=self._resolved_dependencies, key=key, provider=value\n )\n self._resolved_dependencies[key] = value\n\n return cast(\"dict[str, Provide]\", self._resolved_dependencies)\n\n def resolve_middleware(self) -> list[Middleware]:\n \"\"\"Build the middleware stack for the RouteHandler and return it.\n\n The middlewares are added from top to bottom (``app -> router -> controller -> route handler``) and then\n reversed.\n \"\"\"\n resolved_middleware: list[Middleware] = []\n for layer in self.ownership_layers:\n resolved_middleware.extend(layer.middleware or [])\n return list(reversed(resolved_middleware))\n\n def resolve_exception_handlers(self) -> ExceptionHandlersMap:\n \"\"\"Resolve the exception_handlers by starting from the route handler and moving up.\n\n This method is memoized so the computation occurs only once.\n \"\"\"\n resolved_exception_handlers: dict[int | type[Exception], ExceptionHandler] = {}\n for layer in 
self.ownership_layers:\n resolved_exception_handlers.update(layer.exception_handlers or {})\n return resolved_exception_handlers\n\n def resolve_opts(self) -> None:\n \"\"\"Build the route handler opt dictionary by going from top to bottom.\n\n When merging keys from multiple layers, if the same key is defined by multiple layers, the value from the\n layer closest to the response handler will take precedence.\n \"\"\"\n\n opt: dict[str, Any] = {}\n for layer in self.ownership_layers:\n opt.update(layer.opt or {})\n\n self.opt = opt\n\n def resolve_signature_namespace(self) -> dict[str, Any]:\n \"\"\"Build the route handler signature namespace dictionary by going from top to bottom.\n\n When merging keys from multiple layers, if the same key is defined by multiple layers, the value from the\n layer closest to the response handler will take precedence.\n \"\"\"\n if self._resolved_layered_parameters is Empty:\n ns: dict[str, Any] = {}\n for layer in self.ownership_layers:\n ns.update(layer.signature_namespace)\n\n self._resolved_signature_namespace = ns\n return cast(\"dict[str, Any]\", self._resolved_signature_namespace)\n\n async def authorize_connection(self, connection: \"ASGIConnection\") -> None:\n \"\"\"Ensure the connection is authorized by running all the route guards in scope.\"\"\"\n for guard in self.resolve_guards():\n await guard(connection, copy(self)) # type: ignore\n\n @staticmethod\n def _validate_dependency_is_unique(dependencies: dict[str, Provide], key: str, provider: Provide) -> None:\n \"\"\"Validate that a given provider has not been already defined under a different key.\"\"\"\n for dependency_key, value in dependencies.items():\n if provider == value:\n raise ImproperlyConfiguredException(\n f\"Provider for key {key} is already defined under the different key {dependency_key}. \"\n f\"If you wish to override a provider, it must have the same key.\"\n )\n\n def _validate_handler_function(self) -> None:\n \"\"\"Validate the route handler function once set by inspecting its return annotations.\"\"\"\n\n def __str__(self) -> str:\n \"\"\"Return a unique identifier for the route handler.\n\n Returns:\n A string\n \"\"\"\n target: type[AsyncAnyCallable] | AsyncAnyCallable\n target = unwrap_partial(self.fn.value)\n if not hasattr(target, \"__qualname__\"):\n target = type(target)\n return f\"{target.__module__}.{target.__qualname__}\"\n", "path": "starlite/handlers/base.py"}]}
| 3,708 | 712 |
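The golden diff in the record above replaces Starlite's eagerly checked `fn` attribute with a private `_fn` slot exposed through a guarded `fn` property, so "handler has not decorated a function" is reported once at access time and the per-caller `getattr(self, "fn", None)` checks can be dropped. A minimal sketch of that pattern outside Starlite is shown below; the class and exception names are stand-ins, and the `Ref` wrapper and type annotations from the real diff are omitted.

```python
class ImproperlyConfiguredError(Exception):
    """Stand-in for Starlite's ImproperlyConfiguredException."""


class RouteHandler:
    """Guarded-property pattern from the diff above, stripped to its core."""

    __slots__ = ("_fn",)

    def __call__(self, fn):
        # Decorating a function stores it in the private slot.
        self._fn = fn
        return self

    @property
    def fn(self):
        # Accessing the handler before decoration fails here, once,
        # instead of every caller re-checking getattr(self, "fn", None).
        if not hasattr(self, "_fn"):
            raise ImproperlyConfiguredError("Handler has not decorated a function")
        return self._fn


handler = RouteHandler()
try:
    handler.fn  # nothing decorated yet
except ImproperlyConfiguredError as exc:
    print(exc)  # Handler has not decorated a function

def endpoint() -> str:
    return "ok"

handler(endpoint)
print(handler.fn())  # ok
```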
gh_patches_debug_26094
|
rasdani/github-patches
|
git_diff
|
mlflow__mlflow-922
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
REST API error with sklearn classifier
### System information
- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: I wrote a simple random forest classifier using sklearn and the iris dataset to test the mlflow workflow from scratch.
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: macOS 10.14.1
- **MLflow installed from (source or binary)**: source (pip install mlflow)
- **MLflow version (run ``mlflow --version``)**: 0.8.2
- **Python version**: 3.7
- **npm version (if running the dev UI):
- **Exact command to reproduce**:curl -X POST -H "Content-Type:application/json; format=pandas-split" --data '{"columns":["sepal_length_cm", "sepal_width_cm", "petal_length_cm", "petal_width_cm"],"data":[[5.3, 1.7, 3.5, 0.5]]}' http://127.0.0.1:1234/invocations
### Describe the problem
If sklearn classifier.predict() method returns a categorical variable, then there is a 500 internal server error returned after the above http request.
The error is from the following function in mlflow/utils/__init.py__:
def ndarray2list(ndarray):
"""
Convert n-dimensional numpy array into nested lists and convert the elements types to native
python so that the list is json-able using standard json library.
:param ndarray: numpy array
:return: list representation of the numpy array with element types convereted to native python
"""
if len(ndarray.shape) <= 1:
return [x.item() for x in ndarray]
return [ndarray2list(ndarray[i, :]) for i in range(0, ndarray.shape[0])]
x.item() fails if the array returned from the sklearn classifier.predict() method contains strings, as in the case of the iris dataset (labels are 'setosa', etc). I corrected the error on my local install by changing the function as below:
def ndarray2list(ndarray):
"""
Convert n-dimensional numpy array into nested lists and convert the elements types to native
python so that the list is json-able using standard json library.
:param ndarray: numpy array
:return: list representation of the numpy array with element types convereted to native python
"""
if len(ndarray.shape) <= 1:
try:
return [x.item() for x in ndarray]
except:
return [x for x in ndarray]
return [ndarray2list(ndarray[i, :]) for i in range(0, ndarray.shape[0])]
### Source code / logs
server side error traceback:
[2019-02-19 10:57:20,668] ERROR in app: Exception on /invocations [POST]
Traceback (most recent call last):
File "/anaconda3/lib/python3.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/anaconda3/lib/python3.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/anaconda3/lib/python3.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/anaconda3/lib/python3.7/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/anaconda3/lib/python3.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/anaconda3/lib/python3.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/anaconda3/lib/python3.7/site-packages/mlflow/server/handlers.py", line 68, in wrapper
return func(*args, **kwargs)
File "/anaconda3/lib/python3.7/site-packages/mlflow/pyfunc/scoring_server.py", line 185, in transformation
predictions = get_jsonable_obj(raw_predictions, pandas_orient="records")
File "/anaconda3/lib/python3.7/site-packages/mlflow/utils/__init__.py", line 34, in get_jsonable_obj
return ndarray2list(data)
File "/anaconda3/lib/python3.7/site-packages/mlflow/utils/__init__.py", line 20, in ndarray2list
return [x.item() for x in ndarray]
File "/anaconda3/lib/python3.7/site-packages/mlflow/utils/__init__.py", line 20, in <listcomp>
return [x.item() for x in ndarray]
AttributeError: 'str' object has no attribute 'item'
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlflow/utils/__init__.py`
Content:
```
1 from sys import version_info
2
3 import numpy as np
4 import pandas as pd
5
6
7 PYTHON_VERSION = "{major}.{minor}.{micro}".format(major=version_info.major,
8 minor=version_info.minor,
9 micro=version_info.micro)
10
11
12 def ndarray2list(ndarray):
13 """
14 Convert n-dimensional numpy array into nested lists and convert the elements types to native
15 python so that the list is json-able using standard json library.
16 :param ndarray: numpy array
17 :return: list representation of the numpy array with element types convereted to native python
18 """
19 if len(ndarray.shape) <= 1:
20 return [x.item() for x in ndarray]
21 return [ndarray2list(ndarray[i, :]) for i in range(0, ndarray.shape[0])]
22
23
24 def get_jsonable_obj(data, pandas_orient="records"):
25 """Attempt to make the data json-able via standard library.
26 Look for some commonly used types that are not jsonable and convert them into json-able ones.
27 Unknown data types are returned as is.
28
29 :param data: data to be converted, works with pandas and numpy, rest will be returned as is.
30 :param pandas_orient: If `data` is a Pandas DataFrame, it will be converted to a JSON
31 dictionary using this Pandas serialization orientation.
32 """
33 if isinstance(data, np.ndarray):
34 return ndarray2list(data)
35 if isinstance(data, pd.DataFrame):
36 return data.to_dict(orient=pandas_orient)
37 if isinstance(data, pd.Series):
38 return pd.DataFrame(data).to_dict(orient=pandas_orient)
39 else: # by default just return whatever this is and hope for the best
40 return data
41
42
43 def get_major_minor_py_version(py_version):
44 return ".".join(py_version.split(".")[:2])
45
46
47 def get_unique_resource_id(max_length=None):
48 """
49 Obtains a unique id that can be included in a resource name. This unique id is a valid
50 DNS subname.
51
52 :param max_length: The maximum length of the identifier
53 :return: A unique identifier that can be appended to a user-readable resource name to avoid
54 naming collisions.
55 """
56 import uuid
57 import base64
58 if max_length is not None and max_length <= 0:
59 raise ValueError(
60 "The specified maximum length for the unique resource id must be positive!")
61
62 uuid_bytes = uuid.uuid4().bytes
63 # Use base64 encoding to shorten the UUID length. Note that the replacement of the
64 # unsupported '+' symbol maintains uniqueness because the UUID byte string is of a fixed,
65 # 16-byte length
66 uuid_b64 = base64.b64encode(uuid_bytes)
67 if version_info >= (3, 0):
68 # In Python3, `uuid_b64` is a `bytes` object. It needs to be
69 # converted to a string
70 uuid_b64 = uuid_b64.decode("ascii")
71 unique_id = uuid_b64.rstrip('=\n').replace("/", "-").replace("+", "AB").lower()
72 if max_length is not None:
73 unique_id = unique_id[:int(max_length)]
74 return unique_id
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mlflow/utils/__init__.py b/mlflow/utils/__init__.py
--- a/mlflow/utils/__init__.py
+++ b/mlflow/utils/__init__.py
@@ -9,18 +9,6 @@
micro=version_info.micro)
-def ndarray2list(ndarray):
- """
- Convert n-dimensional numpy array into nested lists and convert the elements types to native
- python so that the list is json-able using standard json library.
- :param ndarray: numpy array
- :return: list representation of the numpy array with element types convereted to native python
- """
- if len(ndarray.shape) <= 1:
- return [x.item() for x in ndarray]
- return [ndarray2list(ndarray[i, :]) for i in range(0, ndarray.shape[0])]
-
-
def get_jsonable_obj(data, pandas_orient="records"):
"""Attempt to make the data json-able via standard library.
Look for some commonly used types that are not jsonable and convert them into json-able ones.
@@ -31,7 +19,7 @@
dictionary using this Pandas serialization orientation.
"""
if isinstance(data, np.ndarray):
- return ndarray2list(data)
+ return data.tolist()
if isinstance(data, pd.DataFrame):
return data.to_dict(orient=pandas_orient)
if isinstance(data, pd.Series):
|
{"golden_diff": "diff --git a/mlflow/utils/__init__.py b/mlflow/utils/__init__.py\n--- a/mlflow/utils/__init__.py\n+++ b/mlflow/utils/__init__.py\n@@ -9,18 +9,6 @@\n micro=version_info.micro)\n \n \n-def ndarray2list(ndarray):\n- \"\"\"\n- Convert n-dimensional numpy array into nested lists and convert the elements types to native\n- python so that the list is json-able using standard json library.\n- :param ndarray: numpy array\n- :return: list representation of the numpy array with element types convereted to native python\n- \"\"\"\n- if len(ndarray.shape) <= 1:\n- return [x.item() for x in ndarray]\n- return [ndarray2list(ndarray[i, :]) for i in range(0, ndarray.shape[0])]\n-\n-\n def get_jsonable_obj(data, pandas_orient=\"records\"):\n \"\"\"Attempt to make the data json-able via standard library.\n Look for some commonly used types that are not jsonable and convert them into json-able ones.\n@@ -31,7 +19,7 @@\n dictionary using this Pandas serialization orientation.\n \"\"\"\n if isinstance(data, np.ndarray):\n- return ndarray2list(data)\n+ return data.tolist()\n if isinstance(data, pd.DataFrame):\n return data.to_dict(orient=pandas_orient)\n if isinstance(data, pd.Series):\n", "issue": "REST API error with sklearn classifier\n### System information\r\n- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: I wrote a simple random forest classifier using sklearn and the iris dataset to test the mlflow workflow from scratch.\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: macOS 10.14.1\r\n- **MLflow installed from (source or binary)**: source (pip install mlflow)\r\n- **MLflow version (run ``mlflow --version``)**: 0.8.2\r\n- **Python version**: 3.7\r\n- **npm version (if running the dev UI):\r\n- **Exact command to reproduce**:curl -X POST -H \"Content-Type:application/json; format=pandas-split\" --data '{\"columns\":[\"sepal_length_cm\", \"sepal_width_cm\", \"petal_length_cm\", \"petal_width_cm\"],\"data\":[[5.3, 1.7, 3.5, 0.5]]}' http://127.0.0.1:1234/invocations\r\n\r\n### Describe the problem\r\nIf sklearn classifier.predict() method returns a categorical variable, then there is a 500 internal server error returned after the above http request.\r\n\r\nThe error is from the following function in mlflow/utils/__init.py__:\r\n\r\ndef ndarray2list(ndarray):\r\n \"\"\"\r\n Convert n-dimensional numpy array into nested lists and convert the elements types to native\r\n python so that the list is json-able using standard json library.\r\n :param ndarray: numpy array\r\n :return: list representation of the numpy array with element types convereted to native python\r\n \"\"\"\r\n if len(ndarray.shape) <= 1:\r\n return [x.item() for x in ndarray]\r\n return [ndarray2list(ndarray[i, :]) for i in range(0, ndarray.shape[0])]\r\n\r\nx.item() fails if the array returned from the sklearn classifier.predict() method contains strings, as in the case of the iris dataset (labels are 'setosa', etc). 
I corrected the error on my local install by changing the function as below:\r\n\r\ndef ndarray2list(ndarray):\r\n \"\"\"\r\n Convert n-dimensional numpy array into nested lists and convert the elements types to native\r\n python so that the list is json-able using standard json library.\r\n :param ndarray: numpy array\r\n :return: list representation of the numpy array with element types convereted to native python\r\n \"\"\"\r\n if len(ndarray.shape) <= 1:\r\n try:\r\n return [x.item() for x in ndarray]\r\n except:\r\n return [x for x in ndarray]\r\n return [ndarray2list(ndarray[i, :]) for i in range(0, ndarray.shape[0])]\r\n\r\n### Source code / logs\r\nserver side error traceback:\r\n[2019-02-19 10:57:20,668] ERROR in app: Exception on /invocations [POST]\r\nTraceback (most recent call last):\r\n File \"/anaconda3/lib/python3.7/site-packages/flask/app.py\", line 2292, in wsgi_app\r\n response = self.full_dispatch_request()\r\n File \"/anaconda3/lib/python3.7/site-packages/flask/app.py\", line 1815, in full_dispatch_request\r\n rv = self.handle_user_exception(e)\r\n File \"/anaconda3/lib/python3.7/site-packages/flask/app.py\", line 1718, in handle_user_exception\r\n reraise(exc_type, exc_value, tb)\r\n File \"/anaconda3/lib/python3.7/site-packages/flask/_compat.py\", line 35, in reraise\r\n raise value\r\n File \"/anaconda3/lib/python3.7/site-packages/flask/app.py\", line 1813, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"/anaconda3/lib/python3.7/site-packages/flask/app.py\", line 1799, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"/anaconda3/lib/python3.7/site-packages/mlflow/server/handlers.py\", line 68, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/anaconda3/lib/python3.7/site-packages/mlflow/pyfunc/scoring_server.py\", line 185, in transformation\r\n predictions = get_jsonable_obj(raw_predictions, pandas_orient=\"records\")\r\n File \"/anaconda3/lib/python3.7/site-packages/mlflow/utils/__init__.py\", line 34, in get_jsonable_obj\r\n return ndarray2list(data)\r\n File \"/anaconda3/lib/python3.7/site-packages/mlflow/utils/__init__.py\", line 20, in ndarray2list\r\n return [x.item() for x in ndarray]\r\n File \"/anaconda3/lib/python3.7/site-packages/mlflow/utils/__init__.py\", line 20, in <listcomp>\r\n return [x.item() for x in ndarray]\r\nAttributeError: 'str' object has no attribute 'item'\n", "before_files": [{"content": "from sys import version_info\n\nimport numpy as np\nimport pandas as pd\n\n\nPYTHON_VERSION = \"{major}.{minor}.{micro}\".format(major=version_info.major,\n minor=version_info.minor,\n micro=version_info.micro)\n\n\ndef ndarray2list(ndarray):\n \"\"\"\n Convert n-dimensional numpy array into nested lists and convert the elements types to native\n python so that the list is json-able using standard json library.\n :param ndarray: numpy array\n :return: list representation of the numpy array with element types convereted to native python\n \"\"\"\n if len(ndarray.shape) <= 1:\n return [x.item() for x in ndarray]\n return [ndarray2list(ndarray[i, :]) for i in range(0, ndarray.shape[0])]\n\n\ndef get_jsonable_obj(data, pandas_orient=\"records\"):\n \"\"\"Attempt to make the data json-able via standard library.\n Look for some commonly used types that are not jsonable and convert them into json-able ones.\n Unknown data types are returned as is.\n\n :param data: data to be converted, works with pandas and numpy, rest will be returned as is.\n :param pandas_orient: If `data` is a Pandas 
DataFrame, it will be converted to a JSON\n dictionary using this Pandas serialization orientation.\n \"\"\"\n if isinstance(data, np.ndarray):\n return ndarray2list(data)\n if isinstance(data, pd.DataFrame):\n return data.to_dict(orient=pandas_orient)\n if isinstance(data, pd.Series):\n return pd.DataFrame(data).to_dict(orient=pandas_orient)\n else: # by default just return whatever this is and hope for the best\n return data\n\n\ndef get_major_minor_py_version(py_version):\n return \".\".join(py_version.split(\".\")[:2])\n\n\ndef get_unique_resource_id(max_length=None):\n \"\"\"\n Obtains a unique id that can be included in a resource name. This unique id is a valid\n DNS subname.\n\n :param max_length: The maximum length of the identifier\n :return: A unique identifier that can be appended to a user-readable resource name to avoid\n naming collisions.\n \"\"\"\n import uuid\n import base64\n if max_length is not None and max_length <= 0:\n raise ValueError(\n \"The specified maximum length for the unique resource id must be positive!\")\n\n uuid_bytes = uuid.uuid4().bytes\n # Use base64 encoding to shorten the UUID length. Note that the replacement of the\n # unsupported '+' symbol maintains uniqueness because the UUID byte string is of a fixed,\n # 16-byte length\n uuid_b64 = base64.b64encode(uuid_bytes)\n if version_info >= (3, 0):\n # In Python3, `uuid_b64` is a `bytes` object. It needs to be\n # converted to a string\n uuid_b64 = uuid_b64.decode(\"ascii\")\n unique_id = uuid_b64.rstrip('=\\n').replace(\"/\", \"-\").replace(\"+\", \"AB\").lower()\n if max_length is not None:\n unique_id = unique_id[:int(max_length)]\n return unique_id\n", "path": "mlflow/utils/__init__.py"}], "after_files": [{"content": "from sys import version_info\n\nimport numpy as np\nimport pandas as pd\n\n\nPYTHON_VERSION = \"{major}.{minor}.{micro}\".format(major=version_info.major,\n minor=version_info.minor,\n micro=version_info.micro)\n\n\ndef get_jsonable_obj(data, pandas_orient=\"records\"):\n \"\"\"Attempt to make the data json-able via standard library.\n Look for some commonly used types that are not jsonable and convert them into json-able ones.\n Unknown data types are returned as is.\n\n :param data: data to be converted, works with pandas and numpy, rest will be returned as is.\n :param pandas_orient: If `data` is a Pandas DataFrame, it will be converted to a JSON\n dictionary using this Pandas serialization orientation.\n \"\"\"\n if isinstance(data, np.ndarray):\n return data.tolist()\n if isinstance(data, pd.DataFrame):\n return data.to_dict(orient=pandas_orient)\n if isinstance(data, pd.Series):\n return pd.DataFrame(data).to_dict(orient=pandas_orient)\n else: # by default just return whatever this is and hope for the best\n return data\n\n\ndef get_major_minor_py_version(py_version):\n return \".\".join(py_version.split(\".\")[:2])\n\n\ndef get_unique_resource_id(max_length=None):\n \"\"\"\n Obtains a unique id that can be included in a resource name. This unique id is a valid\n DNS subname.\n\n :param max_length: The maximum length of the identifier\n :return: A unique identifier that can be appended to a user-readable resource name to avoid\n naming collisions.\n \"\"\"\n import uuid\n import base64\n if max_length is not None and max_length <= 0:\n raise ValueError(\n \"The specified maximum length for the unique resource id must be positive!\")\n\n uuid_bytes = uuid.uuid4().bytes\n # Use base64 encoding to shorten the UUID length. 
Note that the replacement of the\n # unsupported '+' symbol maintains uniqueness because the UUID byte string is of a fixed,\n # 16-byte length\n uuid_b64 = base64.b64encode(uuid_bytes)\n if version_info >= (3, 0):\n # In Python3, `uuid_b64` is a `bytes` object. It needs to be\n # converted to a string\n uuid_b64 = uuid_b64.decode(\"ascii\")\n unique_id = uuid_b64.rstrip('=\\n').replace(\"/\", \"-\").replace(\"+\", \"AB\").lower()\n if max_length is not None:\n unique_id = unique_id[:int(max_length)]\n return unique_id\n", "path": "mlflow/utils/__init__.py"}]}
| 2,193 | 306 |
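The mlflow record above swaps the hand-rolled `ndarray2list` helper for numpy's built-in `ndarray.tolist()`. The sketch below (not part of the record) shows why that resolves the reported `AttributeError`: per-element `.item()` breaks when the prediction array holds plain Python strings, while `tolist()` returns JSON-serializable native values for string and numeric arrays alike. Using an object-dtype array is just one way to reproduce the failure from the traceback.

```python
import numpy as np

# String labels such as the iris target names can come back as an
# object-dtype array whose elements are plain Python str.
labels = np.array(["setosa", "versicolor", "virginica"], dtype=object)

try:
    [x.item() for x in labels]  # the old ndarray2list approach
except AttributeError as exc:
    print(exc)  # 'str' object has no attribute 'item'

print(labels.tolist())  # ['setosa', 'versicolor', 'virginica']

# tolist() still converts numeric arrays to native Python scalars,
# which is the guarantee the original helper was written for.
probs = np.array([[0.9, 0.1], [0.2, 0.8]])
converted = probs.tolist()
print(converted)              # [[0.9, 0.1], [0.2, 0.8]]
print(type(converted[0][0]))  # <class 'float'>
```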
gh_patches_debug_53283
|
rasdani/github-patches
|
git_diff
|
holoviz__panel-1789
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Image not scaling dynamically
#### ALL software version info
```
Python 3.9.0
bokeh==2.2.3
notebook==5.7.9
panel==0.10.1
macOS Catalina 10.15.7
Chrome 85.0.4183.121
```
#### Description of expected behavior and the observed behavior
##### Expected
I should be able to scale image up and down dynamically in Jupyter Notebook and using the standalone server.
##### Observed
In the notebook, I'm able to scale up and down <= 300 width. I can't make the image larger than 300 pixels wide.
Using the standalone server, it looks like it scales just once (either up or down) and then gets stuck.
#### Complete, minimal, self-contained example code that reproduces the issue
```python
import panel as pn
def panel_logo(width=300):
# also happens with .jpg
return pn.panel("https://panel.holoviz.org/_static/logo_stacked.png", width=width)
pn.interact(panel_logo)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/pane/image.py`
Content:
```
1 """
2 Contains Image panes including renderers for PNG, SVG, GIF and JPG
3 file types.
4 """
5 from __future__ import absolute_import, division, unicode_literals
6
7 import base64
8
9 from io import BytesIO
10 from six import string_types
11
12 import param
13
14 from .markup import escape, DivPaneBase
15 from ..util import isfile, isurl
16
17
18 class ImageBase(DivPaneBase):
19 """
20 Encodes an image as base64 and wraps it in a Bokeh Div model.
21 This is an abstract base class that needs the image type
22 to be specified and specific code for determining the image shape.
23
24 The imgtype determines the filetype, extension, and MIME type for
25 this image. Each image type (png,jpg,gif) has a base class that
26 supports anything with a `_repr_X_` method (where X is `png`,
27 `gif`, etc.), a local file with the given file extension, or a
28 HTTP(S) url with the given extension. Subclasses of each type can
29 provide their own way of obtaining or generating a PNG.
30 """
31
32 alt_text = param.String(default=None, doc="""
33 alt text to add to the image tag. The alt text is shown when a
34 user cannot load or display the image.""")
35
36 link_url = param.String(default=None, doc="""
37 A link URL to make the image clickable and link to some other
38 website.""")
39
40 embed = param.Boolean(default=True, doc="""
41 Whether to embed the image as base64.""")
42
43 imgtype = 'None'
44
45 _rerender_params = ['alt_text', 'link_url', 'embed', 'object', 'style']
46
47 _target_transforms = {'object': """'<img src="' + value + '"></img>'"""}
48
49 __abstract = True
50
51 @classmethod
52 def applies(cls, obj):
53 imgtype = cls.imgtype
54 if hasattr(obj, '_repr_{}_'.format(imgtype)):
55 return True
56 if isinstance(obj, string_types):
57 if isfile(obj) and obj.endswith('.'+imgtype):
58 return True
59 if isurl(obj, [cls.imgtype]):
60 return True
61 elif isurl(obj, None):
62 return 0
63 if hasattr(obj, 'read'): # Check for file like object
64 return True
65 return False
66
67 def _type_error(self, object):
68 if isinstance(object, string_types):
69 raise ValueError("%s pane cannot parse string that is not a filename "
70 "or URL." % type(self).__name__)
71 super(ImageBase, self)._type_error(object)
72
73 def _img(self):
74 if hasattr(self.object, '_repr_{}_'.format(self.imgtype)):
75 return getattr(self.object, '_repr_' + self.imgtype + '_')()
76 if isinstance(self.object, string_types):
77 if isfile(self.object):
78 with open(self.object, 'rb') as f:
79 return f.read()
80 if hasattr(self.object, 'read'):
81 if hasattr(self.object, 'seek'):
82 self.object.seek(0)
83 return self.object.read()
84 if isurl(self.object, None):
85 import requests
86 r = requests.request(url=self.object, method='GET')
87 return r.content
88
89 def _b64(self):
90 data = self._img()
91 if not isinstance(data, bytes):
92 data = data.encode('utf-8')
93 b64 = base64.b64encode(data).decode("utf-8")
94 return "data:image/"+self.imgtype+f";base64,{b64}"
95
96 def _imgshape(self, data):
97 """Calculate and return image width,height"""
98 raise NotImplementedError
99
100 def _get_properties(self):
101 p = super(ImageBase, self)._get_properties()
102 if self.object is None:
103 return dict(p, text='<img></img>')
104 data = self._img()
105 if not isinstance(data, bytes):
106 data = base64.b64decode(data)
107 width, height = self._imgshape(data)
108 if self.width is not None:
109 if self.height is None:
110 height = int((self.width/width)*height)
111 else:
112 height = self.height
113 width = self.width
114 elif self.height is not None:
115 width = int((self.height/height)*width)
116 height = self.height
117 if not self.embed:
118 src = self.object
119 else:
120 b64 = base64.b64encode(data).decode("utf-8")
121 src = "data:image/"+self.imgtype+";base64,{b64}".format(b64=b64)
122
123 smode = self.sizing_mode
124 if smode in ['fixed', None]:
125 w, h = '%spx' % width, '%spx' % height
126 elif smode == 'stretch_both':
127 w, h = '100%', '100%'
128 elif smode == 'stretch_width':
129 w, h = '%spx' % width, '100%'
130 elif smode == 'stretch_height':
131 w, h = '100%', '%spx' % height
132 elif smode == 'scale_height':
133 w, h = 'auto', '100%'
134 else:
135 w, h = '100%', 'auto'
136
137 html = '<img src="{src}" width="{width}" height="{height}" alt="{alt}"></img>'.format(
138 src=src, width=w, height=h, alt=self.alt_text or '')
139
140 if self.link_url:
141 html = '<a href="{url}" target="_blank">{html}</a>'.format(
142 url=self.link_url, html=html)
143
144 return dict(p, width=width, height=height, text=escape(html))
145
146
147 class PNG(ImageBase):
148
149 imgtype = 'png'
150
151 @classmethod
152 def _imgshape(cls, data):
153 import struct
154 w, h = struct.unpack('>LL', data[16:24])
155 return int(w), int(h)
156
157
158 class GIF(ImageBase):
159
160 imgtype = 'gif'
161
162 @classmethod
163 def _imgshape(cls, data):
164 import struct
165 w, h = struct.unpack("<HH", data[6:10])
166 return int(w), int(h)
167
168
169 class JPG(ImageBase):
170
171 imgtype = 'jpg'
172
173 @classmethod
174 def _imgshape(cls, data):
175 import struct
176 b = BytesIO(data)
177 b.read(2)
178 c = b.read(1)
179 while (c and ord(c) != 0xDA):
180 while (ord(c) != 0xFF): c = b.read(1)
181 while (ord(c) == 0xFF): c = b.read(1)
182 if (ord(c) >= 0xC0 and ord(c) <= 0xC3):
183 b.read(3)
184 h, w = struct.unpack(">HH", b.read(4))
185 break
186 else:
187 b.read(int(struct.unpack(">H", b.read(2))[0])-2)
188 c = b.read(1)
189 return int(w), int(h)
190
191
192 class SVG(ImageBase):
193
194 encode = param.Boolean(default=False, doc="""
195 Whether to enable base64 encoding of the SVG, base64 encoded
196 SVGs do not support links.""")
197
198 imgtype = 'svg'
199
200 _rerender_params = ImageBase._rerender_params + ['encode']
201
202 @classmethod
203 def applies(cls, obj):
204 return (super(SVG, cls).applies(obj) or
205 (isinstance(obj, string_types) and obj.lstrip().startswith('<svg')))
206
207 def _type_error(self, object):
208 if isinstance(object, string_types):
209 raise ValueError("%s pane cannot parse string that is not a filename, "
210 "URL or a SVG XML contents." % type(self).__name__)
211 super(SVG, self)._type_error(object)
212
213 def _img(self):
214 if (isinstance(self.object, string_types) and
215 self.object.lstrip().startswith('<svg')):
216 return self.object
217 return super(SVG, self)._img()
218
219 def _b64(self):
220 data = self._img()
221 if not isinstance(data, bytes):
222 data = data.encode('utf-8')
223 b64 = base64.b64encode(data).decode("utf-8")
224 return f"data:image/svg+xml;base64,{b64}"
225
226 def _imgshape(self, data):
227 return (self.width, self.height)
228
229 def _get_properties(self):
230 p = super(ImageBase, self)._get_properties()
231 if self.object is None:
232 return dict(p, text='<img></img>')
233 data = self._img()
234 width, height = self._imgshape(data)
235 if not isinstance(data, bytes):
236 data = data.encode('utf-8')
237
238 if self.encode:
239 b64 = base64.b64encode(data).decode("utf-8")
240 src = "data:image/svg+xml;base64,{b64}".format(b64=b64)
241 html = "<img src='{src}' width={width} height={height}></img>".format(
242 src=src, width=width, height=height
243 )
244 else:
245 html = data.decode("utf-8")
246 return dict(p, width=width, height=height, text=escape(html))
247
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/panel/pane/image.py b/panel/pane/image.py
--- a/panel/pane/image.py
+++ b/panel/pane/image.py
@@ -42,7 +42,7 @@
imgtype = 'None'
- _rerender_params = ['alt_text', 'link_url', 'embed', 'object', 'style']
+ _rerender_params = ['alt_text', 'link_url', 'embed', 'object', 'style', 'width', 'height']
_target_transforms = {'object': """'<img src="' + value + '"></img>'"""}
|
{"golden_diff": "diff --git a/panel/pane/image.py b/panel/pane/image.py\n--- a/panel/pane/image.py\n+++ b/panel/pane/image.py\n@@ -42,7 +42,7 @@\n \n imgtype = 'None'\n \n- _rerender_params = ['alt_text', 'link_url', 'embed', 'object', 'style']\n+ _rerender_params = ['alt_text', 'link_url', 'embed', 'object', 'style', 'width', 'height']\n \n _target_transforms = {'object': \"\"\"'<img src=\"' + value + '\"></img>'\"\"\"}\n", "issue": "Image not scaling dynamically\n#### ALL software version info\r\n\r\n```\r\nPython 3.9.0\r\n\r\nbokeh==2.2.3\r\nnotebook==5.7.9\r\npanel==0.10.1\r\n\r\nmacOS Catalina 10.15.7\r\n\r\nChrome 85.0.4183.121\r\n```\r\n\r\n#### Description of expected behavior and the observed behavior\r\n\r\n##### Expected\r\n\r\nI should be able to scale image up and down dynamically in Jupyter Notebook and using the standalone server.\r\n\r\n##### Observed\r\n\r\nIn the notebook, I'm able to scale up and down <= 300 width. I can't make the image larger than 300 pixels wide.\r\n\r\nUsing the standalone server, it looks like it scales just once (either up or down) and then gets stuck.\r\n\r\n#### Complete, minimal, self-contained example code that reproduces the issue\r\n\r\n```python\r\nimport panel as pn\r\n\r\ndef panel_logo(width=300):\r\n # also happens with .jpg\r\n return pn.panel(\"https://panel.holoviz.org/_static/logo_stacked.png\", width=width)\r\n\r\npn.interact(panel_logo)\r\n```\n", "before_files": [{"content": "\"\"\"\nContains Image panes including renderers for PNG, SVG, GIF and JPG\nfile types.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport base64\n\nfrom io import BytesIO\nfrom six import string_types\n\nimport param\n\nfrom .markup import escape, DivPaneBase\nfrom ..util import isfile, isurl\n\n\nclass ImageBase(DivPaneBase):\n \"\"\"\n Encodes an image as base64 and wraps it in a Bokeh Div model.\n This is an abstract base class that needs the image type\n to be specified and specific code for determining the image shape.\n\n The imgtype determines the filetype, extension, and MIME type for\n this image. Each image type (png,jpg,gif) has a base class that\n supports anything with a `_repr_X_` method (where X is `png`,\n `gif`, etc.), a local file with the given file extension, or a\n HTTP(S) url with the given extension. Subclasses of each type can\n provide their own way of obtaining or generating a PNG.\n \"\"\"\n\n alt_text = param.String(default=None, doc=\"\"\"\n alt text to add to the image tag. 
The alt text is shown when a\n user cannot load or display the image.\"\"\")\n\n link_url = param.String(default=None, doc=\"\"\"\n A link URL to make the image clickable and link to some other\n website.\"\"\")\n\n embed = param.Boolean(default=True, doc=\"\"\"\n Whether to embed the image as base64.\"\"\")\n\n imgtype = 'None'\n\n _rerender_params = ['alt_text', 'link_url', 'embed', 'object', 'style']\n\n _target_transforms = {'object': \"\"\"'<img src=\"' + value + '\"></img>'\"\"\"}\n\n __abstract = True\n\n @classmethod\n def applies(cls, obj):\n imgtype = cls.imgtype\n if hasattr(obj, '_repr_{}_'.format(imgtype)):\n return True\n if isinstance(obj, string_types):\n if isfile(obj) and obj.endswith('.'+imgtype):\n return True\n if isurl(obj, [cls.imgtype]):\n return True\n elif isurl(obj, None):\n return 0\n if hasattr(obj, 'read'): # Check for file like object\n return True\n return False\n\n def _type_error(self, object):\n if isinstance(object, string_types):\n raise ValueError(\"%s pane cannot parse string that is not a filename \"\n \"or URL.\" % type(self).__name__)\n super(ImageBase, self)._type_error(object)\n\n def _img(self):\n if hasattr(self.object, '_repr_{}_'.format(self.imgtype)):\n return getattr(self.object, '_repr_' + self.imgtype + '_')()\n if isinstance(self.object, string_types):\n if isfile(self.object):\n with open(self.object, 'rb') as f:\n return f.read()\n if hasattr(self.object, 'read'):\n if hasattr(self.object, 'seek'):\n self.object.seek(0)\n return self.object.read()\n if isurl(self.object, None):\n import requests\n r = requests.request(url=self.object, method='GET')\n return r.content\n\n def _b64(self):\n data = self._img()\n if not isinstance(data, bytes):\n data = data.encode('utf-8')\n b64 = base64.b64encode(data).decode(\"utf-8\")\n return \"data:image/\"+self.imgtype+f\";base64,{b64}\"\n\n def _imgshape(self, data):\n \"\"\"Calculate and return image width,height\"\"\"\n raise NotImplementedError\n\n def _get_properties(self):\n p = super(ImageBase, self)._get_properties()\n if self.object is None:\n return dict(p, text='<img></img>')\n data = self._img()\n if not isinstance(data, bytes):\n data = base64.b64decode(data)\n width, height = self._imgshape(data)\n if self.width is not None:\n if self.height is None:\n height = int((self.width/width)*height)\n else:\n height = self.height\n width = self.width\n elif self.height is not None:\n width = int((self.height/height)*width)\n height = self.height\n if not self.embed:\n src = self.object\n else:\n b64 = base64.b64encode(data).decode(\"utf-8\")\n src = \"data:image/\"+self.imgtype+\";base64,{b64}\".format(b64=b64)\n\n smode = self.sizing_mode\n if smode in ['fixed', None]:\n w, h = '%spx' % width, '%spx' % height\n elif smode == 'stretch_both':\n w, h = '100%', '100%'\n elif smode == 'stretch_width':\n w, h = '%spx' % width, '100%'\n elif smode == 'stretch_height':\n w, h = '100%', '%spx' % height\n elif smode == 'scale_height':\n w, h = 'auto', '100%'\n else:\n w, h = '100%', 'auto'\n\n html = '<img src=\"{src}\" width=\"{width}\" height=\"{height}\" alt=\"{alt}\"></img>'.format(\n src=src, width=w, height=h, alt=self.alt_text or '')\n\n if self.link_url:\n html = '<a href=\"{url}\" target=\"_blank\">{html}</a>'.format(\n url=self.link_url, html=html)\n\n return dict(p, width=width, height=height, text=escape(html))\n\n\nclass PNG(ImageBase):\n\n imgtype = 'png'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n w, h = struct.unpack('>LL', data[16:24])\n return int(w), 
int(h)\n\n\nclass GIF(ImageBase):\n\n imgtype = 'gif'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n w, h = struct.unpack(\"<HH\", data[6:10])\n return int(w), int(h)\n\n\nclass JPG(ImageBase):\n\n imgtype = 'jpg'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n b = BytesIO(data)\n b.read(2)\n c = b.read(1)\n while (c and ord(c) != 0xDA):\n while (ord(c) != 0xFF): c = b.read(1)\n while (ord(c) == 0xFF): c = b.read(1)\n if (ord(c) >= 0xC0 and ord(c) <= 0xC3):\n b.read(3)\n h, w = struct.unpack(\">HH\", b.read(4))\n break\n else:\n b.read(int(struct.unpack(\">H\", b.read(2))[0])-2)\n c = b.read(1)\n return int(w), int(h)\n\n\nclass SVG(ImageBase):\n\n encode = param.Boolean(default=False, doc=\"\"\"\n Whether to enable base64 encoding of the SVG, base64 encoded\n SVGs do not support links.\"\"\")\n\n imgtype = 'svg'\n\n _rerender_params = ImageBase._rerender_params + ['encode']\n\n @classmethod\n def applies(cls, obj):\n return (super(SVG, cls).applies(obj) or\n (isinstance(obj, string_types) and obj.lstrip().startswith('<svg')))\n\n def _type_error(self, object):\n if isinstance(object, string_types):\n raise ValueError(\"%s pane cannot parse string that is not a filename, \"\n \"URL or a SVG XML contents.\" % type(self).__name__)\n super(SVG, self)._type_error(object)\n\n def _img(self):\n if (isinstance(self.object, string_types) and\n self.object.lstrip().startswith('<svg')):\n return self.object\n return super(SVG, self)._img()\n\n def _b64(self):\n data = self._img()\n if not isinstance(data, bytes):\n data = data.encode('utf-8')\n b64 = base64.b64encode(data).decode(\"utf-8\")\n return f\"data:image/svg+xml;base64,{b64}\"\n\n def _imgshape(self, data):\n return (self.width, self.height)\n\n def _get_properties(self):\n p = super(ImageBase, self)._get_properties()\n if self.object is None:\n return dict(p, text='<img></img>')\n data = self._img()\n width, height = self._imgshape(data)\n if not isinstance(data, bytes):\n data = data.encode('utf-8')\n\n if self.encode:\n b64 = base64.b64encode(data).decode(\"utf-8\")\n src = \"data:image/svg+xml;base64,{b64}\".format(b64=b64)\n html = \"<img src='{src}' width={width} height={height}></img>\".format(\n src=src, width=width, height=height\n )\n else:\n html = data.decode(\"utf-8\")\n return dict(p, width=width, height=height, text=escape(html))\n", "path": "panel/pane/image.py"}], "after_files": [{"content": "\"\"\"\nContains Image panes including renderers for PNG, SVG, GIF and JPG\nfile types.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport base64\n\nfrom io import BytesIO\nfrom six import string_types\n\nimport param\n\nfrom .markup import escape, DivPaneBase\nfrom ..util import isfile, isurl\n\n\nclass ImageBase(DivPaneBase):\n \"\"\"\n Encodes an image as base64 and wraps it in a Bokeh Div model.\n This is an abstract base class that needs the image type\n to be specified and specific code for determining the image shape.\n\n The imgtype determines the filetype, extension, and MIME type for\n this image. Each image type (png,jpg,gif) has a base class that\n supports anything with a `_repr_X_` method (where X is `png`,\n `gif`, etc.), a local file with the given file extension, or a\n HTTP(S) url with the given extension. Subclasses of each type can\n provide their own way of obtaining or generating a PNG.\n \"\"\"\n\n alt_text = param.String(default=None, doc=\"\"\"\n alt text to add to the image tag. 
The alt text is shown when a\n user cannot load or display the image.\"\"\")\n\n link_url = param.String(default=None, doc=\"\"\"\n A link URL to make the image clickable and link to some other\n website.\"\"\")\n\n embed = param.Boolean(default=True, doc=\"\"\"\n Whether to embed the image as base64.\"\"\")\n\n imgtype = 'None'\n\n _rerender_params = ['alt_text', 'link_url', 'embed', 'object', 'style', 'width', 'height']\n\n _target_transforms = {'object': \"\"\"'<img src=\"' + value + '\"></img>'\"\"\"}\n\n __abstract = True\n\n @classmethod\n def applies(cls, obj):\n imgtype = cls.imgtype\n if hasattr(obj, '_repr_{}_'.format(imgtype)):\n return True\n if isinstance(obj, string_types):\n if isfile(obj) and obj.endswith('.'+imgtype):\n return True\n if isurl(obj, [cls.imgtype]):\n return True\n elif isurl(obj, None):\n return 0\n if hasattr(obj, 'read'): # Check for file like object\n return True\n return False\n\n def _type_error(self, object):\n if isinstance(object, string_types):\n raise ValueError(\"%s pane cannot parse string that is not a filename \"\n \"or URL.\" % type(self).__name__)\n super(ImageBase, self)._type_error(object)\n\n def _img(self):\n if hasattr(self.object, '_repr_{}_'.format(self.imgtype)):\n return getattr(self.object, '_repr_' + self.imgtype + '_')()\n if isinstance(self.object, string_types):\n if isfile(self.object):\n with open(self.object, 'rb') as f:\n return f.read()\n if hasattr(self.object, 'read'):\n if hasattr(self.object, 'seek'):\n self.object.seek(0)\n return self.object.read()\n if isurl(self.object, None):\n import requests\n r = requests.request(url=self.object, method='GET')\n return r.content\n\n def _b64(self):\n data = self._img()\n if not isinstance(data, bytes):\n data = data.encode('utf-8')\n b64 = base64.b64encode(data).decode(\"utf-8\")\n return \"data:image/\"+self.imgtype+f\";base64,{b64}\"\n\n def _imgshape(self, data):\n \"\"\"Calculate and return image width,height\"\"\"\n raise NotImplementedError\n\n def _get_properties(self):\n p = super(ImageBase, self)._get_properties()\n if self.object is None:\n return dict(p, text='<img></img>')\n data = self._img()\n if not isinstance(data, bytes):\n data = base64.b64decode(data)\n width, height = self._imgshape(data)\n if self.width is not None:\n if self.height is None:\n height = int((self.width/width)*height)\n else:\n height = self.height\n width = self.width\n elif self.height is not None:\n width = int((self.height/height)*width)\n height = self.height\n if not self.embed:\n src = self.object\n else:\n b64 = base64.b64encode(data).decode(\"utf-8\")\n src = \"data:image/\"+self.imgtype+\";base64,{b64}\".format(b64=b64)\n\n smode = self.sizing_mode\n if smode in ['fixed', None]:\n w, h = '%spx' % width, '%spx' % height\n elif smode == 'stretch_both':\n w, h = '100%', '100%'\n elif smode == 'stretch_width':\n w, h = '%spx' % width, '100%'\n elif smode == 'stretch_height':\n w, h = '100%', '%spx' % height\n elif smode == 'scale_height':\n w, h = 'auto', '100%'\n else:\n w, h = '100%', 'auto'\n\n html = '<img src=\"{src}\" width=\"{width}\" height=\"{height}\" alt=\"{alt}\"></img>'.format(\n src=src, width=w, height=h, alt=self.alt_text or '')\n\n if self.link_url:\n html = '<a href=\"{url}\" target=\"_blank\">{html}</a>'.format(\n url=self.link_url, html=html)\n\n return dict(p, width=width, height=height, text=escape(html))\n\n\nclass PNG(ImageBase):\n\n imgtype = 'png'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n w, h = struct.unpack('>LL', data[16:24])\n return 
int(w), int(h)\n\n\nclass GIF(ImageBase):\n\n imgtype = 'gif'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n w, h = struct.unpack(\"<HH\", data[6:10])\n return int(w), int(h)\n\n\nclass JPG(ImageBase):\n\n imgtype = 'jpg'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n b = BytesIO(data)\n b.read(2)\n c = b.read(1)\n while (c and ord(c) != 0xDA):\n while (ord(c) != 0xFF): c = b.read(1)\n while (ord(c) == 0xFF): c = b.read(1)\n if (ord(c) >= 0xC0 and ord(c) <= 0xC3):\n b.read(3)\n h, w = struct.unpack(\">HH\", b.read(4))\n break\n else:\n b.read(int(struct.unpack(\">H\", b.read(2))[0])-2)\n c = b.read(1)\n return int(w), int(h)\n\n\nclass SVG(ImageBase):\n\n encode = param.Boolean(default=False, doc=\"\"\"\n Whether to enable base64 encoding of the SVG, base64 encoded\n SVGs do not support links.\"\"\")\n\n imgtype = 'svg'\n\n _rerender_params = ImageBase._rerender_params + ['encode']\n\n @classmethod\n def applies(cls, obj):\n return (super(SVG, cls).applies(obj) or\n (isinstance(obj, string_types) and obj.lstrip().startswith('<svg')))\n\n def _type_error(self, object):\n if isinstance(object, string_types):\n raise ValueError(\"%s pane cannot parse string that is not a filename, \"\n \"URL or a SVG XML contents.\" % type(self).__name__)\n super(SVG, self)._type_error(object)\n\n def _img(self):\n if (isinstance(self.object, string_types) and\n self.object.lstrip().startswith('<svg')):\n return self.object\n return super(SVG, self)._img()\n\n def _b64(self):\n data = self._img()\n if not isinstance(data, bytes):\n data = data.encode('utf-8')\n b64 = base64.b64encode(data).decode(\"utf-8\")\n return f\"data:image/svg+xml;base64,{b64}\"\n\n def _imgshape(self, data):\n return (self.width, self.height)\n\n def _get_properties(self):\n p = super(ImageBase, self)._get_properties()\n if self.object is None:\n return dict(p, text='<img></img>')\n data = self._img()\n width, height = self._imgshape(data)\n if not isinstance(data, bytes):\n data = data.encode('utf-8')\n\n if self.encode:\n b64 = base64.b64encode(data).decode(\"utf-8\")\n src = \"data:image/svg+xml;base64,{b64}\".format(b64=b64)\n html = \"<img src='{src}' width={width} height={height}></img>\".format(\n src=src, width=width, height=height\n )\n else:\n html = data.decode(\"utf-8\")\n return dict(p, width=width, height=height, text=escape(html))\n", "path": "panel/pane/image.py"}]}
| 3,199 | 135 |
gh_patches_debug_29456
|
rasdani/github-patches
|
git_diff
|
oppia__oppia-7287
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Show skill mastery values in the topic viewer
Add a skill tab in the topic viewer that will show skill mastery of all skills in that topic (Once we have enough skill mastery information for the skill)
Milestone 3.2 in @sophiewu6 's GSoC project
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/controllers/topic_viewer.py`
Content:
```
1 # Copyright 2018 The Oppia Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS-IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Controllers for the topic viewer page."""
16
17 from constants import constants
18 from core.controllers import acl_decorators
19 from core.controllers import base
20 from core.domain import story_fetchers
21 from core.domain import topic_fetchers
22 import feconf
23
24
25 class TopicViewerPage(base.BaseHandler):
26 """Renders the topic viewer page."""
27
28 @acl_decorators.can_access_topic_viewer_page
29 def get(self, _):
30 """Handles GET requests."""
31
32 if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:
33 raise self.PageNotFoundException
34
35 self.render_template('dist/topic-viewer-page.mainpage.html')
36
37
38 class TopicPageDataHandler(base.BaseHandler):
39 """Manages the data that needs to be displayed to a learner on the topic
40 viewer page.
41 """
42 GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
43
44 @acl_decorators.can_access_topic_viewer_page
45 def get(self, topic_name):
46 """Handles GET requests."""
47
48 if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:
49 raise self.PageNotFoundException
50
51 topic = topic_fetchers.get_topic_by_name(topic_name)
52 canonical_story_ids = topic.get_canonical_story_ids(
53 include_only_published=True)
54 additional_story_ids = topic.get_additional_story_ids(
55 include_only_published=True)
56 canonical_story_summaries = [
57 story_fetchers.get_story_summary_by_id(
58 canonical_story_id) for canonical_story_id
59 in canonical_story_ids]
60
61 additional_story_summaries = [
62 story_fetchers.get_story_summary_by_id(
63 additional_story_id) for additional_story_id
64 in additional_story_ids]
65
66 canonical_story_dicts = [
67 summary.to_human_readable_dict() for summary
68 in canonical_story_summaries]
69
70 additional_story_dicts = [
71 summary.to_human_readable_dict() for summary
72 in additional_story_summaries]
73
74 uncategorized_skill_ids = topic.get_all_uncategorized_skill_ids()
75 subtopics = topic.get_all_subtopics()
76
77 self.values.update({
78 'topic_id': topic.id,
79 'topic_name': topic.name,
80 'canonical_story_dicts': canonical_story_dicts,
81 'additional_story_dicts': additional_story_dicts,
82 'uncategorized_skill_ids': uncategorized_skill_ids,
83 'subtopics': subtopics
84 })
85 self.render_json(self.values)
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/core/controllers/topic_viewer.py b/core/controllers/topic_viewer.py
--- a/core/controllers/topic_viewer.py
+++ b/core/controllers/topic_viewer.py
@@ -17,6 +17,7 @@
from constants import constants
from core.controllers import acl_decorators
from core.controllers import base
+from core.domain import skill_services
from core.domain import story_fetchers
from core.domain import topic_fetchers
import feconf
@@ -74,12 +75,26 @@
uncategorized_skill_ids = topic.get_all_uncategorized_skill_ids()
subtopics = topic.get_all_subtopics()
+ assigned_skill_ids = topic.get_all_skill_ids()
+ skill_descriptions = skill_services.get_skill_descriptions_by_ids(
+ topic.id, assigned_skill_ids)
+
+ if self.user_id:
+ degrees_of_mastery = skill_services.get_multi_user_skill_mastery(
+ self.user_id, assigned_skill_ids)
+ else:
+ degrees_of_mastery = {}
+ for skill_id in assigned_skill_ids:
+ degrees_of_mastery[skill_id] = None
+
self.values.update({
'topic_id': topic.id,
'topic_name': topic.name,
'canonical_story_dicts': canonical_story_dicts,
'additional_story_dicts': additional_story_dicts,
'uncategorized_skill_ids': uncategorized_skill_ids,
- 'subtopics': subtopics
+ 'subtopics': subtopics,
+ 'degrees_of_mastery': degrees_of_mastery,
+ 'skill_descriptions': skill_descriptions
})
self.render_json(self.values)
|
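For context, here is a sketch of the payload shape the patched `TopicPageDataHandler` would render. Only the key names come from the diff above; the topic, skill ids, descriptions, and mastery values are invented for illustration.

```python
# Illustrative only: the dict passed to self.render_json() after the patch.
# Key names follow the golden diff; all concrete values are made-up examples.
example_payload = {
    "topic_id": "topic_1",
    "topic_name": "Fractions",
    "canonical_story_dicts": [],
    "additional_story_dicts": [],
    "uncategorized_skill_ids": ["skill_1"],
    "subtopics": [],
    # New keys added by the patch:
    "degrees_of_mastery": {"skill_1": 0.3, "skill_2": None},  # None when no mastery data exists
    "skill_descriptions": {"skill_1": "Adding fractions", "skill_2": "Dividing fractions"},
}
```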
{"golden_diff": "diff --git a/core/controllers/topic_viewer.py b/core/controllers/topic_viewer.py\n--- a/core/controllers/topic_viewer.py\n+++ b/core/controllers/topic_viewer.py\n@@ -17,6 +17,7 @@\n from constants import constants\n from core.controllers import acl_decorators\n from core.controllers import base\n+from core.domain import skill_services\n from core.domain import story_fetchers\n from core.domain import topic_fetchers\n import feconf\n@@ -74,12 +75,26 @@\n uncategorized_skill_ids = topic.get_all_uncategorized_skill_ids()\n subtopics = topic.get_all_subtopics()\n \n+ assigned_skill_ids = topic.get_all_skill_ids()\n+ skill_descriptions = skill_services.get_skill_descriptions_by_ids(\n+ topic.id, assigned_skill_ids)\n+\n+ if self.user_id:\n+ degrees_of_mastery = skill_services.get_multi_user_skill_mastery(\n+ self.user_id, assigned_skill_ids)\n+ else:\n+ degrees_of_mastery = {}\n+ for skill_id in assigned_skill_ids:\n+ degrees_of_mastery[skill_id] = None\n+\n self.values.update({\n 'topic_id': topic.id,\n 'topic_name': topic.name,\n 'canonical_story_dicts': canonical_story_dicts,\n 'additional_story_dicts': additional_story_dicts,\n 'uncategorized_skill_ids': uncategorized_skill_ids,\n- 'subtopics': subtopics\n+ 'subtopics': subtopics,\n+ 'degrees_of_mastery': degrees_of_mastery,\n+ 'skill_descriptions': skill_descriptions\n })\n self.render_json(self.values)\n", "issue": "Show skill mastery values in the topic viewer\nAdd a skill tab in the topic viewer that will show skill mastery of all skills in that topic (Once we have enough skill mastery information for the skill)\r\n\r\nMilestone 3.2 in @sophiewu6 's GSoC project\n", "before_files": [{"content": "# Copyright 2018 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Controllers for the topic viewer page.\"\"\"\n\nfrom constants import constants\nfrom core.controllers import acl_decorators\nfrom core.controllers import base\nfrom core.domain import story_fetchers\nfrom core.domain import topic_fetchers\nimport feconf\n\n\nclass TopicViewerPage(base.BaseHandler):\n \"\"\"Renders the topic viewer page.\"\"\"\n\n @acl_decorators.can_access_topic_viewer_page\n def get(self, _):\n \"\"\"Handles GET requests.\"\"\"\n\n if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:\n raise self.PageNotFoundException\n\n self.render_template('dist/topic-viewer-page.mainpage.html')\n\n\nclass TopicPageDataHandler(base.BaseHandler):\n \"\"\"Manages the data that needs to be displayed to a learner on the topic\n viewer page.\n \"\"\"\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n\n @acl_decorators.can_access_topic_viewer_page\n def get(self, topic_name):\n \"\"\"Handles GET requests.\"\"\"\n\n if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:\n raise self.PageNotFoundException\n\n topic = topic_fetchers.get_topic_by_name(topic_name)\n canonical_story_ids = topic.get_canonical_story_ids(\n include_only_published=True)\n additional_story_ids = topic.get_additional_story_ids(\n 
include_only_published=True)\n canonical_story_summaries = [\n story_fetchers.get_story_summary_by_id(\n canonical_story_id) for canonical_story_id\n in canonical_story_ids]\n\n additional_story_summaries = [\n story_fetchers.get_story_summary_by_id(\n additional_story_id) for additional_story_id\n in additional_story_ids]\n\n canonical_story_dicts = [\n summary.to_human_readable_dict() for summary\n in canonical_story_summaries]\n\n additional_story_dicts = [\n summary.to_human_readable_dict() for summary\n in additional_story_summaries]\n\n uncategorized_skill_ids = topic.get_all_uncategorized_skill_ids()\n subtopics = topic.get_all_subtopics()\n\n self.values.update({\n 'topic_id': topic.id,\n 'topic_name': topic.name,\n 'canonical_story_dicts': canonical_story_dicts,\n 'additional_story_dicts': additional_story_dicts,\n 'uncategorized_skill_ids': uncategorized_skill_ids,\n 'subtopics': subtopics\n })\n self.render_json(self.values)\n", "path": "core/controllers/topic_viewer.py"}], "after_files": [{"content": "# Copyright 2018 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Controllers for the topic viewer page.\"\"\"\n\nfrom constants import constants\nfrom core.controllers import acl_decorators\nfrom core.controllers import base\nfrom core.domain import skill_services\nfrom core.domain import story_fetchers\nfrom core.domain import topic_fetchers\nimport feconf\n\n\nclass TopicViewerPage(base.BaseHandler):\n \"\"\"Renders the topic viewer page.\"\"\"\n\n @acl_decorators.can_access_topic_viewer_page\n def get(self, _):\n \"\"\"Handles GET requests.\"\"\"\n\n if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:\n raise self.PageNotFoundException\n\n self.render_template('dist/topic-viewer-page.mainpage.html')\n\n\nclass TopicPageDataHandler(base.BaseHandler):\n \"\"\"Manages the data that needs to be displayed to a learner on the topic\n viewer page.\n \"\"\"\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n\n @acl_decorators.can_access_topic_viewer_page\n def get(self, topic_name):\n \"\"\"Handles GET requests.\"\"\"\n\n if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:\n raise self.PageNotFoundException\n\n topic = topic_fetchers.get_topic_by_name(topic_name)\n canonical_story_ids = topic.get_canonical_story_ids(\n include_only_published=True)\n additional_story_ids = topic.get_additional_story_ids(\n include_only_published=True)\n canonical_story_summaries = [\n story_fetchers.get_story_summary_by_id(\n canonical_story_id) for canonical_story_id\n in canonical_story_ids]\n\n additional_story_summaries = [\n story_fetchers.get_story_summary_by_id(\n additional_story_id) for additional_story_id\n in additional_story_ids]\n\n canonical_story_dicts = [\n summary.to_human_readable_dict() for summary\n in canonical_story_summaries]\n\n additional_story_dicts = [\n summary.to_human_readable_dict() for summary\n in additional_story_summaries]\n\n uncategorized_skill_ids = topic.get_all_uncategorized_skill_ids()\n subtopics = 
topic.get_all_subtopics()\n\n assigned_skill_ids = topic.get_all_skill_ids()\n skill_descriptions = skill_services.get_skill_descriptions_by_ids(\n topic.id, assigned_skill_ids)\n\n if self.user_id:\n degrees_of_mastery = skill_services.get_multi_user_skill_mastery(\n self.user_id, assigned_skill_ids)\n else:\n degrees_of_mastery = {}\n for skill_id in assigned_skill_ids:\n degrees_of_mastery[skill_id] = None\n\n self.values.update({\n 'topic_id': topic.id,\n 'topic_name': topic.name,\n 'canonical_story_dicts': canonical_story_dicts,\n 'additional_story_dicts': additional_story_dicts,\n 'uncategorized_skill_ids': uncategorized_skill_ids,\n 'subtopics': subtopics,\n 'degrees_of_mastery': degrees_of_mastery,\n 'skill_descriptions': skill_descriptions\n })\n self.render_json(self.values)\n", "path": "core/controllers/topic_viewer.py"}]}
| 1,098 | 340 |
gh_patches_debug_23586
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-1555
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pymongo is not collecting the property: db.mongodb.collection
According to the [specs](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md#call-level-attributes-for-specific-technologies) -
mongodb should capture: "The collection being accessed within the database stated in db.name."
and save it in: `db.mongodb.collection`
**Steps to reproduce**
Instrument a client using PymongoInstrumentor().
Send a request to the db.
**What is the expected behavior?**
Produce a span with `db.mongodb.collection` value containing the collection name.
**What is the actual behavior?**
Produce a span without generating `db.mongodb.collection`.
**Example:**
Here is a simple code example:
```
PymongoInstrumentor().instrument()
client = MongoClient()
RECORD = {"test": "123"}
db = client["MongoDB_Database"]
collection = db["MongoDB_Collection"]
collection.find_one(RECORD)
```
and the result is missing the collection:
```
"attributes": {
"db.system": "mongodb",
"db.name": "MongoDB_Database",
"db.statement": "find",
"net.peer.name": "localhost",
"net.peer.port": 27017
}
```
If you can - assign this to me, thanks :)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 The integration with MongoDB supports the `pymongo`_ library, it can be
17 enabled using the ``PymongoInstrumentor``.
18
19 .. _pymongo: https://pypi.org/project/pymongo
20
21 Usage
22 -----
23
24 .. code:: python
25
26 from pymongo import MongoClient
27 from opentelemetry.instrumentation.pymongo import PymongoInstrumentor
28
29 PymongoInstrumentor().instrument()
30 client = MongoClient()
31 db = client["MongoDB_Database"]
32 collection = db["MongoDB_Collection"]
33 collection.find_one()
34
35 API
36 ---
37 The `instrument` method accepts the following keyword args:
38
39 tracer_provider (TracerProvider) - an optional tracer provider
40 request_hook (Callable) -
41 a function with extra user-defined logic to be performed before querying mongodb
42 this function signature is: def request_hook(span: Span, event: CommandStartedEvent) -> None
43 response_hook (Callable) -
44 a function with extra user-defined logic to be performed after the query returns with a successful response
45 this function signature is: def response_hook(span: Span, event: CommandSucceededEvent) -> None
46 failed_hook (Callable) -
47 a function with extra user-defined logic to be performed after the query returns with a failed response
48 this function signature is: def failed_hook(span: Span, event: CommandFailedEvent) -> None
49
50 for example:
51
52 .. code: python
53
54 from opentelemetry.instrumentation.pymongo import PymongoInstrumentor
55 from pymongo import MongoClient
56
57 def request_hook(span, event):
58 # request hook logic
59
60 def response_hook(span, event):
61 # response hook logic
62
63 def failed_hook(span, event):
64 # failed hook logic
65
66 # Instrument pymongo with hooks
67 PymongoInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook, failed_hook=failed_hook)
68
69 # This will create a span with pymongo specific attributes, including custom attributes added from the hooks
70 client = MongoClient()
71 db = client["MongoDB_Database"]
72 collection = db["MongoDB_Collection"]
73 collection.find_one()
74
75 """
76 from logging import getLogger
77 from typing import Callable, Collection
78
79 from pymongo import monitoring
80
81 from opentelemetry import context
82 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
83 from opentelemetry.instrumentation.pymongo.package import _instruments
84 from opentelemetry.instrumentation.pymongo.version import __version__
85 from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
86 from opentelemetry.semconv.trace import DbSystemValues, SpanAttributes
87 from opentelemetry.trace import SpanKind, get_tracer
88 from opentelemetry.trace.span import Span
89 from opentelemetry.trace.status import Status, StatusCode
90
91 _LOG = getLogger(__name__)
92
93 RequestHookT = Callable[[Span, monitoring.CommandStartedEvent], None]
94 ResponseHookT = Callable[[Span, monitoring.CommandSucceededEvent], None]
95 FailedHookT = Callable[[Span, monitoring.CommandFailedEvent], None]
96
97
98 def dummy_callback(span, event):
99 ...
100
101
102 class CommandTracer(monitoring.CommandListener):
103 def __init__(
104 self,
105 tracer,
106 request_hook: RequestHookT = dummy_callback,
107 response_hook: ResponseHookT = dummy_callback,
108 failed_hook: FailedHookT = dummy_callback,
109 ):
110 self._tracer = tracer
111 self._span_dict = {}
112 self.is_enabled = True
113 self.start_hook = request_hook
114 self.success_hook = response_hook
115 self.failed_hook = failed_hook
116
117 def started(self, event: monitoring.CommandStartedEvent):
118 """Method to handle a pymongo CommandStartedEvent"""
119 if not self.is_enabled or context.get_value(
120 _SUPPRESS_INSTRUMENTATION_KEY
121 ):
122 return
123 command = event.command.get(event.command_name, "")
124 name = event.database_name
125 name += "." + event.command_name
126 statement = event.command_name
127 if command:
128 statement += " " + str(command)
129
130 try:
131 span = self._tracer.start_span(name, kind=SpanKind.CLIENT)
132 if span.is_recording():
133 span.set_attribute(
134 SpanAttributes.DB_SYSTEM, DbSystemValues.MONGODB.value
135 )
136 span.set_attribute(SpanAttributes.DB_NAME, event.database_name)
137 span.set_attribute(SpanAttributes.DB_STATEMENT, statement)
138 if event.connection_id is not None:
139 span.set_attribute(
140 SpanAttributes.NET_PEER_NAME, event.connection_id[0]
141 )
142 span.set_attribute(
143 SpanAttributes.NET_PEER_PORT, event.connection_id[1]
144 )
145 try:
146 self.start_hook(span, event)
147 except Exception as hook_exception: # noqa pylint: disable=broad-except
148 _LOG.exception(hook_exception)
149
150 # Add Span to dictionary
151 self._span_dict[_get_span_dict_key(event)] = span
152 except Exception as ex: # noqa pylint: disable=broad-except
153 if span is not None and span.is_recording():
154 span.set_status(Status(StatusCode.ERROR, str(ex)))
155 span.end()
156 self._pop_span(event)
157
158 def succeeded(self, event: monitoring.CommandSucceededEvent):
159 """Method to handle a pymongo CommandSucceededEvent"""
160 if not self.is_enabled or context.get_value(
161 _SUPPRESS_INSTRUMENTATION_KEY
162 ):
163 return
164 span = self._pop_span(event)
165 if span is None:
166 return
167 if span.is_recording():
168 try:
169 self.success_hook(span, event)
170 except Exception as hook_exception: # noqa pylint: disable=broad-except
171 _LOG.exception(hook_exception)
172 span.end()
173
174 def failed(self, event: monitoring.CommandFailedEvent):
175 """Method to handle a pymongo CommandFailedEvent"""
176 if not self.is_enabled or context.get_value(
177 _SUPPRESS_INSTRUMENTATION_KEY
178 ):
179 return
180 span = self._pop_span(event)
181 if span is None:
182 return
183 if span.is_recording():
184 span.set_status(Status(StatusCode.ERROR, event.failure))
185 try:
186 self.failed_hook(span, event)
187 except Exception as hook_exception: # noqa pylint: disable=broad-except
188 _LOG.exception(hook_exception)
189 span.end()
190
191 def _pop_span(self, event):
192 return self._span_dict.pop(_get_span_dict_key(event), None)
193
194
195 def _get_span_dict_key(event):
196 if event.connection_id is not None:
197 return event.request_id, event.connection_id
198 return event.request_id
199
200
201 class PymongoInstrumentor(BaseInstrumentor):
202 _commandtracer_instance = None # type CommandTracer
203 # The instrumentation for PyMongo is based on the event listener interface
204 # https://api.mongodb.com/python/current/api/pymongo/monitoring.html.
205 # This interface only allows to register listeners and does not provide
206 # an unregister API. In order to provide a mechanishm to disable
207 # instrumentation an enabled flag is implemented in CommandTracer,
208 # it's checked in the different listeners.
209
210 def instrumentation_dependencies(self) -> Collection[str]:
211 return _instruments
212
213 def _instrument(self, **kwargs):
214 """Integrate with pymongo to trace it using event listener.
215 https://api.mongodb.com/python/current/api/pymongo/monitoring.html
216
217 Args:
218 tracer_provider: The `TracerProvider` to use. If none is passed the
219 current configured one is used.
220 """
221
222 tracer_provider = kwargs.get("tracer_provider")
223 request_hook = kwargs.get("request_hook", dummy_callback)
224 response_hook = kwargs.get("response_hook", dummy_callback)
225 failed_hook = kwargs.get("failed_hook", dummy_callback)
226 # Create and register a CommandTracer only the first time
227 if self._commandtracer_instance is None:
228 tracer = get_tracer(__name__, __version__, tracer_provider)
229
230 self._commandtracer_instance = CommandTracer(
231 tracer,
232 request_hook=request_hook,
233 response_hook=response_hook,
234 failed_hook=failed_hook,
235 )
236 monitoring.register(self._commandtracer_instance)
237 # If already created, just enable it
238 self._commandtracer_instance.is_enabled = True
239
240 def _uninstrument(self, **kwargs):
241 if self._commandtracer_instance is not None:
242 self._commandtracer_instance.is_enabled = False
243
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py b/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py
@@ -126,6 +126,7 @@
statement = event.command_name
if command:
statement += " " + str(command)
+ collection = event.command.get(event.command_name)
try:
span = self._tracer.start_span(name, kind=SpanKind.CLIENT)
@@ -135,6 +136,10 @@
)
span.set_attribute(SpanAttributes.DB_NAME, event.database_name)
span.set_attribute(SpanAttributes.DB_STATEMENT, statement)
+ if collection:
+ span.set_attribute(
+ SpanAttributes.DB_MONGODB_COLLECTION, collection
+ )
if event.connection_id is not None:
span.set_attribute(
SpanAttributes.NET_PEER_NAME, event.connection_id[0]
|
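Illustrative only: with the patch above applied, the `find_one` call from the issue should emit a span whose attributes also carry the collection name. The new key mirrors `SpanAttributes.DB_MONGODB_COLLECTION`; the remaining values are copied from the issue's example output.

```python
# Expected span attributes for the issue's example after the fix.
# The collection comes from event.command.get("find") == "MongoDB_Collection".
expected_attributes = {
    "db.system": "mongodb",
    "db.name": "MongoDB_Database",
    "db.statement": "find",
    "db.mongodb.collection": "MongoDB_Collection",
    "net.peer.name": "localhost",
    "net.peer.port": 27017,
}
```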
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py b/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py\n@@ -126,6 +126,7 @@\n statement = event.command_name\n if command:\n statement += \" \" + str(command)\n+ collection = event.command.get(event.command_name)\n \n try:\n span = self._tracer.start_span(name, kind=SpanKind.CLIENT)\n@@ -135,6 +136,10 @@\n )\n span.set_attribute(SpanAttributes.DB_NAME, event.database_name)\n span.set_attribute(SpanAttributes.DB_STATEMENT, statement)\n+ if collection:\n+ span.set_attribute(\n+ SpanAttributes.DB_MONGODB_COLLECTION, collection\n+ )\n if event.connection_id is not None:\n span.set_attribute(\n SpanAttributes.NET_PEER_NAME, event.connection_id[0]\n", "issue": "pymongo is not collecting the property: db.mongodb.collection\nAccording to the [specs](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md#call-level-attributes-for-specific-technologies) - \r\nmongodb should capture: \"The collection being accessed within the database stated in db.name.\"\r\nand save it in: `db.mongodb.collection`\r\n\r\n**Steps to reproduce**\r\nInstrument a client using PymongoInstrumentor().\r\nSend a request to the db.\r\n\r\n**What is the expected behavior?**\r\nProduce a span with `db.mongodb.collection` value containing the collection name.\r\n\r\n**What is the actual behavior?**\r\nProduce a span without generating `db.mongodb.collection`.\r\n\r\n**Example:**\r\nHere is a simple code example:\r\n\r\n```\r\nPymongoInstrumentor().instrument()\r\nclient = MongoClient()\r\nRECORD = {\"test\": \"123\"}\r\ndb = client[\"MongoDB_Database\"]\r\ncollection = db[\"MongoDB_Collection\"]\r\ncollection.find_one(RECORD)\r\n```\r\n\r\nand the result is missing the collection:\r\n```\r\n\"attributes\": {\r\n \"db.system\": \"mongodb\",\r\n \"db.name\": \"MongoDB_Database\",\r\n \"db.statement\": \"find\",\r\n \"net.peer.name\": \"localhost\",\r\n \"net.peer.port\": 27017\r\n }\r\n```\r\n\r\nIf you can - assign this to me, thanks :)\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe integration with MongoDB supports the `pymongo`_ library, it can be\nenabled using the ``PymongoInstrumentor``.\n\n.. _pymongo: https://pypi.org/project/pymongo\n\nUsage\n-----\n\n.. 
code:: python\n\n from pymongo import MongoClient\n from opentelemetry.instrumentation.pymongo import PymongoInstrumentor\n\n PymongoInstrumentor().instrument()\n client = MongoClient()\n db = client[\"MongoDB_Database\"]\n collection = db[\"MongoDB_Collection\"]\n collection.find_one()\n\nAPI\n---\nThe `instrument` method accepts the following keyword args:\n\ntracer_provider (TracerProvider) - an optional tracer provider\nrequest_hook (Callable) -\na function with extra user-defined logic to be performed before querying mongodb\nthis function signature is: def request_hook(span: Span, event: CommandStartedEvent) -> None\nresponse_hook (Callable) -\na function with extra user-defined logic to be performed after the query returns with a successful response\nthis function signature is: def response_hook(span: Span, event: CommandSucceededEvent) -> None\nfailed_hook (Callable) -\na function with extra user-defined logic to be performed after the query returns with a failed response\nthis function signature is: def failed_hook(span: Span, event: CommandFailedEvent) -> None\n\nfor example:\n\n.. code: python\n\n from opentelemetry.instrumentation.pymongo import PymongoInstrumentor\n from pymongo import MongoClient\n\n def request_hook(span, event):\n # request hook logic\n\n def response_hook(span, event):\n # response hook logic\n\n def failed_hook(span, event):\n # failed hook logic\n\n # Instrument pymongo with hooks\n PymongoInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook, failed_hook=failed_hook)\n\n # This will create a span with pymongo specific attributes, including custom attributes added from the hooks\n client = MongoClient()\n db = client[\"MongoDB_Database\"]\n collection = db[\"MongoDB_Collection\"]\n collection.find_one()\n\n\"\"\"\nfrom logging import getLogger\nfrom typing import Callable, Collection\n\nfrom pymongo import monitoring\n\nfrom opentelemetry import context\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.pymongo.package import _instruments\nfrom opentelemetry.instrumentation.pymongo.version import __version__\nfrom opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY\nfrom opentelemetry.semconv.trace import DbSystemValues, SpanAttributes\nfrom opentelemetry.trace import SpanKind, get_tracer\nfrom opentelemetry.trace.span import Span\nfrom opentelemetry.trace.status import Status, StatusCode\n\n_LOG = getLogger(__name__)\n\nRequestHookT = Callable[[Span, monitoring.CommandStartedEvent], None]\nResponseHookT = Callable[[Span, monitoring.CommandSucceededEvent], None]\nFailedHookT = Callable[[Span, monitoring.CommandFailedEvent], None]\n\n\ndef dummy_callback(span, event):\n ...\n\n\nclass CommandTracer(monitoring.CommandListener):\n def __init__(\n self,\n tracer,\n request_hook: RequestHookT = dummy_callback,\n response_hook: ResponseHookT = dummy_callback,\n failed_hook: FailedHookT = dummy_callback,\n ):\n self._tracer = tracer\n self._span_dict = {}\n self.is_enabled = True\n self.start_hook = request_hook\n self.success_hook = response_hook\n self.failed_hook = failed_hook\n\n def started(self, event: monitoring.CommandStartedEvent):\n \"\"\"Method to handle a pymongo CommandStartedEvent\"\"\"\n if not self.is_enabled or context.get_value(\n _SUPPRESS_INSTRUMENTATION_KEY\n ):\n return\n command = event.command.get(event.command_name, \"\")\n name = event.database_name\n name += \".\" + event.command_name\n statement = event.command_name\n if 
command:\n statement += \" \" + str(command)\n\n try:\n span = self._tracer.start_span(name, kind=SpanKind.CLIENT)\n if span.is_recording():\n span.set_attribute(\n SpanAttributes.DB_SYSTEM, DbSystemValues.MONGODB.value\n )\n span.set_attribute(SpanAttributes.DB_NAME, event.database_name)\n span.set_attribute(SpanAttributes.DB_STATEMENT, statement)\n if event.connection_id is not None:\n span.set_attribute(\n SpanAttributes.NET_PEER_NAME, event.connection_id[0]\n )\n span.set_attribute(\n SpanAttributes.NET_PEER_PORT, event.connection_id[1]\n )\n try:\n self.start_hook(span, event)\n except Exception as hook_exception: # noqa pylint: disable=broad-except\n _LOG.exception(hook_exception)\n\n # Add Span to dictionary\n self._span_dict[_get_span_dict_key(event)] = span\n except Exception as ex: # noqa pylint: disable=broad-except\n if span is not None and span.is_recording():\n span.set_status(Status(StatusCode.ERROR, str(ex)))\n span.end()\n self._pop_span(event)\n\n def succeeded(self, event: monitoring.CommandSucceededEvent):\n \"\"\"Method to handle a pymongo CommandSucceededEvent\"\"\"\n if not self.is_enabled or context.get_value(\n _SUPPRESS_INSTRUMENTATION_KEY\n ):\n return\n span = self._pop_span(event)\n if span is None:\n return\n if span.is_recording():\n try:\n self.success_hook(span, event)\n except Exception as hook_exception: # noqa pylint: disable=broad-except\n _LOG.exception(hook_exception)\n span.end()\n\n def failed(self, event: monitoring.CommandFailedEvent):\n \"\"\"Method to handle a pymongo CommandFailedEvent\"\"\"\n if not self.is_enabled or context.get_value(\n _SUPPRESS_INSTRUMENTATION_KEY\n ):\n return\n span = self._pop_span(event)\n if span is None:\n return\n if span.is_recording():\n span.set_status(Status(StatusCode.ERROR, event.failure))\n try:\n self.failed_hook(span, event)\n except Exception as hook_exception: # noqa pylint: disable=broad-except\n _LOG.exception(hook_exception)\n span.end()\n\n def _pop_span(self, event):\n return self._span_dict.pop(_get_span_dict_key(event), None)\n\n\ndef _get_span_dict_key(event):\n if event.connection_id is not None:\n return event.request_id, event.connection_id\n return event.request_id\n\n\nclass PymongoInstrumentor(BaseInstrumentor):\n _commandtracer_instance = None # type CommandTracer\n # The instrumentation for PyMongo is based on the event listener interface\n # https://api.mongodb.com/python/current/api/pymongo/monitoring.html.\n # This interface only allows to register listeners and does not provide\n # an unregister API. In order to provide a mechanishm to disable\n # instrumentation an enabled flag is implemented in CommandTracer,\n # it's checked in the different listeners.\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n \"\"\"Integrate with pymongo to trace it using event listener.\n https://api.mongodb.com/python/current/api/pymongo/monitoring.html\n\n Args:\n tracer_provider: The `TracerProvider` to use. 
If none is passed the\n current configured one is used.\n \"\"\"\n\n tracer_provider = kwargs.get(\"tracer_provider\")\n request_hook = kwargs.get(\"request_hook\", dummy_callback)\n response_hook = kwargs.get(\"response_hook\", dummy_callback)\n failed_hook = kwargs.get(\"failed_hook\", dummy_callback)\n # Create and register a CommandTracer only the first time\n if self._commandtracer_instance is None:\n tracer = get_tracer(__name__, __version__, tracer_provider)\n\n self._commandtracer_instance = CommandTracer(\n tracer,\n request_hook=request_hook,\n response_hook=response_hook,\n failed_hook=failed_hook,\n )\n monitoring.register(self._commandtracer_instance)\n # If already created, just enable it\n self._commandtracer_instance.is_enabled = True\n\n def _uninstrument(self, **kwargs):\n if self._commandtracer_instance is not None:\n self._commandtracer_instance.is_enabled = False\n", "path": "instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe integration with MongoDB supports the `pymongo`_ library, it can be\nenabled using the ``PymongoInstrumentor``.\n\n.. _pymongo: https://pypi.org/project/pymongo\n\nUsage\n-----\n\n.. code:: python\n\n from pymongo import MongoClient\n from opentelemetry.instrumentation.pymongo import PymongoInstrumentor\n\n PymongoInstrumentor().instrument()\n client = MongoClient()\n db = client[\"MongoDB_Database\"]\n collection = db[\"MongoDB_Collection\"]\n collection.find_one()\n\nAPI\n---\nThe `instrument` method accepts the following keyword args:\n\ntracer_provider (TracerProvider) - an optional tracer provider\nrequest_hook (Callable) -\na function with extra user-defined logic to be performed before querying mongodb\nthis function signature is: def request_hook(span: Span, event: CommandStartedEvent) -> None\nresponse_hook (Callable) -\na function with extra user-defined logic to be performed after the query returns with a successful response\nthis function signature is: def response_hook(span: Span, event: CommandSucceededEvent) -> None\nfailed_hook (Callable) -\na function with extra user-defined logic to be performed after the query returns with a failed response\nthis function signature is: def failed_hook(span: Span, event: CommandFailedEvent) -> None\n\nfor example:\n\n.. 
code: python\n\n from opentelemetry.instrumentation.pymongo import PymongoInstrumentor\n from pymongo import MongoClient\n\n def request_hook(span, event):\n # request hook logic\n\n def response_hook(span, event):\n # response hook logic\n\n def failed_hook(span, event):\n # failed hook logic\n\n # Instrument pymongo with hooks\n PymongoInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook, failed_hook=failed_hook)\n\n # This will create a span with pymongo specific attributes, including custom attributes added from the hooks\n client = MongoClient()\n db = client[\"MongoDB_Database\"]\n collection = db[\"MongoDB_Collection\"]\n collection.find_one()\n\n\"\"\"\nfrom logging import getLogger\nfrom typing import Callable, Collection\n\nfrom pymongo import monitoring\n\nfrom opentelemetry import context\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.pymongo.package import _instruments\nfrom opentelemetry.instrumentation.pymongo.version import __version__\nfrom opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY\nfrom opentelemetry.semconv.trace import DbSystemValues, SpanAttributes\nfrom opentelemetry.trace import SpanKind, get_tracer\nfrom opentelemetry.trace.span import Span\nfrom opentelemetry.trace.status import Status, StatusCode\n\n_LOG = getLogger(__name__)\n\nRequestHookT = Callable[[Span, monitoring.CommandStartedEvent], None]\nResponseHookT = Callable[[Span, monitoring.CommandSucceededEvent], None]\nFailedHookT = Callable[[Span, monitoring.CommandFailedEvent], None]\n\n\ndef dummy_callback(span, event):\n ...\n\n\nclass CommandTracer(monitoring.CommandListener):\n def __init__(\n self,\n tracer,\n request_hook: RequestHookT = dummy_callback,\n response_hook: ResponseHookT = dummy_callback,\n failed_hook: FailedHookT = dummy_callback,\n ):\n self._tracer = tracer\n self._span_dict = {}\n self.is_enabled = True\n self.start_hook = request_hook\n self.success_hook = response_hook\n self.failed_hook = failed_hook\n\n def started(self, event: monitoring.CommandStartedEvent):\n \"\"\"Method to handle a pymongo CommandStartedEvent\"\"\"\n if not self.is_enabled or context.get_value(\n _SUPPRESS_INSTRUMENTATION_KEY\n ):\n return\n command = event.command.get(event.command_name, \"\")\n name = event.database_name\n name += \".\" + event.command_name\n statement = event.command_name\n if command:\n statement += \" \" + str(command)\n collection = event.command.get(event.command_name)\n\n try:\n span = self._tracer.start_span(name, kind=SpanKind.CLIENT)\n if span.is_recording():\n span.set_attribute(\n SpanAttributes.DB_SYSTEM, DbSystemValues.MONGODB.value\n )\n span.set_attribute(SpanAttributes.DB_NAME, event.database_name)\n span.set_attribute(SpanAttributes.DB_STATEMENT, statement)\n if collection:\n span.set_attribute(\n SpanAttributes.DB_MONGODB_COLLECTION, collection\n )\n if event.connection_id is not None:\n span.set_attribute(\n SpanAttributes.NET_PEER_NAME, event.connection_id[0]\n )\n span.set_attribute(\n SpanAttributes.NET_PEER_PORT, event.connection_id[1]\n )\n try:\n self.start_hook(span, event)\n except Exception as hook_exception: # noqa pylint: disable=broad-except\n _LOG.exception(hook_exception)\n\n # Add Span to dictionary\n self._span_dict[_get_span_dict_key(event)] = span\n except Exception as ex: # noqa pylint: disable=broad-except\n if span is not None and span.is_recording():\n span.set_status(Status(StatusCode.ERROR, str(ex)))\n span.end()\n 
self._pop_span(event)\n\n def succeeded(self, event: monitoring.CommandSucceededEvent):\n \"\"\"Method to handle a pymongo CommandSucceededEvent\"\"\"\n if not self.is_enabled or context.get_value(\n _SUPPRESS_INSTRUMENTATION_KEY\n ):\n return\n span = self._pop_span(event)\n if span is None:\n return\n if span.is_recording():\n try:\n self.success_hook(span, event)\n except Exception as hook_exception: # noqa pylint: disable=broad-except\n _LOG.exception(hook_exception)\n span.end()\n\n def failed(self, event: monitoring.CommandFailedEvent):\n \"\"\"Method to handle a pymongo CommandFailedEvent\"\"\"\n if not self.is_enabled or context.get_value(\n _SUPPRESS_INSTRUMENTATION_KEY\n ):\n return\n span = self._pop_span(event)\n if span is None:\n return\n if span.is_recording():\n span.set_status(Status(StatusCode.ERROR, event.failure))\n try:\n self.failed_hook(span, event)\n except Exception as hook_exception: # noqa pylint: disable=broad-except\n _LOG.exception(hook_exception)\n span.end()\n\n def _pop_span(self, event):\n return self._span_dict.pop(_get_span_dict_key(event), None)\n\n\ndef _get_span_dict_key(event):\n if event.connection_id is not None:\n return event.request_id, event.connection_id\n return event.request_id\n\n\nclass PymongoInstrumentor(BaseInstrumentor):\n _commandtracer_instance = None # type CommandTracer\n # The instrumentation for PyMongo is based on the event listener interface\n # https://api.mongodb.com/python/current/api/pymongo/monitoring.html.\n # This interface only allows to register listeners and does not provide\n # an unregister API. In order to provide a mechanishm to disable\n # instrumentation an enabled flag is implemented in CommandTracer,\n # it's checked in the different listeners.\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n \"\"\"Integrate with pymongo to trace it using event listener.\n https://api.mongodb.com/python/current/api/pymongo/monitoring.html\n\n Args:\n tracer_provider: The `TracerProvider` to use. If none is passed the\n current configured one is used.\n \"\"\"\n\n tracer_provider = kwargs.get(\"tracer_provider\")\n request_hook = kwargs.get(\"request_hook\", dummy_callback)\n response_hook = kwargs.get(\"response_hook\", dummy_callback)\n failed_hook = kwargs.get(\"failed_hook\", dummy_callback)\n # Create and register a CommandTracer only the first time\n if self._commandtracer_instance is None:\n tracer = get_tracer(__name__, __version__, tracer_provider)\n\n self._commandtracer_instance = CommandTracer(\n tracer,\n request_hook=request_hook,\n response_hook=response_hook,\n failed_hook=failed_hook,\n )\n monitoring.register(self._commandtracer_instance)\n # If already created, just enable it\n self._commandtracer_instance.is_enabled = True\n\n def _uninstrument(self, **kwargs):\n if self._commandtracer_instance is not None:\n self._commandtracer_instance.is_enabled = False\n", "path": "instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py"}]}
| 3,104 | 277 |
gh_patches_debug_14241
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-1774
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect rule: "E1019: Sub parameter should be an object of 1 or string for..."
*cfn-lint version: 0.40.0*
I am getting an incorrect error that [`E1019`](https://github.com/aws-cloudformation/cfn-python-lint/blob/master/docs/rules.md#E1019)` Sub parameter should be an object of 1 or string for...` when using YAML:
```
- Fn::Sub:
    - 'example-${Var}-${Var2}'
    - Var: 123
      Var2: 456
```
Official docs: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html
Official docs sample:
```
Fn::Sub:
- String
- Var1Name: Var1Value
Var2Name: Var2Value
```
--- END ISSUE ---
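Before reading the rule below, a minimal standalone sketch of the type check that produces this message may help. It assumes the unquoted YAML scalars 123 and 456 reach the rule as Python integers, and it mirrors the `isinstance` branch near the end of `_test_parameters` in the file that follows.

```python
# Standalone sketch of the check behind E1019, isolated from cfn-lint.
# Assumption: unquoted YAML scalars such as 123 arrive here as Python ints.
import six

sub_variables = {"Var": 123, "Var2": 456}  # second element of the Fn::Sub list

for name, value in sub_variables.items():
    if isinstance(value, dict):
        pass  # single-key function objects ({"Ref": ...}, etc.) are handled separately
    elif not isinstance(value, six.string_types):
        # Integers and floats fall into this branch, which is where the
        # "Sub parameter should be an object of 1 or string" match is raised.
        print(f"{name}={value!r} would be flagged")
```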
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/functions/Sub.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import six
6 from cfnlint.helpers import PSEUDOPARAMS, VALID_PARAMETER_TYPES_LIST
7 from cfnlint.rules import CloudFormationLintRule
8 from cfnlint.rules import RuleMatch
9
10
11 class Sub(CloudFormationLintRule):
12 """Check if Sub values are correct"""
13 id = 'E1019'
14 shortdesc = 'Sub validation of parameters'
15 description = 'Making sure the sub function is properly configured'
16 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'
17 tags = ['functions', 'sub']
18
19 def _test_string(self, cfn, sub_string, parameters, tree):
20 """Test if a string has appropriate parameters"""
21
22 matches = []
23 string_params = cfn.get_sub_parameters(sub_string)
24
25 for string_param in string_params:
26 if isinstance(string_param, (six.string_types)):
27 matches.extend(self._test_parameter(string_param, cfn, parameters, tree))
28
29 return matches
30
31 def _get_parameters(self, cfn):
32 """Get all Parameter Names"""
33 results = {}
34 parameters = cfn.template.get('Parameters', {})
35 if isinstance(parameters, dict):
36 for param_name, param_values in parameters.items():
37 # This rule isn't here to check the Types but we need
38 # something valid if it doesn't exist
39 results[param_name] = param_values.get('Type', 'String')
40
41 return results
42
43 def _test_parameters(self, parameters, cfn, tree):
44 """Check parameters for appropriate configuration"""
45
46 supported_functions = [
47 'Fn::Base64',
48 'Fn::FindInMap',
49 'Fn::GetAZs',
50 'Fn::GetAtt',
51 'Fn::If',
52 'Fn::ImportValue',
53 'Fn::Join',
54 'Fn::Select',
55 'Fn::Sub',
56 'Ref',
57 ]
58
59 matches = []
60 for parameter_name, parameter_value_obj in parameters.items():
61 param_tree = tree[:] + [parameter_name]
62 if isinstance(parameter_value_obj, dict):
63 if len(parameter_value_obj) == 1:
64 for key, value in parameter_value_obj.items():
65 if key not in supported_functions:
66 message = 'Sub parameter should use a valid function for {0}'
67 matches.append(RuleMatch(
68 param_tree, message.format('/'.join(map(str, tree)))))
69 elif key in ['Ref']:
70 matches.extend(self._test_parameter(value, cfn, {}, tree))
71 elif key in ['Fn::GetAtt']:
72 if isinstance(value, list):
73 # Only test this if all the items are a string
74 if_all_strings = True
75 for v in value:
76 if not isinstance(v, six.string_types):
77 # skip things got too complex
78 if_all_strings = False
79 if if_all_strings:
80 matches.extend(self._test_parameter(
81 '.'.join(value), cfn, {}, tree))
82 elif isinstance(value, six.string_types):
83 matches.extend(self._test_parameter(value, cfn, {}, tree))
84 else:
85 message = 'Sub parameter should be an object of 1 for {0}'
86 matches.append(RuleMatch(
87 param_tree, message.format('/'.join(map(str, tree)))))
88 elif not isinstance(parameter_value_obj, six.string_types):
89 message = 'Sub parameter should be an object of 1 or string for {0}'
90 matches.append(RuleMatch(
91 param_tree, message.format('/'.join(map(str, tree)))))
92
93 return matches
94
95 def _test_parameter(self, parameter, cfn, parameters, tree):
96 """ Test a parameter """
97
98 matches = []
99 get_atts = cfn.get_valid_getatts()
100
101 valid_params = list(PSEUDOPARAMS)
102 valid_params.extend(cfn.get_resource_names())
103 template_parameters = self._get_parameters(cfn)
104
105 for key, _ in parameters.items():
106 valid_params.append(key)
107
108 if parameter not in valid_params:
109 found = False
110 if parameter in template_parameters:
111 found = True
112 if template_parameters.get(parameter) in VALID_PARAMETER_TYPES_LIST:
113 message = 'Fn::Sub cannot use list {0} at {1}'
114 matches.append(RuleMatch(
115 tree, message.format(parameter, '/'.join(map(str, tree)))))
116 for resource, attributes in get_atts.items():
117 for attribute_name, attribute_values in attributes.items():
118 if resource == parameter.split('.')[0]:
119 if attribute_name == '*':
120 found = True
121 elif attribute_name == '.'.join(parameter.split('.')[1:]):
122 if attribute_values.get('Type') == 'List':
123 message = 'Fn::Sub cannot use list {0} at {1}'
124 matches.append(RuleMatch(
125 tree, message.format(parameter, '/'.join(map(str, tree)))))
126 found = True
127 else:
128 if attribute_name == parameter.split('.')[1] and attribute_values.get('Type') == 'Map':
129 found = True
130
131 if not found:
132 message = 'Parameter {0} for Fn::Sub not found at {1}'
133 matches.append(RuleMatch(
134 tree, message.format(parameter, '/'.join(map(str, tree)))))
135
136 return matches
137
138 def match(self, cfn):
139 matches = []
140
141 sub_objs = cfn.search_deep_keys('Fn::Sub')
142
143 for sub_obj in sub_objs:
144 sub_value_obj = sub_obj[-1]
145 tree = sub_obj[:-1]
146 if isinstance(sub_value_obj, six.string_types):
147 matches.extend(self._test_string(cfn, sub_value_obj, {}, tree))
148 elif isinstance(sub_value_obj, list):
149 if len(sub_value_obj) == 2:
150 sub_string = sub_value_obj[0]
151 parameters = sub_value_obj[1]
152 if not isinstance(sub_string, six.string_types):
153 message = 'Subs first element should be of type string for {0}'
154 matches.append(RuleMatch(
155 tree + [0], message.format('/'.join(map(str, tree)))))
156 if not isinstance(parameters, dict):
157 message = 'Subs second element should be an object for {0}'
158 matches.append(RuleMatch(
159 tree + [1], message.format('/'.join(map(str, tree)))))
160 else:
161 matches.extend(self._test_string(cfn, sub_string, parameters, tree + [0]))
162 matches.extend(self._test_parameters(parameters, cfn, tree))
163 else:
164 message = 'Sub should be an array of 2 for {0}'
165 matches.append(RuleMatch(
166 tree, message.format('/'.join(map(str, tree)))))
167 elif isinstance(sub_value_obj, dict):
168 if len(sub_value_obj) == 1:
169 for key, _ in sub_value_obj.items():
170 if not key == 'Fn::Transform':
171 message = 'Sub should be a string or array of 2 items for {0}'
172 matches.append(RuleMatch(
173 tree, message.format('/'.join(map(str, tree)))))
174 else:
175 message = 'Sub should be a string or array of 2 items for {0}'
176 matches.append(RuleMatch(
177 tree, message.format('/'.join(map(str, tree)))))
178 else:
179 message = 'Sub should be a string or array of 2 items for {0}'
180 matches.append(RuleMatch(
181 tree, message.format('/'.join(map(str, tree)))))
182
183 return matches
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cfnlint/rules/functions/Sub.py b/src/cfnlint/rules/functions/Sub.py
--- a/src/cfnlint/rules/functions/Sub.py
+++ b/src/cfnlint/rules/functions/Sub.py
@@ -85,8 +85,8 @@
message = 'Sub parameter should be an object of 1 for {0}'
matches.append(RuleMatch(
param_tree, message.format('/'.join(map(str, tree)))))
- elif not isinstance(parameter_value_obj, six.string_types):
- message = 'Sub parameter should be an object of 1 or string for {0}'
+ elif isinstance(parameter_value_obj, list):
+ message = 'Sub parameter value should be a string for {0}'
matches.append(RuleMatch(
param_tree, message.format('/'.join(map(str, tree)))))
|
{"golden_diff": "diff --git a/src/cfnlint/rules/functions/Sub.py b/src/cfnlint/rules/functions/Sub.py\n--- a/src/cfnlint/rules/functions/Sub.py\n+++ b/src/cfnlint/rules/functions/Sub.py\n@@ -85,8 +85,8 @@\n message = 'Sub parameter should be an object of 1 for {0}'\n matches.append(RuleMatch(\n param_tree, message.format('/'.join(map(str, tree)))))\n- elif not isinstance(parameter_value_obj, six.string_types):\n- message = 'Sub parameter should be an object of 1 or string for {0}'\n+ elif isinstance(parameter_value_obj, list):\n+ message = 'Sub parameter value should be a string for {0}'\n matches.append(RuleMatch(\n param_tree, message.format('/'.join(map(str, tree)))))\n", "issue": "Incorrect rule: \"E1019: Sub parameter should be an object of 1 or string for...\"\n*cfn-lint version: 0.40.0*\r\n\r\nI am getting an incorrect error that [`E1019`](https://github.com/aws-cloudformation/cfn-python-lint/blob/master/docs/rules.md#E1019)` Sub parameter should be an object of 1 or string for...` when using YAML:\r\n\r\n```\r\n - Fn::Sub:\r\n - 'example-${Var}-${Var2}'\r\n - Var: 123\r\n Var2: 456\r\n```\r\n\r\nOfficial docs: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html\r\n\r\nOfficial docs sample:\r\n```\r\nFn::Sub:\r\n - String\r\n - Var1Name: Var1Value\r\n Var2Name: Var2Value\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport six\nfrom cfnlint.helpers import PSEUDOPARAMS, VALID_PARAMETER_TYPES_LIST\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass Sub(CloudFormationLintRule):\n \"\"\"Check if Sub values are correct\"\"\"\n id = 'E1019'\n shortdesc = 'Sub validation of parameters'\n description = 'Making sure the sub function is properly configured'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub']\n\n def _test_string(self, cfn, sub_string, parameters, tree):\n \"\"\"Test if a string has appropriate parameters\"\"\"\n\n matches = []\n string_params = cfn.get_sub_parameters(sub_string)\n\n for string_param in string_params:\n if isinstance(string_param, (six.string_types)):\n matches.extend(self._test_parameter(string_param, cfn, parameters, tree))\n\n return matches\n\n def _get_parameters(self, cfn):\n \"\"\"Get all Parameter Names\"\"\"\n results = {}\n parameters = cfn.template.get('Parameters', {})\n if isinstance(parameters, dict):\n for param_name, param_values in parameters.items():\n # This rule isn't here to check the Types but we need\n # something valid if it doesn't exist\n results[param_name] = param_values.get('Type', 'String')\n\n return results\n\n def _test_parameters(self, parameters, cfn, tree):\n \"\"\"Check parameters for appropriate configuration\"\"\"\n\n supported_functions = [\n 'Fn::Base64',\n 'Fn::FindInMap',\n 'Fn::GetAZs',\n 'Fn::GetAtt',\n 'Fn::If',\n 'Fn::ImportValue',\n 'Fn::Join',\n 'Fn::Select',\n 'Fn::Sub',\n 'Ref',\n ]\n\n matches = []\n for parameter_name, parameter_value_obj in parameters.items():\n param_tree = tree[:] + [parameter_name]\n if isinstance(parameter_value_obj, dict):\n if len(parameter_value_obj) == 1:\n for key, value in parameter_value_obj.items():\n if key not in supported_functions:\n message = 'Sub parameter should use a valid function for {0}'\n matches.append(RuleMatch(\n param_tree, message.format('/'.join(map(str, 
tree)))))\n elif key in ['Ref']:\n matches.extend(self._test_parameter(value, cfn, {}, tree))\n elif key in ['Fn::GetAtt']:\n if isinstance(value, list):\n # Only test this if all the items are a string\n if_all_strings = True\n for v in value:\n if not isinstance(v, six.string_types):\n # skip things got too complex\n if_all_strings = False\n if if_all_strings:\n matches.extend(self._test_parameter(\n '.'.join(value), cfn, {}, tree))\n elif isinstance(value, six.string_types):\n matches.extend(self._test_parameter(value, cfn, {}, tree))\n else:\n message = 'Sub parameter should be an object of 1 for {0}'\n matches.append(RuleMatch(\n param_tree, message.format('/'.join(map(str, tree)))))\n elif not isinstance(parameter_value_obj, six.string_types):\n message = 'Sub parameter should be an object of 1 or string for {0}'\n matches.append(RuleMatch(\n param_tree, message.format('/'.join(map(str, tree)))))\n\n return matches\n\n def _test_parameter(self, parameter, cfn, parameters, tree):\n \"\"\" Test a parameter \"\"\"\n\n matches = []\n get_atts = cfn.get_valid_getatts()\n\n valid_params = list(PSEUDOPARAMS)\n valid_params.extend(cfn.get_resource_names())\n template_parameters = self._get_parameters(cfn)\n\n for key, _ in parameters.items():\n valid_params.append(key)\n\n if parameter not in valid_params:\n found = False\n if parameter in template_parameters:\n found = True\n if template_parameters.get(parameter) in VALID_PARAMETER_TYPES_LIST:\n message = 'Fn::Sub cannot use list {0} at {1}'\n matches.append(RuleMatch(\n tree, message.format(parameter, '/'.join(map(str, tree)))))\n for resource, attributes in get_atts.items():\n for attribute_name, attribute_values in attributes.items():\n if resource == parameter.split('.')[0]:\n if attribute_name == '*':\n found = True\n elif attribute_name == '.'.join(parameter.split('.')[1:]):\n if attribute_values.get('Type') == 'List':\n message = 'Fn::Sub cannot use list {0} at {1}'\n matches.append(RuleMatch(\n tree, message.format(parameter, '/'.join(map(str, tree)))))\n found = True\n else:\n if attribute_name == parameter.split('.')[1] and attribute_values.get('Type') == 'Map':\n found = True\n\n if not found:\n message = 'Parameter {0} for Fn::Sub not found at {1}'\n matches.append(RuleMatch(\n tree, message.format(parameter, '/'.join(map(str, tree)))))\n\n return matches\n\n def match(self, cfn):\n matches = []\n\n sub_objs = cfn.search_deep_keys('Fn::Sub')\n\n for sub_obj in sub_objs:\n sub_value_obj = sub_obj[-1]\n tree = sub_obj[:-1]\n if isinstance(sub_value_obj, six.string_types):\n matches.extend(self._test_string(cfn, sub_value_obj, {}, tree))\n elif isinstance(sub_value_obj, list):\n if len(sub_value_obj) == 2:\n sub_string = sub_value_obj[0]\n parameters = sub_value_obj[1]\n if not isinstance(sub_string, six.string_types):\n message = 'Subs first element should be of type string for {0}'\n matches.append(RuleMatch(\n tree + [0], message.format('/'.join(map(str, tree)))))\n if not isinstance(parameters, dict):\n message = 'Subs second element should be an object for {0}'\n matches.append(RuleMatch(\n tree + [1], message.format('/'.join(map(str, tree)))))\n else:\n matches.extend(self._test_string(cfn, sub_string, parameters, tree + [0]))\n matches.extend(self._test_parameters(parameters, cfn, tree))\n else:\n message = 'Sub should be an array of 2 for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n elif isinstance(sub_value_obj, dict):\n if len(sub_value_obj) == 1:\n for key, _ in 
sub_value_obj.items():\n if not key == 'Fn::Transform':\n message = 'Sub should be a string or array of 2 items for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n else:\n message = 'Sub should be a string or array of 2 items for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n else:\n message = 'Sub should be a string or array of 2 items for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n\n return matches\n", "path": "src/cfnlint/rules/functions/Sub.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport six\nfrom cfnlint.helpers import PSEUDOPARAMS, VALID_PARAMETER_TYPES_LIST\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass Sub(CloudFormationLintRule):\n \"\"\"Check if Sub values are correct\"\"\"\n id = 'E1019'\n shortdesc = 'Sub validation of parameters'\n description = 'Making sure the sub function is properly configured'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub']\n\n def _test_string(self, cfn, sub_string, parameters, tree):\n \"\"\"Test if a string has appropriate parameters\"\"\"\n\n matches = []\n string_params = cfn.get_sub_parameters(sub_string)\n\n for string_param in string_params:\n if isinstance(string_param, (six.string_types)):\n matches.extend(self._test_parameter(string_param, cfn, parameters, tree))\n\n return matches\n\n def _get_parameters(self, cfn):\n \"\"\"Get all Parameter Names\"\"\"\n results = {}\n parameters = cfn.template.get('Parameters', {})\n if isinstance(parameters, dict):\n for param_name, param_values in parameters.items():\n # This rule isn't here to check the Types but we need\n # something valid if it doesn't exist\n results[param_name] = param_values.get('Type', 'String')\n\n return results\n\n def _test_parameters(self, parameters, cfn, tree):\n \"\"\"Check parameters for appropriate configuration\"\"\"\n\n supported_functions = [\n 'Fn::Base64',\n 'Fn::FindInMap',\n 'Fn::GetAZs',\n 'Fn::GetAtt',\n 'Fn::If',\n 'Fn::ImportValue',\n 'Fn::Join',\n 'Fn::Select',\n 'Fn::Sub',\n 'Ref',\n ]\n\n matches = []\n for parameter_name, parameter_value_obj in parameters.items():\n param_tree = tree[:] + [parameter_name]\n if isinstance(parameter_value_obj, dict):\n if len(parameter_value_obj) == 1:\n for key, value in parameter_value_obj.items():\n if key not in supported_functions:\n message = 'Sub parameter should use a valid function for {0}'\n matches.append(RuleMatch(\n param_tree, message.format('/'.join(map(str, tree)))))\n elif key in ['Ref']:\n matches.extend(self._test_parameter(value, cfn, {}, tree))\n elif key in ['Fn::GetAtt']:\n if isinstance(value, list):\n # Only test this if all the items are a string\n if_all_strings = True\n for v in value:\n if not isinstance(v, six.string_types):\n # skip things got too complex\n if_all_strings = False\n if if_all_strings:\n matches.extend(self._test_parameter(\n '.'.join(value), cfn, {}, tree))\n elif isinstance(value, six.string_types):\n matches.extend(self._test_parameter(value, cfn, {}, tree))\n else:\n message = 'Sub parameter should be an object of 1 for {0}'\n matches.append(RuleMatch(\n param_tree, message.format('/'.join(map(str, tree)))))\n elif isinstance(parameter_value_obj, list):\n message = 'Sub parameter value should be a string 
for {0}'\n matches.append(RuleMatch(\n param_tree, message.format('/'.join(map(str, tree)))))\n\n return matches\n\n def _test_parameter(self, parameter, cfn, parameters, tree):\n \"\"\" Test a parameter \"\"\"\n\n matches = []\n get_atts = cfn.get_valid_getatts()\n\n valid_params = list(PSEUDOPARAMS)\n valid_params.extend(cfn.get_resource_names())\n template_parameters = self._get_parameters(cfn)\n\n for key, _ in parameters.items():\n valid_params.append(key)\n\n if parameter not in valid_params:\n found = False\n if parameter in template_parameters:\n found = True\n if template_parameters.get(parameter) in VALID_PARAMETER_TYPES_LIST:\n message = 'Fn::Sub cannot use list {0} at {1}'\n matches.append(RuleMatch(\n tree, message.format(parameter, '/'.join(map(str, tree)))))\n for resource, attributes in get_atts.items():\n for attribute_name, attribute_values in attributes.items():\n if resource == parameter.split('.')[0]:\n if attribute_name == '*':\n found = True\n elif attribute_name == '.'.join(parameter.split('.')[1:]):\n if attribute_values.get('Type') == 'List':\n message = 'Fn::Sub cannot use list {0} at {1}'\n matches.append(RuleMatch(\n tree, message.format(parameter, '/'.join(map(str, tree)))))\n found = True\n else:\n if attribute_name == parameter.split('.')[1] and attribute_values.get('Type') == 'Map':\n found = True\n\n if not found:\n message = 'Parameter {0} for Fn::Sub not found at {1}'\n matches.append(RuleMatch(\n tree, message.format(parameter, '/'.join(map(str, tree)))))\n\n return matches\n\n def match(self, cfn):\n matches = []\n\n sub_objs = cfn.search_deep_keys('Fn::Sub')\n\n for sub_obj in sub_objs:\n sub_value_obj = sub_obj[-1]\n tree = sub_obj[:-1]\n if isinstance(sub_value_obj, six.string_types):\n matches.extend(self._test_string(cfn, sub_value_obj, {}, tree))\n elif isinstance(sub_value_obj, list):\n if len(sub_value_obj) == 2:\n sub_string = sub_value_obj[0]\n parameters = sub_value_obj[1]\n if not isinstance(sub_string, six.string_types):\n message = 'Subs first element should be of type string for {0}'\n matches.append(RuleMatch(\n tree + [0], message.format('/'.join(map(str, tree)))))\n if not isinstance(parameters, dict):\n message = 'Subs second element should be an object for {0}'\n matches.append(RuleMatch(\n tree + [1], message.format('/'.join(map(str, tree)))))\n else:\n matches.extend(self._test_string(cfn, sub_string, parameters, tree + [0]))\n matches.extend(self._test_parameters(parameters, cfn, tree))\n else:\n message = 'Sub should be an array of 2 for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n elif isinstance(sub_value_obj, dict):\n if len(sub_value_obj) == 1:\n for key, _ in sub_value_obj.items():\n if not key == 'Fn::Transform':\n message = 'Sub should be a string or array of 2 items for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n else:\n message = 'Sub should be a string or array of 2 items for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n else:\n message = 'Sub should be a string or array of 2 items for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n\n return matches\n", "path": "src/cfnlint/rules/functions/Sub.py"}]}
| 2,494 | 174 |
gh_patches_debug_34814
|
rasdani/github-patches
|
git_diff
|
dynaconf__dynaconf-825
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[RFC] Support multidoc yaml files
**Is your feature request related to a problem? Please describe.**
Sometimes it can be difficult or impossible to pass multiple files with config fragments. yaml support multiple documents in one file and `safe_load_all` from pyaml api loads that accordingly. It is standard yaml feature, it would be nice to support it and make in usable in cases when passing one file (composited from more files) would be easier.
**Describe the solution you'd like**
Support `safe_load_all` as yaml loader.
**Describe alternatives you've considered**
Passing multiple files will do the work, however it doesn't have to be always straightforward.
**Additional context**
I have prepared a patch
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dynaconf/loaders/yaml_loader.py`
Content:
```
1 from __future__ import annotations
2
3 import sys
4 from pathlib import Path
5 from typing import TextIO
6 from warnings import warn
7
8 from dynaconf import default_settings
9 from dynaconf.constants import YAML_EXTENSIONS
10 from dynaconf.loaders.base import BaseLoader
11 from dynaconf.utils import object_merge
12 from dynaconf.utils.parse_conf import try_to_encode
13 from dynaconf.vendor.ruamel import yaml
14
15 # Add support for Dynaconf Lazy values to YAML dumper
16 yaml.SafeDumper.yaml_representers[
17 None
18 ] = lambda self, data: yaml.representer.SafeRepresenter.represent_str(
19 self, try_to_encode(data)
20 )
21
22
23 def load(obj, env=None, silent=True, key=None, filename=None, validate=False):
24 """
25 Reads and loads in to "obj" a single key or all keys from source file.
26
27 :param obj: the settings instance
28 :param env: settings current env default='development'
29 :param silent: if errors should raise
30 :param key: if defined load a single key, else load all in env
31 :param filename: Optional custom filename to load
32 :return: None
33 """
34 # Resolve the loaders
35 # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
36 # Possible values are `safe_load, full_load, unsafe_load, load`
37 yaml_reader = getattr(
38 yaml, obj.get("YAML_LOADER_FOR_DYNACONF"), yaml.safe_load
39 )
40 if yaml_reader.__name__ == "unsafe_load": # pragma: no cover
41 warn(
42 "yaml.unsafe_load is deprecated."
43 " Please read https://msg.pyyaml.org/load for full details."
44 " Try to use full_load or safe_load."
45 )
46
47 loader = BaseLoader(
48 obj=obj,
49 env=env,
50 identifier="yaml",
51 extensions=YAML_EXTENSIONS,
52 file_reader=yaml_reader,
53 string_reader=yaml_reader,
54 validate=validate,
55 )
56 loader.load(
57 filename=filename,
58 key=key,
59 silent=silent,
60 )
61
62
63 def write(settings_path, settings_data, merge=True):
64 """Write data to a settings file.
65
66 :param settings_path: the filepath
67 :param settings_data: a dictionary with data
68 :param merge: boolean if existing file should be merged with new data
69 :param stdout: boolean if should output to stdout instead of file
70 """
71 settings_path = Path(settings_path)
72 if settings_path.exists() and merge: # pragma: no cover
73 with open(
74 str(settings_path), encoding=default_settings.ENCODING_FOR_DYNACONF
75 ) as open_file:
76 object_merge(yaml.safe_load(open_file), settings_data)
77
78 with open(
79 str(settings_path),
80 "w",
81 encoding=default_settings.ENCODING_FOR_DYNACONF,
82 ) as open_file:
83 yaml.dump(
84 settings_data,
85 open_file,
86 Dumper=yaml.dumper.SafeDumper,
87 explicit_start=True,
88 indent=2,
89 default_flow_style=False,
90 )
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dynaconf/loaders/yaml_loader.py b/dynaconf/loaders/yaml_loader.py
--- a/dynaconf/loaders/yaml_loader.py
+++ b/dynaconf/loaders/yaml_loader.py
@@ -20,6 +20,41 @@
)
+class AllLoader(BaseLoader):
+ """YAML Loader to load multi doc files"""
+
+ @staticmethod
+ def _assign_data(data, source_file, content):
+ """Helper to iterate through all docs in a file"""
+ content = tuple(content)
+ if len(content) == 1:
+ data[source_file] = content[0]
+ elif len(content) > 1:
+ for i, doc in enumerate(content):
+ data[f"{source_file}[{i}]"] = doc
+
+ def get_source_data(self, files):
+ data = {}
+ for source_file in files:
+ if source_file.endswith(self.extensions):
+ try:
+ with open(source_file, **self.opener_params) as open_file:
+ content = self.file_reader(open_file)
+ self.obj._loaded_files.append(source_file)
+ self._assign_data(data, source_file, content)
+ except OSError as e:
+ if ".local." not in source_file:
+ warn(
+ f"{self.identifier}_loader: {source_file} "
+ f":{str(e)}"
+ )
+ else:
+ # for tests it is possible to pass string
+ content = self.string_reader(source_file)
+ self._assign_data(data, source_file, content)
+ return data
+
+
def load(obj, env=None, silent=True, key=None, filename=None, validate=False):
"""
Reads and loads in to "obj" a single key or all keys from source file.
@@ -33,7 +68,8 @@
"""
# Resolve the loaders
# https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
- # Possible values are `safe_load, full_load, unsafe_load, load`
+ # Possible values are:
+ # `safe_load, full_load, unsafe_load, load, safe_load_all`
yaml_reader = getattr(
yaml, obj.get("YAML_LOADER_FOR_DYNACONF"), yaml.safe_load
)
@@ -44,7 +80,11 @@
" Try to use full_load or safe_load."
)
- loader = BaseLoader(
+ _loader = BaseLoader
+ if yaml_reader.__name__.endswith("_all"):
+ _loader = AllLoader
+
+ loader = _loader(
obj=obj,
env=env,
identifier="yaml",
|
{"golden_diff": "diff --git a/dynaconf/loaders/yaml_loader.py b/dynaconf/loaders/yaml_loader.py\n--- a/dynaconf/loaders/yaml_loader.py\n+++ b/dynaconf/loaders/yaml_loader.py\n@@ -20,6 +20,41 @@\n )\n \n \n+class AllLoader(BaseLoader):\n+ \"\"\"YAML Loader to load multi doc files\"\"\"\n+\n+ @staticmethod\n+ def _assign_data(data, source_file, content):\n+ \"\"\"Helper to iterate through all docs in a file\"\"\"\n+ content = tuple(content)\n+ if len(content) == 1:\n+ data[source_file] = content[0]\n+ elif len(content) > 1:\n+ for i, doc in enumerate(content):\n+ data[f\"{source_file}[{i}]\"] = doc\n+\n+ def get_source_data(self, files):\n+ data = {}\n+ for source_file in files:\n+ if source_file.endswith(self.extensions):\n+ try:\n+ with open(source_file, **self.opener_params) as open_file:\n+ content = self.file_reader(open_file)\n+ self.obj._loaded_files.append(source_file)\n+ self._assign_data(data, source_file, content)\n+ except OSError as e:\n+ if \".local.\" not in source_file:\n+ warn(\n+ f\"{self.identifier}_loader: {source_file} \"\n+ f\":{str(e)}\"\n+ )\n+ else:\n+ # for tests it is possible to pass string\n+ content = self.string_reader(source_file)\n+ self._assign_data(data, source_file, content)\n+ return data\n+\n+\n def load(obj, env=None, silent=True, key=None, filename=None, validate=False):\n \"\"\"\n Reads and loads in to \"obj\" a single key or all keys from source file.\n@@ -33,7 +68,8 @@\n \"\"\"\n # Resolve the loaders\n # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation\n- # Possible values are `safe_load, full_load, unsafe_load, load`\n+ # Possible values are:\n+ # `safe_load, full_load, unsafe_load, load, safe_load_all`\n yaml_reader = getattr(\n yaml, obj.get(\"YAML_LOADER_FOR_DYNACONF\"), yaml.safe_load\n )\n@@ -44,7 +80,11 @@\n \" Try to use full_load or safe_load.\"\n )\n \n- loader = BaseLoader(\n+ _loader = BaseLoader\n+ if yaml_reader.__name__.endswith(\"_all\"):\n+ _loader = AllLoader\n+\n+ loader = _loader(\n obj=obj,\n env=env,\n identifier=\"yaml\",\n", "issue": "[RFC] Support multidoc yaml files\n**Is your feature request related to a problem? Please describe.**\r\nSometimes it can be difficult or impossible to pass multiple files with config fragments. yaml support multiple documents in one file and `safe_load_all` from pyaml api loads that accordingly. 
It is standard yaml feature, it would be nice to support it and make in usable in cases when passing one file (composited from more files) would be easier.\r\n\r\n**Describe the solution you'd like**\r\nSupport `safe_load_all` as yaml loader.\r\n\r\n**Describe alternatives you've considered**\r\nPassing multiple files will do the work, however it doesn't have to be always straightforward.\r\n\r\n**Additional context**\r\nI have prepared a patch\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport sys\nfrom pathlib import Path\nfrom typing import TextIO\nfrom warnings import warn\n\nfrom dynaconf import default_settings\nfrom dynaconf.constants import YAML_EXTENSIONS\nfrom dynaconf.loaders.base import BaseLoader\nfrom dynaconf.utils import object_merge\nfrom dynaconf.utils.parse_conf import try_to_encode\nfrom dynaconf.vendor.ruamel import yaml\n\n# Add support for Dynaconf Lazy values to YAML dumper\nyaml.SafeDumper.yaml_representers[\n None\n] = lambda self, data: yaml.representer.SafeRepresenter.represent_str(\n self, try_to_encode(data)\n)\n\n\ndef load(obj, env=None, silent=True, key=None, filename=None, validate=False):\n \"\"\"\n Reads and loads in to \"obj\" a single key or all keys from source file.\n\n :param obj: the settings instance\n :param env: settings current env default='development'\n :param silent: if errors should raise\n :param key: if defined load a single key, else load all in env\n :param filename: Optional custom filename to load\n :return: None\n \"\"\"\n # Resolve the loaders\n # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation\n # Possible values are `safe_load, full_load, unsafe_load, load`\n yaml_reader = getattr(\n yaml, obj.get(\"YAML_LOADER_FOR_DYNACONF\"), yaml.safe_load\n )\n if yaml_reader.__name__ == \"unsafe_load\": # pragma: no cover\n warn(\n \"yaml.unsafe_load is deprecated.\"\n \" Please read https://msg.pyyaml.org/load for full details.\"\n \" Try to use full_load or safe_load.\"\n )\n\n loader = BaseLoader(\n obj=obj,\n env=env,\n identifier=\"yaml\",\n extensions=YAML_EXTENSIONS,\n file_reader=yaml_reader,\n string_reader=yaml_reader,\n validate=validate,\n )\n loader.load(\n filename=filename,\n key=key,\n silent=silent,\n )\n\n\ndef write(settings_path, settings_data, merge=True):\n \"\"\"Write data to a settings file.\n\n :param settings_path: the filepath\n :param settings_data: a dictionary with data\n :param merge: boolean if existing file should be merged with new data\n :param stdout: boolean if should output to stdout instead of file\n \"\"\"\n settings_path = Path(settings_path)\n if settings_path.exists() and merge: # pragma: no cover\n with open(\n str(settings_path), encoding=default_settings.ENCODING_FOR_DYNACONF\n ) as open_file:\n object_merge(yaml.safe_load(open_file), settings_data)\n\n with open(\n str(settings_path),\n \"w\",\n encoding=default_settings.ENCODING_FOR_DYNACONF,\n ) as open_file:\n yaml.dump(\n settings_data,\n open_file,\n Dumper=yaml.dumper.SafeDumper,\n explicit_start=True,\n indent=2,\n default_flow_style=False,\n )\n", "path": "dynaconf/loaders/yaml_loader.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport sys\nfrom pathlib import Path\nfrom typing import TextIO\nfrom warnings import warn\n\nfrom dynaconf import default_settings\nfrom dynaconf.constants import YAML_EXTENSIONS\nfrom dynaconf.loaders.base import BaseLoader\nfrom dynaconf.utils import object_merge\nfrom dynaconf.utils.parse_conf import try_to_encode\nfrom 
dynaconf.vendor.ruamel import yaml\n\n# Add support for Dynaconf Lazy values to YAML dumper\nyaml.SafeDumper.yaml_representers[\n None\n] = lambda self, data: yaml.representer.SafeRepresenter.represent_str(\n self, try_to_encode(data)\n)\n\n\nclass AllLoader(BaseLoader):\n \"\"\"YAML Loader to load multi doc files\"\"\"\n\n @staticmethod\n def _assign_data(data, source_file, content):\n \"\"\"Helper to iterate through all docs in a file\"\"\"\n content = tuple(content)\n if len(content) == 1:\n data[source_file] = content[0]\n elif len(content) > 1:\n for i, doc in enumerate(content):\n data[f\"{source_file}[{i}]\"] = doc\n\n def get_source_data(self, files):\n data = {}\n for source_file in files:\n if source_file.endswith(self.extensions):\n try:\n with open(source_file, **self.opener_params) as open_file:\n content = self.file_reader(open_file)\n self.obj._loaded_files.append(source_file)\n self._assign_data(data, source_file, content)\n except OSError as e:\n if \".local.\" not in source_file:\n warn(\n f\"{self.identifier}_loader: {source_file} \"\n f\":{str(e)}\"\n )\n else:\n # for tests it is possible to pass string\n content = self.string_reader(source_file)\n self._assign_data(data, source_file, content)\n return data\n\n\ndef load(obj, env=None, silent=True, key=None, filename=None, validate=False):\n \"\"\"\n Reads and loads in to \"obj\" a single key or all keys from source file.\n\n :param obj: the settings instance\n :param env: settings current env default='development'\n :param silent: if errors should raise\n :param key: if defined load a single key, else load all in env\n :param filename: Optional custom filename to load\n :return: None\n \"\"\"\n # Resolve the loaders\n # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation\n # Possible values are:\n # `safe_load, full_load, unsafe_load, load, safe_load_all`\n yaml_reader = getattr(\n yaml, obj.get(\"YAML_LOADER_FOR_DYNACONF\"), yaml.safe_load\n )\n if yaml_reader.__name__ == \"unsafe_load\": # pragma: no cover\n warn(\n \"yaml.unsafe_load is deprecated.\"\n \" Please read https://msg.pyyaml.org/load for full details.\"\n \" Try to use full_load or safe_load.\"\n )\n\n _loader = BaseLoader\n if yaml_reader.__name__.endswith(\"_all\"):\n _loader = AllLoader\n\n loader = _loader(\n obj=obj,\n env=env,\n identifier=\"yaml\",\n extensions=YAML_EXTENSIONS,\n file_reader=yaml_reader,\n string_reader=yaml_reader,\n validate=validate,\n )\n loader.load(\n filename=filename,\n key=key,\n silent=silent,\n )\n\n\ndef write(settings_path, settings_data, merge=True):\n \"\"\"Write data to a settings file.\n\n :param settings_path: the filepath\n :param settings_data: a dictionary with data\n :param merge: boolean if existing file should be merged with new data\n :param stdout: boolean if should output to stdout instead of file\n \"\"\"\n settings_path = Path(settings_path)\n if settings_path.exists() and merge: # pragma: no cover\n with open(\n str(settings_path), encoding=default_settings.ENCODING_FOR_DYNACONF\n ) as open_file:\n object_merge(yaml.safe_load(open_file), settings_data)\n\n with open(\n str(settings_path),\n \"w\",\n encoding=default_settings.ENCODING_FOR_DYNACONF,\n ) as open_file:\n yaml.dump(\n settings_data,\n open_file,\n Dumper=yaml.dumper.SafeDumper,\n explicit_start=True,\n indent=2,\n default_flow_style=False,\n )\n", "path": "dynaconf/loaders/yaml_loader.py"}]}
| 1,259 | 601 |
gh_patches_debug_9478
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-548
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add new check: API Gateway V2 should have access logging enabled
AccessLogSettings: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-apigatewayv2-stage.html
Terraform does not currently support this: https://github.com/terraform-providers/terraform-provider-aws/issues/7004
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py`
Content:
```
1 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
2 from checkov.common.models.enums import CheckCategories
3 from checkov.common.models.consts import ANY_VALUE
4
5
6 class APIGatewayAccessLogging(BaseResourceValueCheck):
7
8 def __init__(self):
9 name = "Ensure API Gateway has Access Logging enabled"
10 id = "CKV_AWS_76"
11 supported_resources = ['aws_api_gateway_stage']
12 categories = [CheckCategories.LOGGING]
13 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
14
15 def get_inspected_key(self):
16 return "access_log_settings/[0]/destination_arn"
17
18 def get_expected_value(self):
19 return ANY_VALUE
20
21
22 check = APIGatewayAccessLogging()
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py b/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py
--- a/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py
+++ b/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py
@@ -8,7 +8,7 @@
def __init__(self):
name = "Ensure API Gateway has Access Logging enabled"
id = "CKV_AWS_76"
- supported_resources = ['aws_api_gateway_stage']
+ supported_resources = ['aws_api_gateway_stage', 'aws_apigatewayv2_stage']
categories = [CheckCategories.LOGGING]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
|
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py b/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py\n--- a/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py\n+++ b/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py\n@@ -8,7 +8,7 @@\n def __init__(self):\n name = \"Ensure API Gateway has Access Logging enabled\"\n id = \"CKV_AWS_76\"\n- supported_resources = ['aws_api_gateway_stage']\n+ supported_resources = ['aws_api_gateway_stage', 'aws_apigatewayv2_stage']\n categories = [CheckCategories.LOGGING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n", "issue": "Add new check: API Gateway V2 should have access logging enabled \nAccessLogSettings: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-apigatewayv2-stage.html\r\n\r\nTerraform does not currently support this: https://github.com/terraform-providers/terraform-provider-aws/issues/7004\n", "before_files": [{"content": "from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\nfrom checkov.common.models.consts import ANY_VALUE\n\n\nclass APIGatewayAccessLogging(BaseResourceValueCheck):\n\n def __init__(self):\n name = \"Ensure API Gateway has Access Logging enabled\"\n id = \"CKV_AWS_76\"\n supported_resources = ['aws_api_gateway_stage']\n categories = [CheckCategories.LOGGING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"access_log_settings/[0]/destination_arn\"\n\n def get_expected_value(self):\n return ANY_VALUE\n\n\ncheck = APIGatewayAccessLogging()\n", "path": "checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py"}], "after_files": [{"content": "from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\nfrom checkov.common.models.consts import ANY_VALUE\n\n\nclass APIGatewayAccessLogging(BaseResourceValueCheck):\n\n def __init__(self):\n name = \"Ensure API Gateway has Access Logging enabled\"\n id = \"CKV_AWS_76\"\n supported_resources = ['aws_api_gateway_stage', 'aws_apigatewayv2_stage']\n categories = [CheckCategories.LOGGING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"access_log_settings/[0]/destination_arn\"\n\n def get_expected_value(self):\n return ANY_VALUE\n\n\ncheck = APIGatewayAccessLogging()\n", "path": "checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py"}]}
| 541 | 166 |
gh_patches_debug_1431
|
rasdani/github-patches
|
git_diff
|
pyca__cryptography-4077
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
utils.int_from_bytes gives incorrect answers when passed "builtins.bytes" in python 2.7
```
$ mkvirtualenv repro
$ python --version
Python 2.7.12
$ pip install cryptography future
$ python
from cryptography import utils
from builtins import bytes
x = bytes.fromhex('deadbeef')
y = utils.int_from_bytes(x, 'big')
hex(y)
'0x6227deadbeef27'
```
The reason this happens is that `int_from_bytes` (in py27 mode) casts the passed-in value to `bytes`, which, in py27 mode, is an alias for `str`. Passing a `builtins.bytes` value to `str` somewhat insanely wraps the string with `b'` and `'`. These then get parsed by the rest of `int_from_bytes` as if they were part of the original byte string.
I think this is particularly unfortunate since all the "cryptography" functions say they accept and return `bytes` in their docstrings. Ideally it'd be compatible with all three definitions of `bytes`: the py27 alias to `str`, the one from "future", and the py3 one.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/utils.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import abc
8 import binascii
9 import inspect
10 import sys
11 import warnings
12
13
14 # We use a UserWarning subclass, instead of DeprecationWarning, because CPython
15 # decided deprecation warnings should be invisble by default.
16 class CryptographyDeprecationWarning(UserWarning):
17 pass
18
19
20 # Several APIs were deprecated with no specific end-of-life date because of the
21 # ubiquity of their use. They should not be removed until we agree on when that
22 # cycle ends.
23 PersistentlyDeprecated = CryptographyDeprecationWarning
24 DeprecatedIn21 = CryptographyDeprecationWarning
25
26
27 def _check_bytes(name, value):
28 if not isinstance(value, bytes):
29 raise TypeError("{0} must be bytes".format(name))
30
31
32 def read_only_property(name):
33 return property(lambda self: getattr(self, name))
34
35
36 def register_interface(iface):
37 def register_decorator(klass):
38 verify_interface(iface, klass)
39 iface.register(klass)
40 return klass
41 return register_decorator
42
43
44 def register_interface_if(predicate, iface):
45 def register_decorator(klass):
46 if predicate:
47 verify_interface(iface, klass)
48 iface.register(klass)
49 return klass
50 return register_decorator
51
52
53 if hasattr(int, "from_bytes"):
54 int_from_bytes = int.from_bytes
55 else:
56 def int_from_bytes(data, byteorder, signed=False):
57 assert byteorder == 'big'
58 assert not signed
59
60 # call bytes() on data to allow the use of bytearrays
61 return int(bytes(data).encode('hex'), 16)
62
63
64 if hasattr(int, "to_bytes"):
65 def int_to_bytes(integer, length=None):
66 return integer.to_bytes(
67 length or (integer.bit_length() + 7) // 8 or 1, 'big'
68 )
69 else:
70 def int_to_bytes(integer, length=None):
71 hex_string = '%x' % integer
72 if length is None:
73 n = len(hex_string)
74 else:
75 n = length * 2
76 return binascii.unhexlify(hex_string.zfill(n + (n & 1)))
77
78
79 class InterfaceNotImplemented(Exception):
80 pass
81
82
83 if hasattr(inspect, "signature"):
84 signature = inspect.signature
85 else:
86 signature = inspect.getargspec
87
88
89 def verify_interface(iface, klass):
90 for method in iface.__abstractmethods__:
91 if not hasattr(klass, method):
92 raise InterfaceNotImplemented(
93 "{0} is missing a {1!r} method".format(klass, method)
94 )
95 if isinstance(getattr(iface, method), abc.abstractproperty):
96 # Can't properly verify these yet.
97 continue
98 sig = signature(getattr(iface, method))
99 actual = signature(getattr(klass, method))
100 if sig != actual:
101 raise InterfaceNotImplemented(
102 "{0}.{1}'s signature differs from the expected. Expected: "
103 "{2!r}. Received: {3!r}".format(
104 klass, method, sig, actual
105 )
106 )
107
108
109 # No longer needed as of 2.2, but retained because we have external consumers
110 # who use it.
111 def bit_length(x):
112 return x.bit_length()
113
114
115 class _DeprecatedValue(object):
116 def __init__(self, value, message, warning_class):
117 self.value = value
118 self.message = message
119 self.warning_class = warning_class
120
121
122 class _ModuleWithDeprecations(object):
123 def __init__(self, module):
124 self.__dict__["_module"] = module
125
126 def __getattr__(self, attr):
127 obj = getattr(self._module, attr)
128 if isinstance(obj, _DeprecatedValue):
129 warnings.warn(obj.message, obj.warning_class, stacklevel=2)
130 obj = obj.value
131 return obj
132
133 def __setattr__(self, attr, value):
134 setattr(self._module, attr, value)
135
136 def __delattr__(self, attr):
137 obj = getattr(self._module, attr)
138 if isinstance(obj, _DeprecatedValue):
139 warnings.warn(obj.message, obj.warning_class, stacklevel=2)
140
141 delattr(self._module, attr)
142
143 def __dir__(self):
144 return ["_module"] + dir(self._module)
145
146
147 def deprecated(value, module_name, message, warning_class):
148 module = sys.modules[module_name]
149 if not isinstance(module, _ModuleWithDeprecations):
150 sys.modules[module_name] = _ModuleWithDeprecations(module)
151 return _DeprecatedValue(value, message, warning_class)
152
153
154 def cached_property(func):
155 cached_name = "_cached_{0}".format(func)
156 sentinel = object()
157
158 def inner(instance):
159 cache = getattr(instance, cached_name, sentinel)
160 if cache is not sentinel:
161 return cache
162 result = func(instance)
163 setattr(instance, cached_name, result)
164 return result
165 return property(inner)
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cryptography/utils.py b/src/cryptography/utils.py
--- a/src/cryptography/utils.py
+++ b/src/cryptography/utils.py
@@ -57,8 +57,7 @@
assert byteorder == 'big'
assert not signed
- # call bytes() on data to allow the use of bytearrays
- return int(bytes(data).encode('hex'), 16)
+ return int(binascii.hexlify(data), 16)
if hasattr(int, "to_bytes"):
|
{"golden_diff": "diff --git a/src/cryptography/utils.py b/src/cryptography/utils.py\n--- a/src/cryptography/utils.py\n+++ b/src/cryptography/utils.py\n@@ -57,8 +57,7 @@\n assert byteorder == 'big'\n assert not signed\n \n- # call bytes() on data to allow the use of bytearrays\n- return int(bytes(data).encode('hex'), 16)\n+ return int(binascii.hexlify(data), 16)\n \n \n if hasattr(int, \"to_bytes\"):\n", "issue": "utils.int_from_bytes gives incorrect answers when passed \"builtins.bytes\" in python 2.7\n```\r\n$ mkvirtualenv repro\r\n$ python --version\r\nPython 2.7.12\r\n$ pip install cryptography future\r\n$ python\r\n\r\nfrom cryptography import utils\r\nfrom builtins import bytes\r\nx = bytes.fromhex('deadbeef')\r\ny = utils.int_from_bytes(x, 'big')\r\nhex(y)\r\n'0x6227deadbeef27'\r\n```\r\n\r\nThe reason this happens is that `int_from_bytes` (in py27 mode) casts the passed-in value to `bytes`, which, in py27 mode, is an alias for `str`. Passing a `builtins.bytes` value to `str` somewhat insanely wraps the string with `b'` and `'`. These then get parsed by the rest of `int_from_bytes` as if they were part of the original byte string.\r\n\r\nI think this is particularly unfortunate since all the \"cryptography\" functions say they accept and return `bytes` in their docstrings. Ideally it'd be compatible with all three definitions of `bytes`: the py27 alias to `str`, the one from \"future\", and the py3 one.\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\nimport binascii\nimport inspect\nimport sys\nimport warnings\n\n\n# We use a UserWarning subclass, instead of DeprecationWarning, because CPython\n# decided deprecation warnings should be invisble by default.\nclass CryptographyDeprecationWarning(UserWarning):\n pass\n\n\n# Several APIs were deprecated with no specific end-of-life date because of the\n# ubiquity of their use. 
They should not be removed until we agree on when that\n# cycle ends.\nPersistentlyDeprecated = CryptographyDeprecationWarning\nDeprecatedIn21 = CryptographyDeprecationWarning\n\n\ndef _check_bytes(name, value):\n if not isinstance(value, bytes):\n raise TypeError(\"{0} must be bytes\".format(name))\n\n\ndef read_only_property(name):\n return property(lambda self: getattr(self, name))\n\n\ndef register_interface(iface):\n def register_decorator(klass):\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\ndef register_interface_if(predicate, iface):\n def register_decorator(klass):\n if predicate:\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\nif hasattr(int, \"from_bytes\"):\n int_from_bytes = int.from_bytes\nelse:\n def int_from_bytes(data, byteorder, signed=False):\n assert byteorder == 'big'\n assert not signed\n\n # call bytes() on data to allow the use of bytearrays\n return int(bytes(data).encode('hex'), 16)\n\n\nif hasattr(int, \"to_bytes\"):\n def int_to_bytes(integer, length=None):\n return integer.to_bytes(\n length or (integer.bit_length() + 7) // 8 or 1, 'big'\n )\nelse:\n def int_to_bytes(integer, length=None):\n hex_string = '%x' % integer\n if length is None:\n n = len(hex_string)\n else:\n n = length * 2\n return binascii.unhexlify(hex_string.zfill(n + (n & 1)))\n\n\nclass InterfaceNotImplemented(Exception):\n pass\n\n\nif hasattr(inspect, \"signature\"):\n signature = inspect.signature\nelse:\n signature = inspect.getargspec\n\n\ndef verify_interface(iface, klass):\n for method in iface.__abstractmethods__:\n if not hasattr(klass, method):\n raise InterfaceNotImplemented(\n \"{0} is missing a {1!r} method\".format(klass, method)\n )\n if isinstance(getattr(iface, method), abc.abstractproperty):\n # Can't properly verify these yet.\n continue\n sig = signature(getattr(iface, method))\n actual = signature(getattr(klass, method))\n if sig != actual:\n raise InterfaceNotImplemented(\n \"{0}.{1}'s signature differs from the expected. Expected: \"\n \"{2!r}. 
Received: {3!r}\".format(\n klass, method, sig, actual\n )\n )\n\n\n# No longer needed as of 2.2, but retained because we have external consumers\n# who use it.\ndef bit_length(x):\n return x.bit_length()\n\n\nclass _DeprecatedValue(object):\n def __init__(self, value, message, warning_class):\n self.value = value\n self.message = message\n self.warning_class = warning_class\n\n\nclass _ModuleWithDeprecations(object):\n def __init__(self, module):\n self.__dict__[\"_module\"] = module\n\n def __getattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n obj = obj.value\n return obj\n\n def __setattr__(self, attr, value):\n setattr(self._module, attr, value)\n\n def __delattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n\n delattr(self._module, attr)\n\n def __dir__(self):\n return [\"_module\"] + dir(self._module)\n\n\ndef deprecated(value, module_name, message, warning_class):\n module = sys.modules[module_name]\n if not isinstance(module, _ModuleWithDeprecations):\n sys.modules[module_name] = _ModuleWithDeprecations(module)\n return _DeprecatedValue(value, message, warning_class)\n\n\ndef cached_property(func):\n cached_name = \"_cached_{0}\".format(func)\n sentinel = object()\n\n def inner(instance):\n cache = getattr(instance, cached_name, sentinel)\n if cache is not sentinel:\n return cache\n result = func(instance)\n setattr(instance, cached_name, result)\n return result\n return property(inner)\n", "path": "src/cryptography/utils.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\nimport binascii\nimport inspect\nimport sys\nimport warnings\n\n\n# We use a UserWarning subclass, instead of DeprecationWarning, because CPython\n# decided deprecation warnings should be invisble by default.\nclass CryptographyDeprecationWarning(UserWarning):\n pass\n\n\n# Several APIs were deprecated with no specific end-of-life date because of the\n# ubiquity of their use. 
They should not be removed until we agree on when that\n# cycle ends.\nPersistentlyDeprecated = CryptographyDeprecationWarning\nDeprecatedIn21 = CryptographyDeprecationWarning\n\n\ndef _check_bytes(name, value):\n if not isinstance(value, bytes):\n raise TypeError(\"{0} must be bytes\".format(name))\n\n\ndef read_only_property(name):\n return property(lambda self: getattr(self, name))\n\n\ndef register_interface(iface):\n def register_decorator(klass):\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\ndef register_interface_if(predicate, iface):\n def register_decorator(klass):\n if predicate:\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\nif hasattr(int, \"from_bytes\"):\n int_from_bytes = int.from_bytes\nelse:\n def int_from_bytes(data, byteorder, signed=False):\n assert byteorder == 'big'\n assert not signed\n\n return int(binascii.hexlify(data), 16)\n\n\nif hasattr(int, \"to_bytes\"):\n def int_to_bytes(integer, length=None):\n return integer.to_bytes(\n length or (integer.bit_length() + 7) // 8 or 1, 'big'\n )\nelse:\n def int_to_bytes(integer, length=None):\n hex_string = '%x' % integer\n if length is None:\n n = len(hex_string)\n else:\n n = length * 2\n return binascii.unhexlify(hex_string.zfill(n + (n & 1)))\n\n\nclass InterfaceNotImplemented(Exception):\n pass\n\n\nif hasattr(inspect, \"signature\"):\n signature = inspect.signature\nelse:\n signature = inspect.getargspec\n\n\ndef verify_interface(iface, klass):\n for method in iface.__abstractmethods__:\n if not hasattr(klass, method):\n raise InterfaceNotImplemented(\n \"{0} is missing a {1!r} method\".format(klass, method)\n )\n if isinstance(getattr(iface, method), abc.abstractproperty):\n # Can't properly verify these yet.\n continue\n sig = signature(getattr(iface, method))\n actual = signature(getattr(klass, method))\n if sig != actual:\n raise InterfaceNotImplemented(\n \"{0}.{1}'s signature differs from the expected. Expected: \"\n \"{2!r}. 
Received: {3!r}\".format(\n klass, method, sig, actual\n )\n )\n\n\n# No longer needed as of 2.2, but retained because we have external consumers\n# who use it.\ndef bit_length(x):\n return x.bit_length()\n\n\nclass _DeprecatedValue(object):\n def __init__(self, value, message, warning_class):\n self.value = value\n self.message = message\n self.warning_class = warning_class\n\n\nclass _ModuleWithDeprecations(object):\n def __init__(self, module):\n self.__dict__[\"_module\"] = module\n\n def __getattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n obj = obj.value\n return obj\n\n def __setattr__(self, attr, value):\n setattr(self._module, attr, value)\n\n def __delattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n\n delattr(self._module, attr)\n\n def __dir__(self):\n return [\"_module\"] + dir(self._module)\n\n\ndef deprecated(value, module_name, message, warning_class):\n module = sys.modules[module_name]\n if not isinstance(module, _ModuleWithDeprecations):\n sys.modules[module_name] = _ModuleWithDeprecations(module)\n return _DeprecatedValue(value, message, warning_class)\n\n\ndef cached_property(func):\n cached_name = \"_cached_{0}\".format(func)\n sentinel = object()\n\n def inner(instance):\n cache = getattr(instance, cached_name, sentinel)\n if cache is not sentinel:\n return cache\n result = func(instance)\n setattr(instance, cached_name, result)\n return result\n return property(inner)\n", "path": "src/cryptography/utils.py"}]}
| 2,003 | 111 |
gh_patches_debug_27329
|
rasdani/github-patches
|
git_diff
|
pytorch__text-1467
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Should hide the symbols from the third party
I am integrating KenLM in torchaudio and realized that KenLM uses double-conversion like torchtext does.
In torchaudio we are hiding the symbols of third party with `-fhidden` flag with compiling, but it turns out that torchtext does not do this. (and according to the conversation I had with @malfet about a year ago, PyTorch also hides the symbol of their own code, in addition to third party.)
Torchtext may want to do this in case client code imports the same package compiled differently.
## References:
- https://stackoverflow.com/a/22254251
- https://labjack.com/news/simple-cpp-symbol-visibility-demo
## Double conversion
```
nm torchtext/_torchtext.so| grep double_c | grep 'T __' | head -10
0000000000175a48 T __ZN17double_conversion13StrtodTrimmedENS_6VectorIKcEEi
000000000017175c T __ZN17double_conversion16PowersOfTenCache32GetCachedPowerForDecimalExponentEiPNS_5DiyFpEPi
00000000001716fc T __ZN17double_conversion16PowersOfTenCache36GetCachedPowerForBinaryExponentRangeEiiPNS_5DiyFpEPi
00000000001715bc T __ZN17double_conversion6Bignum11PlusCompareERKS0_S2_S2_
000000000016f7cc T __ZN17double_conversion6Bignum12AssignBignumERKS0_
000000000016f788 T __ZN17double_conversion6Bignum12AssignUInt16Et
000000000016f7a0 T __ZN17double_conversion6Bignum12AssignUInt64Ey
0000000000171188 T __ZN17double_conversion6Bignum13SubtractTimesERKS0_i
00000000001702e8 T __ZN17double_conversion6Bignum14SubtractBignumERKS0_
000000000016ff70 T __ZN17double_conversion6Bignum15AssignHexStringENS_6VectorIKcEE
```
## Sentencepiece
```
$ nm torchtext/_torchtext.so| grep sentencep | grep 'T __' | head -10
0000000000128718 T __ZN13sentencepiece10ModelProto12InternalSwapEPS0_
00000000001277fc T __ZN13sentencepiece10ModelProto14_InternalParseEPKcPN6google8protobuf8internal12ParseContextE
000000000012765c T __ZN13sentencepiece10ModelProto16default_instanceEv
0000000000128334 T __ZN13sentencepiece10ModelProto21CheckTypeAndMergeFromERKN6google8protobuf11MessageLiteE
00000000001276a0 T __ZN13sentencepiece10ModelProto5ClearEv
0000000000128620 T __ZN13sentencepiece10ModelProto8CopyFromERKS0_
0000000000127650 T __ZN13sentencepiece10ModelProto9ArenaDtorEPv
0000000000128338 T __ZN13sentencepiece10ModelProto9MergeFromERKS0_
0000000000127168 T __ZN13sentencepiece10ModelProto9_Internal12trainer_specEPKS0_
0000000000127178 T __ZN13sentencepiece10ModelProto9_Internal14self_test_dataEPKS0_
```
--- END ISSUE ---
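For context on the technique the issue asks for: on GCC/Clang, symbols default to public visibility, so statically linked third-party objects end up exported from the final `_torchtext.so`. Compiling with hidden visibility, and telling the third-party CMake projects to do the same, keeps them internal. The following is a minimal, hedged sketch of how that flag is typically threaded into a torch `CppExtension` build; the module name and source path are placeholders, not the project's actual values.

```python
# Minimal sketch: hide non-exported symbols when building a C++ extension.
# Assumes a torch CppExtension-style build similar to extension.py below.
import platform

from torch.utils.cpp_extension import CppExtension


def _visibility_flags():
    # MSVC exports nothing from a DLL unless it is explicitly dllexport'ed,
    # so no extra flag is needed there.
    if platform.system() == "Windows":
        return []
    # GCC/Clang: default every symbol to hidden so statically linked third-party
    # code (sentencepiece, re2, double-conversion) does not leak into the .so.
    return ["-fvisibility=hidden"]


ext = CppExtension(
    "torchtext._torchtext",
    sources=["torchtext/csrc/placeholder.cpp"],  # placeholder source path
    extra_compile_args=["-O3"] + _visibility_flags(),
)

# For the third-party CMake builds, the equivalent knobs are
#   -DCMAKE_CXX_VISIBILITY_PRESET=hidden
#   -DCMAKE_POLICY_DEFAULT_CMP0063=NEW   (so older projects honor the preset)
```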
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `build_tools/setup_helpers/extension.py`
Content:
```
1 import os
2 import platform
3 import subprocess
4 from pathlib import Path
5
6 from torch.utils.cpp_extension import (
7 CppExtension,
8 BuildExtension as TorchBuildExtension
9 )
10
11 __all__ = [
12 'get_ext_modules',
13 'BuildExtension',
14 ]
15
16 _ROOT_DIR = Path(__file__).parent.parent.parent.resolve()
17 _CSRC_DIR = _ROOT_DIR / 'torchtext' / 'csrc'
18 _TP_BASE_DIR = _ROOT_DIR / 'third_party'
19 _TP_INSTALL_DIR = _TP_BASE_DIR / 'build'
20
21
22 def _get_eca(debug):
23 eca = []
24 if platform.system() == "Windows":
25 eca += ['/MT']
26 if debug:
27 eca += ["-O0", "-g"]
28 else:
29 if platform.system() == "Windows":
30 eca += ['-O2']
31 else:
32 eca += ["-O3"]
33 return eca
34
35
36 def _get_ela(debug):
37 ela = []
38 if debug:
39 if platform.system() == "Windows":
40 ela += ["/DEBUG:FULL"]
41 else:
42 ela += ["-O0", "-g"]
43 else:
44 if platform.system() != "Windows":
45 ela += ["-O3"]
46 return ela
47
48
49 def _get_srcs():
50 return [str(p) for p in _CSRC_DIR.glob('**/*.cpp')]
51
52
53 def _get_include_dirs():
54 return [
55 str(_CSRC_DIR),
56 str(_TP_INSTALL_DIR / 'include'),
57 ]
58
59
60 def _get_library_dirs():
61 return [
62 str(_TP_INSTALL_DIR / 'lib'),
63 str(_TP_INSTALL_DIR / 'lib64')
64 ]
65
66
67 def _get_libraries():
68 # NOTE: The order of the library listed bellow matters.
69 #
70 # For example, the symbol `sentencepiece::unigram::Model` is
71 # defined in sentencepiece but UNDEFINED in sentencepiece_train.
72 # GCC only remembers the last encountered symbol.
73 # Therefore placing 'sentencepiece_train' after 'sentencepiece' cause runtime error.
74 #
75 # $ nm third_party/build/lib/libsentencepiece_train.a | grep _ZTIN13sentencepiece7unigram5ModelE
76 # U _ZTIN13sentencepiece7unigram5ModelE
77 # $ nm third_party/build/lib/libsentencepiece.a | grep _ZTIN13sentencepiece7unigram5ModelE
78 # 0000000000000000 V _ZTIN13sentencepiece7unigram5ModelE
79 return [
80 'sentencepiece_train',
81 'sentencepiece',
82 're2',
83 'double-conversion'
84 ]
85
86
87 def _get_cxx11_abi():
88 try:
89 import torch
90 value = int(torch._C._GLIBCXX_USE_CXX11_ABI)
91 except ImportError:
92 value = 0
93 return '-D_GLIBCXX_USE_CXX11_ABI=' + str(value)
94
95
96 def _build_third_party(debug):
97 build_dir = _TP_BASE_DIR / 'build'
98 build_dir.mkdir(exist_ok=True)
99 build_env = os.environ.copy()
100 config = 'Debug' if debug else 'Release'
101 if platform.system() == 'Windows':
102 extra_args = [
103 '-GNinja',
104 ]
105 build_env.setdefault('CC', 'cl')
106 build_env.setdefault('CXX', 'cl')
107 else:
108 extra_args = ['-DCMAKE_CXX_FLAGS=-fPIC ' + _get_cxx11_abi()]
109 subprocess.run(
110 args=[
111 'cmake',
112 '-DBUILD_SHARED_LIBS=OFF',
113 '-DRE2_BUILD_TESTING=OFF',
114 '-DCMAKE_EXPORT_COMPILE_COMMANDS=ON',
115 f'-DCMAKE_INSTALL_PREFIX={_TP_INSTALL_DIR}',
116 f'-DCMAKE_BUILD_TYPE={config}',
117 ] + extra_args + ['..'],
118 cwd=str(build_dir),
119 check=True,
120 env=build_env,
121 )
122 print('*** Command list Thirdparty ***')
123 with open(build_dir / 'compile_commands.json', 'r') as fileobj:
124 print(fileobj.read())
125 print('running cmake --build', flush=True)
126 subprocess.run(
127 args=['cmake', '--build', '.', '--target', 'install', '--config', config],
128 cwd=str(build_dir),
129 check=True,
130 env=build_env,
131 )
132
133
134 def _build_sentence_piece(debug):
135 build_dir = _TP_BASE_DIR / 'sentencepiece' / 'build'
136 build_dir.mkdir(exist_ok=True)
137 build_env = os.environ.copy()
138 config = 'Debug' if debug else 'Release'
139 if platform.system() == 'Windows':
140 extra_args = ['-GNinja']
141 build_env.setdefault('CC', 'cl')
142 build_env.setdefault('CXX', 'cl')
143 else:
144 extra_args = []
145 subprocess.run(
146 args=['cmake', '-DSPM_ENABLE_SHARED=OFF', f'-DCMAKE_INSTALL_PREFIX={_TP_INSTALL_DIR}',
147 '-DCMAKE_CXX_FLAGS=' + _get_cxx11_abi(),
148 f'-DCMAKE_BUILD_TYPE={config}'] + extra_args + ['..'],
149 cwd=str(build_dir),
150 check=True,
151 env=build_env,
152 )
153 subprocess.run(
154 args=['cmake', '--build', '.', '--target', 'install', '--config', config],
155 cwd=str(build_dir),
156 check=True,
157 env=build_env,
158 )
159
160
161 def _configure_third_party(debug):
162 _build_third_party(debug)
163 _build_sentence_piece(debug)
164
165
166 _EXT_NAME = 'torchtext._torchtext'
167
168
169 def get_ext_modules(debug=False):
170 return [
171 CppExtension(
172 _EXT_NAME,
173 _get_srcs(),
174 libraries=_get_libraries(),
175 include_dirs=_get_include_dirs(),
176 library_dirs=_get_library_dirs(),
177 extra_compile_args=_get_eca(debug),
178 extra_link_args=_get_ela(debug),
179 ),
180 ]
181
182
183 class BuildExtension(TorchBuildExtension):
184 def build_extension(self, ext):
185 if ext.name == _EXT_NAME:
186 _configure_third_party(self.debug)
187 super().build_extension(ext)
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/build_tools/setup_helpers/extension.py b/build_tools/setup_helpers/extension.py
--- a/build_tools/setup_helpers/extension.py
+++ b/build_tools/setup_helpers/extension.py
@@ -29,7 +29,7 @@
if platform.system() == "Windows":
eca += ['-O2']
else:
- eca += ["-O3"]
+ eca += ["-O3", "-fvisibility=hidden"]
return eca
@@ -114,6 +114,8 @@
'-DCMAKE_EXPORT_COMPILE_COMMANDS=ON',
f'-DCMAKE_INSTALL_PREFIX={_TP_INSTALL_DIR}',
f'-DCMAKE_BUILD_TYPE={config}',
+ '-DCMAKE_CXX_VISIBILITY_PRESET=hidden',
+ '-DCMAKE_POLICY_DEFAULT_CMP0063=NEW',
] + extra_args + ['..'],
cwd=str(build_dir),
check=True,
@@ -144,8 +146,11 @@
extra_args = []
subprocess.run(
args=['cmake', '-DSPM_ENABLE_SHARED=OFF', f'-DCMAKE_INSTALL_PREFIX={_TP_INSTALL_DIR}',
+ '-DCMAKE_CXX_VISIBILITY_PRESET=hidden',
'-DCMAKE_CXX_FLAGS=' + _get_cxx11_abi(),
+ '-DCMAKE_POLICY_DEFAULT_CMP0063=NEW',
f'-DCMAKE_BUILD_TYPE={config}'] + extra_args + ['..'],
+
cwd=str(build_dir),
check=True,
env=build_env,
|
{"golden_diff": "diff --git a/build_tools/setup_helpers/extension.py b/build_tools/setup_helpers/extension.py\n--- a/build_tools/setup_helpers/extension.py\n+++ b/build_tools/setup_helpers/extension.py\n@@ -29,7 +29,7 @@\n if platform.system() == \"Windows\":\n eca += ['-O2']\n else:\n- eca += [\"-O3\"]\n+ eca += [\"-O3\", \"-fvisibility=hidden\"]\n return eca\n \n \n@@ -114,6 +114,8 @@\n '-DCMAKE_EXPORT_COMPILE_COMMANDS=ON',\n f'-DCMAKE_INSTALL_PREFIX={_TP_INSTALL_DIR}',\n f'-DCMAKE_BUILD_TYPE={config}',\n+ '-DCMAKE_CXX_VISIBILITY_PRESET=hidden',\n+ '-DCMAKE_POLICY_DEFAULT_CMP0063=NEW',\n ] + extra_args + ['..'],\n cwd=str(build_dir),\n check=True,\n@@ -144,8 +146,11 @@\n extra_args = []\n subprocess.run(\n args=['cmake', '-DSPM_ENABLE_SHARED=OFF', f'-DCMAKE_INSTALL_PREFIX={_TP_INSTALL_DIR}',\n+ '-DCMAKE_CXX_VISIBILITY_PRESET=hidden',\n '-DCMAKE_CXX_FLAGS=' + _get_cxx11_abi(),\n+ '-DCMAKE_POLICY_DEFAULT_CMP0063=NEW',\n f'-DCMAKE_BUILD_TYPE={config}'] + extra_args + ['..'],\n+\n cwd=str(build_dir),\n check=True,\n env=build_env,\n", "issue": "Should hide the symbols from the third party\nI am integrating KenLM in torchaudio and realized that KenLM uses double-conversion like torchtext does.\r\n\r\nIn torchaudio we are hiding the symbols of third party with `-fhidden` flag with compiling, but it turns out that torchtext does not do this. (and according to the conversation I had with @malfet about a year ago, PyTorch also hides the symbol of their own code, in addition to third party.)\r\n\r\nTorchtext may want to do this in case client code imports the same package compiled differently.\r\n\r\n## References:\r\n- https://stackoverflow.com/a/22254251\r\n- https://labjack.com/news/simple-cpp-symbol-visibility-demo\r\n\r\n## Double conversion\r\n\r\n```\r\nnm torchtext/_torchtext.so| grep double_c | grep 'T __' | head -10\r\n0000000000175a48 T __ZN17double_conversion13StrtodTrimmedENS_6VectorIKcEEi\r\n000000000017175c T __ZN17double_conversion16PowersOfTenCache32GetCachedPowerForDecimalExponentEiPNS_5DiyFpEPi\r\n00000000001716fc T __ZN17double_conversion16PowersOfTenCache36GetCachedPowerForBinaryExponentRangeEiiPNS_5DiyFpEPi\r\n00000000001715bc T __ZN17double_conversion6Bignum11PlusCompareERKS0_S2_S2_\r\n000000000016f7cc T __ZN17double_conversion6Bignum12AssignBignumERKS0_\r\n000000000016f788 T __ZN17double_conversion6Bignum12AssignUInt16Et\r\n000000000016f7a0 T __ZN17double_conversion6Bignum12AssignUInt64Ey\r\n0000000000171188 T __ZN17double_conversion6Bignum13SubtractTimesERKS0_i\r\n00000000001702e8 T __ZN17double_conversion6Bignum14SubtractBignumERKS0_\r\n000000000016ff70 T __ZN17double_conversion6Bignum15AssignHexStringENS_6VectorIKcEE\r\n```\r\n\r\n## Sentencepiece\r\n\r\n```\r\n$ nm torchtext/_torchtext.so| grep sentencep | grep 'T __' | head -10\r\n0000000000128718 T __ZN13sentencepiece10ModelProto12InternalSwapEPS0_\r\n00000000001277fc T __ZN13sentencepiece10ModelProto14_InternalParseEPKcPN6google8protobuf8internal12ParseContextE\r\n000000000012765c T __ZN13sentencepiece10ModelProto16default_instanceEv\r\n0000000000128334 T __ZN13sentencepiece10ModelProto21CheckTypeAndMergeFromERKN6google8protobuf11MessageLiteE\r\n00000000001276a0 T __ZN13sentencepiece10ModelProto5ClearEv\r\n0000000000128620 T __ZN13sentencepiece10ModelProto8CopyFromERKS0_\r\n0000000000127650 T __ZN13sentencepiece10ModelProto9ArenaDtorEPv\r\n0000000000128338 T __ZN13sentencepiece10ModelProto9MergeFromERKS0_\r\n0000000000127168 T __ZN13sentencepiece10ModelProto9_Internal12trainer_specEPKS0_\r\n0000000000127178 T 
__ZN13sentencepiece10ModelProto9_Internal14self_test_dataEPKS0_\r\n```\r\n\r\n\n", "before_files": [{"content": "import os\nimport platform\nimport subprocess\nfrom pathlib import Path\n\nfrom torch.utils.cpp_extension import (\n CppExtension,\n BuildExtension as TorchBuildExtension\n)\n\n__all__ = [\n 'get_ext_modules',\n 'BuildExtension',\n]\n\n_ROOT_DIR = Path(__file__).parent.parent.parent.resolve()\n_CSRC_DIR = _ROOT_DIR / 'torchtext' / 'csrc'\n_TP_BASE_DIR = _ROOT_DIR / 'third_party'\n_TP_INSTALL_DIR = _TP_BASE_DIR / 'build'\n\n\ndef _get_eca(debug):\n eca = []\n if platform.system() == \"Windows\":\n eca += ['/MT']\n if debug:\n eca += [\"-O0\", \"-g\"]\n else:\n if platform.system() == \"Windows\":\n eca += ['-O2']\n else:\n eca += [\"-O3\"]\n return eca\n\n\ndef _get_ela(debug):\n ela = []\n if debug:\n if platform.system() == \"Windows\":\n ela += [\"/DEBUG:FULL\"]\n else:\n ela += [\"-O0\", \"-g\"]\n else:\n if platform.system() != \"Windows\":\n ela += [\"-O3\"]\n return ela\n\n\ndef _get_srcs():\n return [str(p) for p in _CSRC_DIR.glob('**/*.cpp')]\n\n\ndef _get_include_dirs():\n return [\n str(_CSRC_DIR),\n str(_TP_INSTALL_DIR / 'include'),\n ]\n\n\ndef _get_library_dirs():\n return [\n str(_TP_INSTALL_DIR / 'lib'),\n str(_TP_INSTALL_DIR / 'lib64')\n ]\n\n\ndef _get_libraries():\n # NOTE: The order of the library listed bellow matters.\n #\n # For example, the symbol `sentencepiece::unigram::Model` is\n # defined in sentencepiece but UNDEFINED in sentencepiece_train.\n # GCC only remembers the last encountered symbol.\n # Therefore placing 'sentencepiece_train' after 'sentencepiece' cause runtime error.\n #\n # $ nm third_party/build/lib/libsentencepiece_train.a | grep _ZTIN13sentencepiece7unigram5ModelE\n # U _ZTIN13sentencepiece7unigram5ModelE\n # $ nm third_party/build/lib/libsentencepiece.a | grep _ZTIN13sentencepiece7unigram5ModelE\n # 0000000000000000 V _ZTIN13sentencepiece7unigram5ModelE\n return [\n 'sentencepiece_train',\n 'sentencepiece',\n 're2',\n 'double-conversion'\n ]\n\n\ndef _get_cxx11_abi():\n try:\n import torch\n value = int(torch._C._GLIBCXX_USE_CXX11_ABI)\n except ImportError:\n value = 0\n return '-D_GLIBCXX_USE_CXX11_ABI=' + str(value)\n\n\ndef _build_third_party(debug):\n build_dir = _TP_BASE_DIR / 'build'\n build_dir.mkdir(exist_ok=True)\n build_env = os.environ.copy()\n config = 'Debug' if debug else 'Release'\n if platform.system() == 'Windows':\n extra_args = [\n '-GNinja',\n ]\n build_env.setdefault('CC', 'cl')\n build_env.setdefault('CXX', 'cl')\n else:\n extra_args = ['-DCMAKE_CXX_FLAGS=-fPIC ' + _get_cxx11_abi()]\n subprocess.run(\n args=[\n 'cmake',\n '-DBUILD_SHARED_LIBS=OFF',\n '-DRE2_BUILD_TESTING=OFF',\n '-DCMAKE_EXPORT_COMPILE_COMMANDS=ON',\n f'-DCMAKE_INSTALL_PREFIX={_TP_INSTALL_DIR}',\n f'-DCMAKE_BUILD_TYPE={config}',\n ] + extra_args + ['..'],\n cwd=str(build_dir),\n check=True,\n env=build_env,\n )\n print('*** Command list Thirdparty ***')\n with open(build_dir / 'compile_commands.json', 'r') as fileobj:\n print(fileobj.read())\n print('running cmake --build', flush=True)\n subprocess.run(\n args=['cmake', '--build', '.', '--target', 'install', '--config', config],\n cwd=str(build_dir),\n check=True,\n env=build_env,\n )\n\n\ndef _build_sentence_piece(debug):\n build_dir = _TP_BASE_DIR / 'sentencepiece' / 'build'\n build_dir.mkdir(exist_ok=True)\n build_env = os.environ.copy()\n config = 'Debug' if debug else 'Release'\n if platform.system() == 'Windows':\n extra_args = ['-GNinja']\n build_env.setdefault('CC', 'cl')\n 
build_env.setdefault('CXX', 'cl')\n else:\n extra_args = []\n subprocess.run(\n args=['cmake', '-DSPM_ENABLE_SHARED=OFF', f'-DCMAKE_INSTALL_PREFIX={_TP_INSTALL_DIR}',\n '-DCMAKE_CXX_FLAGS=' + _get_cxx11_abi(),\n f'-DCMAKE_BUILD_TYPE={config}'] + extra_args + ['..'],\n cwd=str(build_dir),\n check=True,\n env=build_env,\n )\n subprocess.run(\n args=['cmake', '--build', '.', '--target', 'install', '--config', config],\n cwd=str(build_dir),\n check=True,\n env=build_env,\n )\n\n\ndef _configure_third_party(debug):\n _build_third_party(debug)\n _build_sentence_piece(debug)\n\n\n_EXT_NAME = 'torchtext._torchtext'\n\n\ndef get_ext_modules(debug=False):\n return [\n CppExtension(\n _EXT_NAME,\n _get_srcs(),\n libraries=_get_libraries(),\n include_dirs=_get_include_dirs(),\n library_dirs=_get_library_dirs(),\n extra_compile_args=_get_eca(debug),\n extra_link_args=_get_ela(debug),\n ),\n ]\n\n\nclass BuildExtension(TorchBuildExtension):\n def build_extension(self, ext):\n if ext.name == _EXT_NAME:\n _configure_third_party(self.debug)\n super().build_extension(ext)\n", "path": "build_tools/setup_helpers/extension.py"}], "after_files": [{"content": "import os\nimport platform\nimport subprocess\nfrom pathlib import Path\n\nfrom torch.utils.cpp_extension import (\n CppExtension,\n BuildExtension as TorchBuildExtension\n)\n\n__all__ = [\n 'get_ext_modules',\n 'BuildExtension',\n]\n\n_ROOT_DIR = Path(__file__).parent.parent.parent.resolve()\n_CSRC_DIR = _ROOT_DIR / 'torchtext' / 'csrc'\n_TP_BASE_DIR = _ROOT_DIR / 'third_party'\n_TP_INSTALL_DIR = _TP_BASE_DIR / 'build'\n\n\ndef _get_eca(debug):\n eca = []\n if platform.system() == \"Windows\":\n eca += ['/MT']\n if debug:\n eca += [\"-O0\", \"-g\"]\n else:\n if platform.system() == \"Windows\":\n eca += ['-O2']\n else:\n eca += [\"-O3\", \"-fvisibility=hidden\"]\n return eca\n\n\ndef _get_ela(debug):\n ela = []\n if debug:\n if platform.system() == \"Windows\":\n ela += [\"/DEBUG:FULL\"]\n else:\n ela += [\"-O0\", \"-g\"]\n else:\n if platform.system() != \"Windows\":\n ela += [\"-O3\"]\n return ela\n\n\ndef _get_srcs():\n return [str(p) for p in _CSRC_DIR.glob('**/*.cpp')]\n\n\ndef _get_include_dirs():\n return [\n str(_CSRC_DIR),\n str(_TP_INSTALL_DIR / 'include'),\n ]\n\n\ndef _get_library_dirs():\n return [\n str(_TP_INSTALL_DIR / 'lib'),\n str(_TP_INSTALL_DIR / 'lib64')\n ]\n\n\ndef _get_libraries():\n # NOTE: The order of the library listed bellow matters.\n #\n # For example, the symbol `sentencepiece::unigram::Model` is\n # defined in sentencepiece but UNDEFINED in sentencepiece_train.\n # GCC only remembers the last encountered symbol.\n # Therefore placing 'sentencepiece_train' after 'sentencepiece' cause runtime error.\n #\n # $ nm third_party/build/lib/libsentencepiece_train.a | grep _ZTIN13sentencepiece7unigram5ModelE\n # U _ZTIN13sentencepiece7unigram5ModelE\n # $ nm third_party/build/lib/libsentencepiece.a | grep _ZTIN13sentencepiece7unigram5ModelE\n # 0000000000000000 V _ZTIN13sentencepiece7unigram5ModelE\n return [\n 'sentencepiece_train',\n 'sentencepiece',\n 're2',\n 'double-conversion'\n ]\n\n\ndef _get_cxx11_abi():\n try:\n import torch\n value = int(torch._C._GLIBCXX_USE_CXX11_ABI)\n except ImportError:\n value = 0\n return '-D_GLIBCXX_USE_CXX11_ABI=' + str(value)\n\n\ndef _build_third_party(debug):\n build_dir = _TP_BASE_DIR / 'build'\n build_dir.mkdir(exist_ok=True)\n build_env = os.environ.copy()\n config = 'Debug' if debug else 'Release'\n if platform.system() == 'Windows':\n extra_args = [\n '-GNinja',\n ]\n 
build_env.setdefault('CC', 'cl')\n build_env.setdefault('CXX', 'cl')\n else:\n extra_args = ['-DCMAKE_CXX_FLAGS=-fPIC ' + _get_cxx11_abi()]\n subprocess.run(\n args=[\n 'cmake',\n '-DBUILD_SHARED_LIBS=OFF',\n '-DRE2_BUILD_TESTING=OFF',\n '-DCMAKE_EXPORT_COMPILE_COMMANDS=ON',\n f'-DCMAKE_INSTALL_PREFIX={_TP_INSTALL_DIR}',\n f'-DCMAKE_BUILD_TYPE={config}',\n '-DCMAKE_CXX_VISIBILITY_PRESET=hidden',\n '-DCMAKE_POLICY_DEFAULT_CMP0063=NEW',\n ] + extra_args + ['..'],\n cwd=str(build_dir),\n check=True,\n env=build_env,\n )\n print('*** Command list Thirdparty ***')\n with open(build_dir / 'compile_commands.json', 'r') as fileobj:\n print(fileobj.read())\n print('running cmake --build', flush=True)\n subprocess.run(\n args=['cmake', '--build', '.', '--target', 'install', '--config', config],\n cwd=str(build_dir),\n check=True,\n env=build_env,\n )\n\n\ndef _build_sentence_piece(debug):\n build_dir = _TP_BASE_DIR / 'sentencepiece' / 'build'\n build_dir.mkdir(exist_ok=True)\n build_env = os.environ.copy()\n config = 'Debug' if debug else 'Release'\n if platform.system() == 'Windows':\n extra_args = ['-GNinja']\n build_env.setdefault('CC', 'cl')\n build_env.setdefault('CXX', 'cl')\n else:\n extra_args = []\n subprocess.run(\n args=['cmake', '-DSPM_ENABLE_SHARED=OFF', f'-DCMAKE_INSTALL_PREFIX={_TP_INSTALL_DIR}',\n '-DCMAKE_CXX_VISIBILITY_PRESET=hidden',\n '-DCMAKE_CXX_FLAGS=' + _get_cxx11_abi(),\n '-DCMAKE_POLICY_DEFAULT_CMP0063=NEW',\n f'-DCMAKE_BUILD_TYPE={config}'] + extra_args + ['..'],\n\n cwd=str(build_dir),\n check=True,\n env=build_env,\n )\n subprocess.run(\n args=['cmake', '--build', '.', '--target', 'install', '--config', config],\n cwd=str(build_dir),\n check=True,\n env=build_env,\n )\n\n\ndef _configure_third_party(debug):\n _build_third_party(debug)\n _build_sentence_piece(debug)\n\n\n_EXT_NAME = 'torchtext._torchtext'\n\n\ndef get_ext_modules(debug=False):\n return [\n CppExtension(\n _EXT_NAME,\n _get_srcs(),\n libraries=_get_libraries(),\n include_dirs=_get_include_dirs(),\n library_dirs=_get_library_dirs(),\n extra_compile_args=_get_eca(debug),\n extra_link_args=_get_ela(debug),\n ),\n ]\n\n\nclass BuildExtension(TorchBuildExtension):\n def build_extension(self, ext):\n if ext.name == _EXT_NAME:\n _configure_third_party(self.debug)\n super().build_extension(ext)\n", "path": "build_tools/setup_helpers/extension.py"}]}
| 3,029 | 332 |
gh_patches_debug_26111
|
rasdani/github-patches
|
git_diff
|
pytorch__ignite-1200
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pascal training bug with CM
## 🐛 Bug description
```
File "./code/scripts/training.py", line 252, in log_cm
cm = cm_metric.compute().numpy()
TypeError: can't convert cuda:0 device type tensor to numpy. Use Tensor.cpu() to copy the tensor to host memory first.
2020-07-14 03:01:54,416 ignite.distributed.launcher.Parallel INFO: Finalized processing group with backend: 'nccl'
2020-07-14 03:01:54,417|training|ERROR|
```
## Environment
- PyTorch Version (e.g., 1.4):
- Ignite Version (e.g., 0.3.0):
- OS (e.g., Linux):
- How you installed Ignite (`conda`, `pip`, source):
- Python version:
- Any other relevant information:
--- END ISSUE ---
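The traceback is the standard symptom of calling `.numpy()` on a tensor that still lives on a CUDA device: NumPy can only wrap host memory, so the tensor has to be moved back with `.cpu()` first. Below is a minimal sketch of that pattern; the tensor here is a stand-in for the real `cm_metric.compute()` result, and the shape and names are illustrative only.

```python
# Minimal sketch: convert a possibly CUDA-resident metric tensor to NumPy.
import torch


def to_numpy(t: torch.Tensor):
    # .cpu() returns the tensor itself when it is already on the host,
    # so this is safe for both CPU-only and GPU runs.
    return t.detach().cpu().numpy()


cm = torch.randint(0, 10, (21, 21)).float()  # stand-in for cm_metric.compute()
if torch.cuda.is_available():
    cm = cm.cuda()

cm_np = to_numpy(cm)
cm_np = cm_np / (cm_np.sum(axis=1)[:, None] + 1e-15)  # row-normalise, as in log_cm
```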
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/references/segmentation/pascal_voc2012/code/scripts/training.py`
Content:
```
1 # This a training script launched with py_config_runner
2 # It should obligatory contain `run(config, **kwargs)` method
3
4 from pathlib import Path
5 from collections.abc import Mapping
6
7 import torch
8
9 from apex import amp
10
11 import ignite
12 import ignite.distributed as idist
13 from ignite.contrib.engines import common
14 from ignite.engine import Engine, Events, create_supervised_evaluator
15 from ignite.handlers import DiskSaver
16 from ignite.metrics import ConfusionMatrix, IoU, mIoU
17 from ignite.utils import setup_logger
18
19 from py_config_runner.utils import set_seed
20 from py_config_runner.config_utils import get_params, TRAINVAL_CONFIG, assert_config
21
22 import sys
23
24 # Adds "code" folder to python path
25 sys.path.insert(0, Path(__file__).parent.parent.as_posix())
26
27 from utils.handlers import predictions_gt_images_handler
28 from utils import exp_tracking
29 from dataflow.datasets import VOCSegmentationOpencv
30
31
32 def initialize(config):
33
34 model = config.model.to(config.device)
35 optimizer = config.optimizer
36 # Setup Nvidia/Apex AMP
37 model, optimizer = amp.initialize(model, optimizer, opt_level=getattr(config, "fp16_opt_level", "O2"), num_losses=1)
38
39 # Adapt model to dist conf
40 model = idist.auto_model(model)
41
42 criterion = config.criterion.to(config.device)
43
44 return model, optimizer, criterion
45
46
47 def get_save_handler(config):
48 if exp_tracking.has_trains:
49 from ignite.contrib.handlers.trains_logger import TrainsSaver
50
51 return TrainsSaver(dirname=config.output_path.as_posix())
52
53 return DiskSaver(config.output_path.as_posix())
54
55
56 def create_trainer(model, optimizer, criterion, train_sampler, config, logger):
57 prepare_batch = config.prepare_batch
58 device = config.device
59
60 # Setup trainer
61 accumulation_steps = getattr(config, "accumulation_steps", 1)
62 model_output_transform = getattr(config, "model_output_transform", lambda x: x)
63
64 def train_update_function(engine, batch):
65
66 model.train()
67
68 x, y = prepare_batch(batch, device=device, non_blocking=True)
69 y_pred = model(x)
70 y_pred = model_output_transform(y_pred)
71 loss = criterion(y_pred, y)
72
73 if isinstance(loss, Mapping):
74 assert "supervised batch loss" in loss
75 loss_dict = loss
76 output = {k: v.item() for k, v in loss_dict.items()}
77 loss = loss_dict["supervised batch loss"] / accumulation_steps
78 else:
79 output = {"supervised batch loss": loss.item()}
80
81 with amp.scale_loss(loss, optimizer, loss_id=0) as scaled_loss:
82 scaled_loss.backward()
83
84 if engine.state.iteration % accumulation_steps == 0:
85 optimizer.step()
86 optimizer.zero_grad()
87
88 return output
89
90 output_names = getattr(config, "output_names", ["supervised batch loss",])
91 lr_scheduler = config.lr_scheduler
92
93 trainer = Engine(train_update_function)
94 trainer.logger = logger
95
96 to_save = {"model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler, "trainer": trainer, "amp": amp}
97
98 save_every_iters = getattr(config, "save_every_iters", 1000)
99
100 common.setup_common_training_handlers(
101 trainer,
102 train_sampler,
103 to_save=to_save,
104 save_every_iters=save_every_iters,
105 save_handler=get_save_handler(config),
106 lr_scheduler=lr_scheduler,
107 with_gpu_stats=exp_tracking.has_mlflow,
108 output_names=output_names,
109 with_pbars=False,
110 )
111
112 if idist.get_rank() == 0:
113 common.ProgressBar(persist=False).attach(trainer, metric_names="all")
114
115 return trainer
116
117
118 def create_evaluators(model, metrics, config):
119 model_output_transform = getattr(config, "model_output_transform", lambda x: x)
120
121 evaluator_args = dict(
122 model=model,
123 metrics=metrics,
124 device=config.device,
125 non_blocking=True,
126 prepare_batch=config.prepare_batch,
127 output_transform=lambda x, y, y_pred: (model_output_transform(y_pred), y,),
128 )
129 train_evaluator = create_supervised_evaluator(**evaluator_args)
130 evaluator = create_supervised_evaluator(**evaluator_args)
131
132 if idist.get_rank() == 0:
133 common.ProgressBar(desc="Evaluation (train)", persist=False).attach(train_evaluator)
134 common.ProgressBar(desc="Evaluation (val)", persist=False).attach(evaluator)
135
136 return evaluator, train_evaluator
137
138
139 def log_metrics(logger, epoch, elapsed, tag, metrics):
140 logger.info(
141 "\nEpoch {} - Evaluation time (seconds): {} - {} metrics:\n {}".format(
142 epoch, int(elapsed), tag, "\n".join(["\t{}: {}".format(k, v) for k, v in metrics.items()])
143 )
144 )
145
146
147 def log_basic_info(logger, config):
148
149 msg = "\n- PyTorch version: {}".format(torch.__version__)
150 msg += "\n- Ignite version: {}".format(ignite.__version__)
151 msg += "\n- Cuda device name: {}".format(torch.cuda.get_device_name(idist.get_local_rank()))
152
153 logger.info(msg)
154
155 if idist.get_world_size() > 1:
156 msg = "\nDistributed setting:"
157 msg += "\tbackend: {}".format(idist.backend())
158 msg += "\trank: {}".format(idist.get_rank())
159 msg += "\tworld size: {}".format(idist.get_world_size())
160 logger.info(msg)
161
162
163 def training(local_rank, config, logger=None):
164
165 if not getattr(config, "use_fp16", True):
166 raise RuntimeError("This training script uses by default fp16 AMP")
167
168 torch.backends.cudnn.benchmark = True
169
170 set_seed(config.seed + local_rank)
171
172 train_loader, val_loader, train_eval_loader = config.train_loader, config.val_loader, config.train_eval_loader
173
174 # Setup model, optimizer, criterion
175 model, optimizer, criterion = initialize(config)
176
177 # Setup trainer for this specific task
178 trainer = create_trainer(model, optimizer, criterion, train_loader.sampler, config, logger)
179
180 # Setup evaluators
181 num_classes = config.num_classes
182 cm_metric = ConfusionMatrix(num_classes=num_classes)
183
184 val_metrics = {
185 "IoU": IoU(cm_metric),
186 "mIoU_bg": mIoU(cm_metric),
187 }
188
189 if hasattr(config, "val_metrics") and isinstance(config.val_metrics, dict):
190 val_metrics.update(config.val_metrics)
191
192 evaluator, train_evaluator = create_evaluators(model, val_metrics, config)
193
194 @trainer.on(Events.EPOCH_COMPLETED(every=getattr(config, "val_interval", 1)) | Events.COMPLETED)
195 def run_validation():
196 epoch = trainer.state.epoch
197 state = train_evaluator.run(train_eval_loader)
198 log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics)
199 state = evaluator.run(val_loader)
200 log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics)
201
202 if getattr(config, "start_by_validation", False):
203 trainer.add_event_handler(Events.STARTED, run_validation)
204
205 score_metric_name = "mIoU_bg"
206
207 if hasattr(config, "es_patience"):
208 common.add_early_stopping_by_val_score(config.es_patience, evaluator, trainer, metric_name=score_metric_name)
209
210 # Store 3 best models by validation accuracy:
211 common.gen_save_best_models_by_val_score(
212 save_handler=get_save_handler(config),
213 evaluator=evaluator,
214 models=model,
215 metric_name=score_metric_name,
216 n_saved=3,
217 trainer=trainer,
218 tag="val",
219 )
220
221 if idist.get_rank() == 0:
222
223 tb_logger = common.setup_tb_logging(
224 config.output_path.as_posix(),
225 trainer,
226 optimizer,
227 evaluators={"training": train_evaluator, "validation": evaluator},
228 )
229
230 if not exp_tracking.has_trains:
231 exp_tracking_logger = exp_tracking.setup_logging(
232 trainer, optimizer, evaluators={"training": train_evaluator, "validation": evaluator}
233 )
234
235 # Log val predictions:
236 tb_logger.attach(
237 evaluator,
238 log_handler=predictions_gt_images_handler(
239 img_denormalize_fn=config.img_denormalize, n_images=15, another_engine=trainer, prefix_tag="validation"
240 ),
241 event_name=Events.ITERATION_COMPLETED(once=len(val_loader) // 2),
242 )
243
244 # Log confusion matrix to Trains:
245 if exp_tracking.has_trains:
246 from trains import Task
247
248 trains_logger = Task.current_task().get_logger()
249
250 @trainer.on(Events.COMPLETED)
251 def log_cm():
252 cm = cm_metric.compute().numpy()
253 cm = cm / (cm.sum(axis=1)[:, None] + 1e-15)
254 trains_logger.report_confusion_matrix(
255 title="Final Confusion Matrix",
256 series="cm-preds-gt",
257 matrix=cm,
258 iteration=trainer.state.iteration,
259 xlabels=VOCSegmentationOpencv.target_names,
260 ylabels=VOCSegmentationOpencv.target_names,
261 )
262
263 trainer.run(train_loader, max_epochs=config.num_epochs)
264
265 if idist.get_rank() == 0:
266 tb_logger.close()
267 if not exp_tracking.has_trains:
268 exp_tracking_logger.close()
269
270
271 def run(config, **kwargs):
272 """This is the main method to run the training. As this training script is launched with `py_config_runner`
273 it should obligatory contain `run(config, **kwargs)` method.
274
275 """
276
277 assert torch.cuda.is_available(), torch.cuda.is_available()
278 assert torch.backends.cudnn.enabled, "Nvidia/Amp requires cudnn backend to be enabled."
279
280 with idist.Parallel(backend="nccl") as parallel:
281
282 logger = setup_logger(name="Pascal-VOC12 Training", distributed_rank=idist.get_rank())
283
284 assert_config(config, TRAINVAL_CONFIG)
285 # The following attributes are automatically added by py_config_runner
286 assert hasattr(config, "config_filepath") and isinstance(config.config_filepath, Path)
287 assert hasattr(config, "script_filepath") and isinstance(config.script_filepath, Path)
288
289 if idist.get_rank() == 0 and exp_tracking.has_trains:
290 from trains import Task
291
292 task = Task.init("Pascal-VOC12 Training", config.config_filepath.stem)
293 task.connect_configuration(config.config_filepath.as_posix())
294
295 log_basic_info(logger, config)
296
297 config.output_path = Path(exp_tracking.get_output_path())
298 # dump python files to reproduce the run
299 exp_tracking.log_artifact(config.config_filepath.as_posix())
300 exp_tracking.log_artifact(config.script_filepath.as_posix())
301 exp_tracking.log_params(get_params(config, TRAINVAL_CONFIG))
302
303 try:
304 parallel.run(training, config, logger=logger)
305 except KeyboardInterrupt:
306 logger.info("Catched KeyboardInterrupt -> exit")
307 except Exception as e: # noqa
308 logger.exception("")
309 raise e
310
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/references/segmentation/pascal_voc2012/code/scripts/training.py b/examples/references/segmentation/pascal_voc2012/code/scripts/training.py
--- a/examples/references/segmentation/pascal_voc2012/code/scripts/training.py
+++ b/examples/references/segmentation/pascal_voc2012/code/scripts/training.py
@@ -179,7 +179,7 @@
# Setup evaluators
num_classes = config.num_classes
- cm_metric = ConfusionMatrix(num_classes=num_classes)
+ cm_metric = ConfusionMatrix(num_classes=num_classes, average="recall")
val_metrics = {
"IoU": IoU(cm_metric),
@@ -241,16 +241,17 @@
event_name=Events.ITERATION_COMPLETED(once=len(val_loader) // 2),
)
- # Log confusion matrix to Trains:
- if exp_tracking.has_trains:
- from trains import Task
+ # Log confusion matrix to Trains:
+ if exp_tracking.has_trains:
+
+ @trainer.on(Events.COMPLETED)
+ def compute_and_log_cm():
+ cm = cm_metric.compute().cpu().numpy()
- trains_logger = Task.current_task().get_logger()
+ if idist.get_rank() == 0:
+ from trains import Task
- @trainer.on(Events.COMPLETED)
- def log_cm():
- cm = cm_metric.compute().numpy()
- cm = cm / (cm.sum(axis=1)[:, None] + 1e-15)
+ trains_logger = Task.current_task().get_logger()
trains_logger.report_confusion_matrix(
title="Final Confusion Matrix",
series="cm-preds-gt",
|
{"golden_diff": "diff --git a/examples/references/segmentation/pascal_voc2012/code/scripts/training.py b/examples/references/segmentation/pascal_voc2012/code/scripts/training.py\n--- a/examples/references/segmentation/pascal_voc2012/code/scripts/training.py\n+++ b/examples/references/segmentation/pascal_voc2012/code/scripts/training.py\n@@ -179,7 +179,7 @@\n \n # Setup evaluators\n num_classes = config.num_classes\n- cm_metric = ConfusionMatrix(num_classes=num_classes)\n+ cm_metric = ConfusionMatrix(num_classes=num_classes, average=\"recall\")\n \n val_metrics = {\n \"IoU\": IoU(cm_metric),\n@@ -241,16 +241,17 @@\n event_name=Events.ITERATION_COMPLETED(once=len(val_loader) // 2),\n )\n \n- # Log confusion matrix to Trains:\n- if exp_tracking.has_trains:\n- from trains import Task\n+ # Log confusion matrix to Trains:\n+ if exp_tracking.has_trains:\n+\n+ @trainer.on(Events.COMPLETED)\n+ def compute_and_log_cm():\n+ cm = cm_metric.compute().cpu().numpy()\n \n- trains_logger = Task.current_task().get_logger()\n+ if idist.get_rank() == 0:\n+ from trains import Task\n \n- @trainer.on(Events.COMPLETED)\n- def log_cm():\n- cm = cm_metric.compute().numpy()\n- cm = cm / (cm.sum(axis=1)[:, None] + 1e-15)\n+ trains_logger = Task.current_task().get_logger()\n trains_logger.report_confusion_matrix(\n title=\"Final Confusion Matrix\",\n series=\"cm-preds-gt\",\n", "issue": "Pascal training bug with CM\n## \ud83d\udc1b Bug description\r\n\r\n```\r\n File \"./code/scripts/training.py\", line 252, in log_cm\r\ncm = cm_metric.compute().numpy()\r\nTypeError: can't convert cuda:0 device type tensor to numpy. Use Tensor.cpu() to copy the tensor to host memory first.\r\n2020-07-14 03:01:54,416 ignite.distributed.launcher.Parallel INFO: Finalized processing group with backend: 'nccl'\r\n2020-07-14 03:01:54,417|training|ERROR|\r\n```\r\n\r\n## Environment\r\n\r\n - PyTorch Version (e.g., 1.4):\r\n - Ignite Version (e.g., 0.3.0):\r\n - OS (e.g., Linux):\r\n - How you installed Ignite (`conda`, `pip`, source):\r\n - Python version:\r\n - Any other relevant information:\r\n\n", "before_files": [{"content": "# This a training script launched with py_config_runner\n# It should obligatory contain `run(config, **kwargs)` method\n\nfrom pathlib import Path\nfrom collections.abc import Mapping\n\nimport torch\n\nfrom apex import amp\n\nimport ignite\nimport ignite.distributed as idist\nfrom ignite.contrib.engines import common\nfrom ignite.engine import Engine, Events, create_supervised_evaluator\nfrom ignite.handlers import DiskSaver\nfrom ignite.metrics import ConfusionMatrix, IoU, mIoU\nfrom ignite.utils import setup_logger\n\nfrom py_config_runner.utils import set_seed\nfrom py_config_runner.config_utils import get_params, TRAINVAL_CONFIG, assert_config\n\nimport sys\n\n# Adds \"code\" folder to python path\nsys.path.insert(0, Path(__file__).parent.parent.as_posix())\n\nfrom utils.handlers import predictions_gt_images_handler\nfrom utils import exp_tracking\nfrom dataflow.datasets import VOCSegmentationOpencv\n\n\ndef initialize(config):\n\n model = config.model.to(config.device)\n optimizer = config.optimizer\n # Setup Nvidia/Apex AMP\n model, optimizer = amp.initialize(model, optimizer, opt_level=getattr(config, \"fp16_opt_level\", \"O2\"), num_losses=1)\n\n # Adapt model to dist conf\n model = idist.auto_model(model)\n\n criterion = config.criterion.to(config.device)\n\n return model, optimizer, criterion\n\n\ndef get_save_handler(config):\n if exp_tracking.has_trains:\n from ignite.contrib.handlers.trains_logger 
import TrainsSaver\n\n return TrainsSaver(dirname=config.output_path.as_posix())\n\n return DiskSaver(config.output_path.as_posix())\n\n\ndef create_trainer(model, optimizer, criterion, train_sampler, config, logger):\n prepare_batch = config.prepare_batch\n device = config.device\n\n # Setup trainer\n accumulation_steps = getattr(config, \"accumulation_steps\", 1)\n model_output_transform = getattr(config, \"model_output_transform\", lambda x: x)\n\n def train_update_function(engine, batch):\n\n model.train()\n\n x, y = prepare_batch(batch, device=device, non_blocking=True)\n y_pred = model(x)\n y_pred = model_output_transform(y_pred)\n loss = criterion(y_pred, y)\n\n if isinstance(loss, Mapping):\n assert \"supervised batch loss\" in loss\n loss_dict = loss\n output = {k: v.item() for k, v in loss_dict.items()}\n loss = loss_dict[\"supervised batch loss\"] / accumulation_steps\n else:\n output = {\"supervised batch loss\": loss.item()}\n\n with amp.scale_loss(loss, optimizer, loss_id=0) as scaled_loss:\n scaled_loss.backward()\n\n if engine.state.iteration % accumulation_steps == 0:\n optimizer.step()\n optimizer.zero_grad()\n\n return output\n\n output_names = getattr(config, \"output_names\", [\"supervised batch loss\",])\n lr_scheduler = config.lr_scheduler\n\n trainer = Engine(train_update_function)\n trainer.logger = logger\n\n to_save = {\"model\": model, \"optimizer\": optimizer, \"lr_scheduler\": lr_scheduler, \"trainer\": trainer, \"amp\": amp}\n\n save_every_iters = getattr(config, \"save_every_iters\", 1000)\n\n common.setup_common_training_handlers(\n trainer,\n train_sampler,\n to_save=to_save,\n save_every_iters=save_every_iters,\n save_handler=get_save_handler(config),\n lr_scheduler=lr_scheduler,\n with_gpu_stats=exp_tracking.has_mlflow,\n output_names=output_names,\n with_pbars=False,\n )\n\n if idist.get_rank() == 0:\n common.ProgressBar(persist=False).attach(trainer, metric_names=\"all\")\n\n return trainer\n\n\ndef create_evaluators(model, metrics, config):\n model_output_transform = getattr(config, \"model_output_transform\", lambda x: x)\n\n evaluator_args = dict(\n model=model,\n metrics=metrics,\n device=config.device,\n non_blocking=True,\n prepare_batch=config.prepare_batch,\n output_transform=lambda x, y, y_pred: (model_output_transform(y_pred), y,),\n )\n train_evaluator = create_supervised_evaluator(**evaluator_args)\n evaluator = create_supervised_evaluator(**evaluator_args)\n\n if idist.get_rank() == 0:\n common.ProgressBar(desc=\"Evaluation (train)\", persist=False).attach(train_evaluator)\n common.ProgressBar(desc=\"Evaluation (val)\", persist=False).attach(evaluator)\n\n return evaluator, train_evaluator\n\n\ndef log_metrics(logger, epoch, elapsed, tag, metrics):\n logger.info(\n \"\\nEpoch {} - Evaluation time (seconds): {} - {} metrics:\\n {}\".format(\n epoch, int(elapsed), tag, \"\\n\".join([\"\\t{}: {}\".format(k, v) for k, v in metrics.items()])\n )\n )\n\n\ndef log_basic_info(logger, config):\n\n msg = \"\\n- PyTorch version: {}\".format(torch.__version__)\n msg += \"\\n- Ignite version: {}\".format(ignite.__version__)\n msg += \"\\n- Cuda device name: {}\".format(torch.cuda.get_device_name(idist.get_local_rank()))\n\n logger.info(msg)\n\n if idist.get_world_size() > 1:\n msg = \"\\nDistributed setting:\"\n msg += \"\\tbackend: {}\".format(idist.backend())\n msg += \"\\trank: {}\".format(idist.get_rank())\n msg += \"\\tworld size: {}\".format(idist.get_world_size())\n logger.info(msg)\n\n\ndef training(local_rank, config, logger=None):\n\n if not 
getattr(config, \"use_fp16\", True):\n raise RuntimeError(\"This training script uses by default fp16 AMP\")\n\n torch.backends.cudnn.benchmark = True\n\n set_seed(config.seed + local_rank)\n\n train_loader, val_loader, train_eval_loader = config.train_loader, config.val_loader, config.train_eval_loader\n\n # Setup model, optimizer, criterion\n model, optimizer, criterion = initialize(config)\n\n # Setup trainer for this specific task\n trainer = create_trainer(model, optimizer, criterion, train_loader.sampler, config, logger)\n\n # Setup evaluators\n num_classes = config.num_classes\n cm_metric = ConfusionMatrix(num_classes=num_classes)\n\n val_metrics = {\n \"IoU\": IoU(cm_metric),\n \"mIoU_bg\": mIoU(cm_metric),\n }\n\n if hasattr(config, \"val_metrics\") and isinstance(config.val_metrics, dict):\n val_metrics.update(config.val_metrics)\n\n evaluator, train_evaluator = create_evaluators(model, val_metrics, config)\n\n @trainer.on(Events.EPOCH_COMPLETED(every=getattr(config, \"val_interval\", 1)) | Events.COMPLETED)\n def run_validation():\n epoch = trainer.state.epoch\n state = train_evaluator.run(train_eval_loader)\n log_metrics(logger, epoch, state.times[\"COMPLETED\"], \"Train\", state.metrics)\n state = evaluator.run(val_loader)\n log_metrics(logger, epoch, state.times[\"COMPLETED\"], \"Test\", state.metrics)\n\n if getattr(config, \"start_by_validation\", False):\n trainer.add_event_handler(Events.STARTED, run_validation)\n\n score_metric_name = \"mIoU_bg\"\n\n if hasattr(config, \"es_patience\"):\n common.add_early_stopping_by_val_score(config.es_patience, evaluator, trainer, metric_name=score_metric_name)\n\n # Store 3 best models by validation accuracy:\n common.gen_save_best_models_by_val_score(\n save_handler=get_save_handler(config),\n evaluator=evaluator,\n models=model,\n metric_name=score_metric_name,\n n_saved=3,\n trainer=trainer,\n tag=\"val\",\n )\n\n if idist.get_rank() == 0:\n\n tb_logger = common.setup_tb_logging(\n config.output_path.as_posix(),\n trainer,\n optimizer,\n evaluators={\"training\": train_evaluator, \"validation\": evaluator},\n )\n\n if not exp_tracking.has_trains:\n exp_tracking_logger = exp_tracking.setup_logging(\n trainer, optimizer, evaluators={\"training\": train_evaluator, \"validation\": evaluator}\n )\n\n # Log val predictions:\n tb_logger.attach(\n evaluator,\n log_handler=predictions_gt_images_handler(\n img_denormalize_fn=config.img_denormalize, n_images=15, another_engine=trainer, prefix_tag=\"validation\"\n ),\n event_name=Events.ITERATION_COMPLETED(once=len(val_loader) // 2),\n )\n\n # Log confusion matrix to Trains:\n if exp_tracking.has_trains:\n from trains import Task\n\n trains_logger = Task.current_task().get_logger()\n\n @trainer.on(Events.COMPLETED)\n def log_cm():\n cm = cm_metric.compute().numpy()\n cm = cm / (cm.sum(axis=1)[:, None] + 1e-15)\n trains_logger.report_confusion_matrix(\n title=\"Final Confusion Matrix\",\n series=\"cm-preds-gt\",\n matrix=cm,\n iteration=trainer.state.iteration,\n xlabels=VOCSegmentationOpencv.target_names,\n ylabels=VOCSegmentationOpencv.target_names,\n )\n\n trainer.run(train_loader, max_epochs=config.num_epochs)\n\n if idist.get_rank() == 0:\n tb_logger.close()\n if not exp_tracking.has_trains:\n exp_tracking_logger.close()\n\n\ndef run(config, **kwargs):\n \"\"\"This is the main method to run the training. 
As this training script is launched with `py_config_runner`\n it should obligatory contain `run(config, **kwargs)` method.\n\n \"\"\"\n\n assert torch.cuda.is_available(), torch.cuda.is_available()\n assert torch.backends.cudnn.enabled, \"Nvidia/Amp requires cudnn backend to be enabled.\"\n\n with idist.Parallel(backend=\"nccl\") as parallel:\n\n logger = setup_logger(name=\"Pascal-VOC12 Training\", distributed_rank=idist.get_rank())\n\n assert_config(config, TRAINVAL_CONFIG)\n # The following attributes are automatically added by py_config_runner\n assert hasattr(config, \"config_filepath\") and isinstance(config.config_filepath, Path)\n assert hasattr(config, \"script_filepath\") and isinstance(config.script_filepath, Path)\n\n if idist.get_rank() == 0 and exp_tracking.has_trains:\n from trains import Task\n\n task = Task.init(\"Pascal-VOC12 Training\", config.config_filepath.stem)\n task.connect_configuration(config.config_filepath.as_posix())\n\n log_basic_info(logger, config)\n\n config.output_path = Path(exp_tracking.get_output_path())\n # dump python files to reproduce the run\n exp_tracking.log_artifact(config.config_filepath.as_posix())\n exp_tracking.log_artifact(config.script_filepath.as_posix())\n exp_tracking.log_params(get_params(config, TRAINVAL_CONFIG))\n\n try:\n parallel.run(training, config, logger=logger)\n except KeyboardInterrupt:\n logger.info(\"Catched KeyboardInterrupt -> exit\")\n except Exception as e: # noqa\n logger.exception(\"\")\n raise e\n", "path": "examples/references/segmentation/pascal_voc2012/code/scripts/training.py"}], "after_files": [{"content": "# This a training script launched with py_config_runner\n# It should obligatory contain `run(config, **kwargs)` method\n\nfrom pathlib import Path\nfrom collections.abc import Mapping\n\nimport torch\n\nfrom apex import amp\n\nimport ignite\nimport ignite.distributed as idist\nfrom ignite.contrib.engines import common\nfrom ignite.engine import Engine, Events, create_supervised_evaluator\nfrom ignite.handlers import DiskSaver\nfrom ignite.metrics import ConfusionMatrix, IoU, mIoU\nfrom ignite.utils import setup_logger\n\nfrom py_config_runner.utils import set_seed\nfrom py_config_runner.config_utils import get_params, TRAINVAL_CONFIG, assert_config\n\nimport sys\n\n# Adds \"code\" folder to python path\nsys.path.insert(0, Path(__file__).parent.parent.as_posix())\n\nfrom utils.handlers import predictions_gt_images_handler\nfrom utils import exp_tracking\nfrom dataflow.datasets import VOCSegmentationOpencv\n\n\ndef initialize(config):\n\n model = config.model.to(config.device)\n optimizer = config.optimizer\n # Setup Nvidia/Apex AMP\n model, optimizer = amp.initialize(model, optimizer, opt_level=getattr(config, \"fp16_opt_level\", \"O2\"), num_losses=1)\n\n # Adapt model to dist conf\n model = idist.auto_model(model)\n\n criterion = config.criterion.to(config.device)\n\n return model, optimizer, criterion\n\n\ndef get_save_handler(config):\n if exp_tracking.has_trains:\n from ignite.contrib.handlers.trains_logger import TrainsSaver\n\n return TrainsSaver(dirname=config.output_path.as_posix())\n\n return DiskSaver(config.output_path.as_posix())\n\n\ndef create_trainer(model, optimizer, criterion, train_sampler, config, logger):\n prepare_batch = config.prepare_batch\n device = config.device\n\n # Setup trainer\n accumulation_steps = getattr(config, \"accumulation_steps\", 1)\n model_output_transform = getattr(config, \"model_output_transform\", lambda x: x)\n\n def train_update_function(engine, batch):\n\n 
model.train()\n\n x, y = prepare_batch(batch, device=device, non_blocking=True)\n y_pred = model(x)\n y_pred = model_output_transform(y_pred)\n loss = criterion(y_pred, y)\n\n if isinstance(loss, Mapping):\n assert \"supervised batch loss\" in loss\n loss_dict = loss\n output = {k: v.item() for k, v in loss_dict.items()}\n loss = loss_dict[\"supervised batch loss\"] / accumulation_steps\n else:\n output = {\"supervised batch loss\": loss.item()}\n\n with amp.scale_loss(loss, optimizer, loss_id=0) as scaled_loss:\n scaled_loss.backward()\n\n if engine.state.iteration % accumulation_steps == 0:\n optimizer.step()\n optimizer.zero_grad()\n\n return output\n\n output_names = getattr(config, \"output_names\", [\"supervised batch loss\",])\n lr_scheduler = config.lr_scheduler\n\n trainer = Engine(train_update_function)\n trainer.logger = logger\n\n to_save = {\"model\": model, \"optimizer\": optimizer, \"lr_scheduler\": lr_scheduler, \"trainer\": trainer, \"amp\": amp}\n\n save_every_iters = getattr(config, \"save_every_iters\", 1000)\n\n common.setup_common_training_handlers(\n trainer,\n train_sampler,\n to_save=to_save,\n save_every_iters=save_every_iters,\n save_handler=get_save_handler(config),\n lr_scheduler=lr_scheduler,\n with_gpu_stats=exp_tracking.has_mlflow,\n output_names=output_names,\n with_pbars=False,\n )\n\n if idist.get_rank() == 0:\n common.ProgressBar(persist=False).attach(trainer, metric_names=\"all\")\n\n return trainer\n\n\ndef create_evaluators(model, metrics, config):\n model_output_transform = getattr(config, \"model_output_transform\", lambda x: x)\n\n evaluator_args = dict(\n model=model,\n metrics=metrics,\n device=config.device,\n non_blocking=True,\n prepare_batch=config.prepare_batch,\n output_transform=lambda x, y, y_pred: (model_output_transform(y_pred), y,),\n )\n train_evaluator = create_supervised_evaluator(**evaluator_args)\n evaluator = create_supervised_evaluator(**evaluator_args)\n\n if idist.get_rank() == 0:\n common.ProgressBar(desc=\"Evaluation (train)\", persist=False).attach(train_evaluator)\n common.ProgressBar(desc=\"Evaluation (val)\", persist=False).attach(evaluator)\n\n return evaluator, train_evaluator\n\n\ndef log_metrics(logger, epoch, elapsed, tag, metrics):\n logger.info(\n \"\\nEpoch {} - Evaluation time (seconds): {} - {} metrics:\\n {}\".format(\n epoch, int(elapsed), tag, \"\\n\".join([\"\\t{}: {}\".format(k, v) for k, v in metrics.items()])\n )\n )\n\n\ndef log_basic_info(logger, config):\n\n msg = \"\\n- PyTorch version: {}\".format(torch.__version__)\n msg += \"\\n- Ignite version: {}\".format(ignite.__version__)\n msg += \"\\n- Cuda device name: {}\".format(torch.cuda.get_device_name(idist.get_local_rank()))\n\n logger.info(msg)\n\n if idist.get_world_size() > 1:\n msg = \"\\nDistributed setting:\"\n msg += \"\\tbackend: {}\".format(idist.backend())\n msg += \"\\trank: {}\".format(idist.get_rank())\n msg += \"\\tworld size: {}\".format(idist.get_world_size())\n logger.info(msg)\n\n\ndef training(local_rank, config, logger=None):\n\n if not getattr(config, \"use_fp16\", True):\n raise RuntimeError(\"This training script uses by default fp16 AMP\")\n\n torch.backends.cudnn.benchmark = True\n\n set_seed(config.seed + local_rank)\n\n train_loader, val_loader, train_eval_loader = config.train_loader, config.val_loader, config.train_eval_loader\n\n # Setup model, optimizer, criterion\n model, optimizer, criterion = initialize(config)\n\n # Setup trainer for this specific task\n trainer = create_trainer(model, optimizer, criterion, 
train_loader.sampler, config, logger)\n\n # Setup evaluators\n num_classes = config.num_classes\n cm_metric = ConfusionMatrix(num_classes=num_classes, average=\"recall\")\n\n val_metrics = {\n \"IoU\": IoU(cm_metric),\n \"mIoU_bg\": mIoU(cm_metric),\n }\n\n if hasattr(config, \"val_metrics\") and isinstance(config.val_metrics, dict):\n val_metrics.update(config.val_metrics)\n\n evaluator, train_evaluator = create_evaluators(model, val_metrics, config)\n\n @trainer.on(Events.EPOCH_COMPLETED(every=getattr(config, \"val_interval\", 1)) | Events.COMPLETED)\n def run_validation():\n epoch = trainer.state.epoch\n state = train_evaluator.run(train_eval_loader)\n log_metrics(logger, epoch, state.times[\"COMPLETED\"], \"Train\", state.metrics)\n state = evaluator.run(val_loader)\n log_metrics(logger, epoch, state.times[\"COMPLETED\"], \"Test\", state.metrics)\n\n if getattr(config, \"start_by_validation\", False):\n trainer.add_event_handler(Events.STARTED, run_validation)\n\n score_metric_name = \"mIoU_bg\"\n\n if hasattr(config, \"es_patience\"):\n common.add_early_stopping_by_val_score(config.es_patience, evaluator, trainer, metric_name=score_metric_name)\n\n # Store 3 best models by validation accuracy:\n common.gen_save_best_models_by_val_score(\n save_handler=get_save_handler(config),\n evaluator=evaluator,\n models=model,\n metric_name=score_metric_name,\n n_saved=3,\n trainer=trainer,\n tag=\"val\",\n )\n\n if idist.get_rank() == 0:\n\n tb_logger = common.setup_tb_logging(\n config.output_path.as_posix(),\n trainer,\n optimizer,\n evaluators={\"training\": train_evaluator, \"validation\": evaluator},\n )\n\n if not exp_tracking.has_trains:\n exp_tracking_logger = exp_tracking.setup_logging(\n trainer, optimizer, evaluators={\"training\": train_evaluator, \"validation\": evaluator}\n )\n\n # Log val predictions:\n tb_logger.attach(\n evaluator,\n log_handler=predictions_gt_images_handler(\n img_denormalize_fn=config.img_denormalize, n_images=15, another_engine=trainer, prefix_tag=\"validation\"\n ),\n event_name=Events.ITERATION_COMPLETED(once=len(val_loader) // 2),\n )\n\n # Log confusion matrix to Trains:\n if exp_tracking.has_trains:\n\n @trainer.on(Events.COMPLETED)\n def compute_and_log_cm():\n cm = cm_metric.compute().cpu().numpy()\n\n if idist.get_rank() == 0:\n from trains import Task\n\n trains_logger = Task.current_task().get_logger()\n trains_logger.report_confusion_matrix(\n title=\"Final Confusion Matrix\",\n series=\"cm-preds-gt\",\n matrix=cm,\n iteration=trainer.state.iteration,\n xlabels=VOCSegmentationOpencv.target_names,\n ylabels=VOCSegmentationOpencv.target_names,\n )\n\n trainer.run(train_loader, max_epochs=config.num_epochs)\n\n if idist.get_rank() == 0:\n tb_logger.close()\n if not exp_tracking.has_trains:\n exp_tracking_logger.close()\n\n\ndef run(config, **kwargs):\n \"\"\"This is the main method to run the training. 
As this training script is launched with `py_config_runner`\n it should obligatory contain `run(config, **kwargs)` method.\n\n \"\"\"\n\n assert torch.cuda.is_available(), torch.cuda.is_available()\n assert torch.backends.cudnn.enabled, \"Nvidia/Amp requires cudnn backend to be enabled.\"\n\n with idist.Parallel(backend=\"nccl\") as parallel:\n\n logger = setup_logger(name=\"Pascal-VOC12 Training\", distributed_rank=idist.get_rank())\n\n assert_config(config, TRAINVAL_CONFIG)\n # The following attributes are automatically added by py_config_runner\n assert hasattr(config, \"config_filepath\") and isinstance(config.config_filepath, Path)\n assert hasattr(config, \"script_filepath\") and isinstance(config.script_filepath, Path)\n\n if idist.get_rank() == 0 and exp_tracking.has_trains:\n from trains import Task\n\n task = Task.init(\"Pascal-VOC12 Training\", config.config_filepath.stem)\n task.connect_configuration(config.config_filepath.as_posix())\n\n log_basic_info(logger, config)\n\n config.output_path = Path(exp_tracking.get_output_path())\n # dump python files to reproduce the run\n exp_tracking.log_artifact(config.config_filepath.as_posix())\n exp_tracking.log_artifact(config.script_filepath.as_posix())\n exp_tracking.log_params(get_params(config, TRAINVAL_CONFIG))\n\n try:\n parallel.run(training, config, logger=logger)\n except KeyboardInterrupt:\n logger.info(\"Catched KeyboardInterrupt -> exit\")\n except Exception as e: # noqa\n logger.exception(\"\")\n raise e\n", "path": "examples/references/segmentation/pascal_voc2012/code/scripts/training.py"}]}
| 3,701 | 396 |