problem_id (string, 18-22 chars) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, 13-58 chars) | prompt (string, 1.71k-18.9k chars) | golden_diff (string, 145-5.13k chars) | verification_info (string, 465-23.6k chars) | num_tokens_prompt (int64, 556-4.1k) | num_tokens_diff (int64, 47-1.02k)
---|---|---|---|---|---|---|---|---
gh_patches_debug_29339 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-486 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
celery 4.1 worker tasks aren't being traced with ddtrace-py 0.11.1
```
ddtrace==0.11.1
celery==4.1.0
```
I'm manually patching celery due to #423
I'm getting traces for 'defaultdb' (sqlite), 'redis', and 'postgres' when I run the celery worker
However, I'm not receiving any traces whatsoever for the task runs, and no 'celery' service appears in datadog.
Here's a gist with my worker startup in debug logging mode:
https://gist.github.com/eedwards-sk/f924548c043859901db6918aec95dada
I'm sending traces to a local running agent and I know that's working because I'm seeing the flush records in the logs, and I get traces for the above mentioned services when I'm running it.
(We have another application that is using celery 3.x and their traces show up for tasks, which we instrumented in more or less the same way)
Here's a gist showing how we're instantiating celery and the tracer:
https://gist.github.com/eedwards-sk/fe81f62406e7a245b7c26a9ada19c658
</issue>
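For reference, one common way to patch Celery by hand with ddtrace looks like the sketch below. This is an assumption for illustration only, not the reporter's linked gist; the broker URL and task are placeholders, and `patch(celery=True)` is the standard ddtrace entry point for manual patching.

```python
from ddtrace import patch

patch(celery=True)  # manual patching; must run before tasks are defined

from celery import Celery

app = Celery('worker', broker='redis://localhost:6379/0')  # illustrative broker URL


@app.task
def add(x, y):
    return x + y
```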
<code>
[start of ddtrace/contrib/celery/task.py]
1 # Third party
2 import wrapt
3 import inspect
4 import celery
5
6 # Project
7 from ddtrace import Pin
8 from ddtrace.ext import AppTypes
9 from ...ext import errors
10 from .util import APP, PRODUCER_SERVICE, WORKER_SERVICE, meta_from_context, require_pin
11
12 PRODUCER_ROOT_SPAN = 'celery.apply'
13 WORKER_ROOT_SPAN = 'celery.run'
14 # Task operations
15 TASK_TAG_KEY = 'celery.action'
16 TASK_APPLY = 'apply'
17 TASK_APPLY_ASYNC = 'apply_async'
18 TASK_RUN = 'run'
19
20
21 def patch_task(task, pin=None):
22 """ patch_task will add tracing to a celery task """
23 # The service set here is actually ignored, because it's not possible to
24 # be certain whether this process is being used as a worker, a producer,
25 # or both. So the service as recorded in traces is set based on the actual
26 # work being done (ie. apply/apply_async vs run).
27 pin = pin or Pin(service=WORKER_SERVICE, app=APP, app_type=AppTypes.worker)
28
29 patch_methods = [
30 ('__init__', _task_init),
31 ('run', _task_run),
32 ('apply', _task_apply),
33 ('apply_async', _task_apply_async),
34 ]
35 for method_name, wrapper in patch_methods:
36 # Get original method
37 method = getattr(task, method_name, None)
38 if method is None:
39 continue
40
41 # Do not patch if method is already patched
42 if isinstance(method, wrapt.ObjectProxy):
43 continue
44
45 # If the function has been applied as a decorator for v1 Celery tasks, then a different patching is needed
46 if inspect.isclass(task) and issubclass(task, celery.task.Task):
47 wrapped = wrapt.FunctionWrapper(method, wrapper)
48 setattr(task, method_name, wrapped)
49 continue
50 # Patch method
51 # DEV: Using `BoundFunctionWrapper` ensures our `task` wrapper parameter is properly set
52 setattr(task, method_name, wrapt.BoundFunctionWrapper(method, task, wrapper))
53
54 # Attach our pin to the app
55 pin.onto(task)
56 return task
57
58 def unpatch_task(task):
59 """ unpatch_task will remove tracing from a celery task """
60 patched_methods = [
61 '__init__',
62 'run',
63 'apply',
64 'apply_async',
65 ]
66 for method_name in patched_methods:
67 # Get wrapped method
68 wrapper = getattr(task, method_name, None)
69 if wrapper is None:
70 continue
71
72 # Only unpatch if wrapper is an `ObjectProxy`
73 if not isinstance(wrapper, wrapt.ObjectProxy):
74 continue
75
76 # Restore original method
77 setattr(task, method_name, wrapper.__wrapped__)
78
79 return task
80
81
82 def _task_init(func, task, args, kwargs):
83 func(*args, **kwargs)
84
85 # Patch this task if our pin is enabled
86 pin = Pin.get_from(task)
87 if pin and pin.enabled():
88 patch_task(task, pin=pin)
89
90
91 @require_pin
92 def _task_run(pin, func, task, args, kwargs):
93 with pin.tracer.trace(WORKER_ROOT_SPAN, service=WORKER_SERVICE, resource=task.name) as span:
94 # Set meta data from task request
95 span.set_metas(meta_from_context(task.request))
96 span.set_meta(TASK_TAG_KEY, TASK_RUN)
97
98 # Call original `run` function
99 return func(*args, **kwargs)
100
101
102 @require_pin
103 def _task_apply(pin, func, task, args, kwargs):
104 with pin.tracer.trace(PRODUCER_ROOT_SPAN, service=PRODUCER_SERVICE, resource=task.name) as span:
105 # Call the original `apply` function
106 res = func(*args, **kwargs)
107
108 # Set meta data from response
109 span.set_meta('id', res.id)
110 span.set_meta('state', res.state)
111 span.set_meta(TASK_TAG_KEY, TASK_APPLY)
112 if res.traceback:
113 span.error = 1
114 span.set_meta(errors.STACK, res.traceback)
115 return res
116
117
118 @require_pin
119 def _task_apply_async(pin, func, task, args, kwargs):
120 with pin.tracer.trace(PRODUCER_ROOT_SPAN, service=PRODUCER_SERVICE, resource=task.name) as span:
121 # Extract meta data from `kwargs`
122 meta_keys = (
123 'compression', 'countdown', 'eta', 'exchange', 'expires',
124 'priority', 'routing_key', 'serializer', 'queue',
125 )
126 for name in meta_keys:
127 if name in kwargs:
128 span.set_meta(name, kwargs[name])
129 span.set_meta(TASK_TAG_KEY, TASK_APPLY_ASYNC)
130
131 # Call the original `apply_async` function
132 res = func(*args, **kwargs)
133
134 # Set meta data from response
135 # DEV: Calling `res.traceback` or `res.state` will make an
136 # API call to the backend for the properties
137 span.set_meta('id', res.id)
138 return res
139
[end of ddtrace/contrib/celery/task.py]
[start of ddtrace/contrib/celery/patch.py]
1 import celery
2
3 from wrapt import wrap_function_wrapper as _w
4
5 from .app import patch_app, unpatch_app
6 from .registry import _wrap_register
7 from ...utils.wrappers import unwrap as _u
8
9
10 def patch():
11 """Instrument Celery base application and the `TaskRegistry` so
12 that any new registered task is automatically instrumented
13 """
14 setattr(celery, 'Celery', patch_app(celery.Celery))
15 _w('celery.app.registry', 'TaskRegistry.register', _wrap_register)
16
17
18 def unpatch():
19 """Removes instrumentation from Celery"""
20 setattr(celery, 'Celery', unpatch_app(celery.Celery))
21 _u(celery.app.registry.TaskRegistry, 'register')
22
[end of ddtrace/contrib/celery/patch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ddtrace/contrib/celery/patch.py b/ddtrace/contrib/celery/patch.py
--- a/ddtrace/contrib/celery/patch.py
+++ b/ddtrace/contrib/celery/patch.py
@@ -3,19 +3,24 @@
from wrapt import wrap_function_wrapper as _w
from .app import patch_app, unpatch_app
+from .task import _wrap_shared_task
from .registry import _wrap_register
from ...utils.wrappers import unwrap as _u
def patch():
"""Instrument Celery base application and the `TaskRegistry` so
- that any new registered task is automatically instrumented
+ that any new registered task is automatically instrumented. In the
+ case of Django-Celery integration, also the `@shared_task` decorator
+ must be instrumented because Django doesn't use the Celery registry.
"""
setattr(celery, 'Celery', patch_app(celery.Celery))
_w('celery.app.registry', 'TaskRegistry.register', _wrap_register)
+ _w('celery', 'shared_task', _wrap_shared_task)
def unpatch():
"""Removes instrumentation from Celery"""
setattr(celery, 'Celery', unpatch_app(celery.Celery))
_u(celery.app.registry.TaskRegistry, 'register')
+ _u(celery, 'shared_task')
diff --git a/ddtrace/contrib/celery/task.py b/ddtrace/contrib/celery/task.py
--- a/ddtrace/contrib/celery/task.py
+++ b/ddtrace/contrib/celery/task.py
@@ -79,6 +79,14 @@
return task
+def _wrap_shared_task(decorator, instance, args, kwargs):
+ """Wrapper for Django-Celery shared tasks. `shared_task` is a decorator
+ that returns a `Task` from the given function.
+ """
+ task = decorator(*args, **kwargs)
+ return patch_task(task)
+
+
def _task_init(func, task, args, kwargs):
func(*args, **kwargs)
| {"golden_diff": "diff --git a/ddtrace/contrib/celery/patch.py b/ddtrace/contrib/celery/patch.py\n--- a/ddtrace/contrib/celery/patch.py\n+++ b/ddtrace/contrib/celery/patch.py\n@@ -3,19 +3,24 @@\n from wrapt import wrap_function_wrapper as _w\n \n from .app import patch_app, unpatch_app\n+from .task import _wrap_shared_task\n from .registry import _wrap_register\n from ...utils.wrappers import unwrap as _u\n \n \n def patch():\n \"\"\"Instrument Celery base application and the `TaskRegistry` so\n- that any new registered task is automatically instrumented\n+ that any new registered task is automatically instrumented. In the\n+ case of Django-Celery integration, also the `@shared_task` decorator\n+ must be instrumented because Django doesn't use the Celery registry.\n \"\"\"\n setattr(celery, 'Celery', patch_app(celery.Celery))\n _w('celery.app.registry', 'TaskRegistry.register', _wrap_register)\n+ _w('celery', 'shared_task', _wrap_shared_task)\n \n \n def unpatch():\n \"\"\"Removes instrumentation from Celery\"\"\"\n setattr(celery, 'Celery', unpatch_app(celery.Celery))\n _u(celery.app.registry.TaskRegistry, 'register')\n+ _u(celery, 'shared_task')\ndiff --git a/ddtrace/contrib/celery/task.py b/ddtrace/contrib/celery/task.py\n--- a/ddtrace/contrib/celery/task.py\n+++ b/ddtrace/contrib/celery/task.py\n@@ -79,6 +79,14 @@\n return task\n \n \n+def _wrap_shared_task(decorator, instance, args, kwargs):\n+ \"\"\"Wrapper for Django-Celery shared tasks. `shared_task` is a decorator\n+ that returns a `Task` from the given function.\n+ \"\"\"\n+ task = decorator(*args, **kwargs)\n+ return patch_task(task)\n+\n+\n def _task_init(func, task, args, kwargs):\n func(*args, **kwargs)\n", "issue": "celery 4.1 worker tasks aren't being traced with ddtrace-py 0.11.1\n```\r\nddtrace==0.11.1\r\ncelery==4.1.0\r\n```\r\n\r\nI'm manually patching celery due to #423 \r\n\r\nI'm getting traces for 'defaultdb' (sqlite), 'redis', and 'postgres' when I run the celery worker\r\n\r\nHowever, I'm not receiving any traces whatsoever for the task runs, and no 'celery' service appears in datadog.\r\n\r\nHere's a gist with my worker startup in debug logging mode:\r\nhttps://gist.github.com/eedwards-sk/f924548c043859901db6918aec95dada\r\n\r\nI'm sending traces to a local running agent and I know that's working because I'm seeing the flush records in the logs, and I get traces for the above mentioned services when I'm running it.\r\n\r\n(We have another application that is using celery 3.x and their traces show up for tasks, which we instrumented in more or less the same way)\r\n\r\nHere's a gist showing how we're instantiating celery and the tracer:\r\nhttps://gist.github.com/eedwards-sk/fe81f62406e7a245b7c26a9ada19c658\n", "before_files": [{"content": "# Third party\nimport wrapt\nimport inspect\nimport celery\n\n# Project\nfrom ddtrace import Pin\nfrom ddtrace.ext import AppTypes\nfrom ...ext import errors\nfrom .util import APP, PRODUCER_SERVICE, WORKER_SERVICE, meta_from_context, require_pin\n\nPRODUCER_ROOT_SPAN = 'celery.apply'\nWORKER_ROOT_SPAN = 'celery.run'\n# Task operations\nTASK_TAG_KEY = 'celery.action'\nTASK_APPLY = 'apply'\nTASK_APPLY_ASYNC = 'apply_async'\nTASK_RUN = 'run'\n\n\ndef patch_task(task, pin=None):\n \"\"\" patch_task will add tracing to a celery task \"\"\"\n # The service set here is actually ignored, because it's not possible to\n # be certain whether this process is being used as a worker, a producer,\n # or both. 
So the service as recorded in traces is set based on the actual\n # work being done (ie. apply/apply_async vs run).\n pin = pin or Pin(service=WORKER_SERVICE, app=APP, app_type=AppTypes.worker)\n\n patch_methods = [\n ('__init__', _task_init),\n ('run', _task_run),\n ('apply', _task_apply),\n ('apply_async', _task_apply_async),\n ]\n for method_name, wrapper in patch_methods:\n # Get original method\n method = getattr(task, method_name, None)\n if method is None:\n continue\n\n # Do not patch if method is already patched\n if isinstance(method, wrapt.ObjectProxy):\n continue\n\n # If the function as been applied as a decorator for v1 Celery tasks, then a different patching is needed\n if inspect.isclass(task) and issubclass(task, celery.task.Task):\n wrapped = wrapt.FunctionWrapper(method, wrapper)\n setattr(task, method_name, wrapped)\n continue\n # Patch method\n # DEV: Using `BoundFunctionWrapper` ensures our `task` wrapper parameter is properly set\n setattr(task, method_name, wrapt.BoundFunctionWrapper(method, task, wrapper))\n\n # Attach our pin to the app\n pin.onto(task)\n return task\n\ndef unpatch_task(task):\n \"\"\" unpatch_task will remove tracing from a celery task \"\"\"\n patched_methods = [\n '__init__',\n 'run',\n 'apply',\n 'apply_async',\n ]\n for method_name in patched_methods:\n # Get wrapped method\n wrapper = getattr(task, method_name, None)\n if wrapper is None:\n continue\n\n # Only unpatch if wrapper is an `ObjectProxy`\n if not isinstance(wrapper, wrapt.ObjectProxy):\n continue\n\n # Restore original method\n setattr(task, method_name, wrapper.__wrapped__)\n\n return task\n\n\ndef _task_init(func, task, args, kwargs):\n func(*args, **kwargs)\n\n # Patch this task if our pin is enabled\n pin = Pin.get_from(task)\n if pin and pin.enabled():\n patch_task(task, pin=pin)\n\n\n@require_pin\ndef _task_run(pin, func, task, args, kwargs):\n with pin.tracer.trace(WORKER_ROOT_SPAN, service=WORKER_SERVICE, resource=task.name) as span:\n # Set meta data from task request\n span.set_metas(meta_from_context(task.request))\n span.set_meta(TASK_TAG_KEY, TASK_RUN)\n\n # Call original `run` function\n return func(*args, **kwargs)\n\n\n@require_pin\ndef _task_apply(pin, func, task, args, kwargs):\n with pin.tracer.trace(PRODUCER_ROOT_SPAN, service=PRODUCER_SERVICE, resource=task.name) as span:\n # Call the original `apply` function\n res = func(*args, **kwargs)\n\n # Set meta data from response\n span.set_meta('id', res.id)\n span.set_meta('state', res.state)\n span.set_meta(TASK_TAG_KEY, TASK_APPLY)\n if res.traceback:\n span.error = 1\n span.set_meta(errors.STACK, res.traceback)\n return res\n\n\n@require_pin\ndef _task_apply_async(pin, func, task, args, kwargs):\n with pin.tracer.trace(PRODUCER_ROOT_SPAN, service=PRODUCER_SERVICE, resource=task.name) as span:\n # Extract meta data from `kwargs`\n meta_keys = (\n 'compression', 'countdown', 'eta', 'exchange', 'expires',\n 'priority', 'routing_key', 'serializer', 'queue',\n )\n for name in meta_keys:\n if name in kwargs:\n span.set_meta(name, kwargs[name])\n span.set_meta(TASK_TAG_KEY, TASK_APPLY_ASYNC)\n\n # Call the original `apply_async` function\n res = func(*args, **kwargs)\n\n # Set meta data from response\n # DEV: Calling `res.traceback` or `res.state` will make an\n # API call to the backend for the properties\n span.set_meta('id', res.id)\n return res\n", "path": "ddtrace/contrib/celery/task.py"}, {"content": "import celery\n\nfrom wrapt import wrap_function_wrapper as _w\n\nfrom .app import patch_app, unpatch_app\nfrom 
.registry import _wrap_register\nfrom ...utils.wrappers import unwrap as _u\n\n\ndef patch():\n \"\"\"Instrument Celery base application and the `TaskRegistry` so\n that any new registered task is automatically instrumented\n \"\"\"\n setattr(celery, 'Celery', patch_app(celery.Celery))\n _w('celery.app.registry', 'TaskRegistry.register', _wrap_register)\n\n\ndef unpatch():\n \"\"\"Removes instrumentation from Celery\"\"\"\n setattr(celery, 'Celery', unpatch_app(celery.Celery))\n _u(celery.app.registry.TaskRegistry, 'register')\n", "path": "ddtrace/contrib/celery/patch.py"}]} | 2,477 | 470 |
gh_patches_debug_34461 | rasdani/github-patches | git_diff | apache__airflow-35677 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TaskInstances do not succeed when using enable_logging=True option in DockerSwarmOperator
### Apache Airflow Provider(s)
docker
### Versions of Apache Airflow Providers
apache-airflow-providers-celery==3.1.0
apache-airflow-providers-docker==3.3.0
### Apache Airflow version
2.5.0
### Operating System
centos 7
### Deployment
Other Docker-based deployment
### Deployment details
Running on a docker-swarm cluster deployed locally.
### What happened
Same issue as https://github.com/apache/airflow/issues/13675
With enable_logging=True the DAG never completes and stays in the running state.
When using DockerSwarmOperator together with the default enable_logging=True option, tasks do not succeed and stay in state running. When checking the docker service logs I can clearly see that the container ran and ended successfully. Airflow however does not recognize that the container finished and keeps the tasks in state running.
### What you think should happen instead
DAG should complete.
### How to reproduce
Docker-compose deployment:
```console
curl -LfO 'https://airflow.apache.org/docs/apache-airflow/2.5.0/docker-compose.yaml'
docker compose up airflow-init
docker compose up -d
```
DAG code:
```python
from airflow import DAG
from docker.types import Mount, SecretReference
from airflow.providers.docker.operators.docker_swarm import DockerSwarmOperator
from datetime import timedelta
from airflow.utils.dates import days_ago
from airflow.models import Variable
# Setup default args for the job
default_args = {
'owner': 'airflow',
'start_date': days_ago(2),
'retries': 0
}
# Create the DAG
dag = DAG(
'test_dag', # DAG ID
default_args=default_args,
schedule_interval='0 0 * * *',
catchup=False
)
# # Create the DAG object
with dag as dag:
docker_swarm_task = DockerSwarmOperator(
task_id="job_run",
image="<any image>",
execution_timeout=timedelta(minutes=5),
command="<specific code>",
api_version='auto',
tty=True,
enable_logging=True
)
```
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
</issue>
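As a point of reference, the operator code below only blocks inside `_stream_logs_to_output()` when logging is enabled, so the reported hang is consistent with `service_logs(..., follow=True)` never returning. A task definition like the following sketch (an assumption, not taken from the report) avoids the hang at the cost of losing container logs:

```python
docker_swarm_task = DockerSwarmOperator(
    task_id="job_run",
    image="<any image>",
    command="<specific code>",
    api_version="auto",
    tty=True,
    enable_logging=False,  # skip the blocking log-follow loop; only poll service state
)
```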
<code>
[start of airflow/providers/docker/operators/docker_swarm.py]
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17 """Run ephemeral Docker Swarm services."""
18 from __future__ import annotations
19
20 from typing import TYPE_CHECKING
21
22 from docker import types
23
24 from airflow.exceptions import AirflowException
25 from airflow.providers.docker.operators.docker import DockerOperator
26 from airflow.utils.strings import get_random_string
27
28 if TYPE_CHECKING:
29 from airflow.utils.context import Context
30
31
32 class DockerSwarmOperator(DockerOperator):
33 """
34 Execute a command as an ephemeral docker swarm service.
35
36 Example use-case - Using Docker Swarm orchestration to make one-time
37 scripts highly available.
38
39 A temporary directory is created on the host and
40 mounted into a container to allow storing files
41 that together exceed the default disk size of 10GB in a container.
42 The path to the mounted directory can be accessed
43 via the environment variable ``AIRFLOW_TMP_DIR``.
44
45 If a login to a private registry is required prior to pulling the image, a
46 Docker connection needs to be configured in Airflow and the connection ID
47 be provided with the parameter ``docker_conn_id``.
48
49 :param image: Docker image from which to create the container.
50 If image tag is omitted, "latest" will be used.
51 :param api_version: Remote API version. Set to ``auto`` to automatically
52 detect the server's version.
53 :param auto_remove: Auto-removal of the container on daemon side when the
54 container's process exits.
55 The default is False.
56 :param command: Command to be run in the container. (templated)
57 :param docker_url: URL of the host running the docker daemon.
58 Default is unix://var/run/docker.sock
59 :param environment: Environment variables to set in the container. (templated)
60 :param force_pull: Pull the docker image on every run. Default is False.
61 :param mem_limit: Maximum amount of memory the container can use.
62 Either a float value, which represents the limit in bytes,
63 or a string like ``128m`` or ``1g``.
64 :param tls_ca_cert: Path to a PEM-encoded certificate authority
65 to secure the docker connection.
66 :param tls_client_cert: Path to the PEM-encoded certificate
67 used to authenticate docker client.
68 :param tls_client_key: Path to the PEM-encoded key used to authenticate docker client.
69 :param tls_hostname: Hostname to match against
70 the docker server certificate or False to disable the check.
71 :param tls_ssl_version: Version of SSL to use when communicating with docker daemon.
72 :param tmp_dir: Mount point inside the container to
73 a temporary directory created on the host by the operator.
74 The path is also made available via the environment variable
75 ``AIRFLOW_TMP_DIR`` inside the container.
76 :param user: Default user inside the docker container.
77 :param docker_conn_id: The :ref:`Docker connection id <howto/connection:docker>`
78 :param tty: Allocate pseudo-TTY to the container of this service
79 This needs to be set see logs of the Docker container / service.
80 :param enable_logging: Show the application's logs in operator's logs.
81 Supported only if the Docker engine is using json-file or journald logging drivers.
82 The `tty` parameter should be set to use this with Python applications.
83 :param configs: List of docker configs to be exposed to the containers of the swarm service.
84 The configs are ConfigReference objects as per the docker api
85 [https://docker-py.readthedocs.io/en/stable/services.html#docker.models.services.ServiceCollection.create]_
86 :param secrets: List of docker secrets to be exposed to the containers of the swarm service.
87 The secrets are SecretReference objects as per the docker create_service api.
88 [https://docker-py.readthedocs.io/en/stable/services.html#docker.models.services.ServiceCollection.create]_
89 :param mode: Indicate whether a service should be deployed as a replicated or global service,
90 and associated parameters
91 :param networks: List of network names or IDs or NetworkAttachmentConfig to attach the service to.
92 :param placement: Placement instructions for the scheduler. If a list is passed instead,
93 it is assumed to be a list of constraints as part of a Placement object.
94 """
95
96 def __init__(
97 self,
98 *,
99 image: str,
100 enable_logging: bool = True,
101 configs: list[types.ConfigReference] | None = None,
102 secrets: list[types.SecretReference] | None = None,
103 mode: types.ServiceMode | None = None,
104 networks: list[str | types.NetworkAttachmentConfig] | None = None,
105 placement: types.Placement | list[types.Placement] | None = None,
106 **kwargs,
107 ) -> None:
108 super().__init__(image=image, **kwargs)
109 self.enable_logging = enable_logging
110 self.service = None
111 self.configs = configs
112 self.secrets = secrets
113 self.mode = mode
114 self.networks = networks
115 self.placement = placement
116
117 def execute(self, context: Context) -> None:
118 self.environment["AIRFLOW_TMP_DIR"] = self.tmp_dir
119 return self._run_service()
120
121 def _run_service(self) -> None:
122 self.log.info("Starting docker service from image %s", self.image)
123 self.service = self.cli.create_service(
124 types.TaskTemplate(
125 container_spec=types.ContainerSpec(
126 image=self.image,
127 command=self.format_command(self.command),
128 mounts=self.mounts,
129 env=self.environment,
130 user=self.user,
131 tty=self.tty,
132 configs=self.configs,
133 secrets=self.secrets,
134 ),
135 restart_policy=types.RestartPolicy(condition="none"),
136 resources=types.Resources(mem_limit=self.mem_limit),
137 networks=self.networks,
138 placement=self.placement,
139 ),
140 name=f"airflow-{get_random_string()}",
141 labels={"name": f"airflow__{self.dag_id}__{self.task_id}"},
142 mode=self.mode,
143 )
144 if self.service is None:
145 raise Exception("Service should be set here")
146 self.log.info("Service started: %s", self.service)
147
148 # wait for the service to start the task
149 while not self.cli.tasks(filters={"service": self.service["ID"]}):
150 continue
151
152 if self.enable_logging:
153 self._stream_logs_to_output()
154
155 while True:
156 if self._has_service_terminated():
157 self.log.info("Service status before exiting: %s", self._service_status())
158 break
159
160 self.log.info("auto_removeauto_removeauto_removeauto_removeauto_remove : %s", str(self.auto_remove))
161 if self.service and self._service_status() != "complete":
162 if self.auto_remove == "success":
163 self.cli.remove_service(self.service["ID"])
164 raise AirflowException(f"Service did not complete: {self.service!r}")
165 elif self.auto_remove == "success":
166 if not self.service:
167 raise Exception("The 'service' should be initialized before!")
168 self.cli.remove_service(self.service["ID"])
169
170 def _service_status(self) -> str | None:
171 if not self.service:
172 raise Exception("The 'service' should be initialized before!")
173 return self.cli.tasks(filters={"service": self.service["ID"]})[0]["Status"]["State"]
174
175 def _has_service_terminated(self) -> bool:
176 status = self._service_status()
177 return status in ["complete", "failed", "shutdown", "rejected", "orphaned", "remove"]
178
179 def _stream_logs_to_output(self) -> None:
180 if not self.service:
181 raise Exception("The 'service' should be initialized before!")
182 logs = self.cli.service_logs(
183 self.service["ID"], follow=True, stdout=True, stderr=True, is_tty=self.tty
184 )
185 line = ""
186 for log in logs:
187 try:
188 log = log.decode()
189 except UnicodeDecodeError:
190 continue
191 if log == "\n":
192 self.log.info(line)
193 line = ""
194 else:
195 line += log
196 # flush any remaining log stream
197 if line:
198 self.log.info(line)
199
200 def on_kill(self) -> None:
201 if self.hook.client_created and self.service is not None:
202 self.log.info("Removing docker service: %s", self.service["ID"])
203 self.cli.remove_service(self.service["ID"])
204
[end of airflow/providers/docker/operators/docker_swarm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/airflow/providers/docker/operators/docker_swarm.py b/airflow/providers/docker/operators/docker_swarm.py
--- a/airflow/providers/docker/operators/docker_swarm.py
+++ b/airflow/providers/docker/operators/docker_swarm.py
@@ -17,6 +17,9 @@
"""Run ephemeral Docker Swarm services."""
from __future__ import annotations
+import re
+from datetime import datetime
+from time import sleep
from typing import TYPE_CHECKING
from docker import types
@@ -179,23 +182,34 @@
def _stream_logs_to_output(self) -> None:
if not self.service:
raise Exception("The 'service' should be initialized before!")
- logs = self.cli.service_logs(
- self.service["ID"], follow=True, stdout=True, stderr=True, is_tty=self.tty
- )
- line = ""
- for log in logs:
- try:
- log = log.decode()
- except UnicodeDecodeError:
- continue
- if log == "\n":
- self.log.info(line)
- line = ""
- else:
- line += log
- # flush any remaining log stream
- if line:
- self.log.info(line)
+ last_line_logged, last_timestamp = "", 0
+
+ def stream_new_logs(last_line_logged, since=0):
+ logs = self.cli.service_logs(
+ self.service["ID"],
+ follow=False,
+ stdout=True,
+ stderr=True,
+ is_tty=self.tty,
+ since=since,
+ timestamps=True,
+ )
+ logs = b"".join(logs).decode().splitlines()
+ if last_line_logged in logs:
+ logs = logs[logs.index(last_line_logged) + 1 :]
+ for line in logs:
+ match = re.match(r"(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{6,}Z) (.*)", line)
+ timestamp, message = match.groups()
+ self.log.info(message)
+ # Floor nanoseconds to microseconds
+ last_timestamp = re.sub(r"(\.\d{6})\d+Z", r"\1Z", timestamp)
+ last_timestamp = datetime.strptime(last_timestamp, "%Y-%m-%dT%H:%M:%S.%fZ")
+ last_timestamp = last_timestamp.timestamp()
+ return last_line_logged, last_timestamp
+
+ while not self._has_service_terminated():
+ sleep(2)
+ last_line_logged, last_timestamp = stream_new_logs(last_line_logged, since=last_timestamp)
def on_kill(self) -> None:
if self.hook.client_created and self.service is not None:
| {"golden_diff": "diff --git a/airflow/providers/docker/operators/docker_swarm.py b/airflow/providers/docker/operators/docker_swarm.py\n--- a/airflow/providers/docker/operators/docker_swarm.py\n+++ b/airflow/providers/docker/operators/docker_swarm.py\n@@ -17,6 +17,9 @@\n \"\"\"Run ephemeral Docker Swarm services.\"\"\"\n from __future__ import annotations\n \n+import re\n+from datetime import datetime\n+from time import sleep\n from typing import TYPE_CHECKING\n \n from docker import types\n@@ -179,23 +182,34 @@\n def _stream_logs_to_output(self) -> None:\n if not self.service:\n raise Exception(\"The 'service' should be initialized before!\")\n- logs = self.cli.service_logs(\n- self.service[\"ID\"], follow=True, stdout=True, stderr=True, is_tty=self.tty\n- )\n- line = \"\"\n- for log in logs:\n- try:\n- log = log.decode()\n- except UnicodeDecodeError:\n- continue\n- if log == \"\\n\":\n- self.log.info(line)\n- line = \"\"\n- else:\n- line += log\n- # flush any remaining log stream\n- if line:\n- self.log.info(line)\n+ last_line_logged, last_timestamp = \"\", 0\n+\n+ def stream_new_logs(last_line_logged, since=0):\n+ logs = self.cli.service_logs(\n+ self.service[\"ID\"],\n+ follow=False,\n+ stdout=True,\n+ stderr=True,\n+ is_tty=self.tty,\n+ since=since,\n+ timestamps=True,\n+ )\n+ logs = b\"\".join(logs).decode().splitlines()\n+ if last_line_logged in logs:\n+ logs = logs[logs.index(last_line_logged) + 1 :]\n+ for line in logs:\n+ match = re.match(r\"(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.\\d{6,}Z) (.*)\", line)\n+ timestamp, message = match.groups()\n+ self.log.info(message)\n+ # Floor nanoseconds to microseconds\n+ last_timestamp = re.sub(r\"(\\.\\d{6})\\d+Z\", r\"\\1Z\", timestamp)\n+ last_timestamp = datetime.strptime(last_timestamp, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n+ last_timestamp = last_timestamp.timestamp()\n+ return last_line_logged, last_timestamp\n+\n+ while not self._has_service_terminated():\n+ sleep(2)\n+ last_line_logged, last_timestamp = stream_new_logs(last_line_logged, since=last_timestamp)\n \n def on_kill(self) -> None:\n if self.hook.client_created and self.service is not None:\n", "issue": "TaskInstances do not succeed when using enable_logging=True option in DockerSwarmOperator\n### Apache Airflow Provider(s)\r\n\r\ndocker\r\n\r\n### Versions of Apache Airflow Providers\r\n\r\napache-airflow-providers-celery==3.1.0\r\napache-airflow-providers-docker==3.3.0\r\n\r\n### Apache Airflow version\r\n\r\n2.5.0\r\n\r\n### Operating System\r\n\r\ncentos 7\r\n\r\n### Deployment\r\n\r\nOther Docker-based deployment\r\n\r\n### Deployment details\r\n\r\nRunning an a docker-swarm cluster deployed locally. \r\n\r\n### What happened\r\n\r\nSame issue as https://github.com/apache/airflow/issues/13675\r\n\r\nWith logging_enabled=True the DAG never completes and stays in running. \r\n\r\nWhen using DockerSwarmOperator together with the default enable_logging=True option, tasks do not succeed and stay in state running. When checking the docker service logs I can clearly see that the container ran and ended successfully. 
Airflow however does not recognize that the container finished and keeps the tasks in state running.\r\n\r\n### What you think should happen instead\r\n\r\nDAG should complete.\r\n\r\n### How to reproduce\r\n\r\nDocker-compose deployment:\r\n```console\r\ncurl -LfO 'https://airflow.apache.org/docs/apache-airflow/2.5.0/docker-compose.yaml'\r\ndocker compose up airflow-init\r\ndocker compose up -d\r\n```\r\n\r\nDAG code:\r\n\r\n```python\r\nfrom airflow import DAG\r\nfrom docker.types import Mount, SecretReference\r\nfrom airflow.providers.docker.operators.docker_swarm import DockerSwarmOperator\r\nfrom datetime import timedelta\r\nfrom airflow.utils.dates import days_ago\r\nfrom airflow.models import Variable\r\n\r\n\r\n# Setup default args for the job\r\ndefault_args = {\r\n\t'owner': 'airflow',\r\n\t'start_date': days_ago(2),\r\n\t'retries': 0\r\n}\r\n \r\n# Create the DAG\r\ndag = DAG(\r\n 'test_dag', # DAG ID\r\n default_args=default_args,\r\n schedule_interval='0 0 * * *', \r\n catchup=False\r\n)\r\n\r\n# # Create the DAG object\r\nwith dag as dag:\r\n\t docker_swarm_task = DockerSwarmOperator(\r\n\t\t\ttask_id=\"job_run\",\r\n\t\t\timage=\"<any image>\",\r\n\t\t\texecution_timeout=timedelta(minutes=5),\r\n\t\t\tcommand=\"<specific code>\",\r\n\t\t\tapi_version='auto',\r\n\t\t\ttty=True,\r\n\t\t\tenable_logging=True\r\n\t\t)\r\n```\r\n\r\n### Anything else\r\n\r\n_No response_\r\n\r\n### Are you willing to submit PR?\r\n\r\n- [X] Yes I am willing to submit a PR!\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\r\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Run ephemeral Docker Swarm services.\"\"\"\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom docker import types\n\nfrom airflow.exceptions import AirflowException\nfrom airflow.providers.docker.operators.docker import DockerOperator\nfrom airflow.utils.strings import get_random_string\n\nif TYPE_CHECKING:\n from airflow.utils.context import Context\n\n\nclass DockerSwarmOperator(DockerOperator):\n \"\"\"\n Execute a command as an ephemeral docker swarm service.\n\n Example use-case - Using Docker Swarm orchestration to make one-time\n scripts highly available.\n\n A temporary directory is created on the host and\n mounted into a container to allow storing files\n that together exceed the default disk size of 10GB in a container.\n The path to the mounted directory can be accessed\n via the environment variable ``AIRFLOW_TMP_DIR``.\n\n If a login to a private registry is required prior to pulling the image, a\n Docker connection needs to be configured in Airflow and the connection ID\n be provided with the parameter ``docker_conn_id``.\n\n :param image: Docker image from which to create the container.\n If image tag is omitted, \"latest\" will be used.\n :param api_version: Remote API version. Set to ``auto`` to automatically\n detect the server's version.\n :param auto_remove: Auto-removal of the container on daemon side when the\n container's process exits.\n The default is False.\n :param command: Command to be run in the container. (templated)\n :param docker_url: URL of the host running the docker daemon.\n Default is unix://var/run/docker.sock\n :param environment: Environment variables to set in the container. (templated)\n :param force_pull: Pull the docker image on every run. 
Default is False.\n :param mem_limit: Maximum amount of memory the container can use.\n Either a float value, which represents the limit in bytes,\n or a string like ``128m`` or ``1g``.\n :param tls_ca_cert: Path to a PEM-encoded certificate authority\n to secure the docker connection.\n :param tls_client_cert: Path to the PEM-encoded certificate\n used to authenticate docker client.\n :param tls_client_key: Path to the PEM-encoded key used to authenticate docker client.\n :param tls_hostname: Hostname to match against\n the docker server certificate or False to disable the check.\n :param tls_ssl_version: Version of SSL to use when communicating with docker daemon.\n :param tmp_dir: Mount point inside the container to\n a temporary directory created on the host by the operator.\n The path is also made available via the environment variable\n ``AIRFLOW_TMP_DIR`` inside the container.\n :param user: Default user inside the docker container.\n :param docker_conn_id: The :ref:`Docker connection id <howto/connection:docker>`\n :param tty: Allocate pseudo-TTY to the container of this service\n This needs to be set see logs of the Docker container / service.\n :param enable_logging: Show the application's logs in operator's logs.\n Supported only if the Docker engine is using json-file or journald logging drivers.\n The `tty` parameter should be set to use this with Python applications.\n :param configs: List of docker configs to be exposed to the containers of the swarm service.\n The configs are ConfigReference objects as per the docker api\n [https://docker-py.readthedocs.io/en/stable/services.html#docker.models.services.ServiceCollection.create]_\n :param secrets: List of docker secrets to be exposed to the containers of the swarm service.\n The secrets are SecretReference objects as per the docker create_service api.\n [https://docker-py.readthedocs.io/en/stable/services.html#docker.models.services.ServiceCollection.create]_\n :param mode: Indicate whether a service should be deployed as a replicated or global service,\n and associated parameters\n :param networks: List of network names or IDs or NetworkAttachmentConfig to attach the service to.\n :param placement: Placement instructions for the scheduler. 
If a list is passed instead,\n it is assumed to be a list of constraints as part of a Placement object.\n \"\"\"\n\n def __init__(\n self,\n *,\n image: str,\n enable_logging: bool = True,\n configs: list[types.ConfigReference] | None = None,\n secrets: list[types.SecretReference] | None = None,\n mode: types.ServiceMode | None = None,\n networks: list[str | types.NetworkAttachmentConfig] | None = None,\n placement: types.Placement | list[types.Placement] | None = None,\n **kwargs,\n ) -> None:\n super().__init__(image=image, **kwargs)\n self.enable_logging = enable_logging\n self.service = None\n self.configs = configs\n self.secrets = secrets\n self.mode = mode\n self.networks = networks\n self.placement = placement\n\n def execute(self, context: Context) -> None:\n self.environment[\"AIRFLOW_TMP_DIR\"] = self.tmp_dir\n return self._run_service()\n\n def _run_service(self) -> None:\n self.log.info(\"Starting docker service from image %s\", self.image)\n self.service = self.cli.create_service(\n types.TaskTemplate(\n container_spec=types.ContainerSpec(\n image=self.image,\n command=self.format_command(self.command),\n mounts=self.mounts,\n env=self.environment,\n user=self.user,\n tty=self.tty,\n configs=self.configs,\n secrets=self.secrets,\n ),\n restart_policy=types.RestartPolicy(condition=\"none\"),\n resources=types.Resources(mem_limit=self.mem_limit),\n networks=self.networks,\n placement=self.placement,\n ),\n name=f\"airflow-{get_random_string()}\",\n labels={\"name\": f\"airflow__{self.dag_id}__{self.task_id}\"},\n mode=self.mode,\n )\n if self.service is None:\n raise Exception(\"Service should be set here\")\n self.log.info(\"Service started: %s\", self.service)\n\n # wait for the service to start the task\n while not self.cli.tasks(filters={\"service\": self.service[\"ID\"]}):\n continue\n\n if self.enable_logging:\n self._stream_logs_to_output()\n\n while True:\n if self._has_service_terminated():\n self.log.info(\"Service status before exiting: %s\", self._service_status())\n break\n\n self.log.info(\"auto_removeauto_removeauto_removeauto_removeauto_remove : %s\", str(self.auto_remove))\n if self.service and self._service_status() != \"complete\":\n if self.auto_remove == \"success\":\n self.cli.remove_service(self.service[\"ID\"])\n raise AirflowException(f\"Service did not complete: {self.service!r}\")\n elif self.auto_remove == \"success\":\n if not self.service:\n raise Exception(\"The 'service' should be initialized before!\")\n self.cli.remove_service(self.service[\"ID\"])\n\n def _service_status(self) -> str | None:\n if not self.service:\n raise Exception(\"The 'service' should be initialized before!\")\n return self.cli.tasks(filters={\"service\": self.service[\"ID\"]})[0][\"Status\"][\"State\"]\n\n def _has_service_terminated(self) -> bool:\n status = self._service_status()\n return status in [\"complete\", \"failed\", \"shutdown\", \"rejected\", \"orphaned\", \"remove\"]\n\n def _stream_logs_to_output(self) -> None:\n if not self.service:\n raise Exception(\"The 'service' should be initialized before!\")\n logs = self.cli.service_logs(\n self.service[\"ID\"], follow=True, stdout=True, stderr=True, is_tty=self.tty\n )\n line = \"\"\n for log in logs:\n try:\n log = log.decode()\n except UnicodeDecodeError:\n continue\n if log == \"\\n\":\n self.log.info(line)\n line = \"\"\n else:\n line += log\n # flush any remaining log stream\n if line:\n self.log.info(line)\n\n def on_kill(self) -> None:\n if self.hook.client_created and self.service is not None:\n 
self.log.info(\"Removing docker service: %s\", self.service[\"ID\"])\n self.cli.remove_service(self.service[\"ID\"])\n", "path": "airflow/providers/docker/operators/docker_swarm.py"}]} | 3,566 | 613 |
gh_patches_debug_38160 | rasdani/github-patches | git_diff | archlinux__archinstall-238 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Look in to enabling SMART for drives that support it
Something like `smartctl --smart=on --offlineauto=on --saveauto=on /dev/sda` where `archinstall.hardware.detectSmart()` finds drives that support it (to extend drive lifetime if possible).
</issue>
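For context, a rough sketch of what such a helper could look like. `detectSmart()` does not exist yet in `archinstall.hardware`, so everything below is an assumption; it uses plain `subprocess` rather than archinstall's own command wrappers, and only the `smartctl` invocation in the loop is taken from the issue text.

```python
import glob
import subprocess


def detect_smart_drives():
    """Return block devices whose `smartctl -i` output reports SMART support."""
    supported = []
    for drive in sorted(glob.glob('/dev/sd[a-z]')):
        try:
            result = subprocess.run(['smartctl', '-i', drive], capture_output=True, text=True)
        except FileNotFoundError:
            return []  # smartmontools is not installed
        if 'SMART support is: Available' in result.stdout:
            supported.append(drive)
    return supported


for drive in detect_smart_drives():
    # The exact invocation suggested in the issue above
    subprocess.run(['smartctl', '--smart=on', '--offlineauto=on', '--saveauto=on', drive])
```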
<code>
[start of profiles/desktop.py]
1 # A desktop environment selector.
2
3 import archinstall, os
4
5 is_top_level_profile = True
6
7 def _prep_function(*args, **kwargs):
8 """
9 Magic function called by the importing installer
10 before continuing any further. It also avoids executing any
11 other code in this stage. So it's a safe way to ask the user
12 for more input before any other installer steps start.
13 """
14
15 supported_desktops = ['gnome', 'kde', 'awesome']
16 desktop = archinstall.generic_select(supported_desktops, 'Select your desired desktop environment: ')
17
18 # Temporarily store the selected desktop profile
19 # in a session-safe location, since this module will get reloaded
20 # the next time it gets executed.
21 archinstall.storage['_desktop_profile'] = desktop
22
23 profile = archinstall.Profile(None, desktop)
24 # Loading the instructions with a custom namespace, ensures that a __name__ comparison is never triggered.
25 with profile.load_instructions(namespace=f"{desktop}.py") as imported:
26 if hasattr(imported, '_prep_function'):
27 return imported._prep_function()
28 else:
29 print(f"Deprecated (??): {desktop} profile has no _prep_function() anymore")
30
31 if __name__ == 'desktop':
32 """
33 This "profile" is a meta-profile.
34 There are no desktop-specific steps, it simply routes
35 the installer to whichever desktop environment/window manager was chosen.
36
37 Maybe in the future, a network manager or similar things *could* be added here.
38 We should honor that Arch Linux does not officially endorse a desktop-setup, nor is
39 it trying to be a turn-key desktop distribution.
40
41 There are plenty of desktop-turn-key-solutions based on Arch Linux,
42 this is therefore just a helper to get started
43 """
44
45 # TODO: Remove magic variable 'installation' and place it
46 # in archinstall.storage or archinstall.session/archinstall.installation
47 installation.install_profile(archinstall.storage['_desktop_profile'])
48
[end of profiles/desktop.py]
[start of profiles/awesome.py]
1 # A desktop environment using "Awesome" window manager.
2
3 import archinstall
4
5 is_top_level_profile = False
6
7 # New way of defining packages for a profile, which is iterable and can be used out side
8 # of the profile to get a list of "what packages will be installed".
9 __packages__ = ['nano', 'nemo', 'gpicview-gtk3', 'openssh', 'sshfs', 'htop', 'scrot', 'wget']
10
11 def _prep_function(*args, **kwargs):
12 """
13 Magic function called by the importing installer
14 before continuing any further. It also avoids executing any
15 other code in this stage. So it's a safe way to ask the user
16 for more input before any other installer steps start.
17 """
18
19 # Awesome WM requires that xorg is installed
20 profile = archinstall.Profile(None, 'xorg')
21 with profile.load_instructions(namespace='xorg.py') as imported:
22 if hasattr(imported, '_prep_function'):
23 return imported._prep_function()
24 else:
25 print('Deprecated (??): xorg profile has no _prep_function() anymore')
26
27
28 # Ensures that this code only gets executed if executed
29 # through importlib.util.spec_from_file_location("awesome", "/somewhere/awesome.py")
30 # or through conventional import awesome
31 if __name__ == 'awesome':
32 # Install the application awesome from the template under /applications/
33 awesome = archinstall.Application(installation, 'awesome')
34 awesome.install()
35
36 # Then setup and configure the desktop environment: awesome
37 editor = "nano"
38 filebrowser = "nemo gpicview-gtk3"
39 utils = "openssh sshfs htop scrot wget"
40
41
42 installation.add_additional_packages(f"{utils} {filebrowser} {editor}")
43
44 alacritty = archinstall.Application(installation, 'alacritty')
45 alacritty.install()
46
47 # TODO: Copy a full configuration to ~/.config/awesome/rc.lua instead.
48 with open(f'{installation.mountpoint}/etc/xdg/awesome/rc.lua', 'r') as fh:
49 awesome_lua = fh.read()
50
51 ## Replace xterm with alacritty for a smoother experience.
52 awesome_lua = awesome_lua.replace('"xterm"', '"alacritty"')
53
54 with open(f'{installation.mountpoint}/etc/xdg/awesome/rc.lua', 'w') as fh:
55 fh.write(awesome_lua)
56
57 ## TODO: Configure the right-click-menu to contain the above packages that were installed. (as a user config)
58
59 ## Remove some interfering nemo settings
60 installation.arch_chroot("gsettings set org.nemo.desktop show-desktop-icons false")
61 installation.arch_chroot("xdg-mime default nemo.desktop inode/directory application/x-gnome-saved-search")
62
[end of profiles/awesome.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/profiles/awesome.py b/profiles/awesome.py
--- a/profiles/awesome.py
+++ b/profiles/awesome.py
@@ -6,7 +6,7 @@
# New way of defining packages for a profile, which is iterable and can be used out side
# of the profile to get a list of "what packages will be installed".
-__packages__ = ['nano', 'nemo', 'gpicview-gtk3', 'openssh', 'sshfs', 'htop', 'scrot', 'wget']
+__packages__ = ['nemo', 'gpicview-gtk3', 'scrot']
def _prep_function(*args, **kwargs):
"""
@@ -33,13 +33,7 @@
awesome = archinstall.Application(installation, 'awesome')
awesome.install()
- # Then setup and configure the desktop environment: awesome
- editor = "nano"
- filebrowser = "nemo gpicview-gtk3"
- utils = "openssh sshfs htop scrot wget"
-
-
- installation.add_additional_packages(f"{utils} {filebrowser} {editor}")
+ installation.add_additional_packages(__packages__)
alacritty = archinstall.Application(installation, 'alacritty')
alacritty.install()
diff --git a/profiles/desktop.py b/profiles/desktop.py
--- a/profiles/desktop.py
+++ b/profiles/desktop.py
@@ -4,6 +4,10 @@
is_top_level_profile = True
+# New way of defining packages for a profile, which is iterable and can be used out side
+# of the profile to get a list of "what packages will be installed".
+__packages__ = ['nano', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools']
+
def _prep_function(*args, **kwargs):
"""
Magic function called by the importing installer
@@ -14,7 +18,7 @@
supported_desktops = ['gnome', 'kde', 'awesome']
desktop = archinstall.generic_select(supported_desktops, 'Select your desired desktop environment: ')
-
+
# Temporarily store the selected desktop profile
# in a session-safe location, since this module will get reloaded
# the next time it gets executed.
@@ -41,7 +45,11 @@
There are plenty of desktop-turn-key-solutions based on Arch Linux,
this is therefore just a helper to get started
"""
+
+ # Install common packages for all desktop environments
+ installation.add_additional_packages(__packages__)
# TODO: Remove magic variable 'installation' and place it
# in archinstall.storage or archinstall.session/archinstall.installation
installation.install_profile(archinstall.storage['_desktop_profile'])
+
| {"golden_diff": "diff --git a/profiles/awesome.py b/profiles/awesome.py\n--- a/profiles/awesome.py\n+++ b/profiles/awesome.py\n@@ -6,7 +6,7 @@\n \n # New way of defining packages for a profile, which is iterable and can be used out side\n # of the profile to get a list of \"what packages will be installed\".\n-__packages__ = ['nano', 'nemo', 'gpicview-gtk3', 'openssh', 'sshfs', 'htop', 'scrot', 'wget']\n+__packages__ = ['nemo', 'gpicview-gtk3', 'scrot']\n \n def _prep_function(*args, **kwargs):\n \t\"\"\"\n@@ -33,13 +33,7 @@\n \tawesome = archinstall.Application(installation, 'awesome')\n \tawesome.install()\n \n-\t# Then setup and configure the desktop environment: awesome\n-\teditor = \"nano\"\n-\tfilebrowser = \"nemo gpicview-gtk3\"\n-\tutils = \"openssh sshfs htop scrot wget\"\n-\n-\n-\tinstallation.add_additional_packages(f\"{utils} {filebrowser} {editor}\")\n+\tinstallation.add_additional_packages(__packages__)\n \n \talacritty = archinstall.Application(installation, 'alacritty')\n \talacritty.install()\ndiff --git a/profiles/desktop.py b/profiles/desktop.py\n--- a/profiles/desktop.py\n+++ b/profiles/desktop.py\n@@ -4,6 +4,10 @@\n \n is_top_level_profile = True\n \n+# New way of defining packages for a profile, which is iterable and can be used out side\n+# of the profile to get a list of \"what packages will be installed\".\n+__packages__ = ['nano', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools']\n+\n def _prep_function(*args, **kwargs):\n \t\"\"\"\n \tMagic function called by the importing installer\n@@ -14,7 +18,7 @@\n \n \tsupported_desktops = ['gnome', 'kde', 'awesome']\n \tdesktop = archinstall.generic_select(supported_desktops, 'Select your desired desktop environment: ')\n-\n+\t\n \t# Temporarily store the selected desktop profile\n \t# in a session-safe location, since this module will get reloaded\n \t# the next time it gets executed.\n@@ -41,7 +45,11 @@\n \tThere are plenty of desktop-turn-key-solutions based on Arch Linux,\n \tthis is therefore just a helper to get started\n \t\"\"\"\n+\t\n+\t# Install common packages for all desktop environments\n+\tinstallation.add_additional_packages(__packages__)\n \n \t# TODO: Remove magic variable 'installation' and place it\n \t# in archinstall.storage or archinstall.session/archinstall.installation\n \tinstallation.install_profile(archinstall.storage['_desktop_profile'])\n+\n", "issue": "Look in to enabling SMART for drives that support it\nSomething like `smartctl --smart=on --offlineauto=on --saveauto=on /dev/sda` where `archinstall.hardware.detectSmart()` finds drives that support it (to extend drive lifetime if possible).\n", "before_files": [{"content": "# A desktop environment selector.\n\nimport archinstall, os\n\nis_top_level_profile = True\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. 
So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\tsupported_desktops = ['gnome', 'kde', 'awesome']\n\tdesktop = archinstall.generic_select(supported_desktops, 'Select your desired desktop environment: ')\n\n\t# Temporarily store the selected desktop profile\n\t# in a session-safe location, since this module will get reloaded\n\t# the next time it gets executed.\n\tarchinstall.storage['_desktop_profile'] = desktop\n\n\tprofile = archinstall.Profile(None, desktop)\n\t# Loading the instructions with a custom namespace, ensures that a __name__ comparison is never triggered.\n\twith profile.load_instructions(namespace=f\"{desktop}.py\") as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint(f\"Deprecated (??): {desktop} profile has no _prep_function() anymore\")\n\nif __name__ == 'desktop':\n\t\"\"\"\n\tThis \"profile\" is a meta-profile.\n\tThere are no desktop-specific steps, it simply routes\n\tthe installer to whichever desktop environment/window manager was chosen.\n\n\tMaybe in the future, a network manager or similar things *could* be added here.\n\tWe should honor that Arch Linux does not officially endorse a desktop-setup, nor is\n\tit trying to be a turn-key desktop distribution.\n\n\tThere are plenty of desktop-turn-key-solutions based on Arch Linux,\n\tthis is therefore just a helper to get started\n\t\"\"\"\n\n\t# TODO: Remove magic variable 'installation' and place it\n\t# in archinstall.storage or archinstall.session/archinstall.installation\n\tinstallation.install_profile(archinstall.storage['_desktop_profile'])\n", "path": "profiles/desktop.py"}, {"content": "# A desktop environment using \"Awesome\" window manager.\n\nimport archinstall\n\nis_top_level_profile = False\n\n# New way of defining packages for a profile, which is iterable and can be used out side\n# of the profile to get a list of \"what packages will be installed\".\n__packages__ = ['nano', 'nemo', 'gpicview-gtk3', 'openssh', 'sshfs', 'htop', 'scrot', 'wget']\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. 
So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\t# Awesome WM requires that xorg is installed\n\tprofile = archinstall.Profile(None, 'xorg')\n\twith profile.load_instructions(namespace='xorg.py') as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint('Deprecated (??): xorg profile has no _prep_function() anymore')\n\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"awesome\", \"/somewhere/awesome.py\")\n# or through conventional import awesome\nif __name__ == 'awesome':\n\t# Install the application awesome from the template under /applications/\n\tawesome = archinstall.Application(installation, 'awesome')\n\tawesome.install()\n\n\t# Then setup and configure the desktop environment: awesome\n\teditor = \"nano\"\n\tfilebrowser = \"nemo gpicview-gtk3\"\n\tutils = \"openssh sshfs htop scrot wget\"\n\n\n\tinstallation.add_additional_packages(f\"{utils} {filebrowser} {editor}\")\n\n\talacritty = archinstall.Application(installation, 'alacritty')\n\talacritty.install()\n\n\t# TODO: Copy a full configuration to ~/.config/awesome/rc.lua instead.\n\twith open(f'{installation.mountpoint}/etc/xdg/awesome/rc.lua', 'r') as fh:\n\t\tawesome_lua = fh.read()\n\n\t## Replace xterm with alacritty for a smoother experience.\n\tawesome_lua = awesome_lua.replace('\"xterm\"', '\"alacritty\"')\n\n\twith open(f'{installation.mountpoint}/etc/xdg/awesome/rc.lua', 'w') as fh:\n\t\tfh.write(awesome_lua)\n\n\t## TODO: Configure the right-click-menu to contain the above packages that were installed. (as a user config)\n\t\n\t## Remove some interfering nemo settings\n\tinstallation.arch_chroot(\"gsettings set org.nemo.desktop show-desktop-icons false\")\n\tinstallation.arch_chroot(\"xdg-mime default nemo.desktop inode/directory application/x-gnome-saved-search\")\n", "path": "profiles/awesome.py"}]} | 1,873 | 640 |
gh_patches_debug_3276 | rasdani/github-patches | git_diff | scikit-hep__pyhf-362 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Lock Tensorflow to 1.11.0 release until TensorFlow probability has caught up
# Description
[TensorFlow 1.12.0 has been released](https://github.com/tensorflow/tensorflow/releases/tag/v1.12.0) and it has breaking changes. Most notably
> Remove `tf.contrib.linalg`. `tf.linalg` should be used instead.
This doesn't affect us, but it does affect [TensorFlow Probability `v0.3.0`, which breaks](https://travis-ci.org/diana-hep/pyhf/jobs/451151767#L668-L685):
```
ImportError while loading conftest '/home/travis/build/diana-hep/pyhf/tests/conftest.py'.
tests/conftest.py:46: in <module>
(pyhf.tensor.tensorflow_backend(session=tf.Session()), None)
pyhf/tensor/__init__.py:28: in __getattr__
from .tensorflow_backend import tensorflow_backend
pyhf/tensor/tensorflow_backend.py:3: in <module>
import tensorflow_probability as tfp
../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow_probability/__init__.py:21: in <module>
from tensorflow_probability.python import * # pylint: disable=wildcard-import
../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow_probability/python/__init__.py:22: in <module>
from tensorflow_probability.python import distributions
../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow_probability/python/distributions/__init__.py:44: in <module>
from tensorflow_probability.python.distributions.linear_gaussian_ssm import LinearGaussianStateSpaceModel
../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow_probability/python/distributions/linear_gaussian_ssm.py:34: in <module>
tfl = tf.contrib.linalg
../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow/python/util/lazy_loader.py:54: in __getattr__
return getattr(module, item)
E AttributeError: module 'tensorflow.contrib' has no attribute 'linalg'
```
Until `tfp` updates to using `v1.12` we'll have to lock to them.
## Related Issues
- Issue #330
# Checklist
- [x] Run `git fetch` to get the most up to date version of `master`
- [x] Searched through existing Issues to confirm this is not a duplicate issue
- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from setuptools import setup, find_packages
4 from os import path
5 import sys
6
7 this_directory = path.abspath(path.dirname(__file__))
8 if sys.version_info.major < 3:
9 from io import open
10 with open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:
11 long_description = readme_md.read()
12
13 extras_require = {
14 'tensorflow': [
15 'tensorflow>=1.10.0',
16 'tensorflow-probability==0.3.0',
17 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass
18 'setuptools<=39.1.0',
19 ],
20 'torch': ['torch>=0.4.0'],
21 'mxnet': [
22 'mxnet>=1.0.0',
23 'requests<2.19.0,>=2.18.4',
24 'numpy<1.15.0,>=1.8.2',
25 'requests<2.19.0,>=2.18.4',
26 ],
27 # 'dask': [
28 # 'dask[array]'
29 # ],
30 'xmlimport': ['uproot'],
31 'minuit': ['iminuit'],
32 'develop': [
33 'pyflakes',
34 'pytest>=3.5.1',
35 'pytest-cov>=2.5.1',
36 'pytest-benchmark[histogram]',
37 'pytest-console-scripts',
38 'python-coveralls',
39 'coverage>=4.0', # coveralls
40 'matplotlib',
41 'jupyter',
42 'nbdime',
43 'uproot>=3.0.0',
44 'papermill',
45 'graphviz',
46 'bumpversion',
47 'sphinx',
48 'sphinxcontrib-bibtex',
49 'sphinxcontrib-napoleon',
50 'sphinx_rtd_theme',
51 'nbsphinx',
52 'sphinx-issues',
53 'm2r',
54 'jsonpatch',
55 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now
56 'pre-commit',
57 'black;python_version>="3.6"', # Black is Python3 only
58 'twine',
59 ],
60 }
61 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
62
63 setup(
64 name='pyhf',
65 version='0.0.15',
66 description='(partial) pure python histfactory implementation',
67 long_description=long_description,
68 long_description_content_type='text/markdown',
69 url='https://github.com/diana-hep/pyhf',
70 author='Lukas Heinrich',
71 author_email='[email protected]',
72 license='Apache',
73 keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',
74 classifiers=[
75 "Programming Language :: Python :: 2",
76 "Programming Language :: Python :: 2.7",
77 "Programming Language :: Python :: 3",
78 "Programming Language :: Python :: 3.6",
79 ],
80 packages=find_packages(),
81 include_package_data=True,
82 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*",
83 install_requires=[
84 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet
85 'click>=6.0', # for console scripts,
86 'tqdm', # for readxml
87 'six', # for modifiers
88 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6
89 'jsonpatch',
90 ],
91 extras_require=extras_require,
92 entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},
93 dependency_links=[],
94 )
95
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,7 @@
extras_require = {
'tensorflow': [
- 'tensorflow>=1.10.0',
+ 'tensorflow<1.12.0,>=1.10.0',
'tensorflow-probability==0.3.0',
'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass
'setuptools<=39.1.0',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,7 +12,7 @@\n \n extras_require = {\n 'tensorflow': [\n- 'tensorflow>=1.10.0',\n+ 'tensorflow<1.12.0,>=1.10.0',\n 'tensorflow-probability==0.3.0',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n", "issue": "Lock Tensorflow to 1.11.0 release until TensorFlow probability has caught up\n# Description\r\n\r\n[TensorFlow 1.12.0 has been released](https://github.com/tensorflow/tensorflow/releases/tag/v1.12.0) and it has breaking changes. Most notably\r\n\r\n> Remove `tf.contrib.linalg`. `tf.linalg` should be used instead. \r\n\r\nThis doesn't affect us, but it does affect [TensorFlow Probability `v0.3.0`, which breaks](https://travis-ci.org/diana-hep/pyhf/jobs/451151767#L668-L685):\r\n\r\n```\r\nImportError while loading conftest '/home/travis/build/diana-hep/pyhf/tests/conftest.py'.\r\ntests/conftest.py:46: in <module>\r\n (pyhf.tensor.tensorflow_backend(session=tf.Session()), None)\r\npyhf/tensor/__init__.py:28: in __getattr__\r\n from .tensorflow_backend import tensorflow_backend\r\npyhf/tensor/tensorflow_backend.py:3: in <module>\r\n import tensorflow_probability as tfp\r\n../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow_probability/__init__.py:21: in <module>\r\n from tensorflow_probability.python import * # pylint: disable=wildcard-import\r\n../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow_probability/python/__init__.py:22: in <module>\r\n from tensorflow_probability.python import distributions\r\n../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow_probability/python/distributions/__init__.py:44: in <module>\r\n from tensorflow_probability.python.distributions.linear_gaussian_ssm import LinearGaussianStateSpaceModel\r\n../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow_probability/python/distributions/linear_gaussian_ssm.py:34: in <module>\r\n tfl = tf.contrib.linalg\r\n../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow/python/util/lazy_loader.py:54: in __getattr__\r\n return getattr(module, item)\r\nE AttributeError: module 'tensorflow.contrib' has no attribute 'linalg'\r\n```\r\n\r\nUntil `tfp` updates to using `v1.12` we'll have to lock to them.\r\n\r\n## Related Issues\r\n\r\n- Issue #330 \r\n\r\n# Checklist\r\n\r\n- [x] Run `git fetch` to get the most up to date version of `master`\r\n- [x] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow>=1.10.0',\n 'tensorflow-probability==0.3.0',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch>=0.4.0'],\n 'mxnet': [\n 'mxnet>=1.0.0',\n 'requests<2.19.0,>=2.18.4',\n 'numpy<1.15.0,>=1.8.2',\n 'requests<2.19.0,>=2.18.4',\n ],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlimport': ['uproot'],\n 
'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest>=3.5.1',\n 'pytest-cov>=2.5.1',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot>=3.0.0',\n 'papermill',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='pyhf',\n version='0.0.15',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n)\n", "path": "setup.py"}]} | 2,186 | 144 |
gh_patches_debug_6465 | rasdani/github-patches | git_diff | feast-dev__feast-3766 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feast ui cannot parse url path
## Expected Behavior
One of example cases:
When user navigate localhost:8888/p/order_count_project/feature-view/user_3_and_7_days_order_count should see related feature-view page
## Current Behavior
One of example cases:
When user navigate localhost:8888/p/order_count_project/feature-view/user_3_and_7_days_order_count see "Internal Server Error"
## Steps to reproduce
install feast 0.34.1
run feast ui
navigate homepage localhost:8888
navigate any page (entities or feature-view or data sources doesn't matter)
you will see the page you clicked at browser search bar like http://localhost:8888/p/order_count_project/data-source
then refresh or copy url open in new tab
you will see internal server error
### Specifications
- Version: 0.34.1
- Platform: macos
- Subsystem:
## Possible Solution
ui_server.py file updated recently. commit changes resource finder library and then it returns PosixPath.
We should convert to str and add little "/" to "@app.api_route("/p/{path_name:path}", methods=["GET"])" function
</issue>
<code>
[start of sdk/python/feast/ui_server.py]
1 import json
2 import threading
3 from typing import Callable, Optional
4
5 import importlib_resources
6 import uvicorn
7 from fastapi import FastAPI, Response
8 from fastapi.middleware.cors import CORSMiddleware
9 from fastapi.staticfiles import StaticFiles
10
11 import feast
12
13
14 def get_app(
15 store: "feast.FeatureStore",
16 project_id: str,
17 registry_ttl_secs: int,
18 root_path: str = "",
19 ):
20 app = FastAPI()
21
22 app.add_middleware(
23 CORSMiddleware,
24 allow_origins=["*"],
25 allow_credentials=True,
26 allow_methods=["*"],
27 allow_headers=["*"],
28 )
29
30 # Asynchronously refresh registry, notifying shutdown and canceling the active timer if the app is shutting down
31 registry_proto = None
32 shutting_down = False
33 active_timer: Optional[threading.Timer] = None
34
35 def async_refresh():
36 store.refresh_registry()
37 nonlocal registry_proto
38 registry_proto = store.registry.proto()
39 if shutting_down:
40 return
41 nonlocal active_timer
42 active_timer = threading.Timer(registry_ttl_secs, async_refresh)
43 active_timer.start()
44
45 @app.on_event("shutdown")
46 def shutdown_event():
47 nonlocal shutting_down
48 shutting_down = True
49 if active_timer:
50 active_timer.cancel()
51
52 async_refresh()
53
54 ui_dir_ref = importlib_resources.files(__name__) / "ui/build/"
55 with importlib_resources.as_file(ui_dir_ref) as ui_dir:
56 # Initialize with the projects-list.json file
57 with ui_dir.joinpath("projects-list.json").open(mode="w") as f:
58 projects_dict = {
59 "projects": [
60 {
61 "name": "Project",
62 "description": "Test project",
63 "id": project_id,
64 "registryPath": f"{root_path}/registry",
65 }
66 ]
67 }
68 f.write(json.dumps(projects_dict))
69
70 @app.get("/registry")
71 def read_registry():
72 return Response(
73 content=registry_proto.SerializeToString(),
74 media_type="application/octet-stream",
75 )
76
77 # For all other paths (such as paths that would otherwise be handled by react router), pass to React
78 @app.api_route("/p/{path_name:path}", methods=["GET"])
79 def catch_all():
80 filename = ui_dir + "index.html"
81
82 with open(filename) as f:
83 content = f.read()
84
85 return Response(content, media_type="text/html")
86
87 app.mount(
88 "/",
89 StaticFiles(directory=ui_dir, html=True),
90 name="site",
91 )
92
93 return app
94
95
96 def start_server(
97 store: "feast.FeatureStore",
98 host: str,
99 port: int,
100 get_registry_dump: Callable,
101 project_id: str,
102 registry_ttl_sec: int,
103 root_path: str = "",
104 ):
105 app = get_app(
106 store,
107 project_id,
108 registry_ttl_sec,
109 root_path,
110 )
111 uvicorn.run(app, host=host, port=port)
112
[end of sdk/python/feast/ui_server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sdk/python/feast/ui_server.py b/sdk/python/feast/ui_server.py
--- a/sdk/python/feast/ui_server.py
+++ b/sdk/python/feast/ui_server.py
@@ -77,7 +77,7 @@
# For all other paths (such as paths that would otherwise be handled by react router), pass to React
@app.api_route("/p/{path_name:path}", methods=["GET"])
def catch_all():
- filename = ui_dir + "index.html"
+ filename = ui_dir.joinpath("index.html")
with open(filename) as f:
content = f.read()
| {"golden_diff": "diff --git a/sdk/python/feast/ui_server.py b/sdk/python/feast/ui_server.py\n--- a/sdk/python/feast/ui_server.py\n+++ b/sdk/python/feast/ui_server.py\n@@ -77,7 +77,7 @@\n # For all other paths (such as paths that would otherwise be handled by react router), pass to React\n @app.api_route(\"/p/{path_name:path}\", methods=[\"GET\"])\n def catch_all():\n- filename = ui_dir + \"index.html\"\n+ filename = ui_dir.joinpath(\"index.html\")\n \n with open(filename) as f:\n content = f.read()\n", "issue": "Feast ui cannot parse url path\n## Expected Behavior \r\n\r\nOne of example cases:\r\nWhen user navigate localhost:8888/p/order_count_project/feature-view/user_3_and_7_days_order_count should see related feature-view page\r\n\r\n## Current Behavior\r\n\r\nOne of example cases:\r\nWhen user navigate localhost:8888/p/order_count_project/feature-view/user_3_and_7_days_order_count see \"Internal Server Error\"\r\n\r\n## Steps to reproduce\r\n\r\ninstall feast 0.34.1\r\nrun feast ui\r\nnavigate homepage localhost:8888\r\nnavigate any page (entities or feature-view or data sources doesn't matter)\r\nyou will see the page you clicked at browser search bar like http://localhost:8888/p/order_count_project/data-source \r\nthen refresh or copy url open in new tab\r\nyou will see internal server error\r\n\r\n### Specifications\r\n\r\n- Version: 0.34.1\r\n- Platform: macos\r\n- Subsystem: \r\n\r\n## Possible Solution\r\n\r\nui_server.py file updated recently. commit changes resource finder library and then it returns PosixPath. \r\nWe should convert to str and add little \"/\" to \"@app.api_route(\"/p/{path_name:path}\", methods=[\"GET\"])\" function\r\n\r\n\n", "before_files": [{"content": "import json\nimport threading\nfrom typing import Callable, Optional\n\nimport importlib_resources\nimport uvicorn\nfrom fastapi import FastAPI, Response\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.staticfiles import StaticFiles\n\nimport feast\n\n\ndef get_app(\n store: \"feast.FeatureStore\",\n project_id: str,\n registry_ttl_secs: int,\n root_path: str = \"\",\n):\n app = FastAPI()\n\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\n # Asynchronously refresh registry, notifying shutdown and canceling the active timer if the app is shutting down\n registry_proto = None\n shutting_down = False\n active_timer: Optional[threading.Timer] = None\n\n def async_refresh():\n store.refresh_registry()\n nonlocal registry_proto\n registry_proto = store.registry.proto()\n if shutting_down:\n return\n nonlocal active_timer\n active_timer = threading.Timer(registry_ttl_secs, async_refresh)\n active_timer.start()\n\n @app.on_event(\"shutdown\")\n def shutdown_event():\n nonlocal shutting_down\n shutting_down = True\n if active_timer:\n active_timer.cancel()\n\n async_refresh()\n\n ui_dir_ref = importlib_resources.files(__name__) / \"ui/build/\"\n with importlib_resources.as_file(ui_dir_ref) as ui_dir:\n # Initialize with the projects-list.json file\n with ui_dir.joinpath(\"projects-list.json\").open(mode=\"w\") as f:\n projects_dict = {\n \"projects\": [\n {\n \"name\": \"Project\",\n \"description\": \"Test project\",\n \"id\": project_id,\n \"registryPath\": f\"{root_path}/registry\",\n }\n ]\n }\n f.write(json.dumps(projects_dict))\n\n @app.get(\"/registry\")\n def read_registry():\n return Response(\n content=registry_proto.SerializeToString(),\n media_type=\"application/octet-stream\",\n )\n\n # 
For all other paths (such as paths that would otherwise be handled by react router), pass to React\n @app.api_route(\"/p/{path_name:path}\", methods=[\"GET\"])\n def catch_all():\n filename = ui_dir + \"index.html\"\n\n with open(filename) as f:\n content = f.read()\n\n return Response(content, media_type=\"text/html\")\n\n app.mount(\n \"/\",\n StaticFiles(directory=ui_dir, html=True),\n name=\"site\",\n )\n\n return app\n\n\ndef start_server(\n store: \"feast.FeatureStore\",\n host: str,\n port: int,\n get_registry_dump: Callable,\n project_id: str,\n registry_ttl_sec: int,\n root_path: str = \"\",\n):\n app = get_app(\n store,\n project_id,\n registry_ttl_sec,\n root_path,\n )\n uvicorn.run(app, host=host, port=port)\n", "path": "sdk/python/feast/ui_server.py"}]} | 1,669 | 138 |
gh_patches_debug_26525 | rasdani/github-patches | git_diff | freedomofpress__securedrop-6485 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`map_locale_display_names()` does not set `use_display_name` for the first of multiple locales for a language
## Description
For languages like `pt` and `zh` for which we now have multiple translated locales, `map_locale_display_names()` sets `use_display_name` for only the second and following locales. Prior to an overzealous refactoring in #6406, it would be set for all locales for a multi-locale language.
## Steps to Reproduce
Reported by @deeplow in <https://forum.securedrop.org/t/can-pt-br-become-portugues-brasil-instead-of-just-portugues/1455>. *Test cases TK.*
## Expected Behavior
* `pt_BR` = `Português (Brasil)`
* `pt_PT` = `Português (Portugal)`
## Actual Behavior
* `pt_BR` = `Português`
* `pt_PT` = `Português (Portugal)`
</issue>
<code>
[start of securedrop/i18n.py]
1 #
2 # SecureDrop whistleblower submission system
3 # Copyright (C) 2017 Loic Dachary <[email protected]>
4 #
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU Affero General Public License for more details.
14 #
15 # You should have received a copy of the GNU Affero General Public License
16 # along with this program. If not, see <http://www.gnu.org/licenses/>.
17 #
18 import collections
19 from typing import List, Set
20
21 from babel.core import (
22 Locale,
23 UnknownLocaleError,
24 get_locale_identifier,
25 negotiate_locale,
26 parse_locale,
27 )
28 from flask import Flask, g, request, session
29 from flask_babel import Babel
30 from sdconfig import FALLBACK_LOCALE, SDConfig
31
32
33 class RequestLocaleInfo:
34 """
35 Convenience wrapper around a babel.core.Locale.
36 """
37
38 def __init__(self, locale: str):
39 self.locale = Locale.parse(locale)
40
41 # This attribute can be set to `True` to differentiate multiple
42 # locales currently available (supported) for the same language.
43 self.use_display_name = False
44
45 def __str__(self) -> str:
46 """
47 The Babel string representation of the locale.
48 """
49 return str(self.locale)
50
51 @property
52 def display_name(self) -> str:
53 """
54 Give callers (i.e., templates) the `Locale` object's display name when
55 such resolution is warranted, otherwise the language name---as
56 determined by `map_locale_display_names()`.
57 """
58 if self.use_display_name:
59 return self.locale.display_name
60 return self.locale.language_name
61
62 @property
63 def text_direction(self) -> str:
64 """
65 The Babel text direction: ltr or rtl.
66
67 Used primarily to set text direction in HTML via the "dir"
68 attribute.
69 """
70 return self.locale.text_direction
71
72 @property
73 def language(self) -> str:
74 """
75 The Babel language name.
76
77 Just the language, without subtag info like region or script.
78 """
79 return self.locale.language
80
81 @property
82 def id(self) -> str:
83 """
84 The Babel string representation of the locale.
85
86 This should match the name of the directory containing its
87 translations.
88 """
89 return str(self.locale)
90
91 @property
92 def language_tag(self) -> str:
93 """
94 Returns a BCP47/RFC5646 language tag for the locale.
95
96 Language tags are used in HTTP headers and the HTML lang
97 attribute.
98 """
99 return get_locale_identifier(parse_locale(str(self.locale)), sep="-")
100
101
102 def configure_babel(config: SDConfig, app: Flask) -> Babel:
103 """
104 Set up Flask-Babel according to the SecureDrop configuration.
105 """
106 # Tell Babel where to find our translations.
107 translations_directory = str(config.TRANSLATION_DIRS.absolute())
108 app.config["BABEL_TRANSLATION_DIRECTORIES"] = translations_directory
109
110 # Create the app's Babel instance. Passing the app to the
111 # constructor causes the instance to attach itself to the app.
112 babel = Babel(app)
113
114 # verify that Babel is only using the translations we told it about
115 if list(babel.translation_directories) != [translations_directory]:
116 raise ValueError(
117 "Babel translation directories ({}) do not match SecureDrop configuration ({})".format(
118 babel.translation_directories, [translations_directory]
119 )
120 )
121
122 # register the function used to determine the locale of a request
123 babel.localeselector(lambda: get_locale(config))
124 return babel
125
126
127 def parse_locale_set(codes: List[str]) -> Set[Locale]:
128 return {Locale.parse(code) for code in codes}
129
130
131 def validate_locale_configuration(config: SDConfig, babel: Babel) -> None:
132 """
133 Check that configured locales are available in the filesystem and therefore usable by
134 Babel. Warn about configured locales that are not usable, unless we're left with
135 no usable default or fallback locale, in which case raise an exception.
136 """
137 # These locales are available and loadable from the filesystem.
138 available = set(babel.list_translations())
139 available.add(Locale.parse(FALLBACK_LOCALE))
140
141 # These locales were configured via "securedrop-admin sdconfig", meaning
142 # they were present on the Admin Workstation at "securedrop-admin" runtime.
143 configured = parse_locale_set(config.SUPPORTED_LOCALES)
144
145 # The intersection of these sets is the set of locales usable by Babel.
146 usable = available & configured
147
148 missing = configured - usable
149 if missing:
150 babel.app.logger.error(
151 f"Configured locales {missing} are not in the set of usable locales {usable}"
152 )
153
154 defaults = parse_locale_set([config.DEFAULT_LOCALE, FALLBACK_LOCALE])
155 if not defaults & usable:
156 raise ValueError(
157 f"None of the default locales {defaults} are in the set of usable locales {usable}"
158 )
159
160 global USABLE_LOCALES
161 USABLE_LOCALES = usable
162
163
164 # TODO(#6420): avoid relying on and manipulating on this global state
165 LOCALES = collections.OrderedDict() # type: collections.OrderedDict[str, RequestLocaleInfo]
166 USABLE_LOCALES = set() # type: Set[Locale]
167
168
169 def map_locale_display_names(config: SDConfig) -> None:
170 """
171 Create a map of locale identifiers to names for display.
172
173 For most of our supported languages, we only provide one
174 translation, so including the full display name is not necessary
175 to distinguish them. For languages with more than one translation,
176 like Chinese, we do need the additional detail.
177 """
178 seen: Set[str] = set()
179 locale_map = collections.OrderedDict()
180 for l in sorted(config.SUPPORTED_LOCALES):
181 if Locale.parse(l) not in USABLE_LOCALES:
182 continue
183
184 locale = RequestLocaleInfo(l)
185 if locale.language in seen:
186 # Disambiguate translations for this language.
187 locale.use_display_name = True
188 else:
189 seen.add(locale.language)
190
191 locale_map[str(locale)] = locale
192
193 global LOCALES
194 LOCALES = locale_map
195
196
197 def configure(config: SDConfig, app: Flask) -> None:
198 babel = configure_babel(config, app)
199 validate_locale_configuration(config, babel)
200 map_locale_display_names(config)
201
202
203 def get_locale(config: SDConfig) -> str:
204 """
205 Return the best supported locale for a request.
206
207 Get the locale as follows, by order of precedence:
208 - l request argument or session['locale']
209 - browser suggested locale, from the Accept-Languages header
210 - config.DEFAULT_LOCALE
211 - config.FALLBACK_LOCALE
212 """
213 preferences = []
214 if session.get("locale"):
215 preferences.append(session.get("locale"))
216 if request.args.get("l"):
217 preferences.insert(0, request.args.get("l"))
218 if not preferences:
219 preferences.extend(get_accepted_languages())
220 preferences.append(config.DEFAULT_LOCALE)
221 preferences.append(FALLBACK_LOCALE)
222
223 negotiated = negotiate_locale(preferences, LOCALES.keys())
224
225 if not negotiated:
226 raise ValueError("No usable locale")
227
228 return negotiated
229
230
231 def get_accepted_languages() -> List[str]:
232 """
233 Convert a request's list of accepted languages into locale identifiers.
234 """
235 accept_languages = []
236 for l in request.accept_languages.values():
237 try:
238 parsed = Locale.parse(l, "-")
239 accept_languages.append(str(parsed))
240
241 # We only have two Chinese translations, simplified
242 # and traditional, based on script and not
243 # region. Browsers tend to send identifiers with
244 # region, e.g. zh-CN or zh-TW. Babel can generally
245 # infer the script from those, so we can fabricate a
246 # fallback entry without region, in the hope that it
247 # will match one of our translations and the site will
248 # at least be more legible at first contact than the
249 # probable default locale of English.
250 if parsed.language == "zh" and parsed.script:
251 accept_languages.append(str(Locale(language=parsed.language, script=parsed.script)))
252 except (ValueError, UnknownLocaleError):
253 pass
254 return accept_languages
255
256
257 def set_locale(config: SDConfig) -> None:
258 """
259 Update locale info in request and session.
260 """
261 locale = get_locale(config)
262 g.localeinfo = RequestLocaleInfo(locale) # pylint: disable=assigning-non-slot
263 session["locale"] = locale
264 g.locales = LOCALES # pylint: disable=assigning-non-slot
265
[end of securedrop/i18n.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/securedrop/i18n.py b/securedrop/i18n.py
--- a/securedrop/i18n.py
+++ b/securedrop/i18n.py
@@ -16,7 +16,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import collections
-from typing import List, Set
+from typing import Dict, List, Set
from babel.core import (
Locale,
@@ -175,18 +175,21 @@
to distinguish them. For languages with more than one translation,
like Chinese, we do need the additional detail.
"""
- seen: Set[str] = set()
+
+ language_locale_counts = collections.defaultdict(int) # type: Dict[str, int]
+ for l in sorted(config.SUPPORTED_LOCALES):
+ locale = RequestLocaleInfo(l)
+ language_locale_counts[locale.language] += 1
+
locale_map = collections.OrderedDict()
for l in sorted(config.SUPPORTED_LOCALES):
if Locale.parse(l) not in USABLE_LOCALES:
continue
locale = RequestLocaleInfo(l)
- if locale.language in seen:
+ if language_locale_counts[locale.language] > 1:
# Disambiguate translations for this language.
locale.use_display_name = True
- else:
- seen.add(locale.language)
locale_map[str(locale)] = locale
| {"golden_diff": "diff --git a/securedrop/i18n.py b/securedrop/i18n.py\n--- a/securedrop/i18n.py\n+++ b/securedrop/i18n.py\n@@ -16,7 +16,7 @@\n # along with this program. If not, see <http://www.gnu.org/licenses/>.\n #\n import collections\n-from typing import List, Set\n+from typing import Dict, List, Set\n \n from babel.core import (\n Locale,\n@@ -175,18 +175,21 @@\n to distinguish them. For languages with more than one translation,\n like Chinese, we do need the additional detail.\n \"\"\"\n- seen: Set[str] = set()\n+\n+ language_locale_counts = collections.defaultdict(int) # type: Dict[str, int]\n+ for l in sorted(config.SUPPORTED_LOCALES):\n+ locale = RequestLocaleInfo(l)\n+ language_locale_counts[locale.language] += 1\n+\n locale_map = collections.OrderedDict()\n for l in sorted(config.SUPPORTED_LOCALES):\n if Locale.parse(l) not in USABLE_LOCALES:\n continue\n \n locale = RequestLocaleInfo(l)\n- if locale.language in seen:\n+ if language_locale_counts[locale.language] > 1:\n # Disambiguate translations for this language.\n locale.use_display_name = True\n- else:\n- seen.add(locale.language)\n \n locale_map[str(locale)] = locale\n", "issue": "`map_locale_display_names()` does not set `use_display_name` for the first of multiple locales for a language\n## Description\r\n\r\nFor languages like `pt` and `zh` for which we now have multiple translated locales, `map_locale_display_names()` sets `use_display_name` for only the second and following locales. Prior to an overzealous refactoring in #6406, it would be set for all locales for a multi-locale language.\r\n\r\n## Steps to Reproduce\r\n\r\nReported by @deeplow in <https://forum.securedrop.org/t/can-pt-br-become-portugues-brasil-instead-of-just-portugues/1455>. *Test cases TK.*\r\n\r\n## Expected Behavior\r\n\r\n* `pt_BR` = `Portugu\u00eas (Brasil)`\r\n* `pt_PT` = `Portugu\u00eas (Portugal)`\r\n\r\n## Actual Behavior\r\n\r\n* `pt_BR` = `Portugu\u00eas`\r\n* `pt_PT` = `Portugu\u00eas (Portugal)`\r\n\n", "before_files": [{"content": "#\n# SecureDrop whistleblower submission system\n# Copyright (C) 2017 Loic Dachary <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\nimport collections\nfrom typing import List, Set\n\nfrom babel.core import (\n Locale,\n UnknownLocaleError,\n get_locale_identifier,\n negotiate_locale,\n parse_locale,\n)\nfrom flask import Flask, g, request, session\nfrom flask_babel import Babel\nfrom sdconfig import FALLBACK_LOCALE, SDConfig\n\n\nclass RequestLocaleInfo:\n \"\"\"\n Convenience wrapper around a babel.core.Locale.\n \"\"\"\n\n def __init__(self, locale: str):\n self.locale = Locale.parse(locale)\n\n # This attribute can be set to `True` to differentiate multiple\n # locales currently available (supported) for the same language.\n self.use_display_name = False\n\n def __str__(self) -> str:\n \"\"\"\n The Babel string representation of the locale.\n \"\"\"\n return str(self.locale)\n\n @property\n def display_name(self) -> str:\n \"\"\"\n Give callers (i.e., templates) the `Locale` object's display name when\n such resolution is warranted, otherwise the language name---as\n determined by `map_locale_display_names()`.\n \"\"\"\n if self.use_display_name:\n return self.locale.display_name\n return self.locale.language_name\n\n @property\n def text_direction(self) -> str:\n \"\"\"\n The Babel text direction: ltr or rtl.\n\n Used primarily to set text direction in HTML via the \"dir\"\n attribute.\n \"\"\"\n return self.locale.text_direction\n\n @property\n def language(self) -> str:\n \"\"\"\n The Babel language name.\n\n Just the language, without subtag info like region or script.\n \"\"\"\n return self.locale.language\n\n @property\n def id(self) -> str:\n \"\"\"\n The Babel string representation of the locale.\n\n This should match the name of the directory containing its\n translations.\n \"\"\"\n return str(self.locale)\n\n @property\n def language_tag(self) -> str:\n \"\"\"\n Returns a BCP47/RFC5646 language tag for the locale.\n\n Language tags are used in HTTP headers and the HTML lang\n attribute.\n \"\"\"\n return get_locale_identifier(parse_locale(str(self.locale)), sep=\"-\")\n\n\ndef configure_babel(config: SDConfig, app: Flask) -> Babel:\n \"\"\"\n Set up Flask-Babel according to the SecureDrop configuration.\n \"\"\"\n # Tell Babel where to find our translations.\n translations_directory = str(config.TRANSLATION_DIRS.absolute())\n app.config[\"BABEL_TRANSLATION_DIRECTORIES\"] = translations_directory\n\n # Create the app's Babel instance. Passing the app to the\n # constructor causes the instance to attach itself to the app.\n babel = Babel(app)\n\n # verify that Babel is only using the translations we told it about\n if list(babel.translation_directories) != [translations_directory]:\n raise ValueError(\n \"Babel translation directories ({}) do not match SecureDrop configuration ({})\".format(\n babel.translation_directories, [translations_directory]\n )\n )\n\n # register the function used to determine the locale of a request\n babel.localeselector(lambda: get_locale(config))\n return babel\n\n\ndef parse_locale_set(codes: List[str]) -> Set[Locale]:\n return {Locale.parse(code) for code in codes}\n\n\ndef validate_locale_configuration(config: SDConfig, babel: Babel) -> None:\n \"\"\"\n Check that configured locales are available in the filesystem and therefore usable by\n Babel. 
Warn about configured locales that are not usable, unless we're left with\n no usable default or fallback locale, in which case raise an exception.\n \"\"\"\n # These locales are available and loadable from the filesystem.\n available = set(babel.list_translations())\n available.add(Locale.parse(FALLBACK_LOCALE))\n\n # These locales were configured via \"securedrop-admin sdconfig\", meaning\n # they were present on the Admin Workstation at \"securedrop-admin\" runtime.\n configured = parse_locale_set(config.SUPPORTED_LOCALES)\n\n # The intersection of these sets is the set of locales usable by Babel.\n usable = available & configured\n\n missing = configured - usable\n if missing:\n babel.app.logger.error(\n f\"Configured locales {missing} are not in the set of usable locales {usable}\"\n )\n\n defaults = parse_locale_set([config.DEFAULT_LOCALE, FALLBACK_LOCALE])\n if not defaults & usable:\n raise ValueError(\n f\"None of the default locales {defaults} are in the set of usable locales {usable}\"\n )\n\n global USABLE_LOCALES\n USABLE_LOCALES = usable\n\n\n# TODO(#6420): avoid relying on and manipulating on this global state\nLOCALES = collections.OrderedDict() # type: collections.OrderedDict[str, RequestLocaleInfo]\nUSABLE_LOCALES = set() # type: Set[Locale]\n\n\ndef map_locale_display_names(config: SDConfig) -> None:\n \"\"\"\n Create a map of locale identifiers to names for display.\n\n For most of our supported languages, we only provide one\n translation, so including the full display name is not necessary\n to distinguish them. For languages with more than one translation,\n like Chinese, we do need the additional detail.\n \"\"\"\n seen: Set[str] = set()\n locale_map = collections.OrderedDict()\n for l in sorted(config.SUPPORTED_LOCALES):\n if Locale.parse(l) not in USABLE_LOCALES:\n continue\n\n locale = RequestLocaleInfo(l)\n if locale.language in seen:\n # Disambiguate translations for this language.\n locale.use_display_name = True\n else:\n seen.add(locale.language)\n\n locale_map[str(locale)] = locale\n\n global LOCALES\n LOCALES = locale_map\n\n\ndef configure(config: SDConfig, app: Flask) -> None:\n babel = configure_babel(config, app)\n validate_locale_configuration(config, babel)\n map_locale_display_names(config)\n\n\ndef get_locale(config: SDConfig) -> str:\n \"\"\"\n Return the best supported locale for a request.\n\n Get the locale as follows, by order of precedence:\n - l request argument or session['locale']\n - browser suggested locale, from the Accept-Languages header\n - config.DEFAULT_LOCALE\n - config.FALLBACK_LOCALE\n \"\"\"\n preferences = []\n if session.get(\"locale\"):\n preferences.append(session.get(\"locale\"))\n if request.args.get(\"l\"):\n preferences.insert(0, request.args.get(\"l\"))\n if not preferences:\n preferences.extend(get_accepted_languages())\n preferences.append(config.DEFAULT_LOCALE)\n preferences.append(FALLBACK_LOCALE)\n\n negotiated = negotiate_locale(preferences, LOCALES.keys())\n\n if not negotiated:\n raise ValueError(\"No usable locale\")\n\n return negotiated\n\n\ndef get_accepted_languages() -> List[str]:\n \"\"\"\n Convert a request's list of accepted languages into locale identifiers.\n \"\"\"\n accept_languages = []\n for l in request.accept_languages.values():\n try:\n parsed = Locale.parse(l, \"-\")\n accept_languages.append(str(parsed))\n\n # We only have two Chinese translations, simplified\n # and traditional, based on script and not\n # region. Browsers tend to send identifiers with\n # region, e.g. zh-CN or zh-TW. 
Babel can generally\n # infer the script from those, so we can fabricate a\n # fallback entry without region, in the hope that it\n # will match one of our translations and the site will\n # at least be more legible at first contact than the\n # probable default locale of English.\n if parsed.language == \"zh\" and parsed.script:\n accept_languages.append(str(Locale(language=parsed.language, script=parsed.script)))\n except (ValueError, UnknownLocaleError):\n pass\n return accept_languages\n\n\ndef set_locale(config: SDConfig) -> None:\n \"\"\"\n Update locale info in request and session.\n \"\"\"\n locale = get_locale(config)\n g.localeinfo = RequestLocaleInfo(locale) # pylint: disable=assigning-non-slot\n session[\"locale\"] = locale\n g.locales = LOCALES # pylint: disable=assigning-non-slot\n", "path": "securedrop/i18n.py"}]} | 3,403 | 321 |
gh_patches_debug_37912 | rasdani/github-patches | git_diff | tournesol-app__tournesol-155 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Count ratings appropriately
If a contributor rates A versus B on 9 quality criteria, this should count as 9 ratings.
The home page statistics should reflect this, on not the number of times a contributor rated A versus B :)
</issue>
<code>
[start of backend/backend/api_v2/statistics.py]
1 from backend.models import ExpertRating, Video, UserInformation
2 from drf_spectacular.utils import extend_schema
3 from rest_framework import serializers
4 from rest_framework import viewsets
5 from rest_framework.decorators import action
6 from rest_framework.permissions import IsAuthenticatedOrReadOnly
7 from rest_framework.response import Response
8 from backend.rating_fields import VIDEO_FIELDS
9 from django.db.models import Min, Max, F, Q
10 from backend.api_v2.helpers import WithPKOverflowProtection
11 import datetime
12 from django.utils.timezone import make_aware
13
14
15 class StatisticsSerializerV2(serializers.Serializer):
16 """Serialize statistics for the website."""
17 certified_experts = serializers.IntegerField(
18 help_text="Number of experts with certified e-mails")
19 total_experts = serializers.IntegerField(
20 help_text="Number of all experts")
21 pairwise_comparisons = serializers.IntegerField(
22 help_text="Total number of pairwise comparisons")
23 videos = serializers.IntegerField(
24 help_text="Total number of videos in the database")
25 min_score = serializers.FloatField(
26 help_text="Minimal aggregated score over all videos and features")
27 max_score = serializers.FloatField(
28 help_text="Maximal aggregated score over all videos and features")
29 weekly_active_ratings = serializers.IntegerField(
30 help_text="Number of ratings added within a week")
31 n_rated_videos = serializers.IntegerField(
32 help_text="Total number of videos with ratings")
33
34
35 class StatisticsViewSetV2(viewsets.ViewSet, WithPKOverflowProtection):
36 """Show website statistics."""
37 serializer_class = StatisticsSerializerV2
38 permission_classes = [IsAuthenticatedOrReadOnly]
39
40 # need a list, otherwise router will not register this viewset
41 @extend_schema(exclude=True, responses={
42 200: StatisticsSerializerV2(
43 many=True),
44 400: None})
45 def list(self, request):
46 return Response({})
47
48 @extend_schema(
49 responses={
50 200: StatisticsSerializerV2(
51 many=False)},
52 operation_id="view")
53 @action(methods=['GET'], detail=False)
54 def view(self, request):
55 """Get statistics for the website."""
56 minmax_scores = \
57 Video.objects.aggregate(**{'max_' + f: Max(F(f)) for f in VIDEO_FIELDS},
58 **{'min_' + f: Min(F(f)) for f in VIDEO_FIELDS})
59
60 try:
61 min_score = min([v for k, v in minmax_scores.items() if k.startswith('min')])
62 max_score = max([v for k, v in minmax_scores.items() if k.startswith('max')])
63 except Exception:
64 min_score = 0.0
65 max_score = 0.0
66
67 date_week_ago = make_aware(datetime.datetime.now()) - datetime.timedelta(days=7)
68
69 data = {'certified_experts': UserInformation.
70 _annotate_is_certified(UserInformation.objects.all())
71 .filter(_is_certified=1, user__is_active=True).count(),
72 'pairwise_comparisons': ExpertRating.objects.all().count(),
73 'videos': Video.objects.all().count(),
74 'min_score': min_score,
75 'max_score': max_score,
76 'total_experts': UserInformation.objects.filter(is_demo=False).count(),
77 'weekly_active_ratings': ExpertRating.objects.filter(
78 datetime_lastedit__gte=date_week_ago).count(),
79 'n_rated_videos': Video.objects.exclude(Q(expertrating_video_1__id=None) &
80 Q(expertrating_video_2__id=None)
81 ).distinct().count()
82 }
83
84 return Response(StatisticsSerializerV2(data, many=False).data)
85
[end of backend/backend/api_v2/statistics.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend/backend/api_v2/statistics.py b/backend/backend/api_v2/statistics.py
--- a/backend/backend/api_v2/statistics.py
+++ b/backend/backend/api_v2/statistics.py
@@ -12,24 +12,35 @@
from django.utils.timezone import make_aware
-class StatisticsSerializerV2(serializers.Serializer):
- """Serialize statistics for the website."""
- certified_experts = serializers.IntegerField(
- help_text="Number of experts with certified e-mails")
- total_experts = serializers.IntegerField(
- help_text="Number of all experts")
- pairwise_comparisons = serializers.IntegerField(
- help_text="Total number of pairwise comparisons")
- videos = serializers.IntegerField(
- help_text="Total number of videos in the database")
- min_score = serializers.FloatField(
- help_text="Minimal aggregated score over all videos and features")
- max_score = serializers.FloatField(
- help_text="Maximal aggregated score over all videos and features")
- weekly_active_ratings = serializers.IntegerField(
- help_text="Number of ratings added within a week")
- n_rated_videos = serializers.IntegerField(
- help_text="Total number of videos with ratings")
+StatisticsSerializerV2 = type(
+ 'StatisticsSerializerV2', (serializers.Serializer,),
+ {**dict(
+ __doc__="""Serialize statistics for the website.""",
+ certified_experts=serializers.IntegerField(
+ help_text="Number of experts with certified e-mails"),
+ total_experts=serializers.IntegerField(
+ help_text="Number of all experts"),
+ pairwise_comparisons=serializers.IntegerField(
+ help_text="Total number of pairwise comparisons"),
+ videos=serializers.IntegerField(
+ help_text="Total number of videos in the database"),
+ min_score=serializers.FloatField(
+ help_text="Minimal aggregated score over all videos and features"),
+ max_score=serializers.FloatField(
+ help_text="Maximal aggregated score over all videos and features"),
+ weekly_active_ratings=serializers.IntegerField(
+ help_text="Number of ratings added within a week"),
+ n_rated_videos=serializers.IntegerField(
+ help_text="Total number of videos with ratings"),
+
+ n_sum_comparisons=serializers.IntegerField(
+ help_text="Sum of all numbers of comparisons for all features"),
+ ),
+ **{f"n_{f}_comparisons": serializers.IntegerField(
+ help_text=f"Number of comparisons for {f}")
+ for f in VIDEO_FIELDS}
+ }
+)
class StatisticsViewSetV2(viewsets.ViewSet, WithPKOverflowProtection):
@@ -81,4 +92,13 @@
).distinct().count()
}
+ n_sum_comparisons = 0
+ for f in VIDEO_FIELDS:
+ val = ExpertRating.objects.filter(**{
+ f + '__isnull': False, f + '_weight__gt': 0}).distinct().count()
+ data[f"n_{f}_comparisons"] = val
+ n_sum_comparisons += val
+
+ data["n_sum_comparisons"] = n_sum_comparisons
+
return Response(StatisticsSerializerV2(data, many=False).data)
| {"golden_diff": "diff --git a/backend/backend/api_v2/statistics.py b/backend/backend/api_v2/statistics.py\n--- a/backend/backend/api_v2/statistics.py\n+++ b/backend/backend/api_v2/statistics.py\n@@ -12,24 +12,35 @@\n from django.utils.timezone import make_aware\r\n \r\n \r\n-class StatisticsSerializerV2(serializers.Serializer):\r\n- \"\"\"Serialize statistics for the website.\"\"\"\r\n- certified_experts = serializers.IntegerField(\r\n- help_text=\"Number of experts with certified e-mails\")\r\n- total_experts = serializers.IntegerField(\r\n- help_text=\"Number of all experts\")\r\n- pairwise_comparisons = serializers.IntegerField(\r\n- help_text=\"Total number of pairwise comparisons\")\r\n- videos = serializers.IntegerField(\r\n- help_text=\"Total number of videos in the database\")\r\n- min_score = serializers.FloatField(\r\n- help_text=\"Minimal aggregated score over all videos and features\")\r\n- max_score = serializers.FloatField(\r\n- help_text=\"Maximal aggregated score over all videos and features\")\r\n- weekly_active_ratings = serializers.IntegerField(\r\n- help_text=\"Number of ratings added within a week\")\r\n- n_rated_videos = serializers.IntegerField(\r\n- help_text=\"Total number of videos with ratings\")\r\n+StatisticsSerializerV2 = type(\r\n+ 'StatisticsSerializerV2', (serializers.Serializer,),\r\n+ {**dict(\r\n+ __doc__=\"\"\"Serialize statistics for the website.\"\"\",\r\n+ certified_experts=serializers.IntegerField(\r\n+ help_text=\"Number of experts with certified e-mails\"),\r\n+ total_experts=serializers.IntegerField(\r\n+ help_text=\"Number of all experts\"),\r\n+ pairwise_comparisons=serializers.IntegerField(\r\n+ help_text=\"Total number of pairwise comparisons\"),\r\n+ videos=serializers.IntegerField(\r\n+ help_text=\"Total number of videos in the database\"),\r\n+ min_score=serializers.FloatField(\r\n+ help_text=\"Minimal aggregated score over all videos and features\"),\r\n+ max_score=serializers.FloatField(\r\n+ help_text=\"Maximal aggregated score over all videos and features\"),\r\n+ weekly_active_ratings=serializers.IntegerField(\r\n+ help_text=\"Number of ratings added within a week\"),\r\n+ n_rated_videos=serializers.IntegerField(\r\n+ help_text=\"Total number of videos with ratings\"),\r\n+\r\n+ n_sum_comparisons=serializers.IntegerField(\r\n+ help_text=\"Sum of all numbers of comparisons for all features\"),\r\n+ ),\r\n+ **{f\"n_{f}_comparisons\": serializers.IntegerField(\r\n+ help_text=f\"Number of comparisons for {f}\")\r\n+ for f in VIDEO_FIELDS}\r\n+ }\r\n+)\r\n \r\n \r\n class StatisticsViewSetV2(viewsets.ViewSet, WithPKOverflowProtection):\r\n@@ -81,4 +92,13 @@\n ).distinct().count()\r\n }\r\n \r\n+ n_sum_comparisons = 0\r\n+ for f in VIDEO_FIELDS:\r\n+ val = ExpertRating.objects.filter(**{\r\n+ f + '__isnull': False, f + '_weight__gt': 0}).distinct().count()\r\n+ data[f\"n_{f}_comparisons\"] = val\r\n+ n_sum_comparisons += val\r\n+\r\n+ data[\"n_sum_comparisons\"] = n_sum_comparisons\r\n+\r\n return Response(StatisticsSerializerV2(data, many=False).data)\n", "issue": "Count ratings appropriately\nIf a contributor rates A versus B on 9 quality criteria, this should count as 9 ratings.\r\nThe home page statistics should reflect this, on not the number of times a contributor rated A versus B :)\n", "before_files": [{"content": "from backend.models import ExpertRating, Video, UserInformation\r\nfrom drf_spectacular.utils import extend_schema\r\nfrom rest_framework import serializers\r\nfrom rest_framework import viewsets\r\nfrom 
rest_framework.decorators import action\r\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\r\nfrom rest_framework.response import Response\r\nfrom backend.rating_fields import VIDEO_FIELDS\r\nfrom django.db.models import Min, Max, F, Q\r\nfrom backend.api_v2.helpers import WithPKOverflowProtection\r\nimport datetime\r\nfrom django.utils.timezone import make_aware\r\n\r\n\r\nclass StatisticsSerializerV2(serializers.Serializer):\r\n \"\"\"Serialize statistics for the website.\"\"\"\r\n certified_experts = serializers.IntegerField(\r\n help_text=\"Number of experts with certified e-mails\")\r\n total_experts = serializers.IntegerField(\r\n help_text=\"Number of all experts\")\r\n pairwise_comparisons = serializers.IntegerField(\r\n help_text=\"Total number of pairwise comparisons\")\r\n videos = serializers.IntegerField(\r\n help_text=\"Total number of videos in the database\")\r\n min_score = serializers.FloatField(\r\n help_text=\"Minimal aggregated score over all videos and features\")\r\n max_score = serializers.FloatField(\r\n help_text=\"Maximal aggregated score over all videos and features\")\r\n weekly_active_ratings = serializers.IntegerField(\r\n help_text=\"Number of ratings added within a week\")\r\n n_rated_videos = serializers.IntegerField(\r\n help_text=\"Total number of videos with ratings\")\r\n\r\n\r\nclass StatisticsViewSetV2(viewsets.ViewSet, WithPKOverflowProtection):\r\n \"\"\"Show website statistics.\"\"\"\r\n serializer_class = StatisticsSerializerV2\r\n permission_classes = [IsAuthenticatedOrReadOnly]\r\n\r\n # need a list, otherwise router will not register this viewset\r\n @extend_schema(exclude=True, responses={\r\n 200: StatisticsSerializerV2(\r\n many=True),\r\n 400: None})\r\n def list(self, request):\r\n return Response({})\r\n\r\n @extend_schema(\r\n responses={\r\n 200: StatisticsSerializerV2(\r\n many=False)},\r\n operation_id=\"view\")\r\n @action(methods=['GET'], detail=False)\r\n def view(self, request):\r\n \"\"\"Get statistics for the website.\"\"\"\r\n minmax_scores = \\\r\n Video.objects.aggregate(**{'max_' + f: Max(F(f)) for f in VIDEO_FIELDS},\r\n **{'min_' + f: Min(F(f)) for f in VIDEO_FIELDS})\r\n\r\n try:\r\n min_score = min([v for k, v in minmax_scores.items() if k.startswith('min')])\r\n max_score = max([v for k, v in minmax_scores.items() if k.startswith('max')])\r\n except Exception:\r\n min_score = 0.0\r\n max_score = 0.0\r\n\r\n date_week_ago = make_aware(datetime.datetime.now()) - datetime.timedelta(days=7)\r\n\r\n data = {'certified_experts': UserInformation.\r\n _annotate_is_certified(UserInformation.objects.all())\r\n .filter(_is_certified=1, user__is_active=True).count(),\r\n 'pairwise_comparisons': ExpertRating.objects.all().count(),\r\n 'videos': Video.objects.all().count(),\r\n 'min_score': min_score,\r\n 'max_score': max_score,\r\n 'total_experts': UserInformation.objects.filter(is_demo=False).count(),\r\n 'weekly_active_ratings': ExpertRating.objects.filter(\r\n datetime_lastedit__gte=date_week_ago).count(),\r\n 'n_rated_videos': Video.objects.exclude(Q(expertrating_video_1__id=None) &\r\n Q(expertrating_video_2__id=None)\r\n ).distinct().count()\r\n }\r\n\r\n return Response(StatisticsSerializerV2(data, many=False).data)\r\n", "path": "backend/backend/api_v2/statistics.py"}]} | 1,515 | 705 |
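The statistics patch in the row above counts one comparison per quality criterion rather than one per A-vs-B pair: it adds an `n_<feature>_comparisons` field for every entry in VIDEO_FIELDS plus an `n_sum_comparisons` total. A minimal, framework-free sketch of that aggregation (the dict-based signature below is illustrative, not the project's actual API):

def add_comparison_counts(data, per_feature_counts):
    # per_feature_counts maps each quality criterion to its number of
    # weighted comparisons (weight > 0), as computed from ExpertRating rows.
    total = 0
    for feature, count in per_feature_counts.items():
        data[f"n_{feature}_comparisons"] = count
        total += count
    data["n_sum_comparisons"] = total
    return data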
gh_patches_debug_22007 | rasdani/github-patches | git_diff | CTFd__CTFd-2074 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cascading Hints
Hints should have a sense of unlocking, where one hint cannot be used until one or more previous hints have been used.
</issue>
<code>
[start of CTFd/api/v1/hints.py]
1 from typing import List
2
3 from flask import request
4 from flask_restx import Namespace, Resource
5
6 from CTFd.api.v1.helpers.request import validate_args
7 from CTFd.api.v1.helpers.schemas import sqlalchemy_to_pydantic
8 from CTFd.api.v1.schemas import APIDetailedSuccessResponse, APIListSuccessResponse
9 from CTFd.constants import RawEnum
10 from CTFd.models import Hints, HintUnlocks, db
11 from CTFd.schemas.hints import HintSchema
12 from CTFd.utils.decorators import admins_only, authed_only, during_ctf_time_only
13 from CTFd.utils.helpers.models import build_model_filters
14 from CTFd.utils.user import get_current_user, is_admin
15
16 hints_namespace = Namespace("hints", description="Endpoint to retrieve Hints")
17
18 HintModel = sqlalchemy_to_pydantic(Hints)
19
20
21 class HintDetailedSuccessResponse(APIDetailedSuccessResponse):
22 data: HintModel
23
24
25 class HintListSuccessResponse(APIListSuccessResponse):
26 data: List[HintModel]
27
28
29 hints_namespace.schema_model(
30 "HintDetailedSuccessResponse", HintDetailedSuccessResponse.apidoc()
31 )
32
33 hints_namespace.schema_model(
34 "HintListSuccessResponse", HintListSuccessResponse.apidoc()
35 )
36
37
38 @hints_namespace.route("")
39 class HintList(Resource):
40 @admins_only
41 @hints_namespace.doc(
42 description="Endpoint to list Hint objects in bulk",
43 responses={
44 200: ("Success", "HintListSuccessResponse"),
45 400: (
46 "An error occured processing the provided or stored data",
47 "APISimpleErrorResponse",
48 ),
49 },
50 )
51 @validate_args(
52 {
53 "type": (str, None),
54 "challenge_id": (int, None),
55 "content": (str, None),
56 "cost": (int, None),
57 "q": (str, None),
58 "field": (
59 RawEnum("HintFields", {"type": "type", "content": "content"}),
60 None,
61 ),
62 },
63 location="query",
64 )
65 def get(self, query_args):
66 q = query_args.pop("q", None)
67 field = str(query_args.pop("field", None))
68 filters = build_model_filters(model=Hints, query=q, field=field)
69
70 hints = Hints.query.filter_by(**query_args).filter(*filters).all()
71 response = HintSchema(many=True, view="locked").dump(hints)
72
73 if response.errors:
74 return {"success": False, "errors": response.errors}, 400
75
76 return {"success": True, "data": response.data}
77
78 @admins_only
79 @hints_namespace.doc(
80 description="Endpoint to create a Hint object",
81 responses={
82 200: ("Success", "HintDetailedSuccessResponse"),
83 400: (
84 "An error occured processing the provided or stored data",
85 "APISimpleErrorResponse",
86 ),
87 },
88 )
89 def post(self):
90 req = request.get_json()
91 schema = HintSchema(view="admin")
92 response = schema.load(req, session=db.session)
93
94 if response.errors:
95 return {"success": False, "errors": response.errors}, 400
96
97 db.session.add(response.data)
98 db.session.commit()
99
100 response = schema.dump(response.data)
101
102 return {"success": True, "data": response.data}
103
104
105 @hints_namespace.route("/<hint_id>")
106 class Hint(Resource):
107 @during_ctf_time_only
108 @authed_only
109 @hints_namespace.doc(
110 description="Endpoint to get a specific Hint object",
111 responses={
112 200: ("Success", "HintDetailedSuccessResponse"),
113 400: (
114 "An error occured processing the provided or stored data",
115 "APISimpleErrorResponse",
116 ),
117 },
118 )
119 def get(self, hint_id):
120 user = get_current_user()
121 hint = Hints.query.filter_by(id=hint_id).first_or_404()
122
123 view = "unlocked"
124 if hint.cost:
125 view = "locked"
126 unlocked = HintUnlocks.query.filter_by(
127 account_id=user.account_id, target=hint.id
128 ).first()
129 if unlocked:
130 view = "unlocked"
131
132 if is_admin():
133 if request.args.get("preview", False):
134 view = "admin"
135
136 response = HintSchema(view=view).dump(hint)
137
138 if response.errors:
139 return {"success": False, "errors": response.errors}, 400
140
141 return {"success": True, "data": response.data}
142
143 @admins_only
144 @hints_namespace.doc(
145 description="Endpoint to edit a specific Hint object",
146 responses={
147 200: ("Success", "HintDetailedSuccessResponse"),
148 400: (
149 "An error occured processing the provided or stored data",
150 "APISimpleErrorResponse",
151 ),
152 },
153 )
154 def patch(self, hint_id):
155 hint = Hints.query.filter_by(id=hint_id).first_or_404()
156 req = request.get_json()
157
158 schema = HintSchema(view="admin")
159 response = schema.load(req, instance=hint, partial=True, session=db.session)
160
161 if response.errors:
162 return {"success": False, "errors": response.errors}, 400
163
164 db.session.add(response.data)
165 db.session.commit()
166
167 response = schema.dump(response.data)
168
169 return {"success": True, "data": response.data}
170
171 @admins_only
172 @hints_namespace.doc(
173 description="Endpoint to delete a specific Tag object",
174 responses={200: ("Success", "APISimpleSuccessResponse")},
175 )
176 def delete(self, hint_id):
177 hint = Hints.query.filter_by(id=hint_id).first_or_404()
178 db.session.delete(hint)
179 db.session.commit()
180 db.session.close()
181
182 return {"success": True}
183
[end of CTFd/api/v1/hints.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/CTFd/api/v1/hints.py b/CTFd/api/v1/hints.py
--- a/CTFd/api/v1/hints.py
+++ b/CTFd/api/v1/hints.py
@@ -120,6 +120,33 @@
user = get_current_user()
hint = Hints.query.filter_by(id=hint_id).first_or_404()
+ if hint.requirements:
+ requirements = hint.requirements.get("prerequisites", [])
+
+ # Get the IDs of all hints that the user has unlocked
+ all_unlocks = HintUnlocks.query.filter_by(account_id=user.account_id).all()
+ unlock_ids = {unlock.id for unlock in all_unlocks}
+
+ # Filter out hint IDs that don't exist
+ all_hint_ids = {h.id for h in Hints.query.with_entities(Hints.id).all()}
+ prereqs = set(requirements).intersection(all_hint_ids)
+
+ # If the user has the necessary unlocks or is admin we should allow them to view
+ if unlock_ids >= prereqs or is_admin():
+ pass
+ else:
+ return (
+ {
+ "success": False,
+ "errors": {
+ "requirements": [
+ "You must unlock other hints before accessing this hint"
+ ]
+ },
+ },
+ 403,
+ )
+
view = "unlocked"
if hint.cost:
view = "locked"
| {"golden_diff": "diff --git a/CTFd/api/v1/hints.py b/CTFd/api/v1/hints.py\n--- a/CTFd/api/v1/hints.py\n+++ b/CTFd/api/v1/hints.py\n@@ -120,6 +120,33 @@\n user = get_current_user()\n hint = Hints.query.filter_by(id=hint_id).first_or_404()\n \n+ if hint.requirements:\n+ requirements = hint.requirements.get(\"prerequisites\", [])\n+\n+ # Get the IDs of all hints that the user has unlocked\n+ all_unlocks = HintUnlocks.query.filter_by(account_id=user.account_id).all()\n+ unlock_ids = {unlock.id for unlock in all_unlocks}\n+\n+ # Filter out hint IDs that don't exist\n+ all_hint_ids = {h.id for h in Hints.query.with_entities(Hints.id).all()}\n+ prereqs = set(requirements).intersection(all_hint_ids)\n+\n+ # If the user has the necessary unlocks or is admin we should allow them to view\n+ if unlock_ids >= prereqs or is_admin():\n+ pass\n+ else:\n+ return (\n+ {\n+ \"success\": False,\n+ \"errors\": {\n+ \"requirements\": [\n+ \"You must unlock other hints before accessing this hint\"\n+ ]\n+ },\n+ },\n+ 403,\n+ )\n+\n view = \"unlocked\"\n if hint.cost:\n view = \"locked\"\n", "issue": "Cascading Hints\nHints should have a sense of unlocking where one hint cannot be used until a previous one or others are used.\n", "before_files": [{"content": "from typing import List\n\nfrom flask import request\nfrom flask_restx import Namespace, Resource\n\nfrom CTFd.api.v1.helpers.request import validate_args\nfrom CTFd.api.v1.helpers.schemas import sqlalchemy_to_pydantic\nfrom CTFd.api.v1.schemas import APIDetailedSuccessResponse, APIListSuccessResponse\nfrom CTFd.constants import RawEnum\nfrom CTFd.models import Hints, HintUnlocks, db\nfrom CTFd.schemas.hints import HintSchema\nfrom CTFd.utils.decorators import admins_only, authed_only, during_ctf_time_only\nfrom CTFd.utils.helpers.models import build_model_filters\nfrom CTFd.utils.user import get_current_user, is_admin\n\nhints_namespace = Namespace(\"hints\", description=\"Endpoint to retrieve Hints\")\n\nHintModel = sqlalchemy_to_pydantic(Hints)\n\n\nclass HintDetailedSuccessResponse(APIDetailedSuccessResponse):\n data: HintModel\n\n\nclass HintListSuccessResponse(APIListSuccessResponse):\n data: List[HintModel]\n\n\nhints_namespace.schema_model(\n \"HintDetailedSuccessResponse\", HintDetailedSuccessResponse.apidoc()\n)\n\nhints_namespace.schema_model(\n \"HintListSuccessResponse\", HintListSuccessResponse.apidoc()\n)\n\n\n@hints_namespace.route(\"\")\nclass HintList(Resource):\n @admins_only\n @hints_namespace.doc(\n description=\"Endpoint to list Hint objects in bulk\",\n responses={\n 200: (\"Success\", \"HintListSuccessResponse\"),\n 400: (\n \"An error occured processing the provided or stored data\",\n \"APISimpleErrorResponse\",\n ),\n },\n )\n @validate_args(\n {\n \"type\": (str, None),\n \"challenge_id\": (int, None),\n \"content\": (str, None),\n \"cost\": (int, None),\n \"q\": (str, None),\n \"field\": (\n RawEnum(\"HintFields\", {\"type\": \"type\", \"content\": \"content\"}),\n None,\n ),\n },\n location=\"query\",\n )\n def get(self, query_args):\n q = query_args.pop(\"q\", None)\n field = str(query_args.pop(\"field\", None))\n filters = build_model_filters(model=Hints, query=q, field=field)\n\n hints = Hints.query.filter_by(**query_args).filter(*filters).all()\n response = HintSchema(many=True, view=\"locked\").dump(hints)\n\n if response.errors:\n return {\"success\": False, \"errors\": response.errors}, 400\n\n return {\"success\": True, \"data\": response.data}\n\n @admins_only\n @hints_namespace.doc(\n description=\"Endpoint to create 
a Hint object\",\n responses={\n 200: (\"Success\", \"HintDetailedSuccessResponse\"),\n 400: (\n \"An error occured processing the provided or stored data\",\n \"APISimpleErrorResponse\",\n ),\n },\n )\n def post(self):\n req = request.get_json()\n schema = HintSchema(view=\"admin\")\n response = schema.load(req, session=db.session)\n\n if response.errors:\n return {\"success\": False, \"errors\": response.errors}, 400\n\n db.session.add(response.data)\n db.session.commit()\n\n response = schema.dump(response.data)\n\n return {\"success\": True, \"data\": response.data}\n\n\n@hints_namespace.route(\"/<hint_id>\")\nclass Hint(Resource):\n @during_ctf_time_only\n @authed_only\n @hints_namespace.doc(\n description=\"Endpoint to get a specific Hint object\",\n responses={\n 200: (\"Success\", \"HintDetailedSuccessResponse\"),\n 400: (\n \"An error occured processing the provided or stored data\",\n \"APISimpleErrorResponse\",\n ),\n },\n )\n def get(self, hint_id):\n user = get_current_user()\n hint = Hints.query.filter_by(id=hint_id).first_or_404()\n\n view = \"unlocked\"\n if hint.cost:\n view = \"locked\"\n unlocked = HintUnlocks.query.filter_by(\n account_id=user.account_id, target=hint.id\n ).first()\n if unlocked:\n view = \"unlocked\"\n\n if is_admin():\n if request.args.get(\"preview\", False):\n view = \"admin\"\n\n response = HintSchema(view=view).dump(hint)\n\n if response.errors:\n return {\"success\": False, \"errors\": response.errors}, 400\n\n return {\"success\": True, \"data\": response.data}\n\n @admins_only\n @hints_namespace.doc(\n description=\"Endpoint to edit a specific Hint object\",\n responses={\n 200: (\"Success\", \"HintDetailedSuccessResponse\"),\n 400: (\n \"An error occured processing the provided or stored data\",\n \"APISimpleErrorResponse\",\n ),\n },\n )\n def patch(self, hint_id):\n hint = Hints.query.filter_by(id=hint_id).first_or_404()\n req = request.get_json()\n\n schema = HintSchema(view=\"admin\")\n response = schema.load(req, instance=hint, partial=True, session=db.session)\n\n if response.errors:\n return {\"success\": False, \"errors\": response.errors}, 400\n\n db.session.add(response.data)\n db.session.commit()\n\n response = schema.dump(response.data)\n\n return {\"success\": True, \"data\": response.data}\n\n @admins_only\n @hints_namespace.doc(\n description=\"Endpoint to delete a specific Tag object\",\n responses={200: (\"Success\", \"APISimpleSuccessResponse\")},\n )\n def delete(self, hint_id):\n hint = Hints.query.filter_by(id=hint_id).first_or_404()\n db.session.delete(hint)\n db.session.commit()\n db.session.close()\n\n return {\"success\": True}\n", "path": "CTFd/api/v1/hints.py"}]} | 2,290 | 330 |
gh_patches_debug_50 | rasdani/github-patches | git_diff | scrapy__scrapy-4563 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Extend hoverxref_roles
@humitos [suggested](https://github.com/scrapy/scrapy/issues/4475#issuecomment-613350667) extending the `hoverxref_roles` setting of the corresponding Sphinx extension so that the display-on-hover behavior of the documentation works for things like signal or setting references.
</issue>
<code>
[start of docs/conf.py]
1 # Scrapy documentation build configuration file, created by
2 # sphinx-quickstart on Mon Nov 24 12:02:52 2008.
3 #
4 # This file is execfile()d with the current directory set to its containing dir.
5 #
6 # The contents of this file are pickled, so don't put values in the namespace
7 # that aren't pickleable (module imports are okay, they're removed automatically).
8 #
9 # All configuration values have a default; values that are commented out
10 # serve to show the default.
11
12 import sys
13 from datetime import datetime
14 from os import path
15
16 # If your extensions are in another directory, add it here. If the directory
17 # is relative to the documentation root, use os.path.abspath to make it
18 # absolute, like shown here.
19 sys.path.append(path.join(path.dirname(__file__), "_ext"))
20 sys.path.insert(0, path.dirname(path.dirname(__file__)))
21
22
23 # General configuration
24 # ---------------------
25
26 # Add any Sphinx extension module names here, as strings. They can be extensions
27 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
28 extensions = [
29 'hoverxref.extension',
30 'notfound.extension',
31 'scrapydocs',
32 'sphinx.ext.autodoc',
33 'sphinx.ext.coverage',
34 'sphinx.ext.intersphinx',
35 'sphinx.ext.viewcode',
36 ]
37
38 # Add any paths that contain templates here, relative to this directory.
39 templates_path = ['_templates']
40
41 # The suffix of source filenames.
42 source_suffix = '.rst'
43
44 # The encoding of source files.
45 #source_encoding = 'utf-8'
46
47 # The master toctree document.
48 master_doc = 'index'
49
50 # General information about the project.
51 project = 'Scrapy'
52 copyright = '2008–{}, Scrapy developers'.format(datetime.now().year)
53
54 # The version info for the project you're documenting, acts as replacement for
55 # |version| and |release|, also used in various other places throughout the
56 # built documents.
57 #
58 # The short X.Y version.
59 try:
60 import scrapy
61 version = '.'.join(map(str, scrapy.version_info[:2]))
62 release = scrapy.__version__
63 except ImportError:
64 version = ''
65 release = ''
66
67 # The language for content autogenerated by Sphinx. Refer to documentation
68 # for a list of supported languages.
69 language = 'en'
70
71 # There are two options for replacing |today|: either, you set today to some
72 # non-false value, then it is used:
73 #today = ''
74 # Else, today_fmt is used as the format for a strftime call.
75 #today_fmt = '%B %d, %Y'
76
77 # List of documents that shouldn't be included in the build.
78 #unused_docs = []
79
80 exclude_patterns = ['build']
81
82 # List of directories, relative to source directory, that shouldn't be searched
83 # for source files.
84 exclude_trees = ['.build']
85
86 # The reST default role (used for this markup: `text`) to use for all documents.
87 #default_role = None
88
89 # If true, '()' will be appended to :func: etc. cross-reference text.
90 #add_function_parentheses = True
91
92 # If true, the current module name will be prepended to all description
93 # unit titles (such as .. function::).
94 #add_module_names = True
95
96 # If true, sectionauthor and moduleauthor directives will be shown in the
97 # output. They are ignored by default.
98 #show_authors = False
99
100 # The name of the Pygments (syntax highlighting) style to use.
101 pygments_style = 'sphinx'
102
103 # List of Sphinx warnings that will not be raised
104 suppress_warnings = ['epub.unknown_project_files']
105
106
107 # Options for HTML output
108 # -----------------------
109
110 # The theme to use for HTML and HTML Help pages. See the documentation for
111 # a list of builtin themes.
112 html_theme = 'sphinx_rtd_theme'
113
114 # Theme options are theme-specific and customize the look and feel of a theme
115 # further. For a list of options available for each theme, see the
116 # documentation.
117 #html_theme_options = {}
118
119 # Add any paths that contain custom themes here, relative to this directory.
120 # Add path to the RTD explicitly to robustify builds (otherwise might
121 # fail in a clean Debian build env)
122 import sphinx_rtd_theme
123 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
124
125
126 # The style sheet to use for HTML and HTML Help pages. A file of that name
127 # must exist either in Sphinx' static/ path, or in one of the custom paths
128 # given in html_static_path.
129 # html_style = 'scrapydoc.css'
130
131 # The name for this set of Sphinx documents. If None, it defaults to
132 # "<project> v<release> documentation".
133 #html_title = None
134
135 # A shorter title for the navigation bar. Default is the same as html_title.
136 #html_short_title = None
137
138 # The name of an image file (relative to this directory) to place at the top
139 # of the sidebar.
140 #html_logo = None
141
142 # The name of an image file (within the static path) to use as favicon of the
143 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
144 # pixels large.
145 #html_favicon = None
146
147 # Add any paths that contain custom static files (such as style sheets) here,
148 # relative to this directory. They are copied after the builtin static files,
149 # so a file named "default.css" will overwrite the builtin "default.css".
150 html_static_path = ['_static']
151
152 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
153 # using the given strftime format.
154 html_last_updated_fmt = '%b %d, %Y'
155
156 # Custom sidebar templates, maps document names to template names.
157 #html_sidebars = {}
158
159 # Additional templates that should be rendered to pages, maps page names to
160 # template names.
161 #html_additional_pages = {}
162
163 # If false, no module index is generated.
164 #html_use_modindex = True
165
166 # If false, no index is generated.
167 #html_use_index = True
168
169 # If true, the index is split into individual pages for each letter.
170 #html_split_index = False
171
172 # If true, the reST sources are included in the HTML build as _sources/<name>.
173 html_copy_source = True
174
175 # If true, an OpenSearch description file will be output, and all pages will
176 # contain a <link> tag referring to it. The value of this option must be the
177 # base URL from which the finished HTML is served.
178 #html_use_opensearch = ''
179
180 # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
181 #html_file_suffix = ''
182
183 # Output file base name for HTML help builder.
184 htmlhelp_basename = 'Scrapydoc'
185
186
187 # Options for LaTeX output
188 # ------------------------
189
190 # The paper size ('letter' or 'a4').
191 #latex_paper_size = 'letter'
192
193 # The font size ('10pt', '11pt' or '12pt').
194 #latex_font_size = '10pt'
195
196 # Grouping the document tree into LaTeX files. List of tuples
197 # (source start file, target name, title, author, document class [howto/manual]).
198 latex_documents = [
199 ('index', 'Scrapy.tex', 'Scrapy Documentation',
200 'Scrapy developers', 'manual'),
201 ]
202
203 # The name of an image file (relative to this directory) to place at the top of
204 # the title page.
205 #latex_logo = None
206
207 # For "manual" documents, if this is true, then toplevel headings are parts,
208 # not chapters.
209 #latex_use_parts = False
210
211 # Additional stuff for the LaTeX preamble.
212 #latex_preamble = ''
213
214 # Documents to append as an appendix to all manuals.
215 #latex_appendices = []
216
217 # If false, no module index is generated.
218 #latex_use_modindex = True
219
220
221 # Options for the linkcheck builder
222 # ---------------------------------
223
224 # A list of regular expressions that match URIs that should not be checked when
225 # doing a linkcheck build.
226 linkcheck_ignore = [
227 'http://localhost:\d+', 'http://hg.scrapy.org',
228 'http://directory.google.com/'
229 ]
230
231
232 # Options for the Coverage extension
233 # ----------------------------------
234 coverage_ignore_pyobjects = [
235 # Contract’s add_pre_hook and add_post_hook are not documented because
236 # they should be transparent to contract developers, for whom pre_hook and
237 # post_hook should be the actual concern.
238 r'\bContract\.add_(pre|post)_hook$',
239
240 # ContractsManager is an internal class, developers are not expected to
241 # interact with it directly in any way.
242 r'\bContractsManager\b$',
243
244 # For default contracts we only want to document their general purpose in
245 # their __init__ method, the methods they reimplement to achieve that purpose
246 # should be irrelevant to developers using those contracts.
247 r'\w+Contract\.(adjust_request_args|(pre|post)_process)$',
248
249 # Methods of downloader middlewares are not documented, only the classes
250 # themselves, since downloader middlewares are controlled through Scrapy
251 # settings.
252 r'^scrapy\.downloadermiddlewares\.\w*?\.(\w*?Middleware|DownloaderStats)\.',
253
254 # Base classes of downloader middlewares are implementation details that
255 # are not meant for users.
256 r'^scrapy\.downloadermiddlewares\.\w*?\.Base\w*?Middleware',
257
258 # Private exception used by the command-line interface implementation.
259 r'^scrapy\.exceptions\.UsageError',
260
261 # Methods of BaseItemExporter subclasses are only documented in
262 # BaseItemExporter.
263 r'^scrapy\.exporters\.(?!BaseItemExporter\b)\w*?\.',
264
265 # Extension behavior is only modified through settings. Methods of
266 # extension classes, as well as helper functions, are implementation
267 # details that are not documented.
268 r'^scrapy\.extensions\.[a-z]\w*?\.[A-Z]\w*?\.', # methods
269 r'^scrapy\.extensions\.[a-z]\w*?\.[a-z]', # helper functions
270
271 # Never documented before, and deprecated now.
272 r'^scrapy\.item\.DictItem$',
273 r'^scrapy\.linkextractors\.FilteringLinkExtractor$',
274
275 # Implementation detail of LxmlLinkExtractor
276 r'^scrapy\.linkextractors\.lxmlhtml\.LxmlParserLinkExtractor',
277 ]
278
279
280 # Options for the InterSphinx extension
281 # -------------------------------------
282
283 intersphinx_mapping = {
284 'coverage': ('https://coverage.readthedocs.io/en/stable', None),
285 'cssselect': ('https://cssselect.readthedocs.io/en/latest', None),
286 'pytest': ('https://docs.pytest.org/en/latest', None),
287 'python': ('https://docs.python.org/3', None),
288 'sphinx': ('https://www.sphinx-doc.org/en/master', None),
289 'tox': ('https://tox.readthedocs.io/en/latest', None),
290 'twisted': ('https://twistedmatrix.com/documents/current', None),
291 'twistedapi': ('https://twistedmatrix.com/documents/current/api', None),
292 }
293
294
295 # Options for sphinx-hoverxref options
296 # ------------------------------------
297
298 hoverxref_auto_ref = True
299 hoverxref_role_types = {
300 "class": "tooltip",
301 "confval": "tooltip",
302 "hoverxref": "tooltip",
303 "mod": "tooltip",
304 "ref": "tooltip",
305 }
306
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -300,3 +300,4 @@
"mod": "tooltip",
"ref": "tooltip",
}
+hoverxref_roles = ['command', 'reqmeta', 'setting', 'signal']
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -300,3 +300,4 @@\n \"mod\": \"tooltip\",\n \"ref\": \"tooltip\",\n }\n+hoverxref_roles = ['command', 'reqmeta', 'setting', 'signal']\n", "issue": "Extend hoverxref_roles\n@humitos [suggested](https://github.com/scrapy/scrapy/issues/4475#issuecomment-613350667) extending the `hoverxref_roles` setting of the corresponding Sphinx extension so that the display-on-hover behavior of the documentations works for things like signal or setting references.\n", "before_files": [{"content": "# Scrapy documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov 24 12:02:52 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickleable (module imports are okay, they're removed automatically).\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nfrom datetime import datetime\nfrom os import path\n\n# If your extensions are in another directory, add it here. If the directory\n# is relative to the documentation root, use os.path.abspath to make it\n# absolute, like shown here.\nsys.path.append(path.join(path.dirname(__file__), \"_ext\"))\nsys.path.insert(0, path.dirname(path.dirname(__file__)))\n\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'hoverxref.extension',\n 'notfound.extension',\n 'scrapydocs',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.viewcode',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Scrapy'\ncopyright = '2008\u2013{}, Scrapy developers'.format(datetime.now().year)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\ntry:\n import scrapy\n version = '.'.join(map(str, scrapy.version_info[:2]))\n release = scrapy.__version__\nexcept ImportError:\n version = ''\n release = ''\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\nlanguage = 'en'\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\nexclude_patterns = ['build']\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = ['.build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. 
function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# List of Sphinx warnings that will not be raised\nsuppress_warnings = ['epub.unknown_project_files']\n\n\n# Options for HTML output\n# -----------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# Add path to the RTD explicitly to robustify builds (otherwise might\n# fail in a clean Debian build env)\nimport sphinx_rtd_theme\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\n# The style sheet to use for HTML and HTML Help pages. A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n# html_style = 'scrapydoc.css'\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Scrapydoc'\n\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\n#latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [\n ('index', 'Scrapy.tex', 'Scrapy Documentation',\n 'Scrapy developers', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\n\n\n# Options for the linkcheck builder\n# ---------------------------------\n\n# A list of regular expressions that match URIs that should not be checked when\n# doing a linkcheck build.\nlinkcheck_ignore = [\n 'http://localhost:\\d+', 'http://hg.scrapy.org',\n 'http://directory.google.com/'\n]\n\n\n# Options for the Coverage extension\n# ----------------------------------\ncoverage_ignore_pyobjects = [\n # Contract\u2019s add_pre_hook and add_post_hook are not documented because\n # they should be transparent to contract developers, for whom pre_hook and\n # post_hook should be the actual concern.\n r'\\bContract\\.add_(pre|post)_hook$',\n\n # ContractsManager is an internal class, developers are not expected to\n # interact with it directly in any way.\n r'\\bContractsManager\\b$',\n\n # For default contracts we only want to document their general purpose in\n # their __init__ method, the methods they reimplement to achieve that purpose\n # should be irrelevant to developers using those contracts.\n r'\\w+Contract\\.(adjust_request_args|(pre|post)_process)$',\n\n # Methods of downloader middlewares are not documented, only the classes\n # themselves, since downloader middlewares are controlled through Scrapy\n # settings.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.(\\w*?Middleware|DownloaderStats)\\.',\n\n # Base classes of downloader middlewares are implementation details that\n # are not meant for users.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.Base\\w*?Middleware',\n\n # Private exception used by the command-line interface implementation.\n r'^scrapy\\.exceptions\\.UsageError',\n\n # Methods of BaseItemExporter subclasses are only documented in\n # BaseItemExporter.\n r'^scrapy\\.exporters\\.(?!BaseItemExporter\\b)\\w*?\\.',\n\n # Extension behavior is only modified through settings. 
Methods of\n # extension classes, as well as helper functions, are implementation\n # details that are not documented.\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[A-Z]\\w*?\\.', # methods\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[a-z]', # helper functions\n\n # Never documented before, and deprecated now.\n r'^scrapy\\.item\\.DictItem$',\n r'^scrapy\\.linkextractors\\.FilteringLinkExtractor$',\n\n # Implementation detail of LxmlLinkExtractor\n r'^scrapy\\.linkextractors\\.lxmlhtml\\.LxmlParserLinkExtractor',\n]\n\n\n# Options for the InterSphinx extension\n# -------------------------------------\n\nintersphinx_mapping = {\n 'coverage': ('https://coverage.readthedocs.io/en/stable', None),\n 'cssselect': ('https://cssselect.readthedocs.io/en/latest', None),\n 'pytest': ('https://docs.pytest.org/en/latest', None),\n 'python': ('https://docs.python.org/3', None),\n 'sphinx': ('https://www.sphinx-doc.org/en/master', None),\n 'tox': ('https://tox.readthedocs.io/en/latest', None),\n 'twisted': ('https://twistedmatrix.com/documents/current', None),\n 'twistedapi': ('https://twistedmatrix.com/documents/current/api', None),\n}\n\n\n# Options for sphinx-hoverxref options\n# ------------------------------------\n\nhoverxref_auto_ref = True\nhoverxref_role_types = {\n \"class\": \"tooltip\",\n \"confval\": \"tooltip\",\n \"hoverxref\": \"tooltip\",\n \"mod\": \"tooltip\",\n \"ref\": \"tooltip\",\n}\n", "path": "docs/conf.py"}]} | 3,955 | 72 |
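The one-line golden diff above is all the issue needs: hoverxref_roles tells the sphinx-hoverxref extension to render tooltips for the custom roles as well. For reference, the hoverxref block of docs/conf.py after applying the patch reads (copied from the code and diff shown above):

hoverxref_auto_ref = True
hoverxref_role_types = {
    "class": "tooltip",
    "confval": "tooltip",
    "hoverxref": "tooltip",
    "mod": "tooltip",
    "ref": "tooltip",
}
hoverxref_roles = ['command', 'reqmeta', 'setting', 'signal']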
gh_patches_debug_41799 | rasdani/github-patches | git_diff | mindee__doctr-369 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[demo] Improve UI for OCR result display
For very dense documents, since the predicted text value is plotted statically, there can be some readability issues. We should try to improve this.
</issue>
<code>
[start of demo/app.py]
1 # Copyright (C) 2021, Mindee.
2
3 # This program is licensed under the Apache License version 2.
4 # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
5
6 import os
7 import streamlit as st
8 import matplotlib.pyplot as plt
9
10 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
11
12 import tensorflow as tf
13 import cv2
14
15 gpu_devices = tf.config.experimental.list_physical_devices('GPU')
16 if any(gpu_devices):
17 tf.config.experimental.set_memory_growth(gpu_devices[0], True)
18
19 from doctr.documents import DocumentFile
20 from doctr.models import ocr_predictor
21 from doctr.utils.visualization import synthetize_page, visualize_page
22
23 DET_ARCHS = ["db_resnet50"]
24 RECO_ARCHS = ["crnn_vgg16_bn", "crnn_resnet31", "sar_vgg16_bn", "sar_resnet31"]
25
26
27 def main():
28
29 # Wide mode
30 st.set_page_config(layout="wide")
31
32 # Designing the interface
33 st.title("DocTR: Document Text Recognition")
34 # For newline
35 st.write('\n')
36 # Set the columns
37 cols = st.beta_columns((1, 1))
38 cols[0].subheader("Input document (first page)")
39 cols[1].subheader("Raw heatmap (segmentation task)")
40
41 # Sidebar
42 # File selection
43 st.sidebar.title("Document selection")
44 # Disabling warning
45 st.set_option('deprecation.showfileUploaderEncoding', False)
46 # Choose your own image
47 uploaded_file = st.sidebar.file_uploader("Upload files", type=['pdf', 'png', 'jpeg', 'jpg'])
48 if uploaded_file is not None:
49 if uploaded_file.name.endswith('.pdf'):
50 doc = DocumentFile.from_pdf(uploaded_file.read()).as_images(output_size=(1024, 1024))
51 else:
52 doc = DocumentFile.from_images(uploaded_file.read())
53 cols[0].image(doc[0], width=640)
54
55 # Model selection
56 st.sidebar.title("Model selection")
57 det_arch = st.sidebar.selectbox("Text detection model", DET_ARCHS)
58 reco_arch = st.sidebar.selectbox("Text recognition model", RECO_ARCHS)
59
60 # For newline
61 st.sidebar.write('\n')
62
63 if st.sidebar.button("Analyze document"):
64
65 if uploaded_file is None:
66 st.sidebar.write("Please upload a document")
67
68 else:
69 with st.spinner('Loading model...'):
70 predictor = ocr_predictor(det_arch, reco_arch, pretrained=True)
71
72 with st.spinner('Analyzing...'):
73
74 # Forward the image to the model
75 processed_batches = predictor.det_predictor.pre_processor(doc)
76 out = predictor.det_predictor.model(processed_batches[0], return_model_output=True, training=False)
77 seg_map = out["out_map"]
78 seg_map = tf.squeeze(seg_map[0, ...], axis=[2])
79 seg_map = cv2.resize(seg_map.numpy(), (doc[0].shape[1], doc[0].shape[0]),
80 interpolation=cv2.INTER_LINEAR)
81 # Plot the raw heatmap
82 fig, ax = plt.subplots()
83 ax.imshow(seg_map)
84 ax.axis('off')
85 cols[1].pyplot(fig)
86
87 # Plot OCR output
88 out = predictor(doc, training=False)
89 cols[1].subheader("OCR output")
90 fig = visualize_page(out.pages[0].export(), doc[0], interactive=False)
91 cols[1].pyplot(fig)
92
93 # Page reconsitution under input page
94 cols[0].subheader("Page reconstitution from OCR output")
95 img = synthetize_page(out.pages[0].export())
96 cols[0].image(img, clamp=True, width=640)
97
98
99 if __name__ == '__main__':
100 main()
101
[end of demo/app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/demo/app.py b/demo/app.py
--- a/demo/app.py
+++ b/demo/app.py
@@ -33,10 +33,14 @@
st.title("DocTR: Document Text Recognition")
# For newline
st.write('\n')
+ # Instructions
+ st.markdown("*Hint: click on the top-right corner of an image to enlarge it!*")
# Set the columns
- cols = st.beta_columns((1, 1))
- cols[0].subheader("Input document (first page)")
- cols[1].subheader("Raw heatmap (segmentation task)")
+ cols = st.beta_columns((1, 1, 1, 1))
+ cols[0].subheader("Input page")
+ cols[1].subheader("Segmentation heatmap")
+ cols[2].subheader("OCR output")
+ cols[3].subheader("Page reconstitution")
# Sidebar
# File selection
@@ -50,7 +54,8 @@
doc = DocumentFile.from_pdf(uploaded_file.read()).as_images(output_size=(1024, 1024))
else:
doc = DocumentFile.from_images(uploaded_file.read())
- cols[0].image(doc[0], width=640)
+ page_idx = st.sidebar.selectbox("Page selection", [idx + 1 for idx in range(len(doc))]) - 1
+ cols[0].image(doc[page_idx])
# Model selection
st.sidebar.title("Model selection")
@@ -60,7 +65,7 @@
# For newline
st.sidebar.write('\n')
- if st.sidebar.button("Analyze document"):
+ if st.sidebar.button("Analyze page"):
if uploaded_file is None:
st.sidebar.write("Please upload a document")
@@ -72,11 +77,11 @@
with st.spinner('Analyzing...'):
# Forward the image to the model
- processed_batches = predictor.det_predictor.pre_processor(doc)
+ processed_batches = predictor.det_predictor.pre_processor([doc[page_idx]])
out = predictor.det_predictor.model(processed_batches[0], return_model_output=True, training=False)
seg_map = out["out_map"]
seg_map = tf.squeeze(seg_map[0, ...], axis=[2])
- seg_map = cv2.resize(seg_map.numpy(), (doc[0].shape[1], doc[0].shape[0]),
+ seg_map = cv2.resize(seg_map.numpy(), (doc[page_idx].shape[1], doc[page_idx].shape[0]),
interpolation=cv2.INTER_LINEAR)
# Plot the raw heatmap
fig, ax = plt.subplots()
@@ -85,15 +90,18 @@
cols[1].pyplot(fig)
# Plot OCR output
- out = predictor(doc, training=False)
- cols[1].subheader("OCR output")
- fig = visualize_page(out.pages[0].export(), doc[0], interactive=False)
- cols[1].pyplot(fig)
+ out = predictor([doc[page_idx]], training=False)
+ fig = visualize_page(out.pages[0].export(), doc[page_idx], interactive=False)
+ cols[2].pyplot(fig)
# Page reconsitution under input page
- cols[0].subheader("Page reconstitution from OCR output")
- img = synthetize_page(out.pages[0].export())
- cols[0].image(img, clamp=True, width=640)
+ page_export = out.pages[0].export()
+ img = synthetize_page(page_export)
+ cols[3].image(img, clamp=True)
+
+ # Display JSON
+ st.markdown("\nHere are your analysis results in JSON format:")
+ st.json(page_export)
if __name__ == '__main__':
| {"golden_diff": "diff --git a/demo/app.py b/demo/app.py\n--- a/demo/app.py\n+++ b/demo/app.py\n@@ -33,10 +33,14 @@\n st.title(\"DocTR: Document Text Recognition\")\n # For newline\n st.write('\\n')\n+ # Instructions\n+ st.markdown(\"*Hint: click on the top-right corner of an image to enlarge it!*\")\n # Set the columns\n- cols = st.beta_columns((1, 1))\n- cols[0].subheader(\"Input document (first page)\")\n- cols[1].subheader(\"Raw heatmap (segmentation task)\")\n+ cols = st.beta_columns((1, 1, 1, 1))\n+ cols[0].subheader(\"Input page\")\n+ cols[1].subheader(\"Segmentation heatmap\")\n+ cols[2].subheader(\"OCR output\")\n+ cols[3].subheader(\"Page reconstitution\")\n \n # Sidebar\n # File selection\n@@ -50,7 +54,8 @@\n doc = DocumentFile.from_pdf(uploaded_file.read()).as_images(output_size=(1024, 1024))\n else:\n doc = DocumentFile.from_images(uploaded_file.read())\n- cols[0].image(doc[0], width=640)\n+ page_idx = st.sidebar.selectbox(\"Page selection\", [idx + 1 for idx in range(len(doc))]) - 1\n+ cols[0].image(doc[page_idx])\n \n # Model selection\n st.sidebar.title(\"Model selection\")\n@@ -60,7 +65,7 @@\n # For newline\n st.sidebar.write('\\n')\n \n- if st.sidebar.button(\"Analyze document\"):\n+ if st.sidebar.button(\"Analyze page\"):\n \n if uploaded_file is None:\n st.sidebar.write(\"Please upload a document\")\n@@ -72,11 +77,11 @@\n with st.spinner('Analyzing...'):\n \n # Forward the image to the model\n- processed_batches = predictor.det_predictor.pre_processor(doc)\n+ processed_batches = predictor.det_predictor.pre_processor([doc[page_idx]])\n out = predictor.det_predictor.model(processed_batches[0], return_model_output=True, training=False)\n seg_map = out[\"out_map\"]\n seg_map = tf.squeeze(seg_map[0, ...], axis=[2])\n- seg_map = cv2.resize(seg_map.numpy(), (doc[0].shape[1], doc[0].shape[0]),\n+ seg_map = cv2.resize(seg_map.numpy(), (doc[page_idx].shape[1], doc[page_idx].shape[0]),\n interpolation=cv2.INTER_LINEAR)\n # Plot the raw heatmap\n fig, ax = plt.subplots()\n@@ -85,15 +90,18 @@\n cols[1].pyplot(fig)\n \n # Plot OCR output\n- out = predictor(doc, training=False)\n- cols[1].subheader(\"OCR output\")\n- fig = visualize_page(out.pages[0].export(), doc[0], interactive=False)\n- cols[1].pyplot(fig)\n+ out = predictor([doc[page_idx]], training=False)\n+ fig = visualize_page(out.pages[0].export(), doc[page_idx], interactive=False)\n+ cols[2].pyplot(fig)\n \n # Page reconsitution under input page\n- cols[0].subheader(\"Page reconstitution from OCR output\")\n- img = synthetize_page(out.pages[0].export())\n- cols[0].image(img, clamp=True, width=640)\n+ page_export = out.pages[0].export()\n+ img = synthetize_page(page_export)\n+ cols[3].image(img, clamp=True)\n+\n+ # Display JSON\n+ st.markdown(\"\\nHere are your analysis results in JSON format:\")\n+ st.json(page_export)\n \n \n if __name__ == '__main__':\n", "issue": "[demo] Improve UI for OCR result display\nFor very dense documents, since the predicted text value is plotted statically, there can be some readability issues. 
We should try to improve this\n", "before_files": [{"content": "# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport os\nimport streamlit as st\nimport matplotlib.pyplot as plt\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\n\nimport tensorflow as tf\nimport cv2\n\ngpu_devices = tf.config.experimental.list_physical_devices('GPU')\nif any(gpu_devices):\n tf.config.experimental.set_memory_growth(gpu_devices[0], True)\n\nfrom doctr.documents import DocumentFile\nfrom doctr.models import ocr_predictor\nfrom doctr.utils.visualization import synthetize_page, visualize_page\n\nDET_ARCHS = [\"db_resnet50\"]\nRECO_ARCHS = [\"crnn_vgg16_bn\", \"crnn_resnet31\", \"sar_vgg16_bn\", \"sar_resnet31\"]\n\n\ndef main():\n\n # Wide mode\n st.set_page_config(layout=\"wide\")\n\n # Designing the interface\n st.title(\"DocTR: Document Text Recognition\")\n # For newline\n st.write('\\n')\n # Set the columns\n cols = st.beta_columns((1, 1))\n cols[0].subheader(\"Input document (first page)\")\n cols[1].subheader(\"Raw heatmap (segmentation task)\")\n\n # Sidebar\n # File selection\n st.sidebar.title(\"Document selection\")\n # Disabling warning\n st.set_option('deprecation.showfileUploaderEncoding', False)\n # Choose your own image\n uploaded_file = st.sidebar.file_uploader(\"Upload files\", type=['pdf', 'png', 'jpeg', 'jpg'])\n if uploaded_file is not None:\n if uploaded_file.name.endswith('.pdf'):\n doc = DocumentFile.from_pdf(uploaded_file.read()).as_images(output_size=(1024, 1024))\n else:\n doc = DocumentFile.from_images(uploaded_file.read())\n cols[0].image(doc[0], width=640)\n\n # Model selection\n st.sidebar.title(\"Model selection\")\n det_arch = st.sidebar.selectbox(\"Text detection model\", DET_ARCHS)\n reco_arch = st.sidebar.selectbox(\"Text recognition model\", RECO_ARCHS)\n\n # For newline\n st.sidebar.write('\\n')\n\n if st.sidebar.button(\"Analyze document\"):\n\n if uploaded_file is None:\n st.sidebar.write(\"Please upload a document\")\n\n else:\n with st.spinner('Loading model...'):\n predictor = ocr_predictor(det_arch, reco_arch, pretrained=True)\n\n with st.spinner('Analyzing...'):\n\n # Forward the image to the model\n processed_batches = predictor.det_predictor.pre_processor(doc)\n out = predictor.det_predictor.model(processed_batches[0], return_model_output=True, training=False)\n seg_map = out[\"out_map\"]\n seg_map = tf.squeeze(seg_map[0, ...], axis=[2])\n seg_map = cv2.resize(seg_map.numpy(), (doc[0].shape[1], doc[0].shape[0]),\n interpolation=cv2.INTER_LINEAR)\n # Plot the raw heatmap\n fig, ax = plt.subplots()\n ax.imshow(seg_map)\n ax.axis('off')\n cols[1].pyplot(fig)\n\n # Plot OCR output\n out = predictor(doc, training=False)\n cols[1].subheader(\"OCR output\")\n fig = visualize_page(out.pages[0].export(), doc[0], interactive=False)\n cols[1].pyplot(fig)\n\n # Page reconsitution under input page\n cols[0].subheader(\"Page reconstitution from OCR output\")\n img = synthetize_page(out.pages[0].export())\n cols[0].image(img, clamp=True, width=640)\n\n\nif __name__ == '__main__':\n main()\n", "path": "demo/app.py"}]} | 1,610 | 863 |
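The diff above splits the demo into four columns, analyzes a single user-selected page instead of the whole document, and prints the exported JSON below the results. The page-selection pattern it introduces can be sketched on its own as follows (a simplified excerpt, assuming doc is the list of page images returned by DocumentFile):

import streamlit as st

def select_page_index(doc):
    # Sidebar dropdown listing pages 1..N; returns the zero-based index of
    # the page that should be analyzed and displayed.
    return st.sidebar.selectbox(
        "Page selection", [idx + 1 for idx in range(len(doc))]
    ) - 1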
gh_patches_debug_27227 | rasdani/github-patches | git_diff | searx__searx-2066 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
mymemory_translated engine: unexpected crash 'str' object has no attribute 'decode'
mymemory engine does not work.
You can see it in the search engine statistics: https://searx.space/#.
Either: "unexpected crash 'str' object has no attribute 'decode'"
Or: "no result"
My instance is https://searx.hlfh.space (I use antibot-proxy) and I have the first issue.
I am using mymemory with the API key I got from the service.
</issue>
<code>
[start of searx/engines/translated.py]
1 """
2 MyMemory Translated
3
4 @website https://mymemory.translated.net/
5 @provide-api yes (https://mymemory.translated.net/doc/spec.php)
6 @using-api yes
7 @results JSON
8 @stable yes
9 @parse url, title, content
10 """
11 import re
12 from sys import version_info
13 from searx.utils import is_valid_lang
14
15 if version_info[0] == 3:
16 unicode = str
17
18 categories = ['general']
19 url = u'http://api.mymemory.translated.net/get?q={query}&langpair={from_lang}|{to_lang}{key}'
20 web_url = u'http://mymemory.translated.net/en/{from_lang}/{to_lang}/{query}'
21 weight = 100
22
23 parser_re = re.compile(u'.*?([a-z]+)-([a-z]+) (.{2,})$', re.I)
24 api_key = ''
25
26
27 def request(query, params):
28 m = parser_re.match(unicode(query, 'utf8'))
29 if not m:
30 return params
31
32 from_lang, to_lang, query = m.groups()
33
34 from_lang = is_valid_lang(from_lang)
35 to_lang = is_valid_lang(to_lang)
36
37 if not from_lang or not to_lang:
38 return params
39
40 if api_key:
41 key_form = '&key=' + api_key
42 else:
43 key_form = ''
44 params['url'] = url.format(from_lang=from_lang[1],
45 to_lang=to_lang[1],
46 query=query,
47 key=key_form)
48 params['query'] = query
49 params['from_lang'] = from_lang
50 params['to_lang'] = to_lang
51
52 return params
53
54
55 def response(resp):
56 results = []
57 results.append({
58 'url': web_url.format(
59 from_lang=resp.search_params['from_lang'][2],
60 to_lang=resp.search_params['to_lang'][2],
61 query=resp.search_params['query']),
62 'title': '[{0}-{1}] {2}'.format(
63 resp.search_params['from_lang'][1],
64 resp.search_params['to_lang'][1],
65 resp.search_params['query']),
66 'content': resp.json()['responseData']['translatedText']
67 })
68 return results
69
[end of searx/engines/translated.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/engines/translated.py b/searx/engines/translated.py
--- a/searx/engines/translated.py
+++ b/searx/engines/translated.py
@@ -9,23 +9,19 @@
@parse url, title, content
"""
import re
-from sys import version_info
from searx.utils import is_valid_lang
-if version_info[0] == 3:
- unicode = str
-
categories = ['general']
-url = u'http://api.mymemory.translated.net/get?q={query}&langpair={from_lang}|{to_lang}{key}'
-web_url = u'http://mymemory.translated.net/en/{from_lang}/{to_lang}/{query}'
+url = u'https://api.mymemory.translated.net/get?q={query}&langpair={from_lang}|{to_lang}{key}'
+web_url = u'https://mymemory.translated.net/en/{from_lang}/{to_lang}/{query}'
weight = 100
-parser_re = re.compile(u'.*?([a-z]+)-([a-z]+) (.{2,})$', re.I)
+parser_re = re.compile(b'.*?([a-z]+)-([a-z]+) (.{2,})$', re.I)
api_key = ''
def request(query, params):
- m = parser_re.match(unicode(query, 'utf8'))
+ m = parser_re.match(query)
if not m:
return params
@@ -43,9 +39,9 @@
key_form = ''
params['url'] = url.format(from_lang=from_lang[1],
to_lang=to_lang[1],
- query=query,
+ query=query.decode('utf-8'),
key=key_form)
- params['query'] = query
+ params['query'] = query.decode('utf-8')
params['from_lang'] = from_lang
params['to_lang'] = to_lang
| {"golden_diff": "diff --git a/searx/engines/translated.py b/searx/engines/translated.py\n--- a/searx/engines/translated.py\n+++ b/searx/engines/translated.py\n@@ -9,23 +9,19 @@\n @parse url, title, content\n \"\"\"\n import re\n-from sys import version_info\n from searx.utils import is_valid_lang\n \n-if version_info[0] == 3:\n- unicode = str\n-\n categories = ['general']\n-url = u'http://api.mymemory.translated.net/get?q={query}&langpair={from_lang}|{to_lang}{key}'\n-web_url = u'http://mymemory.translated.net/en/{from_lang}/{to_lang}/{query}'\n+url = u'https://api.mymemory.translated.net/get?q={query}&langpair={from_lang}|{to_lang}{key}'\n+web_url = u'https://mymemory.translated.net/en/{from_lang}/{to_lang}/{query}'\n weight = 100\n \n-parser_re = re.compile(u'.*?([a-z]+)-([a-z]+) (.{2,})$', re.I)\n+parser_re = re.compile(b'.*?([a-z]+)-([a-z]+) (.{2,})$', re.I)\n api_key = ''\n \n \n def request(query, params):\n- m = parser_re.match(unicode(query, 'utf8'))\n+ m = parser_re.match(query)\n if not m:\n return params\n \n@@ -43,9 +39,9 @@\n key_form = ''\n params['url'] = url.format(from_lang=from_lang[1],\n to_lang=to_lang[1],\n- query=query,\n+ query=query.decode('utf-8'),\n key=key_form)\n- params['query'] = query\n+ params['query'] = query.decode('utf-8')\n params['from_lang'] = from_lang\n params['to_lang'] = to_lang\n", "issue": "mymemory_translated engine: unexpected crash 'str' object has no attribute 'decode' \nmymemory engine does not work.\r\nYou can see it in the search engine statistics: https://searx.space/#.\r\n\r\nEither: \"unexpected crash 'str' object has no attribute 'decode'\"\r\nOr: \"no result\"\r\n\r\nMy instance is https://searx.hlfh.space (I use antibot-proxy) and I have the first issue.\r\nI am using mymemory with the API key I got from the service.\n", "before_files": [{"content": "\"\"\"\n MyMemory Translated\n\n @website https://mymemory.translated.net/\n @provide-api yes (https://mymemory.translated.net/doc/spec.php)\n @using-api yes\n @results JSON\n @stable yes\n @parse url, title, content\n\"\"\"\nimport re\nfrom sys import version_info\nfrom searx.utils import is_valid_lang\n\nif version_info[0] == 3:\n unicode = str\n\ncategories = ['general']\nurl = u'http://api.mymemory.translated.net/get?q={query}&langpair={from_lang}|{to_lang}{key}'\nweb_url = u'http://mymemory.translated.net/en/{from_lang}/{to_lang}/{query}'\nweight = 100\n\nparser_re = re.compile(u'.*?([a-z]+)-([a-z]+) (.{2,})$', re.I)\napi_key = ''\n\n\ndef request(query, params):\n m = parser_re.match(unicode(query, 'utf8'))\n if not m:\n return params\n\n from_lang, to_lang, query = m.groups()\n\n from_lang = is_valid_lang(from_lang)\n to_lang = is_valid_lang(to_lang)\n\n if not from_lang or not to_lang:\n return params\n\n if api_key:\n key_form = '&key=' + api_key\n else:\n key_form = ''\n params['url'] = url.format(from_lang=from_lang[1],\n to_lang=to_lang[1],\n query=query,\n key=key_form)\n params['query'] = query\n params['from_lang'] = from_lang\n params['to_lang'] = to_lang\n\n return params\n\n\ndef response(resp):\n results = []\n results.append({\n 'url': web_url.format(\n from_lang=resp.search_params['from_lang'][2],\n to_lang=resp.search_params['to_lang'][2],\n query=resp.search_params['query']),\n 'title': '[{0}-{1}] {2}'.format(\n resp.search_params['from_lang'][1],\n resp.search_params['to_lang'][1],\n resp.search_params['query']),\n 'content': resp.json()['responseData']['translatedText']\n })\n return results\n", "path": "searx/engines/translated.py"}]} | 1,271 | 429 |
gh_patches_debug_3751 | rasdani/github-patches | git_diff | svthalia__concrexit-1361 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Profile page crashes when not logged in
Sentry Issue: [CONCREXIT-40](https://sentry.io/organizations/thalia/issues/1976140555/?referrer=github_integration)
```
AttributeError: 'NoneType' object has no attribute 'pk'
File "django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "django/core/handlers/base.py", line 179, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "django/views/generic/base.py", line 64, in view
self.setup(request, *args, **kwargs)
File "members/views.py", line 173, in setup
kwargs["pk"] = request.member.pk
File "django/utils/functional.py", line 241, in inner
return func(self._wrapped, *args)
```
Steps to reproduce:
1. Open https://thalia.nu/members/profile/ in Incognito mode
</issue>
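The traceback points at `setup()`, which Django runs before `dispatch()`; the `login_required` decorator on this view only wraps `dispatch`, so for an anonymous visitor `request.member` is still `None` when `setup()` dereferences it. A minimal guard along those lines (a sketch against the view in `members/views.py` shown below, not a reviewed fix) could be:

```
from django.views.generic import DetailView

class ProfileDetailView(DetailView):
    # ...model/template configuration as in members/views.py...

    def setup(self, request, *args, **kwargs) -> None:
        # request.member can be None for anonymous visitors, because setup()
        # runs before the login_required check that only wraps dispatch().
        if "pk" not in kwargs and request.member:
            kwargs["pk"] = request.member.pk
        super().setup(request, *args, **kwargs)
```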
<code>
[start of website/members/views.py]
1 """Views provided by the members package"""
2 import json
3 from datetime import date, datetime
4
5 from django.contrib.auth.decorators import login_required
6 from django.contrib.messages.views import SuccessMessageMixin
7 from django.db.models import Q, QuerySet
8 from django.http import Http404, HttpResponse
9 from django.shortcuts import get_object_or_404
10 from django.template.response import TemplateResponse
11 from django.urls import reverse_lazy
12 from django.utils.decorators import method_decorator
13 from django.utils.translation import gettext_lazy as _
14 from django.views.generic import ListView, DetailView, UpdateView, CreateView
15 from django.views.generic.base import TemplateResponseMixin, View, TemplateView
16 from rest_framework.authtoken.models import Token
17 from rest_framework.authtoken.views import ObtainAuthToken
18 from rest_framework.response import Response
19
20 import pizzas.services
21 from members import services, emails
22 from members.decorators import membership_required
23 from members.models import EmailChange, Membership, Member, Profile
24 from utils.snippets import datetime_to_lectureyear
25 import events.services as event_services
26 import activemembers.services as activemembers_services
27
28 from . import models
29 from .forms import ProfileForm
30 from .services import member_achievements
31 from .services import member_societies
32
33
34 class ObtainThaliaAuthToken(ObtainAuthToken):
35 """
36 Custom override of the AuthToken view to force lowercase the username
37 """
38
39 def post(self, request, *args, **kwargs) -> HttpResponse:
40 serializer = self.serializer_class(
41 data={
42 "username": request.data.get("username").lower()
43 if "username" in request.data
44 else None,
45 "password": request.data.get("password"),
46 },
47 context={"request": request},
48 )
49
50 if not serializer.is_valid():
51 return Response({"error": "Unauthorized"}, status=401)
52
53 user = serializer.validated_data["user"]
54 token, _ = Token.objects.get_or_create(user=user)
55 return Response({"token": token.key})
56
57
58 @method_decorator(login_required, "dispatch")
59 @method_decorator(membership_required, "dispatch")
60 class MembersIndex(ListView):
61 """
62 View that renders the members overview
63 """
64
65 model = Member
66 paginate_by = 28
67 template_name = "members/index.html"
68 context_object_name = "members"
69 keywords = None
70 query_filter = ""
71 year_range = []
72
73 def setup(self, request, *args, **kwargs) -> None:
74 super().setup(request, *args, **kwargs)
75 current_lectureyear = datetime_to_lectureyear(date.today())
76 self.year_range = list(
77 reversed(range(current_lectureyear - 5, current_lectureyear + 1))
78 )
79 self.keywords = request.GET.get("keywords", "").split() or None
80 self.query_filter = kwargs.get("filter", None)
81
82 def get_queryset(self) -> QuerySet:
83 memberships_query = Q(until__gt=datetime.now()) | Q(until=None)
84 members_query = ~Q(id=None)
85
86 if self.query_filter and self.query_filter.isdigit():
87 members_query &= Q(profile__starting_year=int(self.query_filter))
88 memberships_query &= Q(type=Membership.MEMBER)
89 elif self.query_filter == "older":
90 members_query &= Q(profile__starting_year__lt=self.year_range[-1])
91 memberships_query &= Q(type=Membership.MEMBER)
92 elif self.query_filter == "former":
93 # Filter out all current active memberships
94 memberships_query &= Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)
95 memberships = Membership.objects.filter(memberships_query)
96 members_query &= ~Q(pk__in=memberships.values("user__pk"))
97 # Members_query contains users that are not currently (honorary)member
98 elif self.query_filter == "benefactors":
99 memberships_query &= Q(type=Membership.BENEFACTOR)
100 elif self.query_filter == "honorary":
101 memberships_query = Q(until__gt=datetime.now().date()) | Q(until=None)
102 memberships_query &= Q(type=Membership.HONORARY)
103
104 if self.keywords:
105 for key in self.keywords:
106 # Works because relevant options all have `nick` in their key
107 members_query &= (
108 (
109 Q(profile__nickname__icontains=key)
110 & Q(profile__display_name_preference__contains="nick")
111 )
112 | Q(first_name__icontains=key)
113 | Q(last_name__icontains=key)
114 | Q(username__icontains=key)
115 )
116
117 if self.query_filter == "former":
118 memberships_query = Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)
119 memberships = Membership.objects.filter(memberships_query)
120 all_memberships = Membership.objects.all()
121 # Only keep members that were once members, or are legacy users
122 # that do not have any memberships at all
123 members_query &= Q(pk__in=memberships.values("user__pk")) | ~Q(
124 pk__in=all_memberships.values("user__pk")
125 )
126 else:
127 memberships = Membership.objects.filter(memberships_query)
128 members_query &= Q(pk__in=memberships.values("user__pk"))
129 return Member.objects.filter(members_query).order_by("first_name")
130
131 def get_context_data(self, **kwargs) -> dict:
132 context = super().get_context_data(**kwargs)
133
134 page = context["page_obj"].number
135 paginator = context["paginator"]
136
137 page_range = range(1, paginator.num_pages + 1)
138 if paginator.num_pages > 7:
139 if page > 3:
140 page_range_end = paginator.num_pages
141 if page + 3 <= paginator.num_pages:
142 page_range_end = page + 3
143
144 page_range = range(page - 2, page_range_end)
145 while page_range.stop - page_range.start < 5:
146 page_range = range(page_range.start - 1, page_range.stop)
147 else:
148 page_range = range(1, 6)
149
150 context.update(
151 {
152 "filter": self.query_filter,
153 "page_range": page_range,
154 "year_range": self.year_range,
155 "keywords": self.keywords,
156 }
157 )
158
159 return context
160
161
162 @method_decorator(login_required, "dispatch")
163 class ProfileDetailView(DetailView):
164 """
165 View that renders a member's profile
166 """
167
168 context_object_name = "member"
169 model = Member
170 template_name = "members/user/profile.html"
171
172 def setup(self, request, *args, **kwargs) -> None:
173 if "pk" not in kwargs:
174 kwargs["pk"] = request.member.pk
175 super().setup(request, *args, **kwargs)
176
177 def get_context_data(self, **kwargs) -> dict:
178 context = super().get_context_data(**kwargs)
179 member = context["member"]
180
181 achievements = member_achievements(member)
182 societies = member_societies(member)
183
184 membership = member.current_membership
185 membership_type = _("Unknown membership history")
186 if membership:
187 membership_type = membership.get_type_display()
188 elif member.has_been_honorary_member():
189 membership_type = _("Former honorary member")
190 elif member.has_been_member():
191 membership_type = _("Former member")
192 elif member.latest_membership:
193 membership_type = _("Former benefactor")
194
195 context.update(
196 {
197 "achievements": achievements,
198 "societies": societies,
199 "membership_type": membership_type,
200 }
201 )
202
203 return context
204
205
206 @method_decorator(login_required, "dispatch")
207 class UserProfileUpdateView(SuccessMessageMixin, UpdateView):
208 """
209 View that allows a user to update their profile
210 """
211
212 template_name = "members/user/edit_profile.html"
213 model = Profile
214 form_class = ProfileForm
215 success_url = reverse_lazy("members:edit-profile")
216 success_message = _("Your profile has been updated successfully.")
217
218 def get_object(self, queryset=None) -> Profile:
219 return get_object_or_404(models.Profile, user=self.request.user)
220
221
222 @method_decorator(login_required, "dispatch")
223 class StatisticsView(TemplateView):
224 """
225 View that renders the statistics page
226 """
227
228 template_name = "members/statistics.html"
229
230 def get_context_data(self, **kwargs) -> dict:
231 context = super().get_context_data(**kwargs)
232
233 total = models.Member.current_members.count()
234
235 context.update(
236 {
237 "total_members": total,
238 "statistics": json.dumps(
239 {
240 "cohort_sizes": services.gen_stats_year(),
241 "member_type_distribution": services.gen_stats_member_type(),
242 "total_pizza_orders": pizzas.services.gen_stats_pizza_orders(),
243 "current_pizza_orders": pizzas.services.gen_stats_current_pizza_orders(),
244 "committee_sizes": activemembers_services.generate_statistics(),
245 "event_categories": event_services.generate_category_statistics(),
246 }
247 ),
248 }
249 )
250
251 return context
252
253
254 @method_decorator(login_required, name="dispatch")
255 class EmailChangeFormView(CreateView):
256 """
257 View that renders the email change form
258 """
259
260 model = EmailChange
261 fields = ["email", "member"]
262 template_name = "members/user/email_change.html"
263
264 def get_initial(self) -> dict:
265 initial = super().get_initial()
266 initial["email"] = self.request.member.email
267 return initial
268
269 def post(self, request, *args, **kwargs) -> HttpResponse:
270 request.POST = request.POST.dict()
271 request.POST["member"] = request.member.pk
272 return super().post(request, *args, **kwargs)
273
274 def form_valid(self, form) -> HttpResponse:
275 change_request = form.save()
276 emails.send_email_change_confirmation_messages(change_request)
277 return TemplateResponse(
278 request=self.request, template="members/user/email_change_requested.html"
279 )
280
281
282 @method_decorator(login_required, name="dispatch")
283 class EmailChangeConfirmView(View, TemplateResponseMixin):
284 """
285 View that renders an HTML template and confirms the old email address
286 """
287
288 template_name = "members/user/email_change_confirmed.html"
289
290 def get(self, request, *args, **kwargs) -> HttpResponse:
291 if not EmailChange.objects.filter(confirm_key=kwargs["key"]).exists():
292 raise Http404
293
294 change_request = EmailChange.objects.get(confirm_key=kwargs["key"])
295
296 services.confirm_email_change(change_request)
297
298 return self.render_to_response({})
299
300
301 @method_decorator(login_required, name="dispatch")
302 class EmailChangeVerifyView(View, TemplateResponseMixin):
303 """
304 View that renders an HTML template and verifies the new email address
305 """
306
307 template_name = "members/user/email_change_verified.html"
308
309 def get(self, request, *args, **kwargs) -> HttpResponse:
310 if not EmailChange.objects.filter(verify_key=kwargs["key"]).exists():
311 raise Http404
312
313 change_request = EmailChange.objects.get(verify_key=kwargs["key"])
314
315 services.verify_email_change(change_request)
316
317 return self.render_to_response({})
318
[end of website/members/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/members/views.py b/website/members/views.py
--- a/website/members/views.py
+++ b/website/members/views.py
@@ -170,7 +170,7 @@
template_name = "members/user/profile.html"
def setup(self, request, *args, **kwargs) -> None:
- if "pk" not in kwargs:
+ if "pk" not in kwargs and request.member:
kwargs["pk"] = request.member.pk
super().setup(request, *args, **kwargs)
| {"golden_diff": "diff --git a/website/members/views.py b/website/members/views.py\n--- a/website/members/views.py\n+++ b/website/members/views.py\n@@ -170,7 +170,7 @@\n template_name = \"members/user/profile.html\"\n \n def setup(self, request, *args, **kwargs) -> None:\n- if \"pk\" not in kwargs:\n+ if \"pk\" not in kwargs and request.member:\n kwargs[\"pk\"] = request.member.pk\n super().setup(request, *args, **kwargs)\n", "issue": "Profile page crashes when not logged in\nSentry Issue: [CONCREXIT-40](https://sentry.io/organizations/thalia/issues/1976140555/?referrer=github_integration)\n\n```\nAttributeError: 'NoneType' object has no attribute 'pk'\n File \"django/core/handlers/exception.py\", line 47, in inner\n response = get_response(request)\n File \"django/core/handlers/base.py\", line 179, in _get_response\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\n File \"django/views/generic/base.py\", line 64, in view\n self.setup(request, *args, **kwargs)\n File \"members/views.py\", line 173, in setup\n kwargs[\"pk\"] = request.member.pk\n File \"django/utils/functional.py\", line 241, in inner\n return func(self._wrapped, *args)\n```\n\nSteps to reproduce:\n1. Open https://thalia.nu/members/profile/ in Incognito mode\n", "before_files": [{"content": "\"\"\"Views provided by the members package\"\"\"\nimport json\nfrom datetime import date, datetime\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.db.models import Q, QuerySet\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse_lazy\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import ListView, DetailView, UpdateView, CreateView\nfrom django.views.generic.base import TemplateResponseMixin, View, TemplateView\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.authtoken.views import ObtainAuthToken\nfrom rest_framework.response import Response\n\nimport pizzas.services\nfrom members import services, emails\nfrom members.decorators import membership_required\nfrom members.models import EmailChange, Membership, Member, Profile\nfrom utils.snippets import datetime_to_lectureyear\nimport events.services as event_services\nimport activemembers.services as activemembers_services\n\nfrom . 
import models\nfrom .forms import ProfileForm\nfrom .services import member_achievements\nfrom .services import member_societies\n\n\nclass ObtainThaliaAuthToken(ObtainAuthToken):\n \"\"\"\n Custom override of the AuthToken view to force lowercase the username\n \"\"\"\n\n def post(self, request, *args, **kwargs) -> HttpResponse:\n serializer = self.serializer_class(\n data={\n \"username\": request.data.get(\"username\").lower()\n if \"username\" in request.data\n else None,\n \"password\": request.data.get(\"password\"),\n },\n context={\"request\": request},\n )\n\n if not serializer.is_valid():\n return Response({\"error\": \"Unauthorized\"}, status=401)\n\n user = serializer.validated_data[\"user\"]\n token, _ = Token.objects.get_or_create(user=user)\n return Response({\"token\": token.key})\n\n\n@method_decorator(login_required, \"dispatch\")\n@method_decorator(membership_required, \"dispatch\")\nclass MembersIndex(ListView):\n \"\"\"\n View that renders the members overview\n \"\"\"\n\n model = Member\n paginate_by = 28\n template_name = \"members/index.html\"\n context_object_name = \"members\"\n keywords = None\n query_filter = \"\"\n year_range = []\n\n def setup(self, request, *args, **kwargs) -> None:\n super().setup(request, *args, **kwargs)\n current_lectureyear = datetime_to_lectureyear(date.today())\n self.year_range = list(\n reversed(range(current_lectureyear - 5, current_lectureyear + 1))\n )\n self.keywords = request.GET.get(\"keywords\", \"\").split() or None\n self.query_filter = kwargs.get(\"filter\", None)\n\n def get_queryset(self) -> QuerySet:\n memberships_query = Q(until__gt=datetime.now()) | Q(until=None)\n members_query = ~Q(id=None)\n\n if self.query_filter and self.query_filter.isdigit():\n members_query &= Q(profile__starting_year=int(self.query_filter))\n memberships_query &= Q(type=Membership.MEMBER)\n elif self.query_filter == \"older\":\n members_query &= Q(profile__starting_year__lt=self.year_range[-1])\n memberships_query &= Q(type=Membership.MEMBER)\n elif self.query_filter == \"former\":\n # Filter out all current active memberships\n memberships_query &= Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)\n memberships = Membership.objects.filter(memberships_query)\n members_query &= ~Q(pk__in=memberships.values(\"user__pk\"))\n # Members_query contains users that are not currently (honorary)member\n elif self.query_filter == \"benefactors\":\n memberships_query &= Q(type=Membership.BENEFACTOR)\n elif self.query_filter == \"honorary\":\n memberships_query = Q(until__gt=datetime.now().date()) | Q(until=None)\n memberships_query &= Q(type=Membership.HONORARY)\n\n if self.keywords:\n for key in self.keywords:\n # Works because relevant options all have `nick` in their key\n members_query &= (\n (\n Q(profile__nickname__icontains=key)\n & Q(profile__display_name_preference__contains=\"nick\")\n )\n | Q(first_name__icontains=key)\n | Q(last_name__icontains=key)\n | Q(username__icontains=key)\n )\n\n if self.query_filter == \"former\":\n memberships_query = Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)\n memberships = Membership.objects.filter(memberships_query)\n all_memberships = Membership.objects.all()\n # Only keep members that were once members, or are legacy users\n # that do not have any memberships at all\n members_query &= Q(pk__in=memberships.values(\"user__pk\")) | ~Q(\n pk__in=all_memberships.values(\"user__pk\")\n )\n else:\n memberships = Membership.objects.filter(memberships_query)\n members_query &= 
Q(pk__in=memberships.values(\"user__pk\"))\n return Member.objects.filter(members_query).order_by(\"first_name\")\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n\n page = context[\"page_obj\"].number\n paginator = context[\"paginator\"]\n\n page_range = range(1, paginator.num_pages + 1)\n if paginator.num_pages > 7:\n if page > 3:\n page_range_end = paginator.num_pages\n if page + 3 <= paginator.num_pages:\n page_range_end = page + 3\n\n page_range = range(page - 2, page_range_end)\n while page_range.stop - page_range.start < 5:\n page_range = range(page_range.start - 1, page_range.stop)\n else:\n page_range = range(1, 6)\n\n context.update(\n {\n \"filter\": self.query_filter,\n \"page_range\": page_range,\n \"year_range\": self.year_range,\n \"keywords\": self.keywords,\n }\n )\n\n return context\n\n\n@method_decorator(login_required, \"dispatch\")\nclass ProfileDetailView(DetailView):\n \"\"\"\n View that renders a member's profile\n \"\"\"\n\n context_object_name = \"member\"\n model = Member\n template_name = \"members/user/profile.html\"\n\n def setup(self, request, *args, **kwargs) -> None:\n if \"pk\" not in kwargs:\n kwargs[\"pk\"] = request.member.pk\n super().setup(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n member = context[\"member\"]\n\n achievements = member_achievements(member)\n societies = member_societies(member)\n\n membership = member.current_membership\n membership_type = _(\"Unknown membership history\")\n if membership:\n membership_type = membership.get_type_display()\n elif member.has_been_honorary_member():\n membership_type = _(\"Former honorary member\")\n elif member.has_been_member():\n membership_type = _(\"Former member\")\n elif member.latest_membership:\n membership_type = _(\"Former benefactor\")\n\n context.update(\n {\n \"achievements\": achievements,\n \"societies\": societies,\n \"membership_type\": membership_type,\n }\n )\n\n return context\n\n\n@method_decorator(login_required, \"dispatch\")\nclass UserProfileUpdateView(SuccessMessageMixin, UpdateView):\n \"\"\"\n View that allows a user to update their profile\n \"\"\"\n\n template_name = \"members/user/edit_profile.html\"\n model = Profile\n form_class = ProfileForm\n success_url = reverse_lazy(\"members:edit-profile\")\n success_message = _(\"Your profile has been updated successfully.\")\n\n def get_object(self, queryset=None) -> Profile:\n return get_object_or_404(models.Profile, user=self.request.user)\n\n\n@method_decorator(login_required, \"dispatch\")\nclass StatisticsView(TemplateView):\n \"\"\"\n View that renders the statistics page\n \"\"\"\n\n template_name = \"members/statistics.html\"\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n\n total = models.Member.current_members.count()\n\n context.update(\n {\n \"total_members\": total,\n \"statistics\": json.dumps(\n {\n \"cohort_sizes\": services.gen_stats_year(),\n \"member_type_distribution\": services.gen_stats_member_type(),\n \"total_pizza_orders\": pizzas.services.gen_stats_pizza_orders(),\n \"current_pizza_orders\": pizzas.services.gen_stats_current_pizza_orders(),\n \"committee_sizes\": activemembers_services.generate_statistics(),\n \"event_categories\": event_services.generate_category_statistics(),\n }\n ),\n }\n )\n\n return context\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass EmailChangeFormView(CreateView):\n \"\"\"\n View 
that renders the email change form\n \"\"\"\n\n model = EmailChange\n fields = [\"email\", \"member\"]\n template_name = \"members/user/email_change.html\"\n\n def get_initial(self) -> dict:\n initial = super().get_initial()\n initial[\"email\"] = self.request.member.email\n return initial\n\n def post(self, request, *args, **kwargs) -> HttpResponse:\n request.POST = request.POST.dict()\n request.POST[\"member\"] = request.member.pk\n return super().post(request, *args, **kwargs)\n\n def form_valid(self, form) -> HttpResponse:\n change_request = form.save()\n emails.send_email_change_confirmation_messages(change_request)\n return TemplateResponse(\n request=self.request, template=\"members/user/email_change_requested.html\"\n )\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass EmailChangeConfirmView(View, TemplateResponseMixin):\n \"\"\"\n View that renders an HTML template and confirms the old email address\n \"\"\"\n\n template_name = \"members/user/email_change_confirmed.html\"\n\n def get(self, request, *args, **kwargs) -> HttpResponse:\n if not EmailChange.objects.filter(confirm_key=kwargs[\"key\"]).exists():\n raise Http404\n\n change_request = EmailChange.objects.get(confirm_key=kwargs[\"key\"])\n\n services.confirm_email_change(change_request)\n\n return self.render_to_response({})\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass EmailChangeVerifyView(View, TemplateResponseMixin):\n \"\"\"\n View that renders an HTML template and verifies the new email address\n \"\"\"\n\n template_name = \"members/user/email_change_verified.html\"\n\n def get(self, request, *args, **kwargs) -> HttpResponse:\n if not EmailChange.objects.filter(verify_key=kwargs[\"key\"]).exists():\n raise Http404\n\n change_request = EmailChange.objects.get(verify_key=kwargs[\"key\"])\n\n services.verify_email_change(change_request)\n\n return self.render_to_response({})\n", "path": "website/members/views.py"}]} | 3,961 | 122 |
gh_patches_debug_9958 | rasdani/github-patches | git_diff | ethereum__web3.py-3187 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
web3 import errors in Python 3.12
* Version: 6.13.0
* Python: 3.12, inside a venv
* OS: linux (but is probably applicable to other platforms as well)
* `pip freeze` output:
```
aiohttp==3.9.1
aiosignal==1.3.1
attrs==23.2.0
bitarray==2.9.2
certifi==2023.11.17
charset-normalizer==3.3.2
cytoolz==0.12.2
eth-abi==4.2.1
eth-account==0.10.0
eth-hash==0.5.2
eth-keyfile==0.7.0
eth-keys==0.4.0
eth-rlp==1.0.0
eth-typing==3.5.2
eth-utils==2.3.1
frozenlist==1.4.1
hexbytes==0.3.1
idna==3.6
jsonschema==4.20.0
jsonschema-specifications==2023.12.1
lru-dict==1.2.0
multidict==6.0.4
parsimonious==0.9.0
protobuf==4.25.1
pycryptodome==3.19.1
pyunormalize==15.1.0
referencing==0.32.1
regex==2023.12.25
requests==2.31.0
rlp==4.0.0
rpds-py==0.16.2
toolz==0.12.0
typing_extensions==4.9.0
urllib3==2.1.0
web3==6.13.0
websockets==12.0
yarl==1.9.4
```
### What was wrong?
In certain situations, web3 will raise ImportErrors on python 3.12 if the `setuptools` package is not installed. _In particular, this happens inside a fresh Python 3.12 venv._ The `setuptools` package automatically installs the `pkg_resources` package, which is used in web3 [here](https://github.com/ethereum/web3.py/blob/8f853f5841fd62187bce0c9f17be75627104ca43/web3/__init__.py#L25). This used to work fine in older Python versions. However, according to the [new changes in 3.12](https://docs.python.org/3/whatsnew/3.12.html):
> gh-95299: Do not pre-install setuptools in virtual environments created with venv. This means that distutils, setuptools, pkg_resources, and easy_install will no longer available by default; to access these run pip install setuptools in the activated virtual environment.
This means that the pkg_resources package is no longer accessible which causes this error.
Among other things, this scenario can occur inside tox tests for projects that have the `web3` package installed and are configured to test against 3.12. This causes such tests to immediately fail because of the ImportError. The workaround, installing setuptools after the venv created, causes unnecessarily long test times, adding about 3 minutes to the run time.
### How can it be fixed?
Given that web3's use of setuptools/pkg_resources is limited to just getting the version number, this should be trivial to fix. Why not open the file with built-in functions such as `open()` and parse it for the version number? I don't think that `web3` should continue to depend on setuptools.
</issue>
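A minimal sketch of the standard-library route suggested above (Python 3.8+ only; the fallback value is an assumption for illustration, not web3's actual behaviour):

```
from importlib.metadata import PackageNotFoundError, version

try:
    __version__ = version("web3")
except PackageNotFoundError:
    # e.g. running from a source checkout that was never pip-installed
    __version__ = "0.0.0"
```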
<code>
[start of web3/__init__.py]
1 from eth_account import Account # noqa: E402,
2 import pkg_resources
3
4 from web3.main import (
5 AsyncWeb3,
6 Web3,
7 )
8 from web3.providers.async_rpc import ( # noqa: E402
9 AsyncHTTPProvider,
10 )
11 from web3.providers.eth_tester import ( # noqa: E402
12 EthereumTesterProvider,
13 )
14 from web3.providers.ipc import ( # noqa: E402
15 IPCProvider,
16 )
17 from web3.providers.rpc import ( # noqa: E402
18 HTTPProvider,
19 )
20 from web3.providers.websocket import ( # noqa: E402
21 WebsocketProvider,
22 WebsocketProviderV2,
23 )
24
25 __version__ = pkg_resources.get_distribution("web3").version
26
27 __all__ = [
28 "__version__",
29 "AsyncWeb3",
30 "Web3",
31 "HTTPProvider",
32 "IPCProvider",
33 "WebsocketProvider",
34 "WebsocketProviderV2",
35 "EthereumTesterProvider",
36 "Account",
37 "AsyncHTTPProvider",
38 ]
39
[end of web3/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/web3/__init__.py b/web3/__init__.py
--- a/web3/__init__.py
+++ b/web3/__init__.py
@@ -1,5 +1,15 @@
-from eth_account import Account # noqa: E402,
-import pkg_resources
+from eth_account import Account # noqa: E402
+import sys
+
+if sys.version_info.major == 3 and sys.version_info.minor < 8:
+ import pkg_resources
+
+ __version__ = pkg_resources.get_distribution("web3").version
+else:
+ from importlib.metadata import version
+
+ __version__ = version("web3")
+
from web3.main import (
AsyncWeb3,
@@ -22,7 +32,6 @@
WebsocketProviderV2,
)
-__version__ = pkg_resources.get_distribution("web3").version
__all__ = [
"__version__",
| {"golden_diff": "diff --git a/web3/__init__.py b/web3/__init__.py\n--- a/web3/__init__.py\n+++ b/web3/__init__.py\n@@ -1,5 +1,15 @@\n-from eth_account import Account # noqa: E402,\n-import pkg_resources\n+from eth_account import Account # noqa: E402\n+import sys\n+\n+if sys.version_info.major == 3 and sys.version_info.minor < 8:\n+ import pkg_resources\n+\n+ __version__ = pkg_resources.get_distribution(\"web3\").version\n+else:\n+ from importlib.metadata import version\n+\n+ __version__ = version(\"web3\")\n+\n \n from web3.main import (\n AsyncWeb3,\n@@ -22,7 +32,6 @@\n WebsocketProviderV2,\n )\n \n-__version__ = pkg_resources.get_distribution(\"web3\").version\n \n __all__ = [\n \"__version__\",\n", "issue": "web3 import errors in Python 3.12\n* Version: 6.13.0\r\n* Python: 3.12, inside a venv\r\n* OS: linux (but is probably applicable to other platforms as well)\r\n* `pip freeze` output:\r\n\r\n```\r\naiohttp==3.9.1\r\naiosignal==1.3.1\r\nattrs==23.2.0\r\nbitarray==2.9.2\r\ncertifi==2023.11.17\r\ncharset-normalizer==3.3.2\r\ncytoolz==0.12.2\r\neth-abi==4.2.1\r\neth-account==0.10.0\r\neth-hash==0.5.2\r\neth-keyfile==0.7.0\r\neth-keys==0.4.0\r\neth-rlp==1.0.0\r\neth-typing==3.5.2\r\neth-utils==2.3.1\r\nfrozenlist==1.4.1\r\nhexbytes==0.3.1\r\nidna==3.6\r\njsonschema==4.20.0\r\njsonschema-specifications==2023.12.1\r\nlru-dict==1.2.0\r\nmultidict==6.0.4\r\nparsimonious==0.9.0\r\nprotobuf==4.25.1\r\npycryptodome==3.19.1\r\npyunormalize==15.1.0\r\nreferencing==0.32.1\r\nregex==2023.12.25\r\nrequests==2.31.0\r\nrlp==4.0.0\r\nrpds-py==0.16.2\r\ntoolz==0.12.0\r\ntyping_extensions==4.9.0\r\nurllib3==2.1.0\r\nweb3==6.13.0\r\nwebsockets==12.0\r\nyarl==1.9.4\r\n```\r\n\r\n### What was wrong?\r\n\r\nIn certain situations, web3 will raise ImportErrors on python 3.12 if the `setuptools` package is not installed. _In particular, this happens inside a fresh Python 3.12 venv._ The `setuptools` package automatically installs the `pkg_resources` package, which is used in web3 [here](https://github.com/ethereum/web3.py/blob/8f853f5841fd62187bce0c9f17be75627104ca43/web3/__init__.py#L25). This used to work fine in older Python versions. However, according to the [new changes in 3.12](https://docs.python.org/3/whatsnew/3.12.html):\r\n\r\n> gh-95299: Do not pre-install setuptools in virtual environments created with venv. This means that distutils, setuptools, pkg_resources, and easy_install will no longer available by default; to access these run pip install setuptools in the activated virtual environment.\r\n\r\nThis means that the pkg_resources package is no longer accessible which causes this error.\r\n\r\nAmong other things, this scenario can occur inside tox tests for projects that have the `web3` package installed and are configured to test against 3.12. This causes such tests to immediately fail because of the ImportError. The workaround, installing setuptools after the venv created, causes unnecessarily long test times, adding about 3 minutes to the run time.\r\n\r\n### How can it be fixed?\r\n\r\nGiven that web3's use of setuptools/pkg_resources is limited to just getting the version number, this should be trivial to fix. Why not open the file with built-in functions such as `open()` and parse it for the version number? 
I don't think that `web3` should continue to depend on setuptools.\n", "before_files": [{"content": "from eth_account import Account # noqa: E402,\nimport pkg_resources\n\nfrom web3.main import (\n AsyncWeb3,\n Web3,\n)\nfrom web3.providers.async_rpc import ( # noqa: E402\n AsyncHTTPProvider,\n)\nfrom web3.providers.eth_tester import ( # noqa: E402\n EthereumTesterProvider,\n)\nfrom web3.providers.ipc import ( # noqa: E402\n IPCProvider,\n)\nfrom web3.providers.rpc import ( # noqa: E402\n HTTPProvider,\n)\nfrom web3.providers.websocket import ( # noqa: E402\n WebsocketProvider,\n WebsocketProviderV2,\n)\n\n__version__ = pkg_resources.get_distribution(\"web3\").version\n\n__all__ = [\n \"__version__\",\n \"AsyncWeb3\",\n \"Web3\",\n \"HTTPProvider\",\n \"IPCProvider\",\n \"WebsocketProvider\",\n \"WebsocketProviderV2\",\n \"EthereumTesterProvider\",\n \"Account\",\n \"AsyncHTTPProvider\",\n]\n", "path": "web3/__init__.py"}]} | 1,674 | 210 |
gh_patches_debug_27086 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-8283 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Enable intersphinx support for hoverxref in our documentation
While writing #8283, I realized that we still do not enable intersphinx support in our sphinx-hoverxref documentation. More info here:
https://blog.readthedocs.com/hoverxref-intersphinx/
I think it would be nice to do so.
</issue>
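A rough sketch of what the `conf.py` change could look like (the `hoverxref_intersphinx` option comes from sphinx-hoverxref's intersphinx support described in the linked post; which mappings to enable is a choice, not a given):

```
# docs/conf.py (excerpt)
intersphinx_mapping = {
    "python": ("https://docs.python.org/3.6/", None),
    "sphinx": ("https://www.sphinx-doc.org/en/master/", None),
    "pip": ("https://pip.pypa.io/en/stable/", None),
}

# Tell sphinx-hoverxref which intersphinx targets should also get hover tooltips.
hoverxref_intersphinx = [
    "sphinx",
    "pip",
]
```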
<code>
[start of docs/conf.py]
1 import os
2 import sys
3 from configparser import RawConfigParser
4
5 import sphinx_rtd_theme
6
7 sys.path.insert(0, os.path.abspath('..'))
8 sys.path.append(os.path.dirname(__file__))
9 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "readthedocs.settings.dev")
10
11 from django.utils import timezone
12
13 import django
14 django.setup()
15
16
17 def get_version():
18 """Return package version from setup.cfg."""
19 config = RawConfigParser()
20 config.read(os.path.join('..', 'setup.cfg'))
21 return config.get('metadata', 'version')
22
23
24 sys.path.append(os.path.abspath('_ext'))
25 extensions = [
26 'sphinx.ext.autosectionlabel',
27 'sphinx.ext.autodoc',
28 'sphinx.ext.intersphinx',
29 'sphinxcontrib.httpdomain',
30 'djangodocs',
31 'doc_extensions',
32 'sphinx_tabs.tabs',
33 'sphinx-prompt',
34 'notfound.extension',
35 'hoverxref.extension',
36 'sphinx_search.extension',
37 'sphinxemoji.sphinxemoji',
38 ]
39
40 templates_path = ['_templates']
41
42 master_doc = 'index'
43 project = 'Read the Docs'
44 copyright = '2010-{}, Read the Docs, Inc & contributors'.format(
45 timezone.now().year
46 )
47 version = get_version()
48 release = version
49 exclude_patterns = ['_build']
50 default_role = 'obj'
51 intersphinx_mapping = {
52 'python': ('https://docs.python.org/3.6/', None),
53 'django': ('https://docs.djangoproject.com/en/1.11/', 'https://docs.djangoproject.com/en/1.11/_objects/'),
54 'sphinx': ('https://www.sphinx-doc.org/en/master/', None),
55 'pip': ('https://pip.pypa.io/en/stable/', None),
56 }
57 htmlhelp_basename = 'ReadTheDocsdoc'
58 latex_documents = [
59 ('index', 'ReadTheDocs.tex', 'Read the Docs Documentation',
60 'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),
61 ]
62 man_pages = [
63 ('index', 'read-the-docs', 'Read the Docs Documentation',
64 ['Eric Holscher, Charlie Leifer, Bobby Grace'], 1)
65 ]
66
67 exclude_patterns = [
68 # 'api' # needed for ``make gettext`` to not die.
69 ]
70
71 language = 'en'
72
73 locale_dirs = [
74 'locale/',
75 ]
76 gettext_compact = False
77
78 html_theme = 'sphinx_rtd_theme'
79 html_static_path = ['_static']
80 html_js_files = ['js/expand_tabs.js']
81 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
82 html_logo = 'img/logo.svg'
83 html_theme_options = {
84 'logo_only': True,
85 'display_version': False,
86 }
87
88 hoverxref_auto_ref = True
89 hoverxref_domains = ['py']
90 hoverxref_roles = [
91 'option',
92 'doc',
93 ]
94 hoverxref_role_types = {
95 'mod': 'modal', # for Python Sphinx Domain
96 'doc': 'modal', # for whole docs
97 'class': 'tooltip', # for Python Sphinx Domain
98 'ref': 'tooltip', # for hoverxref_auto_ref config
99 'confval': 'tooltip', # for custom object
100 }
101
102 rst_epilog = """
103 .. |org_brand| replace:: Read the Docs Community
104 .. |com_brand| replace:: Read the Docs for Business
105 """
106
107 # Activate autosectionlabel plugin
108 autosectionlabel_prefix_document = True
109
110 numfig = True
111
112 # sphinx-notfound-page
113 # https://github.com/readthedocs/sphinx-notfound-page
114 notfound_context = {
115 'title': 'Page Not Found',
116 'body': '''
117 <h1>Page Not Found</h1>
118
119 <p>Sorry, we couldn't find that page.</p>
120
121 <p>Try using the search box or go to the homepage.</p>
122 ''',
123 }
124 linkcheck_ignore = [
125 r'http://127\.0\.0\.1',
126 r'http://localhost',
127 r'http://community\.dev\.readthedocs\.io',
128 r'https://yourproject\.readthedocs\.io',
129 r'https?://docs\.example\.com',
130 r'https://foo\.readthedocs\.io/projects',
131 r'https://github\.com.+?#L\d+',
132 r'https://github\.com/readthedocs/readthedocs\.org/issues',
133 r'https://github\.com/readthedocs/readthedocs\.org/pull',
134 r'https://docs\.readthedocs\.io/\?rtd_search',
135 r'https://readthedocs\.org/search',
136 # This page is under login
137 r'https://readthedocs\.org/accounts/gold',
138 ]
139
140
141 def setup(app):
142 app.add_css_file('css/sphinx_prompt_css.css')
143
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -53,7 +53,23 @@
'django': ('https://docs.djangoproject.com/en/1.11/', 'https://docs.djangoproject.com/en/1.11/_objects/'),
'sphinx': ('https://www.sphinx-doc.org/en/master/', None),
'pip': ('https://pip.pypa.io/en/stable/', None),
+ 'nbsphinx': ('https://nbsphinx.readthedocs.io/en/0.8.6/', None),
+ 'myst-nb': ('https://myst-nb.readthedocs.io/en/v0.12.3/', None),
+ 'ipywidgets': ('https://ipywidgets.readthedocs.io/en/7.6.3/', None),
+ 'jupytext': ('https://jupytext.readthedocs.io/en/stable/', None),
+ 'ipyleaflet': ('https://ipyleaflet.readthedocs.io/en/stable/', None),
+ 'poliastro': ('https://docs.poliastro.space/en/v0.15.2/', None),
+ 'qiskit': ('https://qiskit.org/documentation/', None),
+ 'myst-parser': ('https://myst-parser.readthedocs.io/en/v0.15.1/', None),
}
+hoverxref_intersphinx = [
+ "sphinx",
+ "pip",
+ "nbsphinx",
+ "myst-nb",
+ "ipywidgets",
+ "jupytext",
+]
htmlhelp_basename = 'ReadTheDocsdoc'
latex_documents = [
('index', 'ReadTheDocs.tex', 'Read the Docs Documentation',
@@ -107,8 +123,6 @@
# Activate autosectionlabel plugin
autosectionlabel_prefix_document = True
-numfig = True
-
# sphinx-notfound-page
# https://github.com/readthedocs/sphinx-notfound-page
notfound_context = {
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -53,7 +53,23 @@\n 'django': ('https://docs.djangoproject.com/en/1.11/', 'https://docs.djangoproject.com/en/1.11/_objects/'),\n 'sphinx': ('https://www.sphinx-doc.org/en/master/', None),\n 'pip': ('https://pip.pypa.io/en/stable/', None),\n+ 'nbsphinx': ('https://nbsphinx.readthedocs.io/en/0.8.6/', None),\n+ 'myst-nb': ('https://myst-nb.readthedocs.io/en/v0.12.3/', None),\n+ 'ipywidgets': ('https://ipywidgets.readthedocs.io/en/7.6.3/', None),\n+ 'jupytext': ('https://jupytext.readthedocs.io/en/stable/', None),\n+ 'ipyleaflet': ('https://ipyleaflet.readthedocs.io/en/stable/', None),\n+ 'poliastro': ('https://docs.poliastro.space/en/v0.15.2/', None),\n+ 'qiskit': ('https://qiskit.org/documentation/', None),\n+ 'myst-parser': ('https://myst-parser.readthedocs.io/en/v0.15.1/', None),\n }\n+hoverxref_intersphinx = [\n+ \"sphinx\",\n+ \"pip\",\n+ \"nbsphinx\",\n+ \"myst-nb\",\n+ \"ipywidgets\",\n+ \"jupytext\",\n+]\n htmlhelp_basename = 'ReadTheDocsdoc'\n latex_documents = [\n ('index', 'ReadTheDocs.tex', 'Read the Docs Documentation',\n@@ -107,8 +123,6 @@\n # Activate autosectionlabel plugin\n autosectionlabel_prefix_document = True\n \n-numfig = True\n-\n # sphinx-notfound-page\n # https://github.com/readthedocs/sphinx-notfound-page\n notfound_context = {\n", "issue": "Enable intersphinx support for hoverxref in our documentation\nWhile writing #8283, I realized that we still do not enable intersphinx support in our sphinx-hoverxref documentation. More info here:\r\n\r\nhttps://blog.readthedocs.com/hoverxref-intersphinx/\r\n\r\nI think it would be nice to do so.\n", "before_files": [{"content": "import os\nimport sys\nfrom configparser import RawConfigParser\n\nimport sphinx_rtd_theme\n\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.dirname(__file__))\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"readthedocs.settings.dev\")\n\nfrom django.utils import timezone\n\nimport django\ndjango.setup()\n\n\ndef get_version():\n \"\"\"Return package version from setup.cfg.\"\"\"\n config = RawConfigParser()\n config.read(os.path.join('..', 'setup.cfg'))\n return config.get('metadata', 'version')\n\n\nsys.path.append(os.path.abspath('_ext'))\nextensions = [\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinxcontrib.httpdomain',\n 'djangodocs',\n 'doc_extensions',\n 'sphinx_tabs.tabs',\n 'sphinx-prompt',\n 'notfound.extension',\n 'hoverxref.extension',\n 'sphinx_search.extension',\n 'sphinxemoji.sphinxemoji',\n]\n\ntemplates_path = ['_templates']\n\nmaster_doc = 'index'\nproject = 'Read the Docs'\ncopyright = '2010-{}, Read the Docs, Inc & contributors'.format(\n timezone.now().year\n)\nversion = get_version()\nrelease = version\nexclude_patterns = ['_build']\ndefault_role = 'obj'\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3.6/', None),\n 'django': ('https://docs.djangoproject.com/en/1.11/', 'https://docs.djangoproject.com/en/1.11/_objects/'),\n 'sphinx': ('https://www.sphinx-doc.org/en/master/', None),\n 'pip': ('https://pip.pypa.io/en/stable/', None),\n}\nhtmlhelp_basename = 'ReadTheDocsdoc'\nlatex_documents = [\n ('index', 'ReadTheDocs.tex', 'Read the Docs Documentation',\n 'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),\n]\nman_pages = [\n ('index', 'read-the-docs', 'Read the Docs Documentation',\n ['Eric Holscher, Charlie Leifer, Bobby Grace'], 1)\n]\n\nexclude_patterns = [\n # 'api' # needed for ``make 
gettext`` to not die.\n]\n\nlanguage = 'en'\n\nlocale_dirs = [\n 'locale/',\n]\ngettext_compact = False\n\nhtml_theme = 'sphinx_rtd_theme'\nhtml_static_path = ['_static']\nhtml_js_files = ['js/expand_tabs.js']\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nhtml_logo = 'img/logo.svg'\nhtml_theme_options = {\n 'logo_only': True,\n 'display_version': False,\n}\n\nhoverxref_auto_ref = True\nhoverxref_domains = ['py']\nhoverxref_roles = [\n 'option',\n 'doc',\n]\nhoverxref_role_types = {\n 'mod': 'modal', # for Python Sphinx Domain\n 'doc': 'modal', # for whole docs\n 'class': 'tooltip', # for Python Sphinx Domain\n 'ref': 'tooltip', # for hoverxref_auto_ref config\n 'confval': 'tooltip', # for custom object\n}\n\nrst_epilog = \"\"\"\n.. |org_brand| replace:: Read the Docs Community\n.. |com_brand| replace:: Read the Docs for Business\n\"\"\"\n\n# Activate autosectionlabel plugin\nautosectionlabel_prefix_document = True\n\nnumfig = True\n\n# sphinx-notfound-page\n# https://github.com/readthedocs/sphinx-notfound-page\nnotfound_context = {\n 'title': 'Page Not Found',\n 'body': '''\n<h1>Page Not Found</h1>\n\n<p>Sorry, we couldn't find that page.</p>\n\n<p>Try using the search box or go to the homepage.</p>\n''',\n}\nlinkcheck_ignore = [\n r'http://127\\.0\\.0\\.1',\n r'http://localhost',\n r'http://community\\.dev\\.readthedocs\\.io',\n r'https://yourproject\\.readthedocs\\.io',\n r'https?://docs\\.example\\.com',\n r'https://foo\\.readthedocs\\.io/projects',\n r'https://github\\.com.+?#L\\d+',\n r'https://github\\.com/readthedocs/readthedocs\\.org/issues',\n r'https://github\\.com/readthedocs/readthedocs\\.org/pull',\n r'https://docs\\.readthedocs\\.io/\\?rtd_search',\n r'https://readthedocs\\.org/search',\n # This page is under login\n r'https://readthedocs\\.org/accounts/gold',\n]\n\n\ndef setup(app):\n app.add_css_file('css/sphinx_prompt_css.css')\n", "path": "docs/conf.py"}]} | 1,959 | 452 |
gh_patches_debug_428 | rasdani/github-patches | git_diff | python__python-docs-es-1762 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Translate 'library/os.po'
This needs to reach 100% translated.
The rendered version of this file will be available at https://docs.python.org/es/3.10/library/os.html once translated.
Meanwhile, the English version is shown.
Current stats for `library/os.po`:
* Fuzzy: 27
* Percent translated: 94.8%
* Entries: 804 / 848
* Untranslated: 44
Please, comment here if you want this file to be assigned to you and a member will assign it to you as soon as possible, so you can start working on it.
Remember to follow the steps in our [Contributing Guide](https://python-docs-es.readthedocs.io/page/CONTRIBUTING.html).
</issue>
<code>
[start of scripts/translate.py]
1 import os
2 import re
3 import sys
4 from typing import Dict, Tuple
5
6 import polib
7
8 VERBOSE = False
9 DEBUG = False
10 SKIP_TRANSLATED_ENTRIES = True
11
12 try:
13 from deep_translator import GoogleTranslator
14 except ImportError:
15 print("Error: This util script needs `deep_translator` to be installed")
16 sys.exit(1)
17
18 _patterns = [
19 ":c:func:`[^`]+`",
20 ":c:type:`[^`]+`",
21 ":c:macro:`[^`]+`",
22 ":c:member:`[^`]+`",
23 ":c:data:`[^`]+`",
24 ":py:data:`[^`]+`",
25 ":py:mod:`[^`]+`",
26 ":func:`[^`]+`",
27 ":mod:`[^`]+`",
28 ":ref:`[^`]+`",
29 ":class:`[^`]+`",
30 ":pep:`[^`]+`",
31 ":data:`[^`]+`",
32 ":exc:`[^`]+`",
33 ":term:`[^`]+`",
34 ":meth:`[^`]+`",
35 ":envvar:`[^`]+`",
36 ":file:`[^`]+`",
37 ":attr:`[^`]+`",
38 ":const:`[^`]+`",
39 ":issue:`[^`]+`",
40 ":opcode:`[^`]+`",
41 ":option:`[^`]+`",
42 ":program:`[^`]+`",
43 ":keyword:`[^`]+`",
44 ":RFC:`[^`]+`",
45 ":rfc:`[^`]+`",
46 ":doc:`[^`]+`",
47 "``[^`]+``",
48 "`[^`]+`__",
49 "`[^`]+`_",
50 "\*\*[^\*]+\*\*", # bold text between **
51 "\*[^\*]+\*", # italic text between *
52 ]
53
54 _exps = [re.compile(e) for e in _patterns]
55
56 def protect_sphinx_directives(s: str) -> Tuple[dict, str]:
57 """
58 Parameters:
59 string containing the text to translate
60
61 Returns:
62 dictionary containing all the placeholder text as keys
63 and the correct value.
64 """
65
66 i = 0
67 d: Dict[str, str] = {}
68 for exp in _exps:
69 matches = exp.findall(s)
70 if DEBUG:
71 print(exp, matches)
72 for match in matches:
73 ph = f"XASDF{str(i).zfill(2)}"
74 s = s.replace(match, ph)
75 if ph in d and VERBOSE:
76 print(f"Error: {ph} is already in the dictionary")
77 print("new", match)
78 print("old", d[ph])
79 d[ph] = match
80 i += 1
81 return d, s
82
83
84 def undo_sphinx_directives_protection(placeholders: dict, translated_text: str) -> str:
85 for ph, value in placeholders.items():
86 translated_text = translated_text.replace(ph, value)
87 if DEBUG:
88 print(ph, value)
89 print(translated_text)
90 return translated_text
91
92
93 if __name__ == "__main__":
94 filename = sys.argv[1]
95 if not os.path.isfile(filename):
96 print(f"File not found: '{filename}'")
97 sys.exit(-1)
98
99 po = polib.pofile(filename)
100 translator = GoogleTranslator(source="en", target="es")
101
102 for entry in po:
103 # If the entry has already a translation, skip.
104 if SKIP_TRANSLATED_ENTRIES and entry.msgstr:
105 continue
106
107 print("\nEN|", entry.msgid)
108 placeholders, temp_text = protect_sphinx_directives(entry.msgid)
109 if VERBOSE:
110 print(temp_text)
111 print(placeholders)
112
113 # Translate the temporary text without sphinx statements
114 translated_text = translator.translate(temp_text)
115
116 # Recover sphinx statements
117 real_text = undo_sphinx_directives_protection(placeholders, translated_text)
118 print("ES|", real_text)
119
120 # Replace the po file translated entry
121 entry.msgstr = real_text
122
123 # Save the file after all the entries are translated
124 po.save()
125
[end of scripts/translate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/translate.py b/scripts/translate.py
--- a/scripts/translate.py
+++ b/scripts/translate.py
@@ -44,6 +44,8 @@
":RFC:`[^`]+`",
":rfc:`[^`]+`",
":doc:`[^`]+`",
+ ":manpage:`[^`]+`",
+ ":sup:`[^`]+`",
"``[^`]+``",
"`[^`]+`__",
"`[^`]+`_",
| {"golden_diff": "diff --git a/scripts/translate.py b/scripts/translate.py\n--- a/scripts/translate.py\n+++ b/scripts/translate.py\n@@ -44,6 +44,8 @@\n \":RFC:`[^`]+`\",\n \":rfc:`[^`]+`\",\n \":doc:`[^`]+`\",\n+ \":manpage:`[^`]+`\",\n+ \":sup:`[^`]+`\",\n \"``[^`]+``\",\n \"`[^`]+`__\",\n \"`[^`]+`_\",\n", "issue": "Translate 'library/os.po'\nThis needs to reach 100% translated.\n\nThe rendered version of this file will be available at https://docs.python.org/es/3.10/library/os.html once translated.\nMeanwhile, the English version is shown.\n\nCurrent stats for `library/os.po`:\n\n* Fuzzy: 27\n* Percent translated: 94.8%\n* Entries: 804 / 848\n* Untranslated: 44\n\nPlease, comment here if you want this file to be assigned to you and an member will assign it to you as soon as possible, so you can start working on it.\n\nRemember to follow the steps in our [Contributing Guide](https://python-docs-es.readthedocs.io/page/CONTRIBUTING.html).\n", "before_files": [{"content": "import os\nimport re\nimport sys\nfrom typing import Dict, Tuple\n\nimport polib\n\nVERBOSE = False\nDEBUG = False\nSKIP_TRANSLATED_ENTRIES = True\n\ntry:\n from deep_translator import GoogleTranslator\nexcept ImportError:\n print(\"Error: This util script needs `deep_translator` to be installed\")\n sys.exit(1)\n\n_patterns = [\n \":c:func:`[^`]+`\",\n \":c:type:`[^`]+`\",\n \":c:macro:`[^`]+`\",\n \":c:member:`[^`]+`\",\n \":c:data:`[^`]+`\",\n \":py:data:`[^`]+`\",\n \":py:mod:`[^`]+`\",\n \":func:`[^`]+`\",\n \":mod:`[^`]+`\",\n \":ref:`[^`]+`\",\n \":class:`[^`]+`\",\n \":pep:`[^`]+`\",\n \":data:`[^`]+`\",\n \":exc:`[^`]+`\",\n \":term:`[^`]+`\",\n \":meth:`[^`]+`\",\n \":envvar:`[^`]+`\",\n \":file:`[^`]+`\",\n \":attr:`[^`]+`\",\n \":const:`[^`]+`\",\n \":issue:`[^`]+`\",\n \":opcode:`[^`]+`\",\n \":option:`[^`]+`\",\n \":program:`[^`]+`\",\n \":keyword:`[^`]+`\",\n \":RFC:`[^`]+`\",\n \":rfc:`[^`]+`\",\n \":doc:`[^`]+`\",\n \"``[^`]+``\",\n \"`[^`]+`__\",\n \"`[^`]+`_\",\n \"\\*\\*[^\\*]+\\*\\*\", # bold text between **\n \"\\*[^\\*]+\\*\", # italic text between *\n]\n\n_exps = [re.compile(e) for e in _patterns]\n\ndef protect_sphinx_directives(s: str) -> Tuple[dict, str]:\n \"\"\"\n Parameters:\n string containing the text to translate\n\n Returns:\n dictionary containing all the placeholder text as keys\n and the correct value.\n \"\"\"\n\n i = 0\n d: Dict[str, str] = {}\n for exp in _exps:\n matches = exp.findall(s)\n if DEBUG:\n print(exp, matches)\n for match in matches:\n ph = f\"XASDF{str(i).zfill(2)}\"\n s = s.replace(match, ph)\n if ph in d and VERBOSE:\n print(f\"Error: {ph} is already in the dictionary\")\n print(\"new\", match)\n print(\"old\", d[ph])\n d[ph] = match\n i += 1\n return d, s\n\n\ndef undo_sphinx_directives_protection(placeholders: dict, translated_text: str) -> str:\n for ph, value in placeholders.items():\n translated_text = translated_text.replace(ph, value)\n if DEBUG:\n print(ph, value)\n print(translated_text)\n return translated_text\n\n\nif __name__ == \"__main__\":\n filename = sys.argv[1]\n if not os.path.isfile(filename):\n print(f\"File not found: '{filename}'\")\n sys.exit(-1)\n\n po = polib.pofile(filename)\n translator = GoogleTranslator(source=\"en\", target=\"es\")\n\n for entry in po:\n # If the entry has already a translation, skip.\n if SKIP_TRANSLATED_ENTRIES and entry.msgstr:\n continue\n\n print(\"\\nEN|\", entry.msgid)\n placeholders, temp_text = protect_sphinx_directives(entry.msgid)\n if VERBOSE:\n print(temp_text)\n print(placeholders)\n\n # Translate the 
temporary text without sphinx statements\n translated_text = translator.translate(temp_text)\n\n # Recover sphinx statements\n real_text = undo_sphinx_directives_protection(placeholders, translated_text)\n print(\"ES|\", real_text)\n\n # Replace the po file translated entry\n entry.msgstr = real_text\n\n # Save the file after all the entries are translated\n po.save()\n", "path": "scripts/translate.py"}]} | 1,862 | 113 |
gh_patches_debug_7956 | rasdani/github-patches | git_diff | open-mmlab__mmpose-783 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
resource limit bug
**Describe the feature**
**Motivation**
It is inconvenient when we run mmpose on a slurm cluster, which may have a larger open-file soft limit than 4096. The resource limit adjustment here [https://github.com/open-mmlab/mmpose/blob/master/mmpose/datasets/builder.py#L13-L19](url) will reduce the base open-file soft limit to 4096. Sometimes this results in 'OSError: [Errno 24] Too many open files' during the training process.
**Additional context**
The code could be modified as shown below:
```python
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
base_soft_limit = rlimit[0]
hard_limit = rlimit[1]
    soft_limit = min(max(4096, base_soft_limit), hard_limit)
    resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
```
</issue>
<code>
[start of mmpose/datasets/builder.py]
1 import platform
2 import random
3 from functools import partial
4
5 import numpy as np
6 from mmcv.parallel import collate
7 from mmcv.runner import get_dist_info
8 from mmcv.utils import Registry, build_from_cfg
9 from mmcv.utils.parrots_wrapper import _get_dataloader
10
11 from .samplers import DistributedSampler
12
13 if platform.system() != 'Windows':
14 # https://github.com/pytorch/pytorch/issues/973
15 import resource
16 rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
17 hard_limit = rlimit[1]
18 soft_limit = min(4096, hard_limit)
19 resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
20
21 DATASETS = Registry('dataset')
22 PIPELINES = Registry('pipeline')
23
24
25 def build_dataset(cfg, default_args=None):
26 """Build a dataset from config dict.
27
28 Args:
29 cfg (dict): Config dict. It should at least contain the key "type".
30 default_args (dict, optional): Default initialization arguments.
31 Default: None.
32
33 Returns:
34 Dataset: The constructed dataset.
35 """
36 from .dataset_wrappers import RepeatDataset
37
38 if cfg['type'] == 'RepeatDataset':
39 dataset = RepeatDataset(
40 build_dataset(cfg['dataset'], default_args), cfg['times'])
41 else:
42 dataset = build_from_cfg(cfg, DATASETS, default_args)
43 return dataset
44
45
46 def build_dataloader(dataset,
47 samples_per_gpu,
48 workers_per_gpu,
49 num_gpus=1,
50 dist=True,
51 shuffle=True,
52 seed=None,
53 drop_last=True,
54 pin_memory=True,
55 **kwargs):
56 """Build PyTorch DataLoader.
57
58 In distributed training, each GPU/process has a dataloader.
59 In non-distributed training, there is only one dataloader for all GPUs.
60
61 Args:
62 dataset (Dataset): A PyTorch dataset.
63 samples_per_gpu (int): Number of training samples on each GPU, i.e.,
64 batch size of each GPU.
65 workers_per_gpu (int): How many subprocesses to use for data loading
66 for each GPU.
67 num_gpus (int): Number of GPUs. Only used in non-distributed training.
68 dist (bool): Distributed training/test or not. Default: True.
69 shuffle (bool): Whether to shuffle the data at every epoch.
70 Default: True.
71 drop_last (bool): Whether to drop the last incomplete batch in epoch.
72 Default: True
73 pin_memory (bool): Whether to use pin_memory in DataLoader.
74 Default: True
75 kwargs: any keyword argument to be used to initialize DataLoader
76
77 Returns:
78 DataLoader: A PyTorch dataloader.
79 """
80 rank, world_size = get_dist_info()
81 if dist:
82 sampler = DistributedSampler(
83 dataset, world_size, rank, shuffle=shuffle, seed=seed)
84 shuffle = False
85 batch_size = samples_per_gpu
86 num_workers = workers_per_gpu
87 else:
88 sampler = None
89 batch_size = num_gpus * samples_per_gpu
90 num_workers = num_gpus * workers_per_gpu
91
92 init_fn = partial(
93 worker_init_fn, num_workers=num_workers, rank=rank,
94 seed=seed) if seed is not None else None
95
96 _, DataLoader = _get_dataloader()
97 data_loader = DataLoader(
98 dataset,
99 batch_size=batch_size,
100 sampler=sampler,
101 num_workers=num_workers,
102 collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
103 pin_memory=pin_memory,
104 shuffle=shuffle,
105 worker_init_fn=init_fn,
106 drop_last=drop_last,
107 **kwargs)
108
109 return data_loader
110
111
112 def worker_init_fn(worker_id, num_workers, rank, seed):
113 """Init the random seed for various workers."""
114 # The seed of each worker equals to
115 # num_worker * rank + worker_id + user_seed
116 worker_seed = num_workers * rank + worker_id + seed
117 np.random.seed(worker_seed)
118 random.seed(worker_seed)
119
[end of mmpose/datasets/builder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mmpose/datasets/builder.py b/mmpose/datasets/builder.py
--- a/mmpose/datasets/builder.py
+++ b/mmpose/datasets/builder.py
@@ -14,8 +14,9 @@
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
+ base_soft_limit = rlimit[0]
hard_limit = rlimit[1]
- soft_limit = min(4096, hard_limit)
+ soft_limit = min(max(4096, base_soft_limit), hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
DATASETS = Registry('dataset')
| {"golden_diff": "diff --git a/mmpose/datasets/builder.py b/mmpose/datasets/builder.py\n--- a/mmpose/datasets/builder.py\n+++ b/mmpose/datasets/builder.py\n@@ -14,8 +14,9 @@\n # https://github.com/pytorch/pytorch/issues/973\n import resource\n rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n+ base_soft_limit = rlimit[0]\n hard_limit = rlimit[1]\n- soft_limit = min(4096, hard_limit)\n+ soft_limit = min(max(4096, base_soft_limit), hard_limit)\n resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))\n \n DATASETS = Registry('dataset')\n", "issue": "resource limit bug\n**Describe the feature**\r\n\r\n**Motivation**\r\n\r\nIt is inconvenient when we run mmpose on slurm clustre which may has larger file-open's soft limit than 4096. The resource limit adjust here [https://github.com/open-mmlab/mmpose/blob/master/mmpose/datasets/builder.py#L13-L19](url) will reduce the base file-open's soft limit to 4096. Sometimes it will result in 'OSError: [Error 24] Too many open files' during training process.\r\n\r\n\r\n**Additional context**\r\nthe code maybe can be modified like below:\r\n```python\r\n\r\nif platform.system() != 'Windows':\r\n # https://github.com/pytorch/pytorch/issues/973\r\n import resource\r\n rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\r\n base_soft_limit = rlimit[0]\r\n hard_limit = rlimit[1]\r\n soft_limit = min(max(4096,base_soft_limit), hard_limit)\r\n resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import platform\nimport random\nfrom functools import partial\n\nimport numpy as np\nfrom mmcv.parallel import collate\nfrom mmcv.runner import get_dist_info\nfrom mmcv.utils import Registry, build_from_cfg\nfrom mmcv.utils.parrots_wrapper import _get_dataloader\n\nfrom .samplers import DistributedSampler\n\nif platform.system() != 'Windows':\n # https://github.com/pytorch/pytorch/issues/973\n import resource\n rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n hard_limit = rlimit[1]\n soft_limit = min(4096, hard_limit)\n resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))\n\nDATASETS = Registry('dataset')\nPIPELINES = Registry('pipeline')\n\n\ndef build_dataset(cfg, default_args=None):\n \"\"\"Build a dataset from config dict.\n\n Args:\n cfg (dict): Config dict. It should at least contain the key \"type\".\n default_args (dict, optional): Default initialization arguments.\n Default: None.\n\n Returns:\n Dataset: The constructed dataset.\n \"\"\"\n from .dataset_wrappers import RepeatDataset\n\n if cfg['type'] == 'RepeatDataset':\n dataset = RepeatDataset(\n build_dataset(cfg['dataset'], default_args), cfg['times'])\n else:\n dataset = build_from_cfg(cfg, DATASETS, default_args)\n return dataset\n\n\ndef build_dataloader(dataset,\n samples_per_gpu,\n workers_per_gpu,\n num_gpus=1,\n dist=True,\n shuffle=True,\n seed=None,\n drop_last=True,\n pin_memory=True,\n **kwargs):\n \"\"\"Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader for all GPUs.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n samples_per_gpu (int): Number of training samples on each GPU, i.e.,\n batch size of each GPU.\n workers_per_gpu (int): How many subprocesses to use for data loading\n for each GPU.\n num_gpus (int): Number of GPUs. Only used in non-distributed training.\n dist (bool): Distributed training/test or not. 
Default: True.\n shuffle (bool): Whether to shuffle the data at every epoch.\n Default: True.\n drop_last (bool): Whether to drop the last incomplete batch in epoch.\n Default: True\n pin_memory (bool): Whether to use pin_memory in DataLoader.\n Default: True\n kwargs: any keyword argument to be used to initialize DataLoader\n\n Returns:\n DataLoader: A PyTorch dataloader.\n \"\"\"\n rank, world_size = get_dist_info()\n if dist:\n sampler = DistributedSampler(\n dataset, world_size, rank, shuffle=shuffle, seed=seed)\n shuffle = False\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\n else:\n sampler = None\n batch_size = num_gpus * samples_per_gpu\n num_workers = num_gpus * workers_per_gpu\n\n init_fn = partial(\n worker_init_fn, num_workers=num_workers, rank=rank,\n seed=seed) if seed is not None else None\n\n _, DataLoader = _get_dataloader()\n data_loader = DataLoader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),\n pin_memory=pin_memory,\n shuffle=shuffle,\n worker_init_fn=init_fn,\n drop_last=drop_last,\n **kwargs)\n\n return data_loader\n\n\ndef worker_init_fn(worker_id, num_workers, rank, seed):\n \"\"\"Init the random seed for various workers.\"\"\"\n # The seed of each worker equals to\n # num_worker * rank + worker_id + user_seed\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n", "path": "mmpose/datasets/builder.py"}]} | 1,902 | 172 |
gh_patches_debug_36445 | rasdani/github-patches | git_diff | meltano__meltano-7620 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cloud CLI to register a new project in Meltano Cloud
cc @tayloramurphy
Blocked by:
- https://github.com/meltano/infra/issues/514
Related to:
- https://github.com/meltano/infra/issues/513 (internal)
- https://github.com/meltano/meltano/issues/7411
</issue>
<code>
[start of src/meltano/cloud/cli/project.py]
1 """Meltano Cloud `project` command."""
2
3 from __future__ import annotations
4
5 import asyncio
6 import logging
7 import sys
8 import typing as t
9
10 import click
11 import questionary
12 from ulid import ULID
13
14 from meltano.cloud.api.client import MeltanoCloudClient
15 from meltano.cloud.cli.base import (
16 LimitedResult,
17 get_paginated,
18 pass_context,
19 print_formatted_list,
20 )
21 from meltano.core.utils import run_async
22
23 if t.TYPE_CHECKING:
24 from meltano.cloud.api.config import MeltanoCloudConfig
25 from meltano.cloud.api.types import CloudProject
26 from meltano.cloud.cli.base import MeltanoCloudCLIContext
27
28 DEFAULT_GET_PROJECTS_LIMIT = 125
29 MAX_PAGE_SIZE = 250
30
31 logger = logging.getLogger()
32
33
34 class ULIDType(click.ParamType):
35 """A ULID input type.
36
37 Examples:
38 01BX5ZZKBKACTAV9WEVGEMMVRY
39 01BX5ZZKBKACTAV9WEVGEMMVS1
40 """
41
42 name = "ulid"
43
44 def convert(
45 self,
46 value: str | ULID,
47 param: click.Parameter | None, # noqa: ARG002
48 ctx: click.Context | None, # noqa: ARG002
49 ) -> str:
50 """Try converting value to a ULID object."""
51 if isinstance(value, ULID):
52 return str(value)
53
54 if isinstance(value, str):
55 try:
56 ULID.from_str(value)
57 except ValueError:
58 self.fail(f"Invalid ULID value: {value}")
59
60 return value
61
62
63 class ProjectsCloudClient(MeltanoCloudClient):
64 """A Meltano Cloud client with extensions for projects."""
65
66 async def get_projects(
67 self,
68 *,
69 project_id: str | None = None,
70 project_name: str | None = None,
71 page_size: int | None = None,
72 page_token: str | None = None,
73 ):
74 """Use GET to get Meltano Cloud project projects.
75
76 Args:
77 project_id: The Meltano Cloud ID for the project.
78 project_name: The name of the project.
79 page_size: The number of items to request per page.
80 page_token: The page token.
81 """
82 async with self.authenticated():
83 return await self._json_request(
84 "GET",
85 f"/projects/v1/{self.config.tenant_resource_key}",
86 params=self.clean_params(
87 {
88 "project_id": project_id,
89 "project_name": project_name,
90 "page_size": page_size,
91 "page_token": page_token,
92 },
93 ),
94 )
95
96
97 @click.group("project")
98 def project_group() -> None:
99 """Interact with Meltano Cloud projects."""
100
101
102 def _safe_get_internal_project_id(config: MeltanoCloudConfig) -> str | None:
103 """Get the internal project ID, or `None` if it could not be obtained."""
104 try:
105 return config.internal_project_id
106 except Exception:
107 logger.debug(
108 "Could not get internal project ID from config; using `None` instead.",
109 )
110 return None
111
112
113 async def _get_projects(
114 config: MeltanoCloudConfig,
115 *,
116 project_id: str | None = None,
117 project_name: str | None = None,
118 limit: int = DEFAULT_GET_PROJECTS_LIMIT,
119 ) -> LimitedResult[CloudProject]:
120 async with ProjectsCloudClient(config=config) as client:
121 results = await get_paginated(
122 lambda page_size, page_token: client.get_projects(
123 project_id=project_id,
124 project_name=project_name,
125 page_size=page_size,
126 page_token=page_token,
127 ),
128 limit,
129 MAX_PAGE_SIZE,
130 )
131
132 results.items = [
133 {
134 **x,
135 "default": x["project_id"] == _safe_get_internal_project_id(config),
136 }
137 for x in results.items
138 ]
139 return results
140
141
142 def _format_project(project: dict[str, t.Any]) -> tuple[str, ...]:
143 return (
144 "X" if project["default"] else "",
145 project["project_name"],
146 project["git_repository"],
147 )
148
149
150 private_project_attributes = {"tenant_resource_key", "project_id"}
151
152
153 def _remove_private_project_attributes(project: CloudProject) -> dict[str, t.Any]:
154 return {k: v for k, v in project.items() if k not in private_project_attributes}
155
156
157 @project_group.command("list")
158 @click.option(
159 "--limit",
160 required=False,
161 type=int,
162 default=DEFAULT_GET_PROJECTS_LIMIT,
163 help="The maximum number of projects to display.",
164 )
165 @click.option(
166 "--format",
167 "output_format",
168 required=False,
169 default="terminal",
170 type=click.Choice(("terminal", "markdown", "json")),
171 help="The output format to use.",
172 )
173 @pass_context
174 @run_async
175 async def list_projects(
176 context: MeltanoCloudCLIContext,
177 output_format: str,
178 limit: int,
179 ) -> None:
180 """List Meltano Cloud projects."""
181 results = await _get_projects(config=context.config, limit=limit)
182 stripped_results = LimitedResult(
183 items=[_remove_private_project_attributes(x) for x in results.items],
184 truncated=results.truncated,
185 )
186 print_formatted_list(
187 stripped_results,
188 output_format,
189 _format_project,
190 ("Default", "Name", "Git Repository"),
191 ("center", "left", "left"),
192 )
193
194
195 def _print_projects(projects: list[CloudProject]) -> None:
196 for project in projects:
197 click.echo(
198 f"{project['project_id']}: {project['project_name']} "
199 f"({project['git_repository']!r})",
200 )
201
202
203 def _check_for_duplicate_project_names(projects: list[CloudProject]) -> None:
204 project_names = [x["project_name"] for x in projects]
205 if len(set(project_names)) != len(project_names):
206 click.secho(
207 "Error: Multiple Meltano Cloud projects have the same name. If you are "
208 "trying to use a project with an unambiguous name, please select it with "
209 "the `--name` option. Otherwise, please specify the project using the "
210 "`--id` option with its internal ID, shown below. Note that these IDs may "
211 "change at any time. To avoid this issue, please use unique project names.",
212 fg="red",
213 )
214 _print_projects(projects)
215 sys.exit(1)
216
217
218 def _check_for_project_name_conflict(
219 projects: list[CloudProject],
220 project_name: str,
221 ) -> None:
222 if [x["project_name"] for x in projects].count(project_name) > 1:
223 click.secho(
224 "Error: Multiple Meltano Cloud projects have the specified name. "
225 "Please specify the project using the `--id` option with its "
226 "internal ID, shown below. Note that these IDs may change at any "
227 "time. To avoid this issue, please use unique project names.",
228 fg="red",
229 )
230 _print_projects(projects)
231 sys.exit(1)
232
233
234 class ProjectChoicesQuestionaryOption(click.Option):
235 """Click option that provides an interactive prompt for Cloud Project names."""
236
237 def prompt_for_value(self, ctx: click.Context) -> t.Any:
238 """Prompt the user to interactively select a Meltano Cloud project by name.
239
240 Args:
241 ctx: The Click context.
242
243 Returns:
244 The name of the selected project, or `None` if the project was
245 selected using the `--id` option.
246 """
247 if "project_id" in ctx.params:
248 # The project has been specified by ID - don't prompt for a name
249 return None
250
251 context: MeltanoCloudCLIContext = ctx.obj
252 context.projects = asyncio.run(_get_projects(context.config)).items
253 _check_for_duplicate_project_names(context.projects)
254 default_project_name = next(
255 (
256 x
257 for x in context.projects
258 if x["project_id"]
259 == context.config.internal_organization_default["default_project_id"]
260 ),
261 {"project_name": None},
262 )["project_name"]
263 if not context.projects:
264 raise click.ClickException(
265 "No Meltano Cloud projects available to use. Please create a "
266 "project before running 'meltano cloud project use'.",
267 )
268 return questionary.select(
269 message="",
270 qmark="Use Meltano Cloud project",
271 choices=[x["project_name"] for x in context.projects],
272 default=default_project_name,
273 ).unsafe_ask() # Use Click's Ctrl-C handling instead of Questionary's
274
275
276 @project_group.command("use")
277 @click.option(
278 "--name",
279 "project_name",
280 cls=ProjectChoicesQuestionaryOption,
281 help=(
282 "The name of a Meltano Cloud project - "
283 "see `meltano cloud project list` for the available options."
284 ),
285 prompt=True,
286 )
287 @click.option(
288 "--id",
289 "project_id",
290 help=(
291 "The internal ID of a Meltano Cloud project - this ID is unstable and "
292 "should only be used if necessary to disambiguate when multiple "
293 "projects share a name."
294 ),
295 default=None,
296 type=ULIDType(),
297 )
298 @pass_context
299 @run_async
300 async def use_project(
301 context: MeltanoCloudCLIContext,
302 project_name: str | None,
303 project_id: str | None,
304 ) -> None:
305 """Set a project as the default to use for Meltano Cloud CLI commands."""
306 if project_id is not None and project_name is not None:
307 raise click.UsageError("The '--name' and '--id' options are mutually exclusive")
308 if project_id is not None:
309 context.config.internal_project_id = project_id
310 click.secho(
311 (
312 f"Set the project with ID {project_id!r} as the default "
313 "Meltano Cloud project for future commands"
314 ),
315 fg="green",
316 )
317 return
318
319 if context.projects is None: # Interactive config was not used
320 context.projects = (await _get_projects(context.config)).items
321 _check_for_project_name_conflict(context.projects, t.cast(str, project_name))
322 if project_name not in {x["project_name"] for x in context.projects}:
323 raise click.ClickException(
324 f"Unable to use project named {project_name!r} - no available "
325 "project matches name.",
326 )
327 context.config.internal_project_id = next(
328 x for x in context.projects if x["project_name"] == project_name
329 )["project_id"]
330 click.secho(
331 (
332 f"Set {project_name!r} as the default Meltano Cloud project for "
333 "future commands"
334 ),
335 fg="green",
336 )
337
[end of src/meltano/cloud/cli/project.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/meltano/cloud/cli/project.py b/src/meltano/cloud/cli/project.py
--- a/src/meltano/cloud/cli/project.py
+++ b/src/meltano/cloud/cli/project.py
@@ -6,12 +6,16 @@
import logging
import sys
import typing as t
+from http import HTTPStatus
import click
import questionary
+import requests
+from slugify import slugify
from ulid import ULID
+from yaspin import yaspin # type: ignore
-from meltano.cloud.api.client import MeltanoCloudClient
+from meltano.cloud.api.client import MeltanoCloudClient, MeltanoCloudError
from meltano.cloud.cli.base import (
LimitedResult,
get_paginated,
@@ -93,6 +97,26 @@
),
)
+ async def create_project(
+ self,
+ project_name: str,
+ git_repository: str,
+ project_root_path: str | None = None,
+ ):
+ """Use POST to create new Meltano Cloud project."""
+ async with self.authenticated():
+ payload = {"project_name": project_name, "git_repository": git_repository}
+ if project_root_path:
+ payload["project_root_path"] = project_root_path
+ prepared_request = await self._json_request(
+ "POST",
+ f"/projects/v1/{self.config.tenant_resource_key}",
+ json=payload,
+ )
+ response = requests.request(**t.cast(t.Dict[str, t.Any], prepared_request))
+ response.raise_for_status()
+ return response
+
@click.group("project")
def project_group() -> None:
@@ -334,3 +358,41 @@
),
fg="green",
)
+
+
+@project_group.command("create")
[email protected]("--name", type=str, prompt=True)
[email protected]("--repo-url", type=str, prompt=True)
[email protected]("--root-path", type=str, required=False)
+@pass_context
+@run_async
+async def create_project(
+ context: MeltanoCloudCLIContext,
+ name: str,
+ repo_url: str,
+ root_path: str | None = None,
+):
+ """Create a project to your Meltano Cloud."""
+ async with ProjectsCloudClient(config=context.config) as client:
+ try:
+ with yaspin(
+ text="Creating project - this may take several minutes...",
+ ):
+ response = await client.create_project(
+ project_name=name,
+ git_repository=repo_url,
+ project_root_path=root_path,
+ )
+ except MeltanoCloudError as e:
+ if e.response.status == HTTPStatus.CONFLICT:
+ click.secho(
+ (
+ f"A project named {name!r} (normalized to "
+ f"{slugify(name)!r}) already exists."
+ ),
+ fg="yellow",
+ )
+ return None
+ click.echo(f"Project {name!r} created successfully.")
+ if response.status_code == HTTPStatus.NO_CONTENT:
+ return None
| {"golden_diff": "diff --git a/src/meltano/cloud/cli/project.py b/src/meltano/cloud/cli/project.py\n--- a/src/meltano/cloud/cli/project.py\n+++ b/src/meltano/cloud/cli/project.py\n@@ -6,12 +6,16 @@\n import logging\n import sys\n import typing as t\n+from http import HTTPStatus\n \n import click\n import questionary\n+import requests\n+from slugify import slugify\n from ulid import ULID\n+from yaspin import yaspin # type: ignore\n \n-from meltano.cloud.api.client import MeltanoCloudClient\n+from meltano.cloud.api.client import MeltanoCloudClient, MeltanoCloudError\n from meltano.cloud.cli.base import (\n LimitedResult,\n get_paginated,\n@@ -93,6 +97,26 @@\n ),\n )\n \n+ async def create_project(\n+ self,\n+ project_name: str,\n+ git_repository: str,\n+ project_root_path: str | None = None,\n+ ):\n+ \"\"\"Use POST to create new Meltano Cloud project.\"\"\"\n+ async with self.authenticated():\n+ payload = {\"project_name\": project_name, \"git_repository\": git_repository}\n+ if project_root_path:\n+ payload[\"project_root_path\"] = project_root_path\n+ prepared_request = await self._json_request(\n+ \"POST\",\n+ f\"/projects/v1/{self.config.tenant_resource_key}\",\n+ json=payload,\n+ )\n+ response = requests.request(**t.cast(t.Dict[str, t.Any], prepared_request))\n+ response.raise_for_status()\n+ return response\n+\n \n @click.group(\"project\")\n def project_group() -> None:\n@@ -334,3 +358,41 @@\n ),\n fg=\"green\",\n )\n+\n+\n+@project_group.command(\"create\")\[email protected](\"--name\", type=str, prompt=True)\[email protected](\"--repo-url\", type=str, prompt=True)\[email protected](\"--root-path\", type=str, required=False)\n+@pass_context\n+@run_async\n+async def create_project(\n+ context: MeltanoCloudCLIContext,\n+ name: str,\n+ repo_url: str,\n+ root_path: str | None = None,\n+):\n+ \"\"\"Create a project to your Meltano Cloud.\"\"\"\n+ async with ProjectsCloudClient(config=context.config) as client:\n+ try:\n+ with yaspin(\n+ text=\"Creating project - this may take several minutes...\",\n+ ):\n+ response = await client.create_project(\n+ project_name=name,\n+ git_repository=repo_url,\n+ project_root_path=root_path,\n+ )\n+ except MeltanoCloudError as e:\n+ if e.response.status == HTTPStatus.CONFLICT:\n+ click.secho(\n+ (\n+ f\"A project named {name!r} (normalized to \"\n+ f\"{slugify(name)!r}) already exists.\"\n+ ),\n+ fg=\"yellow\",\n+ )\n+ return None\n+ click.echo(f\"Project {name!r} created successfully.\")\n+ if response.status_code == HTTPStatus.NO_CONTENT:\n+ return None\n", "issue": "Cloud CLI to register a new project in Meltano Cloud\ncc @tayloramurphy\r\n\r\nBlocked by:\r\n- https://github.com/meltano/infra/issues/514\r\n\r\nRelated to:\r\n\r\n- https://github.com/meltano/infra/issues/513 (internal)\r\n- https://github.com/meltano/meltano/issues/7411\r\n\r\n\n", "before_files": [{"content": "\"\"\"Meltano Cloud `project` command.\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport logging\nimport sys\nimport typing as t\n\nimport click\nimport questionary\nfrom ulid import ULID\n\nfrom meltano.cloud.api.client import MeltanoCloudClient\nfrom meltano.cloud.cli.base import (\n LimitedResult,\n get_paginated,\n pass_context,\n print_formatted_list,\n)\nfrom meltano.core.utils import run_async\n\nif t.TYPE_CHECKING:\n from meltano.cloud.api.config import MeltanoCloudConfig\n from meltano.cloud.api.types import CloudProject\n from meltano.cloud.cli.base import MeltanoCloudCLIContext\n\nDEFAULT_GET_PROJECTS_LIMIT = 125\nMAX_PAGE_SIZE = 250\n\nlogger = 
logging.getLogger()\n\n\nclass ULIDType(click.ParamType):\n \"\"\"A ULID input type.\n\n Examples:\n 01BX5ZZKBKACTAV9WEVGEMMVRY\n 01BX5ZZKBKACTAV9WEVGEMMVS1\n \"\"\"\n\n name = \"ulid\"\n\n def convert(\n self,\n value: str | ULID,\n param: click.Parameter | None, # noqa: ARG002\n ctx: click.Context | None, # noqa: ARG002\n ) -> str:\n \"\"\"Try converting value to a ULID object.\"\"\"\n if isinstance(value, ULID):\n return str(value)\n\n if isinstance(value, str):\n try:\n ULID.from_str(value)\n except ValueError:\n self.fail(f\"Invalid ULID value: {value}\")\n\n return value\n\n\nclass ProjectsCloudClient(MeltanoCloudClient):\n \"\"\"A Meltano Cloud client with extensions for projects.\"\"\"\n\n async def get_projects(\n self,\n *,\n project_id: str | None = None,\n project_name: str | None = None,\n page_size: int | None = None,\n page_token: str | None = None,\n ):\n \"\"\"Use GET to get Meltano Cloud project projects.\n\n Args:\n project_id: The Meltano Cloud ID for the project.\n project_name: The name of the project.\n page_size: The number of items to request per page.\n page_token: The page token.\n \"\"\"\n async with self.authenticated():\n return await self._json_request(\n \"GET\",\n f\"/projects/v1/{self.config.tenant_resource_key}\",\n params=self.clean_params(\n {\n \"project_id\": project_id,\n \"project_name\": project_name,\n \"page_size\": page_size,\n \"page_token\": page_token,\n },\n ),\n )\n\n\[email protected](\"project\")\ndef project_group() -> None:\n \"\"\"Interact with Meltano Cloud projects.\"\"\"\n\n\ndef _safe_get_internal_project_id(config: MeltanoCloudConfig) -> str | None:\n \"\"\"Get the internal project ID, or `None` if it could not be obtained.\"\"\"\n try:\n return config.internal_project_id\n except Exception:\n logger.debug(\n \"Could not get internal project ID from config; using `None` instead.\",\n )\n return None\n\n\nasync def _get_projects(\n config: MeltanoCloudConfig,\n *,\n project_id: str | None = None,\n project_name: str | None = None,\n limit: int = DEFAULT_GET_PROJECTS_LIMIT,\n) -> LimitedResult[CloudProject]:\n async with ProjectsCloudClient(config=config) as client:\n results = await get_paginated(\n lambda page_size, page_token: client.get_projects(\n project_id=project_id,\n project_name=project_name,\n page_size=page_size,\n page_token=page_token,\n ),\n limit,\n MAX_PAGE_SIZE,\n )\n\n results.items = [\n {\n **x,\n \"default\": x[\"project_id\"] == _safe_get_internal_project_id(config),\n }\n for x in results.items\n ]\n return results\n\n\ndef _format_project(project: dict[str, t.Any]) -> tuple[str, ...]:\n return (\n \"X\" if project[\"default\"] else \"\",\n project[\"project_name\"],\n project[\"git_repository\"],\n )\n\n\nprivate_project_attributes = {\"tenant_resource_key\", \"project_id\"}\n\n\ndef _remove_private_project_attributes(project: CloudProject) -> dict[str, t.Any]:\n return {k: v for k, v in project.items() if k not in private_project_attributes}\n\n\n@project_group.command(\"list\")\[email protected](\n \"--limit\",\n required=False,\n type=int,\n default=DEFAULT_GET_PROJECTS_LIMIT,\n help=\"The maximum number of projects to display.\",\n)\[email protected](\n \"--format\",\n \"output_format\",\n required=False,\n default=\"terminal\",\n type=click.Choice((\"terminal\", \"markdown\", \"json\")),\n help=\"The output format to use.\",\n)\n@pass_context\n@run_async\nasync def list_projects(\n context: MeltanoCloudCLIContext,\n output_format: str,\n limit: int,\n) -> None:\n \"\"\"List Meltano Cloud 
projects.\"\"\"\n results = await _get_projects(config=context.config, limit=limit)\n stripped_results = LimitedResult(\n items=[_remove_private_project_attributes(x) for x in results.items],\n truncated=results.truncated,\n )\n print_formatted_list(\n stripped_results,\n output_format,\n _format_project,\n (\"Default\", \"Name\", \"Git Repository\"),\n (\"center\", \"left\", \"left\"),\n )\n\n\ndef _print_projects(projects: list[CloudProject]) -> None:\n for project in projects:\n click.echo(\n f\"{project['project_id']}: {project['project_name']} \"\n f\"({project['git_repository']!r})\",\n )\n\n\ndef _check_for_duplicate_project_names(projects: list[CloudProject]) -> None:\n project_names = [x[\"project_name\"] for x in projects]\n if len(set(project_names)) != len(project_names):\n click.secho(\n \"Error: Multiple Meltano Cloud projects have the same name. If you are \"\n \"trying to use a project with an unambiguous name, please select it with \"\n \"the `--name` option. Otherwise, please specify the project using the \"\n \"`--id` option with its internal ID, shown below. Note that these IDs may \"\n \"change at any time. To avoid this issue, please use unique project names.\",\n fg=\"red\",\n )\n _print_projects(projects)\n sys.exit(1)\n\n\ndef _check_for_project_name_conflict(\n projects: list[CloudProject],\n project_name: str,\n) -> None:\n if [x[\"project_name\"] for x in projects].count(project_name) > 1:\n click.secho(\n \"Error: Multiple Meltano Cloud projects have the specified name. \"\n \"Please specify the project using the `--id` option with its \"\n \"internal ID, shown below. Note that these IDs may change at any \"\n \"time. To avoid this issue, please use unique project names.\",\n fg=\"red\",\n )\n _print_projects(projects)\n sys.exit(1)\n\n\nclass ProjectChoicesQuestionaryOption(click.Option):\n \"\"\"Click option that provides an interactive prompt for Cloud Project names.\"\"\"\n\n def prompt_for_value(self, ctx: click.Context) -> t.Any:\n \"\"\"Prompt the user to interactively select a Meltano Cloud project by name.\n\n Args:\n ctx: The Click context.\n\n Returns:\n The name of the selected project, or `None` if the project was\n selected using the `--id` option.\n \"\"\"\n if \"project_id\" in ctx.params:\n # The project has been specified by ID - don't prompt for a name\n return None\n\n context: MeltanoCloudCLIContext = ctx.obj\n context.projects = asyncio.run(_get_projects(context.config)).items\n _check_for_duplicate_project_names(context.projects)\n default_project_name = next(\n (\n x\n for x in context.projects\n if x[\"project_id\"]\n == context.config.internal_organization_default[\"default_project_id\"]\n ),\n {\"project_name\": None},\n )[\"project_name\"]\n if not context.projects:\n raise click.ClickException(\n \"No Meltano Cloud projects available to use. 
Please create a \"\n \"project before running 'meltano cloud project use'.\",\n )\n return questionary.select(\n message=\"\",\n qmark=\"Use Meltano Cloud project\",\n choices=[x[\"project_name\"] for x in context.projects],\n default=default_project_name,\n ).unsafe_ask() # Use Click's Ctrl-C handling instead of Questionary's\n\n\n@project_group.command(\"use\")\[email protected](\n \"--name\",\n \"project_name\",\n cls=ProjectChoicesQuestionaryOption,\n help=(\n \"The name of a Meltano Cloud project - \"\n \"see `meltano cloud project list` for the available options.\"\n ),\n prompt=True,\n)\[email protected](\n \"--id\",\n \"project_id\",\n help=(\n \"The internal ID of a Meltano Cloud project - this ID is unstable and \"\n \"should only be used if necessary to disambiguate when multiple \"\n \"projects share a name.\"\n ),\n default=None,\n type=ULIDType(),\n)\n@pass_context\n@run_async\nasync def use_project(\n context: MeltanoCloudCLIContext,\n project_name: str | None,\n project_id: str | None,\n) -> None:\n \"\"\"Set a project as the default to use for Meltano Cloud CLI commands.\"\"\"\n if project_id is not None and project_name is not None:\n raise click.UsageError(\"The '--name' and '--id' options are mutually exclusive\")\n if project_id is not None:\n context.config.internal_project_id = project_id\n click.secho(\n (\n f\"Set the project with ID {project_id!r} as the default \"\n \"Meltano Cloud project for future commands\"\n ),\n fg=\"green\",\n )\n return\n\n if context.projects is None: # Interactive config was not used\n context.projects = (await _get_projects(context.config)).items\n _check_for_project_name_conflict(context.projects, t.cast(str, project_name))\n if project_name not in {x[\"project_name\"] for x in context.projects}:\n raise click.ClickException(\n f\"Unable to use project named {project_name!r} - no available \"\n \"project matches name.\",\n )\n context.config.internal_project_id = next(\n x for x in context.projects if x[\"project_name\"] == project_name\n )[\"project_id\"]\n click.secho(\n (\n f\"Set {project_name!r} as the default Meltano Cloud project for \"\n \"future commands\"\n ),\n fg=\"green\",\n )\n", "path": "src/meltano/cloud/cli/project.py"}]} | 3,889 | 694 |
gh_patches_debug_30975 | rasdani/github-patches | git_diff | liqd__a4-product-608 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Mandatory mB topic selection on bet.in ( US #1775)
All projects need a topic on bet.in now, even existing ones. Can we remove that requirement? We haven't yet thought about how to implement topics on bet.in and they are not shown anywhere, so it would probably be confusing for initiators.
</issue>
<code>
[start of liqd_product/apps/projects/dashboard.py]
1 from django.urls import reverse
2 from django.utils.translation import ugettext_lazy as _
3
4 from adhocracy4.dashboard import DashboardComponent
5 from adhocracy4.dashboard import ProjectFormComponent
6 from adhocracy4.dashboard import components
7
8 from . import forms
9 from . import views
10
11
12 class ParticipantsComponent(DashboardComponent):
13 identifier = 'participants'
14 weight = 30
15 label = _('Participants')
16
17 def is_effective(self, project):
18 return not project.is_draft and project.is_private
19
20 def get_base_url(self, project):
21 return reverse('a4dashboard:dashboard-participants-edit', kwargs={
22 'project_slug': project.slug
23 })
24
25 def get_urls(self):
26 return [(
27 r'^projects/(?P<project_slug>[-\w_]+)/participants/$',
28 views.DashboardProjectParticipantsView.as_view(component=self),
29 'dashboard-participants-edit'
30 )]
31
32
33 class ModeratorsComponent(DashboardComponent):
34 identifier = 'moderators'
35 weight = 32
36 label = _('Moderators')
37
38 def is_effective(self, project):
39 return True
40
41 def get_base_url(self, project):
42 return reverse('a4dashboard:dashboard-moderators-edit', kwargs={
43 'project_slug': project.slug
44 })
45
46 def get_urls(self):
47 return [(
48 r'^projects/(?P<project_slug>[-\w_]+)/moderators/$',
49 views.DashboardProjectModeratorsView.as_view(component=self),
50 'dashboard-moderators-edit'
51 )]
52
53
54 class TopicComponent(ProjectFormComponent):
55 identifier = 'topics'
56 weight = 33
57 label = _('Topics')
58
59 form_title = _('Edit topics')
60 form_class = forms.TopicForm
61 form_template_name = 'liqd_product_projects/project_topics.html'
62
63
64 components.register_project(ModeratorsComponent())
65 components.register_project(ParticipantsComponent())
66 components.register_project(TopicComponent())
67
[end of liqd_product/apps/projects/dashboard.py]
[start of liqd_product/apps/projects/forms.py]
1 from django import forms
2 from django.contrib.auth import get_user_model
3 from django.core.exceptions import ValidationError
4 from django.utils.translation import ugettext_lazy as _
5
6 from adhocracy4.dashboard.forms import ProjectDashboardForm
7 from adhocracy4.projects.models import Project
8 from liqd_product.apps.users import fields as user_fields
9
10 from .models import ModeratorInvite
11 from .models import ParticipantInvite
12
13 User = get_user_model()
14
15
16 class InviteForm(forms.ModelForm):
17 accept = forms.CharField(required=False)
18 reject = forms.CharField(required=False)
19
20 def clean(self):
21 data = self.data
22 if 'accept' not in data and 'reject' not in data:
23 raise ValidationError('Reject or accept')
24 return data
25
26 def is_accepted(self):
27 data = self.data
28 return 'accept' in data and 'reject' not in data
29
30
31 class ParticipantInviteForm(InviteForm):
32
33 class Meta:
34 model = ParticipantInvite
35 fields = ['accept', 'reject']
36
37
38 class ModeratorInviteForm(InviteForm):
39
40 class Meta:
41 model = ModeratorInvite
42 fields = ['accept', 'reject']
43
44
45 class InviteUsersFromEmailForm(forms.Form):
46 add_users = user_fields.CommaSeparatedEmailField(
47 required=False,
48 label=_('Invite users via email')
49 )
50
51 add_users_upload = user_fields.EmailFileField(
52 required=False,
53 label=_('Invite users via file upload'),
54 help_text=_('Upload a csv file containing email addresses.')
55 )
56
57 def __init__(self, *args, **kwargs):
58 labels = kwargs.pop('labels', None)
59 super().__init__(*args, **kwargs)
60
61 if labels:
62 self.fields['add_users'].label = labels[0]
63 self.fields['add_users_upload'].label = labels[1]
64
65 def clean(self):
66 cleaned_data = super().clean()
67 add_users = self.data.get('add_users')
68 add_users_upload = self.files.get('add_users_upload')
69 if not self.errors and not add_users and not add_users_upload:
70 raise ValidationError(
71 _('Please enter email addresses or upload a file'))
72 return cleaned_data
73
74
75 class TopicForm(ProjectDashboardForm):
76
77 class Meta:
78 model = Project
79 fields = ['topics']
80 required_for_project_publish = ['topics']
81
[end of liqd_product/apps/projects/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/liqd_product/apps/projects/dashboard.py b/liqd_product/apps/projects/dashboard.py
--- a/liqd_product/apps/projects/dashboard.py
+++ b/liqd_product/apps/projects/dashboard.py
@@ -2,10 +2,8 @@
from django.utils.translation import ugettext_lazy as _
from adhocracy4.dashboard import DashboardComponent
-from adhocracy4.dashboard import ProjectFormComponent
from adhocracy4.dashboard import components
-from . import forms
from . import views
@@ -51,16 +49,5 @@
)]
-class TopicComponent(ProjectFormComponent):
- identifier = 'topics'
- weight = 33
- label = _('Topics')
-
- form_title = _('Edit topics')
- form_class = forms.TopicForm
- form_template_name = 'liqd_product_projects/project_topics.html'
-
-
components.register_project(ModeratorsComponent())
components.register_project(ParticipantsComponent())
-components.register_project(TopicComponent())
diff --git a/liqd_product/apps/projects/forms.py b/liqd_product/apps/projects/forms.py
--- a/liqd_product/apps/projects/forms.py
+++ b/liqd_product/apps/projects/forms.py
@@ -3,8 +3,6 @@
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
-from adhocracy4.dashboard.forms import ProjectDashboardForm
-from adhocracy4.projects.models import Project
from liqd_product.apps.users import fields as user_fields
from .models import ModeratorInvite
@@ -70,11 +68,3 @@
raise ValidationError(
_('Please enter email addresses or upload a file'))
return cleaned_data
-
-
-class TopicForm(ProjectDashboardForm):
-
- class Meta:
- model = Project
- fields = ['topics']
- required_for_project_publish = ['topics']
| {"golden_diff": "diff --git a/liqd_product/apps/projects/dashboard.py b/liqd_product/apps/projects/dashboard.py\n--- a/liqd_product/apps/projects/dashboard.py\n+++ b/liqd_product/apps/projects/dashboard.py\n@@ -2,10 +2,8 @@\n from django.utils.translation import ugettext_lazy as _\n \n from adhocracy4.dashboard import DashboardComponent\n-from adhocracy4.dashboard import ProjectFormComponent\n from adhocracy4.dashboard import components\n \n-from . import forms\n from . import views\n \n \n@@ -51,16 +49,5 @@\n )]\n \n \n-class TopicComponent(ProjectFormComponent):\n- identifier = 'topics'\n- weight = 33\n- label = _('Topics')\n-\n- form_title = _('Edit topics')\n- form_class = forms.TopicForm\n- form_template_name = 'liqd_product_projects/project_topics.html'\n-\n-\n components.register_project(ModeratorsComponent())\n components.register_project(ParticipantsComponent())\n-components.register_project(TopicComponent())\ndiff --git a/liqd_product/apps/projects/forms.py b/liqd_product/apps/projects/forms.py\n--- a/liqd_product/apps/projects/forms.py\n+++ b/liqd_product/apps/projects/forms.py\n@@ -3,8 +3,6 @@\n from django.core.exceptions import ValidationError\n from django.utils.translation import ugettext_lazy as _\n \n-from adhocracy4.dashboard.forms import ProjectDashboardForm\n-from adhocracy4.projects.models import Project\n from liqd_product.apps.users import fields as user_fields\n \n from .models import ModeratorInvite\n@@ -70,11 +68,3 @@\n raise ValidationError(\n _('Please enter email addresses or upload a file'))\n return cleaned_data\n-\n-\n-class TopicForm(ProjectDashboardForm):\n-\n- class Meta:\n- model = Project\n- fields = ['topics']\n- required_for_project_publish = ['topics']\n", "issue": "Mandatory mB topic selection on bet.in ( US #1775)\nAll projects need a topic on bet.in now, even existing ones. Can we remove that requirement? We haven't yet thought about how to implement topics on bet.in and there are not shown anywhere, so it would probably be confusing for initiators.\n", "before_files": [{"content": "from django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard import DashboardComponent\nfrom adhocracy4.dashboard import ProjectFormComponent\nfrom adhocracy4.dashboard import components\n\nfrom . import forms\nfrom . 
import views\n\n\nclass ParticipantsComponent(DashboardComponent):\n identifier = 'participants'\n weight = 30\n label = _('Participants')\n\n def is_effective(self, project):\n return not project.is_draft and project.is_private\n\n def get_base_url(self, project):\n return reverse('a4dashboard:dashboard-participants-edit', kwargs={\n 'project_slug': project.slug\n })\n\n def get_urls(self):\n return [(\n r'^projects/(?P<project_slug>[-\\w_]+)/participants/$',\n views.DashboardProjectParticipantsView.as_view(component=self),\n 'dashboard-participants-edit'\n )]\n\n\nclass ModeratorsComponent(DashboardComponent):\n identifier = 'moderators'\n weight = 32\n label = _('Moderators')\n\n def is_effective(self, project):\n return True\n\n def get_base_url(self, project):\n return reverse('a4dashboard:dashboard-moderators-edit', kwargs={\n 'project_slug': project.slug\n })\n\n def get_urls(self):\n return [(\n r'^projects/(?P<project_slug>[-\\w_]+)/moderators/$',\n views.DashboardProjectModeratorsView.as_view(component=self),\n 'dashboard-moderators-edit'\n )]\n\n\nclass TopicComponent(ProjectFormComponent):\n identifier = 'topics'\n weight = 33\n label = _('Topics')\n\n form_title = _('Edit topics')\n form_class = forms.TopicForm\n form_template_name = 'liqd_product_projects/project_topics.html'\n\n\ncomponents.register_project(ModeratorsComponent())\ncomponents.register_project(ParticipantsComponent())\ncomponents.register_project(TopicComponent())\n", "path": "liqd_product/apps/projects/dashboard.py"}, {"content": "from django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.forms import ProjectDashboardForm\nfrom adhocracy4.projects.models import Project\nfrom liqd_product.apps.users import fields as user_fields\n\nfrom .models import ModeratorInvite\nfrom .models import ParticipantInvite\n\nUser = get_user_model()\n\n\nclass InviteForm(forms.ModelForm):\n accept = forms.CharField(required=False)\n reject = forms.CharField(required=False)\n\n def clean(self):\n data = self.data\n if 'accept' not in data and 'reject' not in data:\n raise ValidationError('Reject or accept')\n return data\n\n def is_accepted(self):\n data = self.data\n return 'accept' in data and 'reject' not in data\n\n\nclass ParticipantInviteForm(InviteForm):\n\n class Meta:\n model = ParticipantInvite\n fields = ['accept', 'reject']\n\n\nclass ModeratorInviteForm(InviteForm):\n\n class Meta:\n model = ModeratorInvite\n fields = ['accept', 'reject']\n\n\nclass InviteUsersFromEmailForm(forms.Form):\n add_users = user_fields.CommaSeparatedEmailField(\n required=False,\n label=_('Invite users via email')\n )\n\n add_users_upload = user_fields.EmailFileField(\n required=False,\n label=_('Invite users via file upload'),\n help_text=_('Upload a csv file containing email addresses.')\n )\n\n def __init__(self, *args, **kwargs):\n labels = kwargs.pop('labels', None)\n super().__init__(*args, **kwargs)\n\n if labels:\n self.fields['add_users'].label = labels[0]\n self.fields['add_users_upload'].label = labels[1]\n\n def clean(self):\n cleaned_data = super().clean()\n add_users = self.data.get('add_users')\n add_users_upload = self.files.get('add_users_upload')\n if not self.errors and not add_users and not add_users_upload:\n raise ValidationError(\n _('Please enter email addresses or upload a file'))\n return cleaned_data\n\n\nclass TopicForm(ProjectDashboardForm):\n\n class Meta:\n 
model = Project\n fields = ['topics']\n required_for_project_publish = ['topics']\n", "path": "liqd_product/apps/projects/forms.py"}]} | 1,806 | 387 |
gh_patches_debug_13265 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-569 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove `integtest.sh` from all plugin repos
The [integtest.sh](https://github.com/opensearch-project/opensearch-build/blob/main/bundle-workflow/scripts/default/integtest.sh) tool contains the logic to run integration tests for a plugin. This logic is common across most plugins, so it has been moved to the `opensearch-build` repo. Thus it can be removed from the individual plugin repos.
However, if a plugin requires some custom logic to run integration tests that the standard tool doesn't provide, it can continue maintaining its own integtest.sh in its repo. In this case, when the integration tests are run, if a plugin has an integtest.sh tool in its repo, it takes precedence over the standard default integtest.sh in the `opensearch-build` repo. This precedence order is defined in ScriptFinder [here](https://github.com/opensearch-project/opensearch-build/blob/84f2fa1cf15abe314aee62dbd2cb39bf2c9bb65f/bundle-workflow/src/paths/script_finder.py#L65)
Action items:
Raise PRs on all plugin repos and remove integtest.sh
- [ ] index-management
- [ ] anomaly-detection
- [ ] alerting
- [ ] asynchronous-search
- [ ] k-NN
Changes will need to be backported into 1.x branches if such exist, too.
</issue>
<code>
[start of bundle-workflow/src/paths/script_finder.py]
1 # SPDX-License-Identifier: Apache-2.0
2 #
3 # The OpenSearch Contributors require contributions made to
4 # this file be licensed under the Apache-2.0 license or a
5 # compatible open source license.
6
7 import os
8
9
10 class ScriptFinder:
11 class ScriptNotFoundError(Exception):
12 def __init__(self, kind, paths):
13 self.kind = kind
14 self.paths = paths
15 super().__init__(f"Could not find {kind} script. Looked in {paths}.")
16
17 component_scripts_path = os.path.realpath(
18 os.path.join(
19 os.path.dirname(os.path.abspath(__file__)), "../../scripts/components"
20 )
21 )
22
23 default_scripts_path = os.path.realpath(
24 os.path.join(
25 os.path.dirname(os.path.abspath(__file__)), "../../scripts/default"
26 )
27 )
28
29 """
30 ScriptFinder is a helper that abstracts away the details of where to look for build, test and install scripts.
31
32 For build.sh and integtest.sh scripts, given a component name and a checked-out Git repository,
33 it will look in the following locations, in order:
34 * Root of the Git repository
35 * /scripts/<script-name> in the Git repository
36 * <component_scripts_path>/<component_name>/<script-name>
37 * <default_scripts_path>/<script-name>
38
39 For install.sh scripts, given a component name, it will look in the following locations, in order:
40 * <component_scripts_path>/<component_name>/<script-name>
41 * <default_scripts_path>/<script-name>
42 """
43
44 @classmethod
45 def __find_script(cls, name, paths):
46 script = next(filter(lambda path: os.path.exists(path), paths), None)
47 if script is None:
48 raise ScriptFinder.ScriptNotFoundError(name, paths)
49 return script
50
51 @classmethod
52 def find_build_script(cls, component_name, git_dir):
53 paths = [
54 os.path.realpath(os.path.join(git_dir, "build.sh")),
55 os.path.realpath(os.path.join(git_dir, "scripts/build.sh")),
56 os.path.realpath(
57 os.path.join(cls.component_scripts_path, component_name, "build.sh")
58 ),
59 os.path.realpath(os.path.join(cls.default_scripts_path, "build.sh")),
60 ]
61
62 return cls.__find_script("build.sh", paths)
63
64 @classmethod
65 def find_integ_test_script(cls, component_name, git_dir):
66 paths = [
67 # TODO: Uncomment this after the integtest.sh tool is removed from plugin repos. See issue #497
68 # os.path.realpath(os.path.join(git_dir, "integtest.sh")),
69 # os.path.realpath(os.path.join(git_dir, "scripts/integtest.sh")),
70 os.path.realpath(
71 os.path.join(cls.component_scripts_path, component_name, "integtest.sh")
72 ),
73 os.path.realpath(os.path.join(cls.default_scripts_path, "integtest.sh")),
74 ]
75
76 return cls.__find_script("integtest.sh", paths)
77
78 @classmethod
79 def find_install_script(cls, component_name):
80 paths = [
81 os.path.realpath(
82 os.path.join(cls.component_scripts_path, component_name, "install.sh")
83 ),
84 os.path.realpath(os.path.join(cls.default_scripts_path, "install.sh")),
85 ]
86
87 return cls.__find_script("install.sh", paths)
88
89 @classmethod
90 def find_bwc_test_script(cls, component_name, git_dir):
91 paths = [
92 os.path.realpath(os.path.join(git_dir, "bwctest.sh")),
93 os.path.realpath(os.path.join(git_dir, "scripts/bwctest.sh")),
94 os.path.realpath(
95 os.path.join(cls.component_scripts_path, component_name, "bwctest.sh")
96 ),
97 os.path.realpath(os.path.join(cls.default_scripts_path, "bwctest.sh")),
98 ]
99
100 return cls.__find_script("bwctest.sh", paths)
101
[end of bundle-workflow/src/paths/script_finder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bundle-workflow/src/paths/script_finder.py b/bundle-workflow/src/paths/script_finder.py
--- a/bundle-workflow/src/paths/script_finder.py
+++ b/bundle-workflow/src/paths/script_finder.py
@@ -64,9 +64,8 @@
@classmethod
def find_integ_test_script(cls, component_name, git_dir):
paths = [
- # TODO: Uncomment this after the integtest.sh tool is removed from plugin repos. See issue #497
- # os.path.realpath(os.path.join(git_dir, "integtest.sh")),
- # os.path.realpath(os.path.join(git_dir, "scripts/integtest.sh")),
+ os.path.realpath(os.path.join(git_dir, "integtest.sh")),
+ os.path.realpath(os.path.join(git_dir, "scripts/integtest.sh")),
os.path.realpath(
os.path.join(cls.component_scripts_path, component_name, "integtest.sh")
),
| {"golden_diff": "diff --git a/bundle-workflow/src/paths/script_finder.py b/bundle-workflow/src/paths/script_finder.py\n--- a/bundle-workflow/src/paths/script_finder.py\n+++ b/bundle-workflow/src/paths/script_finder.py\n@@ -64,9 +64,8 @@\n @classmethod\n def find_integ_test_script(cls, component_name, git_dir):\n paths = [\n- # TODO: Uncomment this after the integtest.sh tool is removed from plugin repos. See issue #497\n- # os.path.realpath(os.path.join(git_dir, \"integtest.sh\")),\n- # os.path.realpath(os.path.join(git_dir, \"scripts/integtest.sh\")),\n+ os.path.realpath(os.path.join(git_dir, \"integtest.sh\")),\n+ os.path.realpath(os.path.join(git_dir, \"scripts/integtest.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"integtest.sh\")\n ),\n", "issue": "Remove `integtest.sh` from all plugin repos\nThe [integtest.sh](https://github.com/opensearch-project/opensearch-build/blob/main/bundle-workflow/scripts/default/integtest.sh) tool contains the logic to run integration tests for a plugin. This logic is mostly common across most plugins, so it has been moved to `opensearch-build` repo. Thus it can be removed from the individual plugin repos.\r\nHowever, if a plugin requires some custom logic to run integtests, which the standard tool doesn't provide, they can continue maintaining this integtest.sh in their own repo. In this case, when the integration tests are run, if a plugin has a integtest.sh tool in their repo, it gets precedence over the standard default integtest.sh in the `opensearch-build` repo. This precedence order logic is defined in ScriptFinder [here](https://github.com/opensearch-project/opensearch-build/blob/84f2fa1cf15abe314aee62dbd2cb39bf2c9bb65f/bundle-workflow/src/paths/script_finder.py#L65) \r\n\r\nAction items:\r\n\r\nRaise PRs on all plugin repos and remove integtest.sh \r\n- [ ] index-management\r\n- [ ] anomaly-detection,\r\n- [ ] alerting\r\n- [ ] asynchronous-search\r\n- [ ] k-NN\r\n\r\nChanges will need to be backported into 1.x branches if such exist, too.\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\n\n\nclass ScriptFinder:\n class ScriptNotFoundError(Exception):\n def __init__(self, kind, paths):\n self.kind = kind\n self.paths = paths\n super().__init__(f\"Could not find {kind} script. 
Looked in {paths}.\")\n\n component_scripts_path = os.path.realpath(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"../../scripts/components\"\n )\n )\n\n default_scripts_path = os.path.realpath(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"../../scripts/default\"\n )\n )\n\n \"\"\"\n ScriptFinder is a helper that abstracts away the details of where to look for build, test and install scripts.\n\n For build.sh and integtest.sh scripts, given a component name and a checked-out Git repository,\n it will look in the following locations, in order:\n * Root of the Git repository\n * /scripts/<script-name> in the Git repository\n * <component_scripts_path>/<component_name>/<script-name>\n * <default_scripts_path>/<script-name>\n\n For install.sh scripts, given a component name, it will look in the following locations, in order:\n * <component_scripts_path>/<component_name>/<script-name>\n * <default_scripts_path>/<script-name>\n \"\"\"\n\n @classmethod\n def __find_script(cls, name, paths):\n script = next(filter(lambda path: os.path.exists(path), paths), None)\n if script is None:\n raise ScriptFinder.ScriptNotFoundError(name, paths)\n return script\n\n @classmethod\n def find_build_script(cls, component_name, git_dir):\n paths = [\n os.path.realpath(os.path.join(git_dir, \"build.sh\")),\n os.path.realpath(os.path.join(git_dir, \"scripts/build.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"build.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"build.sh\")),\n ]\n\n return cls.__find_script(\"build.sh\", paths)\n\n @classmethod\n def find_integ_test_script(cls, component_name, git_dir):\n paths = [\n # TODO: Uncomment this after the integtest.sh tool is removed from plugin repos. See issue #497\n # os.path.realpath(os.path.join(git_dir, \"integtest.sh\")),\n # os.path.realpath(os.path.join(git_dir, \"scripts/integtest.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"integtest.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"integtest.sh\")),\n ]\n\n return cls.__find_script(\"integtest.sh\", paths)\n\n @classmethod\n def find_install_script(cls, component_name):\n paths = [\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"install.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"install.sh\")),\n ]\n\n return cls.__find_script(\"install.sh\", paths)\n\n @classmethod\n def find_bwc_test_script(cls, component_name, git_dir):\n paths = [\n os.path.realpath(os.path.join(git_dir, \"bwctest.sh\")),\n os.path.realpath(os.path.join(git_dir, \"scripts/bwctest.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"bwctest.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"bwctest.sh\")),\n ]\n\n return cls.__find_script(\"bwctest.sh\", paths)\n", "path": "bundle-workflow/src/paths/script_finder.py"}]} | 1,866 | 214 |
gh_patches_debug_31101 | rasdani/github-patches | git_diff | StackStorm__st2-4592 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The api key in the st2api log is not obfuscated
##### SUMMARY
The user found the API key in clear text in the query request logged by st2api (the request comes from the load balancer health check)
```GET /api/v1/?st2-api-key=foo HTTP/1.1```
##### ISSUE TYPE
- Bug Report
##### STACKSTORM VERSION
st2 2.10.3, on Python 2.7.12
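
As an illustration of the summary above, one way to avoid leaking the key in access logs is to mask sensitive query parameters before they are logged. The snippet below is only a sketch: the parameter list beyond `st2-api-key` and the mask value are assumptions, not the actual st2 fix.
```python
MASKED_VALUE = "********"
SENSITIVE_QUERY_PARAMS = {"st2-api-key", "x-auth-token"}  # assumed list for illustration

def mask_query_params(query_params):
    """Return a copy of the query dict with sensitive values replaced."""
    return {
        name: MASKED_VALUE if name.lower() in SENSITIVE_QUERY_PARAMS else value
        for name, value in query_params.items()
    }

print(mask_query_params({"st2-api-key": "foo", "limit": "10"}))
# {'st2-api-key': '********', 'limit': '10'}
```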
</issue>
<code>
[start of st2common/st2common/middleware/logging.py]
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 from __future__ import absolute_import
17 import time
18 import types
19 import itertools
20
21 from st2common.constants.api import REQUEST_ID_HEADER
22 from st2common import log as logging
23 from st2common.router import Request, NotFoundException
24
25 LOG = logging.getLogger(__name__)
26
27 try:
28 clock = time.perf_counter
29 except AttributeError:
30 clock = time.time
31
32
33 class LoggingMiddleware(object):
34 """
35 Logs all incoming requests and outgoing responses
36 """
37
38 def __init__(self, app, router):
39 self.app = app
40 self.router = router
41
42 def __call__(self, environ, start_response):
43 start_time = clock()
44 status_code = []
45 content_length = []
46
47 request = Request(environ)
48
49 # Log the incoming request
50 values = {
51 'method': request.method,
52 'path': request.path,
53 'remote_addr': request.remote_addr,
54 'query': request.GET.dict_of_lists(),
55 'request_id': request.headers.get(REQUEST_ID_HEADER, None)
56 }
57
58 LOG.info('%(request_id)s - %(method)s %(path)s with query=%(query)s' %
59 values, extra=values)
60
61 def custom_start_response(status, headers, exc_info=None):
62 status_code.append(int(status.split(' ')[0]))
63
64 for name, value in headers:
65 if name.lower() == 'content-length':
66 content_length.append(int(value))
67 break
68
69 return start_response(status, headers, exc_info)
70
71 retval = self.app(environ, custom_start_response)
72
73 try:
74 endpoint, path_vars = self.router.match(request)
75 except NotFoundException:
76 endpoint = {}
77
78 log_result = endpoint.get('x-log-result', True)
79
80 if isinstance(retval, (types.GeneratorType, itertools.chain)):
81 # Note: We don't log the result when return value is a generator, because this would
82 # result in calling str() on the generator and as such, exhausting it
83 content_length = [float('inf')]
84 log_result = False
85
86 # Log the response
87 values = {
88 'method': request.method,
89 'path': request.path,
90 'remote_addr': request.remote_addr,
91 'status': status_code[0],
92 'runtime': float("{0:.3f}".format((clock() - start_time) * 10**3)),
93 'content_length': content_length[0] if content_length else len(b''.join(retval)),
94 'request_id': request.headers.get(REQUEST_ID_HEADER, None)
95 }
96
97 log_msg = '%(request_id)s - %(status)s %(content_length)s %(runtime)sms' % (values)
98 LOG.info(log_msg, extra=values)
99
100 if log_result:
101 values['result'] = retval[0]
102 log_msg = ('%(request_id)s - %(status)s %(content_length)s %(runtime)sms\n%(result)s' %
103 (values))
104 LOG.debug(log_msg, extra=values)
105
106 return retval
107
[end of st2common/st2common/middleware/logging.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/st2common/st2common/middleware/logging.py b/st2common/st2common/middleware/logging.py
--- a/st2common/st2common/middleware/logging.py
+++ b/st2common/st2common/middleware/logging.py
@@ -14,16 +14,28 @@
# limitations under the License.
from __future__ import absolute_import
+
import time
import types
import itertools
+from oslo_config import cfg
+
from st2common.constants.api import REQUEST_ID_HEADER
+from st2common.constants.auth import QUERY_PARAM_ATTRIBUTE_NAME
+from st2common.constants.auth import QUERY_PARAM_API_KEY_ATTRIBUTE_NAME
+from st2common.constants.secrets import MASKED_ATTRIBUTE_VALUE
+from st2common.constants.secrets import MASKED_ATTRIBUTES_BLACKLIST
from st2common import log as logging
from st2common.router import Request, NotFoundException
LOG = logging.getLogger(__name__)
+SECRET_QUERY_PARAMS = [
+ QUERY_PARAM_ATTRIBUTE_NAME,
+ QUERY_PARAM_API_KEY_ATTRIBUTE_NAME
+] + MASKED_ATTRIBUTES_BLACKLIST
+
try:
clock = time.perf_counter
except AttributeError:
@@ -46,12 +58,20 @@
request = Request(environ)
+ query_params = request.GET.dict_of_lists()
+
+ # Mask secret / sensitive query params
+ secret_query_params = SECRET_QUERY_PARAMS + cfg.CONF.log.mask_secrets_blacklist
+ for param_name in secret_query_params:
+ if param_name in query_params:
+ query_params[param_name] = MASKED_ATTRIBUTE_VALUE
+
# Log the incoming request
values = {
'method': request.method,
'path': request.path,
'remote_addr': request.remote_addr,
- 'query': request.GET.dict_of_lists(),
+ 'query': query_params,
'request_id': request.headers.get(REQUEST_ID_HEADER, None)
}
| {"golden_diff": "diff --git a/st2common/st2common/middleware/logging.py b/st2common/st2common/middleware/logging.py\n--- a/st2common/st2common/middleware/logging.py\n+++ b/st2common/st2common/middleware/logging.py\n@@ -14,16 +14,28 @@\n # limitations under the License.\n \n from __future__ import absolute_import\n+\n import time\n import types\n import itertools\n \n+from oslo_config import cfg\n+\n from st2common.constants.api import REQUEST_ID_HEADER\n+from st2common.constants.auth import QUERY_PARAM_ATTRIBUTE_NAME\n+from st2common.constants.auth import QUERY_PARAM_API_KEY_ATTRIBUTE_NAME\n+from st2common.constants.secrets import MASKED_ATTRIBUTE_VALUE\n+from st2common.constants.secrets import MASKED_ATTRIBUTES_BLACKLIST\n from st2common import log as logging\n from st2common.router import Request, NotFoundException\n \n LOG = logging.getLogger(__name__)\n \n+SECRET_QUERY_PARAMS = [\n+ QUERY_PARAM_ATTRIBUTE_NAME,\n+ QUERY_PARAM_API_KEY_ATTRIBUTE_NAME\n+] + MASKED_ATTRIBUTES_BLACKLIST\n+\n try:\n clock = time.perf_counter\n except AttributeError:\n@@ -46,12 +58,20 @@\n \n request = Request(environ)\n \n+ query_params = request.GET.dict_of_lists()\n+\n+ # Mask secret / sensitive query params\n+ secret_query_params = SECRET_QUERY_PARAMS + cfg.CONF.log.mask_secrets_blacklist\n+ for param_name in secret_query_params:\n+ if param_name in query_params:\n+ query_params[param_name] = MASKED_ATTRIBUTE_VALUE\n+\n # Log the incoming request\n values = {\n 'method': request.method,\n 'path': request.path,\n 'remote_addr': request.remote_addr,\n- 'query': request.GET.dict_of_lists(),\n+ 'query': query_params,\n 'request_id': request.headers.get(REQUEST_ID_HEADER, None)\n }\n", "issue": "The api key in the st2api log is not obfuscated\n##### SUMMARY\r\nThe user found in clean API key in query request (for the load balancer health check)\r\n```GET /api/v1/?st2-api-key=foo HTTP/1.1```\r\n\r\n##### ISSUE TYPE\r\n - Bug Report\r\n \r\n##### STACKSTORM VERSION\r\nst2 2.10.3, on Python 2.7.12\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport time\nimport types\nimport itertools\n\nfrom st2common.constants.api import REQUEST_ID_HEADER\nfrom st2common import log as logging\nfrom st2common.router import Request, NotFoundException\n\nLOG = logging.getLogger(__name__)\n\ntry:\n clock = time.perf_counter\nexcept AttributeError:\n clock = time.time\n\n\nclass LoggingMiddleware(object):\n \"\"\"\n Logs all incoming requests and outgoing responses\n \"\"\"\n\n def __init__(self, app, router):\n self.app = app\n self.router = router\n\n def __call__(self, environ, start_response):\n start_time = clock()\n status_code = []\n content_length = []\n\n request = Request(environ)\n\n # Log the incoming request\n values = {\n 'method': request.method,\n 'path': request.path,\n 'remote_addr': request.remote_addr,\n 'query': request.GET.dict_of_lists(),\n 'request_id': request.headers.get(REQUEST_ID_HEADER, None)\n }\n\n LOG.info('%(request_id)s - %(method)s %(path)s with query=%(query)s' %\n values, extra=values)\n\n def custom_start_response(status, headers, exc_info=None):\n status_code.append(int(status.split(' ')[0]))\n\n for name, value in headers:\n if name.lower() == 'content-length':\n content_length.append(int(value))\n break\n\n return start_response(status, headers, exc_info)\n\n retval = self.app(environ, custom_start_response)\n\n try:\n endpoint, path_vars = self.router.match(request)\n except NotFoundException:\n endpoint = {}\n\n log_result = endpoint.get('x-log-result', True)\n\n if isinstance(retval, (types.GeneratorType, itertools.chain)):\n # Note: We don't log the result when return value is a generator, because this would\n # result in calling str() on the generator and as such, exhausting it\n content_length = [float('inf')]\n log_result = False\n\n # Log the response\n values = {\n 'method': request.method,\n 'path': request.path,\n 'remote_addr': request.remote_addr,\n 'status': status_code[0],\n 'runtime': float(\"{0:.3f}\".format((clock() - start_time) * 10**3)),\n 'content_length': content_length[0] if content_length else len(b''.join(retval)),\n 'request_id': request.headers.get(REQUEST_ID_HEADER, None)\n }\n\n log_msg = '%(request_id)s - %(status)s %(content_length)s %(runtime)sms' % (values)\n LOG.info(log_msg, extra=values)\n\n if log_result:\n values['result'] = retval[0]\n log_msg = ('%(request_id)s - %(status)s %(content_length)s %(runtime)sms\\n%(result)s' %\n (values))\n LOG.debug(log_msg, extra=values)\n\n return retval\n", "path": "st2common/st2common/middleware/logging.py"}]} | 1,657 | 406 |
gh_patches_debug_10244 | rasdani/github-patches | git_diff | pyg-team__pytorch_geometric-8207 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
unused `group` parameter in `HGTConv` still documented
### 📚 Describe the documentation issue
#7117 replaces `HGTConv` with the implementation done for the faster `FastHGTConv`. In the process, the `group` parameter has been removed, falling back to the default `sum` operation (@puririshi98: this was intentional, right?). The docs, however, haven't been updated accordingly.
https://github.com/pyg-team/pytorch_geometric/blob/737707c37fc2bd712a2289b683ec14549926ff49/torch_geometric/nn/conv/hgt_conv.py#L40-L43
### Suggest a potential alternative/fix
Remove the unused parameter from the docs.
</issue>
<code>
[start of torch_geometric/nn/conv/hgt_conv.py]
1 import math
2 from typing import Dict, List, Optional, Tuple, Union
3
4 import torch
5 from torch import Tensor
6 from torch.nn import Parameter
7
8 from torch_geometric.nn.conv import MessagePassing
9 from torch_geometric.nn.dense import HeteroDictLinear, HeteroLinear
10 from torch_geometric.nn.inits import ones
11 from torch_geometric.nn.parameter_dict import ParameterDict
12 from torch_geometric.typing import Adj, EdgeType, Metadata, NodeType
13 from torch_geometric.utils import softmax
14 from torch_geometric.utils.hetero import construct_bipartite_edge_index
15
16
17 class HGTConv(MessagePassing):
18 r"""The Heterogeneous Graph Transformer (HGT) operator from the
19 `"Heterogeneous Graph Transformer" <https://arxiv.org/abs/2003.01332>`_
20 paper.
21
22 .. note::
23
24 For an example of using HGT, see `examples/hetero/hgt_dblp.py
25 <https://github.com/pyg-team/pytorch_geometric/blob/master/examples/
26 hetero/hgt_dblp.py>`_.
27
28 Args:
29 in_channels (int or Dict[str, int]): Size of each input sample of every
30 node type, or :obj:`-1` to derive the size from the first input(s)
31 to the forward method.
32 out_channels (int): Size of each output sample.
33 metadata (Tuple[List[str], List[Tuple[str, str, str]]]): The metadata
34 of the heterogeneous graph, *i.e.* its node and edge types given
35 by a list of strings and a list of string triplets, respectively.
36 See :meth:`torch_geometric.data.HeteroData.metadata` for more
37 information.
38 heads (int, optional): Number of multi-head-attentions.
39 (default: :obj:`1`)
40 group (str, optional): The aggregation scheme to use for grouping node
41 embeddings generated by different relations
42 (:obj:`"sum"`, :obj:`"mean"`, :obj:`"min"`, :obj:`"max"`).
43 (default: :obj:`"sum"`)
44 **kwargs (optional): Additional arguments of
45 :class:`torch_geometric.nn.conv.MessagePassing`.
46 """
47 def __init__(
48 self,
49 in_channels: Union[int, Dict[str, int]],
50 out_channels: int,
51 metadata: Metadata,
52 heads: int = 1,
53 **kwargs,
54 ):
55 super().__init__(aggr='add', node_dim=0, **kwargs)
56
57 if out_channels % heads != 0:
58 raise ValueError(f"'out_channels' (got {out_channels}) must be "
59 f"divisible by the number of heads (got {heads})")
60
61 if not isinstance(in_channels, dict):
62 in_channels = {node_type: in_channels for node_type in metadata[0]}
63
64 self.in_channels = in_channels
65 self.out_channels = out_channels
66 self.heads = heads
67 self.node_types = metadata[0]
68 self.edge_types = metadata[1]
69 self.edge_types_map = {
70 edge_type: i
71 for i, edge_type in enumerate(metadata[1])
72 }
73
74 self.dst_node_types = set([key[-1] for key in self.edge_types])
75
76 self.kqv_lin = HeteroDictLinear(self.in_channels,
77 self.out_channels * 3)
78
79 self.out_lin = HeteroDictLinear(self.out_channels, self.out_channels,
80 types=self.node_types)
81
82 dim = out_channels // heads
83 num_types = heads * len(self.edge_types)
84
85 self.k_rel = HeteroLinear(dim, dim, num_types, bias=False,
86 is_sorted=True)
87 self.v_rel = HeteroLinear(dim, dim, num_types, bias=False,
88 is_sorted=True)
89
90 self.skip = ParameterDict({
91 node_type: Parameter(torch.empty(1))
92 for node_type in self.node_types
93 })
94
95 self.p_rel = ParameterDict()
96 for edge_type in self.edge_types:
97 edge_type = '__'.join(edge_type)
98 self.p_rel[edge_type] = Parameter(torch.empty(1, heads))
99
100 self.reset_parameters()
101
102 def reset_parameters(self):
103 super().reset_parameters()
104 self.kqv_lin.reset_parameters()
105 self.out_lin.reset_parameters()
106 self.k_rel.reset_parameters()
107 self.v_rel.reset_parameters()
108 ones(self.skip)
109 ones(self.p_rel)
110
111 def _cat(self, x_dict: Dict[str, Tensor]) -> Tuple[Tensor, Dict[str, int]]:
112 """Concatenates a dictionary of features."""
113 cumsum = 0
114 outs: List[Tensor] = []
115 offset: Dict[str, int] = {}
116 for key, x in x_dict.items():
117 outs.append(x)
118 offset[key] = cumsum
119 cumsum += x.size(0)
120 return torch.cat(outs, dim=0), offset
121
122 def _construct_src_node_feat(
123 self, k_dict: Dict[str, Tensor], v_dict: Dict[str, Tensor],
124 edge_index_dict: Dict[EdgeType, Adj]
125 ) -> Tuple[Tensor, Tensor, Dict[EdgeType, int]]:
126 """Constructs the source node representations."""
127 cumsum = 0
128 num_edge_types = len(self.edge_types)
129 H, D = self.heads, self.out_channels // self.heads
130
131 # Flatten into a single tensor with shape [num_edge_types * heads, D]:
132 ks: List[Tensor] = []
133 vs: List[Tensor] = []
134 type_list: List[Tensor] = []
135 offset: Dict[EdgeType] = {}
136 for edge_type in edge_index_dict.keys():
137 src = edge_type[0]
138 N = k_dict[src].size(0)
139 offset[edge_type] = cumsum
140 cumsum += N
141
142 # construct type_vec for curr edge_type with shape [H, D]
143 edge_type_offset = self.edge_types_map[edge_type]
144 type_vec = torch.arange(H, dtype=torch.long).view(-1, 1).repeat(
145 1, N) * num_edge_types + edge_type_offset
146
147 type_list.append(type_vec)
148 ks.append(k_dict[src])
149 vs.append(v_dict[src])
150
151 ks = torch.cat(ks, dim=0).transpose(0, 1).reshape(-1, D)
152 vs = torch.cat(vs, dim=0).transpose(0, 1).reshape(-1, D)
153 type_vec = torch.cat(type_list, dim=1).flatten()
154
155 k = self.k_rel(ks, type_vec).view(H, -1, D).transpose(0, 1)
156 v = self.v_rel(vs, type_vec).view(H, -1, D).transpose(0, 1)
157
158 return k, v, offset
159
160 def forward(
161 self,
162 x_dict: Dict[NodeType, Tensor],
163 edge_index_dict: Dict[EdgeType, Adj] # Support both.
164 ) -> Dict[NodeType, Optional[Tensor]]:
165 r"""Runs the forward pass of the module.
166
167 Args:
168 x_dict (Dict[str, torch.Tensor]): A dictionary holding input node
169 features for each individual node type.
170 edge_index_dict (Dict[Tuple[str, str, str], torch.Tensor]): A
171 dictionary holding graph connectivity information for each
172 individual edge type, either as a :class:`torch.Tensor` of
173 shape :obj:`[2, num_edges]` or a
174 :class:`torch_sparse.SparseTensor`.
175
176 :rtype: :obj:`Dict[str, Optional[torch.Tensor]]` - The output node
177 embeddings for each node type.
178 In case a node type does not receive any message, its output will
179 be set to :obj:`None`.
180 """
181 F = self.out_channels
182 H = self.heads
183 D = F // H
184
185 k_dict, q_dict, v_dict, out_dict = {}, {}, {}, {}
186
187 # Compute K, Q, V over node types:
188 kqv_dict = self.kqv_lin(x_dict)
189 for key, val in kqv_dict.items():
190 k, q, v = torch.tensor_split(val, 3, dim=1)
191 k_dict[key] = k.view(-1, H, D)
192 q_dict[key] = q.view(-1, H, D)
193 v_dict[key] = v.view(-1, H, D)
194
195 q, dst_offset = self._cat(q_dict)
196 k, v, src_offset = self._construct_src_node_feat(
197 k_dict, v_dict, edge_index_dict)
198
199 edge_index, edge_attr = construct_bipartite_edge_index(
200 edge_index_dict, src_offset, dst_offset, edge_attr_dict=self.p_rel)
201
202 out = self.propagate(edge_index, k=k, q=q, v=v, edge_attr=edge_attr,
203 size=None)
204
205 # Reconstruct output node embeddings dict:
206 for node_type, start_offset in dst_offset.items():
207 end_offset = start_offset + q_dict[node_type].size(0)
208 if node_type in self.dst_node_types:
209 out_dict[node_type] = out[start_offset:end_offset]
210
211 # Transform output node embeddings:
212 a_dict = self.out_lin({
213 k:
214 torch.nn.functional.gelu(v) if v is not None else v
215 for k, v in out_dict.items()
216 })
217
218 # Iterate over node types:
219 for node_type, out in out_dict.items():
220 out = a_dict[node_type]
221
222 if out.size(-1) == x_dict[node_type].size(-1):
223 alpha = self.skip[node_type].sigmoid()
224 out = alpha * out + (1 - alpha) * x_dict[node_type]
225 out_dict[node_type] = out
226
227 return out_dict
228
229 def message(self, k_j: Tensor, q_i: Tensor, v_j: Tensor, edge_attr: Tensor,
230 index: Tensor, ptr: Optional[Tensor],
231 size_i: Optional[int]) -> Tensor:
232 alpha = (q_i * k_j).sum(dim=-1) * edge_attr
233 alpha = alpha / math.sqrt(q_i.size(-1))
234 alpha = softmax(alpha, index, ptr, size_i)
235 out = v_j * alpha.view(-1, self.heads, 1)
236 return out.view(-1, self.out_channels)
237
238 def __repr__(self) -> str:
239 return (f'{self.__class__.__name__}(-1, {self.out_channels}, '
240 f'heads={self.heads})')
241
[end of torch_geometric/nn/conv/hgt_conv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torch_geometric/nn/conv/hgt_conv.py b/torch_geometric/nn/conv/hgt_conv.py
--- a/torch_geometric/nn/conv/hgt_conv.py
+++ b/torch_geometric/nn/conv/hgt_conv.py
@@ -37,10 +37,6 @@
information.
heads (int, optional): Number of multi-head-attentions.
(default: :obj:`1`)
- group (str, optional): The aggregation scheme to use for grouping node
- embeddings generated by different relations
- (:obj:`"sum"`, :obj:`"mean"`, :obj:`"min"`, :obj:`"max"`).
- (default: :obj:`"sum"`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
"""
| {"golden_diff": "diff --git a/torch_geometric/nn/conv/hgt_conv.py b/torch_geometric/nn/conv/hgt_conv.py\n--- a/torch_geometric/nn/conv/hgt_conv.py\n+++ b/torch_geometric/nn/conv/hgt_conv.py\n@@ -37,10 +37,6 @@\n information.\n heads (int, optional): Number of multi-head-attentions.\n (default: :obj:`1`)\n- group (str, optional): The aggregation scheme to use for grouping node\n- embeddings generated by different relations\n- (:obj:`\"sum\"`, :obj:`\"mean\"`, :obj:`\"min\"`, :obj:`\"max\"`).\n- (default: :obj:`\"sum\"`)\n **kwargs (optional): Additional arguments of\n :class:`torch_geometric.nn.conv.MessagePassing`.\n \"\"\"\n", "issue": "unused `group` parameter in `HGTConv` still documented\n### \ud83d\udcda Describe the documentation issue\n\n#7117 replaces `HGTConv` with the implementation done for the faster `FastHGTConv`. In the process of doing so, the `group` parameter has been removed falling back to the default `sum` operation. (@puririshi98: this was intentional, right?). The docs, however, haven't been updated accordingly.\r\n\r\nhttps://github.com/pyg-team/pytorch_geometric/blob/737707c37fc2bd712a2289b683ec14549926ff49/torch_geometric/nn/conv/hgt_conv.py#L40-L43\n\n### Suggest a potential alternative/fix\n\nRemove the unused parameter from the docs.\n", "before_files": [{"content": "import math\nfrom typing import Dict, List, Optional, Tuple, Union\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import Parameter\n\nfrom torch_geometric.nn.conv import MessagePassing\nfrom torch_geometric.nn.dense import HeteroDictLinear, HeteroLinear\nfrom torch_geometric.nn.inits import ones\nfrom torch_geometric.nn.parameter_dict import ParameterDict\nfrom torch_geometric.typing import Adj, EdgeType, Metadata, NodeType\nfrom torch_geometric.utils import softmax\nfrom torch_geometric.utils.hetero import construct_bipartite_edge_index\n\n\nclass HGTConv(MessagePassing):\n r\"\"\"The Heterogeneous Graph Transformer (HGT) operator from the\n `\"Heterogeneous Graph Transformer\" <https://arxiv.org/abs/2003.01332>`_\n paper.\n\n .. 
note::\n\n For an example of using HGT, see `examples/hetero/hgt_dblp.py\n <https://github.com/pyg-team/pytorch_geometric/blob/master/examples/\n hetero/hgt_dblp.py>`_.\n\n Args:\n in_channels (int or Dict[str, int]): Size of each input sample of every\n node type, or :obj:`-1` to derive the size from the first input(s)\n to the forward method.\n out_channels (int): Size of each output sample.\n metadata (Tuple[List[str], List[Tuple[str, str, str]]]): The metadata\n of the heterogeneous graph, *i.e.* its node and edge types given\n by a list of strings and a list of string triplets, respectively.\n See :meth:`torch_geometric.data.HeteroData.metadata` for more\n information.\n heads (int, optional): Number of multi-head-attentions.\n (default: :obj:`1`)\n group (str, optional): The aggregation scheme to use for grouping node\n embeddings generated by different relations\n (:obj:`\"sum\"`, :obj:`\"mean\"`, :obj:`\"min\"`, :obj:`\"max\"`).\n (default: :obj:`\"sum\"`)\n **kwargs (optional): Additional arguments of\n :class:`torch_geometric.nn.conv.MessagePassing`.\n \"\"\"\n def __init__(\n self,\n in_channels: Union[int, Dict[str, int]],\n out_channels: int,\n metadata: Metadata,\n heads: int = 1,\n **kwargs,\n ):\n super().__init__(aggr='add', node_dim=0, **kwargs)\n\n if out_channels % heads != 0:\n raise ValueError(f\"'out_channels' (got {out_channels}) must be \"\n f\"divisible by the number of heads (got {heads})\")\n\n if not isinstance(in_channels, dict):\n in_channels = {node_type: in_channels for node_type in metadata[0]}\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.heads = heads\n self.node_types = metadata[0]\n self.edge_types = metadata[1]\n self.edge_types_map = {\n edge_type: i\n for i, edge_type in enumerate(metadata[1])\n }\n\n self.dst_node_types = set([key[-1] for key in self.edge_types])\n\n self.kqv_lin = HeteroDictLinear(self.in_channels,\n self.out_channels * 3)\n\n self.out_lin = HeteroDictLinear(self.out_channels, self.out_channels,\n types=self.node_types)\n\n dim = out_channels // heads\n num_types = heads * len(self.edge_types)\n\n self.k_rel = HeteroLinear(dim, dim, num_types, bias=False,\n is_sorted=True)\n self.v_rel = HeteroLinear(dim, dim, num_types, bias=False,\n is_sorted=True)\n\n self.skip = ParameterDict({\n node_type: Parameter(torch.empty(1))\n for node_type in self.node_types\n })\n\n self.p_rel = ParameterDict()\n for edge_type in self.edge_types:\n edge_type = '__'.join(edge_type)\n self.p_rel[edge_type] = Parameter(torch.empty(1, heads))\n\n self.reset_parameters()\n\n def reset_parameters(self):\n super().reset_parameters()\n self.kqv_lin.reset_parameters()\n self.out_lin.reset_parameters()\n self.k_rel.reset_parameters()\n self.v_rel.reset_parameters()\n ones(self.skip)\n ones(self.p_rel)\n\n def _cat(self, x_dict: Dict[str, Tensor]) -> Tuple[Tensor, Dict[str, int]]:\n \"\"\"Concatenates a dictionary of features.\"\"\"\n cumsum = 0\n outs: List[Tensor] = []\n offset: Dict[str, int] = {}\n for key, x in x_dict.items():\n outs.append(x)\n offset[key] = cumsum\n cumsum += x.size(0)\n return torch.cat(outs, dim=0), offset\n\n def _construct_src_node_feat(\n self, k_dict: Dict[str, Tensor], v_dict: Dict[str, Tensor],\n edge_index_dict: Dict[EdgeType, Adj]\n ) -> Tuple[Tensor, Tensor, Dict[EdgeType, int]]:\n \"\"\"Constructs the source node representations.\"\"\"\n cumsum = 0\n num_edge_types = len(self.edge_types)\n H, D = self.heads, self.out_channels // self.heads\n\n # Flatten into a single tensor with shape 
[num_edge_types * heads, D]:\n ks: List[Tensor] = []\n vs: List[Tensor] = []\n type_list: List[Tensor] = []\n offset: Dict[EdgeType] = {}\n for edge_type in edge_index_dict.keys():\n src = edge_type[0]\n N = k_dict[src].size(0)\n offset[edge_type] = cumsum\n cumsum += N\n\n # construct type_vec for curr edge_type with shape [H, D]\n edge_type_offset = self.edge_types_map[edge_type]\n type_vec = torch.arange(H, dtype=torch.long).view(-1, 1).repeat(\n 1, N) * num_edge_types + edge_type_offset\n\n type_list.append(type_vec)\n ks.append(k_dict[src])\n vs.append(v_dict[src])\n\n ks = torch.cat(ks, dim=0).transpose(0, 1).reshape(-1, D)\n vs = torch.cat(vs, dim=0).transpose(0, 1).reshape(-1, D)\n type_vec = torch.cat(type_list, dim=1).flatten()\n\n k = self.k_rel(ks, type_vec).view(H, -1, D).transpose(0, 1)\n v = self.v_rel(vs, type_vec).view(H, -1, D).transpose(0, 1)\n\n return k, v, offset\n\n def forward(\n self,\n x_dict: Dict[NodeType, Tensor],\n edge_index_dict: Dict[EdgeType, Adj] # Support both.\n ) -> Dict[NodeType, Optional[Tensor]]:\n r\"\"\"Runs the forward pass of the module.\n\n Args:\n x_dict (Dict[str, torch.Tensor]): A dictionary holding input node\n features for each individual node type.\n edge_index_dict (Dict[Tuple[str, str, str], torch.Tensor]): A\n dictionary holding graph connectivity information for each\n individual edge type, either as a :class:`torch.Tensor` of\n shape :obj:`[2, num_edges]` or a\n :class:`torch_sparse.SparseTensor`.\n\n :rtype: :obj:`Dict[str, Optional[torch.Tensor]]` - The output node\n embeddings for each node type.\n In case a node type does not receive any message, its output will\n be set to :obj:`None`.\n \"\"\"\n F = self.out_channels\n H = self.heads\n D = F // H\n\n k_dict, q_dict, v_dict, out_dict = {}, {}, {}, {}\n\n # Compute K, Q, V over node types:\n kqv_dict = self.kqv_lin(x_dict)\n for key, val in kqv_dict.items():\n k, q, v = torch.tensor_split(val, 3, dim=1)\n k_dict[key] = k.view(-1, H, D)\n q_dict[key] = q.view(-1, H, D)\n v_dict[key] = v.view(-1, H, D)\n\n q, dst_offset = self._cat(q_dict)\n k, v, src_offset = self._construct_src_node_feat(\n k_dict, v_dict, edge_index_dict)\n\n edge_index, edge_attr = construct_bipartite_edge_index(\n edge_index_dict, src_offset, dst_offset, edge_attr_dict=self.p_rel)\n\n out = self.propagate(edge_index, k=k, q=q, v=v, edge_attr=edge_attr,\n size=None)\n\n # Reconstruct output node embeddings dict:\n for node_type, start_offset in dst_offset.items():\n end_offset = start_offset + q_dict[node_type].size(0)\n if node_type in self.dst_node_types:\n out_dict[node_type] = out[start_offset:end_offset]\n\n # Transform output node embeddings:\n a_dict = self.out_lin({\n k:\n torch.nn.functional.gelu(v) if v is not None else v\n for k, v in out_dict.items()\n })\n\n # Iterate over node types:\n for node_type, out in out_dict.items():\n out = a_dict[node_type]\n\n if out.size(-1) == x_dict[node_type].size(-1):\n alpha = self.skip[node_type].sigmoid()\n out = alpha * out + (1 - alpha) * x_dict[node_type]\n out_dict[node_type] = out\n\n return out_dict\n\n def message(self, k_j: Tensor, q_i: Tensor, v_j: Tensor, edge_attr: Tensor,\n index: Tensor, ptr: Optional[Tensor],\n size_i: Optional[int]) -> Tensor:\n alpha = (q_i * k_j).sum(dim=-1) * edge_attr\n alpha = alpha / math.sqrt(q_i.size(-1))\n alpha = softmax(alpha, index, ptr, size_i)\n out = v_j * alpha.view(-1, self.heads, 1)\n return out.view(-1, self.out_channels)\n\n def __repr__(self) -> str:\n return (f'{self.__class__.__name__}(-1, 
{self.out_channels}, '\n f'heads={self.heads})')\n", "path": "torch_geometric/nn/conv/hgt_conv.py"}]} | 3,636 | 192 |
gh_patches_debug_12027 | rasdani/github-patches | git_diff | huggingface__trl-1045 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecated option `optimize_cuda_cache` warning on import of trl
When you just import trl, we get the following warning:
```
$ python3
Python 3.10.8 (main, Nov 24 2022, 14:13:03) [GCC 11.2.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import trl
/opt/conda/lib/python3.10/site-packages/trl/trainer/ppo_config.py:141: UserWarning: The `optimize_cuda_cache` arguement will be deprecated soon, please use `optimize_device_cache` instead.
warnings.warn(
```
Unless this is left in on purpose, I believe that this line https://github.com/huggingface/trl/blob/main/trl/trainer/ppo_config.py#L107 should be changed from:
```
optimize_cuda_cache: bool = False
```
to:
```
optimize_cuda_cache: Optional[bool] = None
```
to get rid of the warning.
I can open a PR if this warning is not needed on default config creation.
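
For context, the deprecation check in `ppo_config.py` runs in the class body, so it executes when the module is first imported and compares the class-level default (`False`) against `None`. A minimal, self-contained reproduction of that behaviour (an illustrative sketch, not the trl source) is:
```python
import warnings
from dataclasses import dataclass
from typing import Optional

@dataclass
class Config:
    optimize_cuda_cache: bool = False             # default is False, so it is never None ...
    optimize_device_cache: Optional[bool] = False

    if optimize_cuda_cache is not None:           # ... and this class-body check always fires
        warnings.warn("`optimize_cuda_cache` is deprecated, use `optimize_device_cache` instead")
        optimize_device_cache = optimize_cuda_cache

# Merely importing the module that defines Config triggers the warning.
```
With the default changed to `Optional[bool] = None`, as suggested above, the comparison is false and no warning is emitted on a plain import.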
</issue>
<code>
[start of trl/trainer/ppo_config.py]
1 # Copyright 2022 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import json
15 import os
16 import sys
17 import warnings
18 from dataclasses import dataclass, field
19 from typing import Literal, Optional
20
21 import numpy as np
22 import tyro
23 from typing_extensions import Annotated
24
25 from trl.trainer.utils import exact_div
26
27 from ..core import flatten_dict
28 from ..import_utils import is_wandb_available
29
30
31 JSONDict = Annotated[Optional[dict], tyro.conf.arg(metavar="JSON", constructor=json.loads)]
32
33
34 @dataclass
35 class PPOConfig:
36 """
37 Configuration class for PPOTrainer
38 """
39
40 # common parameters
41 exp_name: str = os.path.basename(sys.argv[0])[: -len(".py")]
42 """the name of this experiment (by default is the file name without the extension name)"""
43 seed: int = 0
44 """Seed value for random generations"""
45 log_with: Optional[Literal["wandb", "tensorboard"]] = None
46 """Log with either 'wandb' or 'tensorboard', check https://huggingface.co/docs/accelerate/usage_guides/tracking for more details"""
47 task_name: Optional[str] = None
48 """Name of task to use - used only for tracking purposes"""
49 model_name: Optional[str] = None
50 """Name of model to use - used only for tracking purposes"""
51 query_dataset: Optional[str] = None
52 """Name of dataset to query - used only for tracking purposes"""
53 reward_model: Optional[str] = None
54 """The reward model to use - used only for tracking purposes"""
55 remove_unused_columns: bool = True
56 """Remove unused columns from the dataset if `datasets.Dataset` is used"""
57 tracker_kwargs: JSONDict = field(default_factory=dict)
58 """Keyword arguments for the tracker (e.g. python ppo.py --ppo_config.tracker_kwargs='{"wandb": {"entity": "my_wandb_entity", "name": "my_exp_name"}}'"""
59 accelerator_kwargs: JSONDict = field(default_factory=dict)
60 """Keyword arguments for the accelerator"""
61 project_kwargs: JSONDict = field(default_factory=dict)
62 """Keyword arguments for the accelerator project config (e.g. `logging_dir`)"""
63 tracker_project_name: str = "trl"
64 """Name of project to use for tracking"""
65 push_to_hub_if_best_kwargs: JSONDict = field(default_factory=dict)
66 """Keyword arguments for pushing model to the hub during training (e.g. repo_id)"""
67
68 # hyperparameters
69 steps: int = 20000
70 """Number of training steps"""
71 learning_rate: float = 1e-5
72 """Adam learning rate"""
73 adap_kl_ctrl: bool = True
74 """Use adaptive KL control, otherwise linear"""
75 init_kl_coef: Optional[float] = 0.2
76 """Initial KL penalty coefficient (used for adaptive and linear control)"""
77 kl_penalty: Literal["kl", "abs", "mse", "full"] = "kl"
78 """kl penalty options: 'kl': model_logp - ref_logp, 'abs': abs(kl), 'mse': mean squared error mse(kl) and 'full': the actual kl for all tokens in the distribution"""
79 target: Optional[float] = 6
80 """Target KL value for adaptive KL control"""
81 horizon: Optional[float] = 10000
82 """Horizon for adaptive KL control"""
83 gamma: float = 1
84 """Gamma parameter for advantage calculation"""
85 lam: float = 0.95
86 """Lambda parameter for advantage calculation"""
87 cliprange: float = 0.2
88 """Range for clipping in PPO policy gradient loss"""
89 cliprange_value: float = 0.2
90 """Range for clipping values in loss calculation"""
91 vf_coef: float = 0.1
92 """Scaling factor for value loss"""
93 batch_size: int = 256
94 """Number of samples per optimisation step"""
95 forward_batch_size: Optional[int] = None
96 """DEPRECATED: use `mini_batch_size` instead, which does the same thing."""
97 mini_batch_size: int = 1
98 """Number of samples optimized in each mini batch"""
99 gradient_accumulation_steps: int = 1
100 """The number of gradient accumulation steps"""
101 world_size: tyro.conf.Suppress[int] = None
102 """The world size for distributed training"""
103 ppo_epochs: int = 4
104 """Number of optimisation epochs per batch of samples"""
105 max_grad_norm: Optional[float] = None
106 """Maximum gradient norm for gradient clipping"""
107 optimize_cuda_cache: bool = False
108 """DEPRECATED: use `optimize_device_cache` instead, which does the same thing."""
109 optimize_device_cache: Optional[bool] = False
110 """Optimize device cache for slightly more memory-efficient training"""
111 early_stopping: bool = False
112 """Whether to stop the PPO optimization loop early is the KL too high"""
113 target_kl: float = 1
114 """Stop early if we exceed this value by over 50%"""
115 compare_steps: int = 1
116 """Number of steps between comparison of the current reward with the best seen so far"""
117 ratio_threshold: float = 10.0
118 """Skip mini-batches with high PPO ratios that can cause loss spikes"""
119 use_score_scaling: bool = False
120 """Use score scaling"""
121 use_score_norm: bool = False
122 """Use score normalization. Only applicable if use_score_scaling is True"""
123 score_clip: Optional[float] = None
124 """Score clipping"""
125 whiten_rewards: bool = False
126 """Whiten the rewards before compute advantages"""
127
128 # computed hyperparameters at runtime; we use `tyro.conf.Suppress` to hide them from the help text
129 is_encoder_decoder: Optional[tyro.conf.Suppress[bool]] = None
130 """TO BE FILLED In RUNTIME: Whether the model is an encoder-decoder model"""
131 is_peft_model: Optional[tyro.conf.Suppress[bool]] = None
132 """TO BE FILLED In RUNTIME: Whether the model is a PEFT model"""
133 backward_batch_size: tyro.conf.Suppress[int] = None
134 """TO BE FILLED In RUNTIME: Number of samples optimized in an `optimizer.step()` call"""
135 global_backward_batch_size: tyro.conf.Suppress[int] = None
136 """TO BE FILLED In RUNTIME: the effective `backward_batch_size` across all processes"""
137 global_batch_size: tyro.conf.Suppress[int] = None
138 """TO BE FILLED In RUNTIME: the effective `batch_size` across all processes"""
139
140 if optimize_cuda_cache is not None:
141 warnings.warn(
142 "The `optimize_cuda_cache` argument will be deprecated soon, please use `optimize_device_cache` instead."
143 )
144 optimize_device_cache = optimize_cuda_cache
145 else:
146 optimize_device_cache = False
147
148 def __post_init__(self):
149 if self.forward_batch_size is not None:
150 warnings.warn(
151 "Note that using `forward_batch_size` is deprecated, use `mini_batch_size` instead. By setting it you overwrite `mini_batch_size` which affects both the batch size during forward passes and also the mini batch size for PPO optimization."
152 )
153 self.mini_batch_size = self.forward_batch_size
154
155 self.backward_batch_size = self.mini_batch_size * self.gradient_accumulation_steps
156 exact_div(
157 self.batch_size,
158 self.backward_batch_size,
159 "`batch_size`",
160 "`mini_batch_size * gradient_accumulation_steps`",
161 "`batch_size` must be a multiple of `mini_batch_size * gradient_accumulation_steps`",
162 )
163
164 # check if wandb is installed
165 if self.log_with == "wandb":
166 # raise error if wandb is not installed
167 if not is_wandb_available():
168 raise ImportError(
169 "Please install wandb to use wandb logging. You can do this by running `pip install wandb`."
170 )
171
172 self.total_ppo_epochs = int(np.ceil(self.steps / self.batch_size))
173 assert self.kl_penalty in ["kl", "abs", "mse", "full"]
174
175 def to_dict(self):
176 output_dict = {}
177 for key, value in self.__dict__.items():
178 output_dict[key] = value
179 return flatten_dict(output_dict)
180
[end of trl/trainer/ppo_config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/trl/trainer/ppo_config.py b/trl/trainer/ppo_config.py
--- a/trl/trainer/ppo_config.py
+++ b/trl/trainer/ppo_config.py
@@ -104,7 +104,7 @@
"""Number of optimisation epochs per batch of samples"""
max_grad_norm: Optional[float] = None
"""Maximum gradient norm for gradient clipping"""
- optimize_cuda_cache: bool = False
+ optimize_cuda_cache: Optional[bool] = None
"""DEPRECATED: use `optimize_device_cache` instead, which does the same thing."""
optimize_device_cache: Optional[bool] = False
"""Optimize device cache for slightly more memory-efficient training"""
| {"golden_diff": "diff --git a/trl/trainer/ppo_config.py b/trl/trainer/ppo_config.py\n--- a/trl/trainer/ppo_config.py\n+++ b/trl/trainer/ppo_config.py\n@@ -104,7 +104,7 @@\n \"\"\"Number of optimisation epochs per batch of samples\"\"\"\n max_grad_norm: Optional[float] = None\n \"\"\"Maximum gradient norm for gradient clipping\"\"\"\n- optimize_cuda_cache: bool = False\n+ optimize_cuda_cache: Optional[bool] = None\n \"\"\"DEPRECATED: use `optimize_device_cache` instead, which does the same thing.\"\"\"\n optimize_device_cache: Optional[bool] = False\n \"\"\"Optimize device cache for slightly more memory-efficient training\"\"\"\n", "issue": "Deprecated option `optimize_cuda_cache` warning on import of trl\nWhen you just import trl, we get the following warning:\r\n```\r\n$ python3\r\nPython 3.10.8 (main, Nov 24 2022, 14:13:03) [GCC 11.2.0] on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import trl\r\n/opt/conda/lib/python3.10/site-packages/trl/trainer/ppo_config.py:141: UserWarning: The `optimize_cuda_cache` arguement will be deprecated soon, please use `optimize_device_cache` instead.\r\n warnings.warn(\r\n```\r\n\r\nUnless this is left on purpose, I believe that this line https://github.com/huggingface/trl/blob/main/trl/trainer/ppo_config.py#L107, should be changed from:\r\n```\r\noptimize_cuda_cache: bool = False\r\n```\r\nto:\r\n```\r\noptimize_cuda_cache: Optional[bool] = None\r\n```\r\nto get rid of the warning.\r\n\r\nI can open a PR if this warning is not needed on default config creation.\n", "before_files": [{"content": "# Copyright 2022 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport json\nimport os\nimport sys\nimport warnings\nfrom dataclasses import dataclass, field\nfrom typing import Literal, Optional\n\nimport numpy as np\nimport tyro\nfrom typing_extensions import Annotated\n\nfrom trl.trainer.utils import exact_div\n\nfrom ..core import flatten_dict\nfrom ..import_utils import is_wandb_available\n\n\nJSONDict = Annotated[Optional[dict], tyro.conf.arg(metavar=\"JSON\", constructor=json.loads)]\n\n\n@dataclass\nclass PPOConfig:\n \"\"\"\n Configuration class for PPOTrainer\n \"\"\"\n\n # common parameters\n exp_name: str = os.path.basename(sys.argv[0])[: -len(\".py\")]\n \"\"\"the name of this experiment (by default is the file name without the extension name)\"\"\"\n seed: int = 0\n \"\"\"Seed value for random generations\"\"\"\n log_with: Optional[Literal[\"wandb\", \"tensorboard\"]] = None\n \"\"\"Log with either 'wandb' or 'tensorboard', check https://huggingface.co/docs/accelerate/usage_guides/tracking for more details\"\"\"\n task_name: Optional[str] = None\n \"\"\"Name of task to use - used only for tracking purposes\"\"\"\n model_name: Optional[str] = None\n \"\"\"Name of model to use - used only for tracking purposes\"\"\"\n query_dataset: Optional[str] = None\n \"\"\"Name of dataset to query - used only for tracking purposes\"\"\"\n reward_model: Optional[str] = 
None\n \"\"\"The reward model to use - used only for tracking purposes\"\"\"\n remove_unused_columns: bool = True\n \"\"\"Remove unused columns from the dataset if `datasets.Dataset` is used\"\"\"\n tracker_kwargs: JSONDict = field(default_factory=dict)\n \"\"\"Keyword arguments for the tracker (e.g. python ppo.py --ppo_config.tracker_kwargs='{\"wandb\": {\"entity\": \"my_wandb_entity\", \"name\": \"my_exp_name\"}}'\"\"\"\n accelerator_kwargs: JSONDict = field(default_factory=dict)\n \"\"\"Keyword arguments for the accelerator\"\"\"\n project_kwargs: JSONDict = field(default_factory=dict)\n \"\"\"Keyword arguments for the accelerator project config (e.g. `logging_dir`)\"\"\"\n tracker_project_name: str = \"trl\"\n \"\"\"Name of project to use for tracking\"\"\"\n push_to_hub_if_best_kwargs: JSONDict = field(default_factory=dict)\n \"\"\"Keyword arguments for pushing model to the hub during training (e.g. repo_id)\"\"\"\n\n # hyperparameters\n steps: int = 20000\n \"\"\"Number of training steps\"\"\"\n learning_rate: float = 1e-5\n \"\"\"Adam learning rate\"\"\"\n adap_kl_ctrl: bool = True\n \"\"\"Use adaptive KL control, otherwise linear\"\"\"\n init_kl_coef: Optional[float] = 0.2\n \"\"\"Initial KL penalty coefficient (used for adaptive and linear control)\"\"\"\n kl_penalty: Literal[\"kl\", \"abs\", \"mse\", \"full\"] = \"kl\"\n \"\"\"kl penalty options: 'kl': model_logp - ref_logp, 'abs': abs(kl), 'mse': mean squared error mse(kl) and 'full': the actual kl for all tokens in the distribution\"\"\"\n target: Optional[float] = 6\n \"\"\"Target KL value for adaptive KL control\"\"\"\n horizon: Optional[float] = 10000\n \"\"\"Horizon for adaptive KL control\"\"\"\n gamma: float = 1\n \"\"\"Gamma parameter for advantage calculation\"\"\"\n lam: float = 0.95\n \"\"\"Lambda parameter for advantage calculation\"\"\"\n cliprange: float = 0.2\n \"\"\"Range for clipping in PPO policy gradient loss\"\"\"\n cliprange_value: float = 0.2\n \"\"\"Range for clipping values in loss calculation\"\"\"\n vf_coef: float = 0.1\n \"\"\"Scaling factor for value loss\"\"\"\n batch_size: int = 256\n \"\"\"Number of samples per optimisation step\"\"\"\n forward_batch_size: Optional[int] = None\n \"\"\"DEPRECATED: use `mini_batch_size` instead, which does the same thing.\"\"\"\n mini_batch_size: int = 1\n \"\"\"Number of samples optimized in each mini batch\"\"\"\n gradient_accumulation_steps: int = 1\n \"\"\"The number of gradient accumulation steps\"\"\"\n world_size: tyro.conf.Suppress[int] = None\n \"\"\"The world size for distributed training\"\"\"\n ppo_epochs: int = 4\n \"\"\"Number of optimisation epochs per batch of samples\"\"\"\n max_grad_norm: Optional[float] = None\n \"\"\"Maximum gradient norm for gradient clipping\"\"\"\n optimize_cuda_cache: bool = False\n \"\"\"DEPRECATED: use `optimize_device_cache` instead, which does the same thing.\"\"\"\n optimize_device_cache: Optional[bool] = False\n \"\"\"Optimize device cache for slightly more memory-efficient training\"\"\"\n early_stopping: bool = False\n \"\"\"Whether to stop the PPO optimization loop early is the KL too high\"\"\"\n target_kl: float = 1\n \"\"\"Stop early if we exceed this value by over 50%\"\"\"\n compare_steps: int = 1\n \"\"\"Number of steps between comparison of the current reward with the best seen so far\"\"\"\n ratio_threshold: float = 10.0\n \"\"\"Skip mini-batches with high PPO ratios that can cause loss spikes\"\"\"\n use_score_scaling: bool = False\n \"\"\"Use score scaling\"\"\"\n use_score_norm: bool = False\n \"\"\"Use 
score normalization. Only applicable if use_score_scaling is True\"\"\"\n score_clip: Optional[float] = None\n \"\"\"Score clipping\"\"\"\n whiten_rewards: bool = False\n \"\"\"Whiten the rewards before compute advantages\"\"\"\n\n # computed hyperparameters at runtime; we use `tyro.conf.Suppress` to hide them from the help text\n is_encoder_decoder: Optional[tyro.conf.Suppress[bool]] = None\n \"\"\"TO BE FILLED In RUNTIME: Whether the model is an encoder-decoder model\"\"\"\n is_peft_model: Optional[tyro.conf.Suppress[bool]] = None\n \"\"\"TO BE FILLED In RUNTIME: Whether the model is a PEFT model\"\"\"\n backward_batch_size: tyro.conf.Suppress[int] = None\n \"\"\"TO BE FILLED In RUNTIME: Number of samples optimized in an `optimizer.step()` call\"\"\"\n global_backward_batch_size: tyro.conf.Suppress[int] = None\n \"\"\"TO BE FILLED In RUNTIME: the effective `backward_batch_size` across all processes\"\"\"\n global_batch_size: tyro.conf.Suppress[int] = None\n \"\"\"TO BE FILLED In RUNTIME: the effective `batch_size` across all processes\"\"\"\n\n if optimize_cuda_cache is not None:\n warnings.warn(\n \"The `optimize_cuda_cache` argument will be deprecated soon, please use `optimize_device_cache` instead.\"\n )\n optimize_device_cache = optimize_cuda_cache\n else:\n optimize_device_cache = False\n\n def __post_init__(self):\n if self.forward_batch_size is not None:\n warnings.warn(\n \"Note that using `forward_batch_size` is deprecated, use `mini_batch_size` instead. By setting it you overwrite `mini_batch_size` which affects both the batch size during forward passes and also the mini batch size for PPO optimization.\"\n )\n self.mini_batch_size = self.forward_batch_size\n\n self.backward_batch_size = self.mini_batch_size * self.gradient_accumulation_steps\n exact_div(\n self.batch_size,\n self.backward_batch_size,\n \"`batch_size`\",\n \"`mini_batch_size * gradient_accumulation_steps`\",\n \"`batch_size` must be a multiple of `mini_batch_size * gradient_accumulation_steps`\",\n )\n\n # check if wandb is installed\n if self.log_with == \"wandb\":\n # raise error if wandb is not installed\n if not is_wandb_available():\n raise ImportError(\n \"Please install wandb to use wandb logging. You can do this by running `pip install wandb`.\"\n )\n\n self.total_ppo_epochs = int(np.ceil(self.steps / self.batch_size))\n assert self.kl_penalty in [\"kl\", \"abs\", \"mse\", \"full\"]\n\n def to_dict(self):\n output_dict = {}\n for key, value in self.__dict__.items():\n output_dict[key] = value\n return flatten_dict(output_dict)\n", "path": "trl/trainer/ppo_config.py"}]} | 3,155 | 157 |
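The patch in the record above swaps the deprecated option's `bool = False` default for `Optional[bool] = None`, so the deprecation branch only fires when a caller actually sets the old flag. A minimal, self-contained sketch of that sentinel-default pattern follows; the class and field names are invented for illustration and are not taken from the library.

```python
import warnings
from dataclasses import dataclass
from typing import Optional


@dataclass
class ExampleConfig:
    # None is the sentinel for "not set by the user"; only explicit values
    # trigger the deprecation path, so constructing with defaults stays silent.
    old_flag: Optional[bool] = None
    new_flag: bool = False

    def __post_init__(self):
        if self.old_flag is not None:
            warnings.warn("`old_flag` is deprecated, use `new_flag` instead.")
            self.new_flag = self.old_flag


ExampleConfig()                # no warning on plain instantiation
ExampleConfig(old_flag=True)   # warns and copies the value into new_flag
```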
gh_patches_debug_18409 | rasdani/github-patches | git_diff | pytorch__text-377 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Translation datasets not automatically downloading
Code:
``` python
from torchtext.data import Field
from torchtext.datasets import Multi30k
DE = Field(init_token='<sos>', eos_token='<eos>')
EN = Field(init_token='<sos>', eos_token='<eos>')
train, val, test = Multi30k.splits(exts=('.de', '.en'), fields=(DE, EN))
```
Error:
```
---------------------------------------------------------------------------
FileNotFoundError Traceback (most recent call last)
<ipython-input-3-637d49b65435> in <module>()
----> 1 train, val, test = Multi30k.splits(exts=('.de', '.en'), fields=(DE, EN))
~/miniconda3/envs/pytorch/lib/python3.6/site-packages/torchtext/datasets/translation.py in splits(cls, exts, fields, root, train, validation, test, **kwargs)
99 """
100 return super(Multi30k, cls).splits(
--> 101 exts, fields, root, train, validation, test, **kwargs)
102
103
~/miniconda3/envs/pytorch/lib/python3.6/site-packages/torchtext/datasets/translation.py in splits(cls, exts, fields, path, root, train, validation, test, **kwargs)
62
63 train_data = None if train is None else cls(
---> 64 os.path.join(path, train), exts, fields, **kwargs)
65 val_data = None if validation is None else cls(
66 os.path.join(path, validation), exts, fields, **kwargs)
~/miniconda3/envs/pytorch/lib/python3.6/site-packages/torchtext/datasets/translation.py in __init__(self, path, exts, fields, **kwargs)
31
32 examples = []
---> 33 with open(src_path) as src_file, open(trg_path) as trg_file:
34 for src_line, trg_line in zip(src_file, trg_file):
35 src_line, trg_line = src_line.strip(), trg_line.strip()
FileNotFoundError: [Errno 2] No such file or directory: '.data/val.de'
```
It just doesn't seem to download the data automatically for either the Multi30k or the WMT14 dataset.
PyTorch version: 0.3.1
TorchText version 0.2.3
**EDIT**
I have downgraded my TorchText to version 0.2.1 and I do not get the error. I had a quick look at the commits between 0.2.1 and 0.2.3 but couldn't figure out which commit introduced the break.
</issue>
<code>
[start of torchtext/datasets/translation.py]
1 import os
2 import xml.etree.ElementTree as ET
3 import glob
4 import io
5
6 from .. import data
7
8
9 class TranslationDataset(data.Dataset):
10 """Defines a dataset for machine translation."""
11
12 @staticmethod
13 def sort_key(ex):
14 return data.interleave_keys(len(ex.src), len(ex.trg))
15
16 def __init__(self, path, exts, fields, **kwargs):
17 """Create a TranslationDataset given paths and fields.
18
19 Arguments:
20 path: Common prefix of paths to the data files for both languages.
21 exts: A tuple containing the extension to path for each language.
22 fields: A tuple containing the fields that will be used for data
23 in each language.
24 Remaining keyword arguments: Passed to the constructor of
25 data.Dataset.
26 """
27 if not isinstance(fields[0], (tuple, list)):
28 fields = [('src', fields[0]), ('trg', fields[1])]
29
30 src_path, trg_path = tuple(os.path.expanduser(path + x) for x in exts)
31
32 examples = []
33 with open(src_path) as src_file, open(trg_path) as trg_file:
34 for src_line, trg_line in zip(src_file, trg_file):
35 src_line, trg_line = src_line.strip(), trg_line.strip()
36 if src_line != '' and trg_line != '':
37 examples.append(data.Example.fromlist(
38 [src_line, trg_line], fields))
39
40 super(TranslationDataset, self).__init__(examples, fields, **kwargs)
41
42 @classmethod
43 def splits(cls, exts, fields, path=None, root='.data',
44 train='train', validation='val', test='test', **kwargs):
45 """Create dataset objects for splits of a TranslationDataset.
46
47 Arguments:
48 path (str): Common prefix of the splits' file paths, or None to use
49 the result of cls.download(root).
50 root: Root dataset storage directory. Default is '.data'.
51 exts: A tuple containing the extension to path for each language.
52 fields: A tuple containing the fields that will be used for data
53 in each language.
54 train: The prefix of the train data. Default: 'train'.
55 validation: The prefix of the validation data. Default: 'val'.
56 test: The prefix of the test data. Default: 'test'.
57 Remaining keyword arguments: Passed to the splits method of
58 Dataset.
59 """
60 if path is None:
61 path = cls.download(root)
62
63 train_data = None if train is None else cls(
64 os.path.join(path, train), exts, fields, **kwargs)
65 val_data = None if validation is None else cls(
66 os.path.join(path, validation), exts, fields, **kwargs)
67 test_data = None if test is None else cls(
68 os.path.join(path, test), exts, fields, **kwargs)
69 return tuple(d for d in (train_data, val_data, test_data)
70 if d is not None)
71
72
73 class Multi30k(TranslationDataset):
74 """The small-dataset WMT 2016 multimodal task, also known as Flickr30k"""
75
76 urls = ['http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar.gz',
77 'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz',
78 'http://www.quest.dcs.shef.ac.uk/'
79 'wmt17_files_mmt/mmt_task1_test2016.tar.gz']
80 name = 'multi30k'
81 dirname = ''
82
83 @classmethod
84 def splits(cls, exts, fields, root='.data',
85 train='train', validation='val', test='test2016', **kwargs):
86 """Create dataset objects for splits of the Multi30k dataset.
87
88 Arguments:
89
90 root: Root dataset storage directory. Default is '.data'.
91 exts: A tuple containing the extension to path for each language.
92 fields: A tuple containing the fields that will be used for data
93 in each language.
94 train: The prefix of the train data. Default: 'train'.
95 validation: The prefix of the validation data. Default: 'val'.
96 test: The prefix of the test data. Default: 'test'.
97 Remaining keyword arguments: Passed to the splits method of
98 Dataset.
99 """
100 return super(Multi30k, cls).splits(
101 exts, fields, root, train, validation, test, **kwargs)
102
103
104 class IWSLT(TranslationDataset):
105 """The IWSLT 2016 TED talk translation task"""
106
107 base_url = 'https://wit3.fbk.eu/archive/2016-01//texts/{}/{}/{}.tgz'
108 name = 'iwslt'
109 base_dirname = '{}-{}'
110
111 @classmethod
112 def splits(cls, exts, fields, root='.data',
113 train='train', validation='IWSLT16.TED.tst2013',
114 test='IWSLT16.TED.tst2014', **kwargs):
115 """Create dataset objects for splits of the IWSLT dataset.
116
117 Arguments:
118
119 root: Root dataset storage directory. Default is '.data'.
120 exts: A tuple containing the extension to path for each language.
121 fields: A tuple containing the fields that will be used for data
122 in each language.
123 train: The prefix of the train data. Default: 'train'.
124 validation: The prefix of the validation data. Default: 'val'.
125 test: The prefix of the test data. Default: 'test'.
126 Remaining keyword arguments: Passed to the splits method of
127 Dataset.
128 """
129 cls.dirname = cls.base_dirname.format(exts[0][1:], exts[1][1:])
130 cls.urls = [cls.base_url.format(exts[0][1:], exts[1][1:], cls.dirname)]
131 check = os.path.join(root, cls.name, cls.dirname)
132 path = cls.download(root, check=check)
133
134 train = '.'.join([train, cls.dirname])
135 validation = '.'.join([validation, cls.dirname])
136 if test is not None:
137 test = '.'.join([test, cls.dirname])
138
139 if not os.path.exists(os.path.join(path, train) + exts[0]):
140 cls.clean(path)
141
142 train_data = None if train is None else cls(
143 os.path.join(path, train), exts, fields, **kwargs)
144 val_data = None if validation is None else cls(
145 os.path.join(path, validation), exts, fields, **kwargs)
146 test_data = None if test is None else cls(
147 os.path.join(path, test), exts, fields, **kwargs)
148 return tuple(d for d in (train_data, val_data, test_data)
149 if d is not None)
150
151 @staticmethod
152 def clean(path):
153 for f_xml in glob.iglob(os.path.join(path, '*.xml')):
154 print(f_xml)
155 f_txt = os.path.splitext(f_xml)[0]
156 with io.open(f_txt, mode='w', encoding='utf-8') as fd_txt:
157 root = ET.parse(f_xml).getroot()[0]
158 for doc in root.findall('doc'):
159 for e in doc.findall('seg'):
160 fd_txt.write(e.text.strip() + '\n')
161
162 xml_tags = ['<url', '<keywords', '<talkid', '<description',
163 '<reviewer', '<translator', '<title', '<speaker']
164 for f_orig in glob.iglob(os.path.join(path, 'train.tags*')):
165 print(f_orig)
166 f_txt = f_orig.replace('.tags', '')
167 with io.open(f_txt, mode='w', encoding='utf-8') as fd_txt, \
168 io.open(f_orig, mode='r', encoding='utf-8') as fd_orig:
169 for l in fd_orig:
170 if not any(tag in l for tag in xml_tags):
171 fd_txt.write(l.strip() + '\n')
172
173
174 class WMT14(TranslationDataset):
175 """The WMT 2014 English-German dataset, as preprocessed by Google Brain.
176
177 Though this download contains test sets from 2015 and 2016, the train set
178 differs slightly from WMT 2015 and 2016 and significantly from WMT 2017."""
179
180 urls = [('https://drive.google.com/uc?export=download&'
181 'id=0B_bZck-ksdkpM25jRUN2X2UxMm8', 'wmt16_en_de.tar.gz')]
182 name = 'wmt14'
183 dirname = ''
184
185 @classmethod
186 def splits(cls, exts, fields, root='.data',
187 train='train.tok.clean.bpe.32000',
188 validation='newstest2013.tok.bpe.32000',
189 test='newstest2014.tok.bpe.32000', **kwargs):
190 """Create dataset objects for splits of the WMT 2014 dataset.
191
192 Arguments:
193
194 root: Root dataset storage directory. Default is '.data'.
195 exts: A tuple containing the extensions for each language. Must be
196 either ('.en', '.de') or the reverse.
197 fields: A tuple containing the fields that will be used for data
198 in each language.
199 train: The prefix of the train data. Default:
200 'train.tok.clean.bpe.32000'.
201 validation: The prefix of the validation data. Default:
202 'newstest2013.tok.bpe.32000'.
203 test: The prefix of the test data. Default:
204 'newstest2014.tok.bpe.32000'.
205 Remaining keyword arguments: Passed to the splits method of
206 Dataset.
207 """
208 return super(WMT14, cls).splits(
209 exts, fields, root, train, validation, test, **kwargs)
210
[end of torchtext/datasets/translation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchtext/datasets/translation.py b/torchtext/datasets/translation.py
--- a/torchtext/datasets/translation.py
+++ b/torchtext/datasets/translation.py
@@ -97,8 +97,9 @@
Remaining keyword arguments: Passed to the splits method of
Dataset.
"""
+ path = os.path.join('data', cls.name)
return super(Multi30k, cls).splits(
- exts, fields, root, train, validation, test, **kwargs)
+ exts, fields, path, root, train, validation, test, **kwargs)
class IWSLT(TranslationDataset):
@@ -205,5 +206,6 @@
Remaining keyword arguments: Passed to the splits method of
Dataset.
"""
+ path = os.path.join('data', cls.name)
return super(WMT14, cls).splits(
- exts, fields, root, train, validation, test, **kwargs)
+ exts, fields, path, root, train, validation, test, **kwargs)
| {"golden_diff": "diff --git a/torchtext/datasets/translation.py b/torchtext/datasets/translation.py\n--- a/torchtext/datasets/translation.py\n+++ b/torchtext/datasets/translation.py\n@@ -97,8 +97,9 @@\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n+ path = os.path.join('data', cls.name)\n return super(Multi30k, cls).splits(\n- exts, fields, root, train, validation, test, **kwargs)\n+ exts, fields, path, root, train, validation, test, **kwargs)\n \n \n class IWSLT(TranslationDataset):\n@@ -205,5 +206,6 @@\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n+ path = os.path.join('data', cls.name)\n return super(WMT14, cls).splits(\n- exts, fields, root, train, validation, test, **kwargs)\n+ exts, fields, path, root, train, validation, test, **kwargs)\n", "issue": "Translation datasets not automatically downloading\nCode:\r\n``` python\r\n\r\nfrom torchtext.data import Field\r\nfrom torchtext.datasets import Multi30k\r\n\r\nDE = Field(init_token='<sos>', eos_token='<eos>')\r\nEN = Field(init_token='<sos>', eos_token='<eos>')\r\n\r\ntrain, val, test = Multi30k.splits(exts=('.de', '.en'), fields=(DE, EN))\r\n```\r\n\r\nError:\r\n```\r\n---------------------------------------------------------------------------\r\nFileNotFoundError Traceback (most recent call last)\r\n<ipython-input-3-637d49b65435> in <module>()\r\n----> 1 train, val, test = Multi30k.splits(exts=('.de', '.en'), fields=(DE, EN))\r\n\r\n~/miniconda3/envs/pytorch/lib/python3.6/site-packages/torchtext/datasets/translation.py in splits(cls, exts, fields, root, train, validation, test, **kwargs)\r\n 99 \"\"\"\r\n 100 return super(Multi30k, cls).splits(\r\n--> 101 exts, fields, root, train, validation, test, **kwargs)\r\n 102 \r\n 103 \r\n\r\n~/miniconda3/envs/pytorch/lib/python3.6/site-packages/torchtext/datasets/translation.py in splits(cls, exts, fields, path, root, train, validation, test, **kwargs)\r\n 62 \r\n 63 train_data = None if train is None else cls(\r\n---> 64 os.path.join(path, train), exts, fields, **kwargs)\r\n 65 val_data = None if validation is None else cls(\r\n 66 os.path.join(path, validation), exts, fields, **kwargs)\r\n\r\n~/miniconda3/envs/pytorch/lib/python3.6/site-packages/torchtext/datasets/translation.py in __init__(self, path, exts, fields, **kwargs)\r\n 31 \r\n 32 examples = []\r\n---> 33 with open(src_path) as src_file, open(trg_path) as trg_file:\r\n 34 for src_line, trg_line in zip(src_file, trg_file):\r\n 35 src_line, trg_line = src_line.strip(), trg_line.strip()\r\n\r\nFileNotFoundError: [Errno 2] No such file or directory: '.data/val.de'\r\n```\r\n\r\nIt just doesn't seem to automatically download the data for both the Multi30k and WMT14 datasets. \r\n\r\nPyTorch version: 0.3.1\r\nTorchText version 0.2.3\r\n\r\n**EDIT**\r\n\r\nI have downgraded my TorchText to version 0.2.1 and I do not get the error, had a quick look at the commits between 0.2.1 and 0.2.3 and couldn't figure out which commit introduced the break.\n", "before_files": [{"content": "import os\nimport xml.etree.ElementTree as ET\nimport glob\nimport io\n\nfrom .. 
import data\n\n\nclass TranslationDataset(data.Dataset):\n \"\"\"Defines a dataset for machine translation.\"\"\"\n\n @staticmethod\n def sort_key(ex):\n return data.interleave_keys(len(ex.src), len(ex.trg))\n\n def __init__(self, path, exts, fields, **kwargs):\n \"\"\"Create a TranslationDataset given paths and fields.\n\n Arguments:\n path: Common prefix of paths to the data files for both languages.\n exts: A tuple containing the extension to path for each language.\n fields: A tuple containing the fields that will be used for data\n in each language.\n Remaining keyword arguments: Passed to the constructor of\n data.Dataset.\n \"\"\"\n if not isinstance(fields[0], (tuple, list)):\n fields = [('src', fields[0]), ('trg', fields[1])]\n\n src_path, trg_path = tuple(os.path.expanduser(path + x) for x in exts)\n\n examples = []\n with open(src_path) as src_file, open(trg_path) as trg_file:\n for src_line, trg_line in zip(src_file, trg_file):\n src_line, trg_line = src_line.strip(), trg_line.strip()\n if src_line != '' and trg_line != '':\n examples.append(data.Example.fromlist(\n [src_line, trg_line], fields))\n\n super(TranslationDataset, self).__init__(examples, fields, **kwargs)\n\n @classmethod\n def splits(cls, exts, fields, path=None, root='.data',\n train='train', validation='val', test='test', **kwargs):\n \"\"\"Create dataset objects for splits of a TranslationDataset.\n\n Arguments:\n path (str): Common prefix of the splits' file paths, or None to use\n the result of cls.download(root).\n root: Root dataset storage directory. Default is '.data'.\n exts: A tuple containing the extension to path for each language.\n fields: A tuple containing the fields that will be used for data\n in each language.\n train: The prefix of the train data. Default: 'train'.\n validation: The prefix of the validation data. Default: 'val'.\n test: The prefix of the test data. Default: 'test'.\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n if path is None:\n path = cls.download(root)\n\n train_data = None if train is None else cls(\n os.path.join(path, train), exts, fields, **kwargs)\n val_data = None if validation is None else cls(\n os.path.join(path, validation), exts, fields, **kwargs)\n test_data = None if test is None else cls(\n os.path.join(path, test), exts, fields, **kwargs)\n return tuple(d for d in (train_data, val_data, test_data)\n if d is not None)\n\n\nclass Multi30k(TranslationDataset):\n \"\"\"The small-dataset WMT 2016 multimodal task, also known as Flickr30k\"\"\"\n\n urls = ['http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar.gz',\n 'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz',\n 'http://www.quest.dcs.shef.ac.uk/'\n 'wmt17_files_mmt/mmt_task1_test2016.tar.gz']\n name = 'multi30k'\n dirname = ''\n\n @classmethod\n def splits(cls, exts, fields, root='.data',\n train='train', validation='val', test='test2016', **kwargs):\n \"\"\"Create dataset objects for splits of the Multi30k dataset.\n\n Arguments:\n\n root: Root dataset storage directory. Default is '.data'.\n exts: A tuple containing the extension to path for each language.\n fields: A tuple containing the fields that will be used for data\n in each language.\n train: The prefix of the train data. Default: 'train'.\n validation: The prefix of the validation data. Default: 'val'.\n test: The prefix of the test data. 
Default: 'test'.\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n return super(Multi30k, cls).splits(\n exts, fields, root, train, validation, test, **kwargs)\n\n\nclass IWSLT(TranslationDataset):\n \"\"\"The IWSLT 2016 TED talk translation task\"\"\"\n\n base_url = 'https://wit3.fbk.eu/archive/2016-01//texts/{}/{}/{}.tgz'\n name = 'iwslt'\n base_dirname = '{}-{}'\n\n @classmethod\n def splits(cls, exts, fields, root='.data',\n train='train', validation='IWSLT16.TED.tst2013',\n test='IWSLT16.TED.tst2014', **kwargs):\n \"\"\"Create dataset objects for splits of the IWSLT dataset.\n\n Arguments:\n\n root: Root dataset storage directory. Default is '.data'.\n exts: A tuple containing the extension to path for each language.\n fields: A tuple containing the fields that will be used for data\n in each language.\n train: The prefix of the train data. Default: 'train'.\n validation: The prefix of the validation data. Default: 'val'.\n test: The prefix of the test data. Default: 'test'.\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n cls.dirname = cls.base_dirname.format(exts[0][1:], exts[1][1:])\n cls.urls = [cls.base_url.format(exts[0][1:], exts[1][1:], cls.dirname)]\n check = os.path.join(root, cls.name, cls.dirname)\n path = cls.download(root, check=check)\n\n train = '.'.join([train, cls.dirname])\n validation = '.'.join([validation, cls.dirname])\n if test is not None:\n test = '.'.join([test, cls.dirname])\n\n if not os.path.exists(os.path.join(path, train) + exts[0]):\n cls.clean(path)\n\n train_data = None if train is None else cls(\n os.path.join(path, train), exts, fields, **kwargs)\n val_data = None if validation is None else cls(\n os.path.join(path, validation), exts, fields, **kwargs)\n test_data = None if test is None else cls(\n os.path.join(path, test), exts, fields, **kwargs)\n return tuple(d for d in (train_data, val_data, test_data)\n if d is not None)\n\n @staticmethod\n def clean(path):\n for f_xml in glob.iglob(os.path.join(path, '*.xml')):\n print(f_xml)\n f_txt = os.path.splitext(f_xml)[0]\n with io.open(f_txt, mode='w', encoding='utf-8') as fd_txt:\n root = ET.parse(f_xml).getroot()[0]\n for doc in root.findall('doc'):\n for e in doc.findall('seg'):\n fd_txt.write(e.text.strip() + '\\n')\n\n xml_tags = ['<url', '<keywords', '<talkid', '<description',\n '<reviewer', '<translator', '<title', '<speaker']\n for f_orig in glob.iglob(os.path.join(path, 'train.tags*')):\n print(f_orig)\n f_txt = f_orig.replace('.tags', '')\n with io.open(f_txt, mode='w', encoding='utf-8') as fd_txt, \\\n io.open(f_orig, mode='r', encoding='utf-8') as fd_orig:\n for l in fd_orig:\n if not any(tag in l for tag in xml_tags):\n fd_txt.write(l.strip() + '\\n')\n\n\nclass WMT14(TranslationDataset):\n \"\"\"The WMT 2014 English-German dataset, as preprocessed by Google Brain.\n\n Though this download contains test sets from 2015 and 2016, the train set\n differs slightly from WMT 2015 and 2016 and significantly from WMT 2017.\"\"\"\n\n urls = [('https://drive.google.com/uc?export=download&'\n 'id=0B_bZck-ksdkpM25jRUN2X2UxMm8', 'wmt16_en_de.tar.gz')]\n name = 'wmt14'\n dirname = ''\n\n @classmethod\n def splits(cls, exts, fields, root='.data',\n train='train.tok.clean.bpe.32000',\n validation='newstest2013.tok.bpe.32000',\n test='newstest2014.tok.bpe.32000', **kwargs):\n \"\"\"Create dataset objects for splits of the WMT 2014 dataset.\n\n Arguments:\n\n root: Root dataset storage directory. 
Default is '.data'.\n exts: A tuple containing the extensions for each language. Must be\n either ('.en', '.de') or the reverse.\n fields: A tuple containing the fields that will be used for data\n in each language.\n train: The prefix of the train data. Default:\n 'train.tok.clean.bpe.32000'.\n validation: The prefix of the validation data. Default:\n 'newstest2013.tok.bpe.32000'.\n test: The prefix of the test data. Default:\n 'newstest2014.tok.bpe.32000'.\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n return super(WMT14, cls).splits(\n exts, fields, root, train, validation, test, **kwargs)\n", "path": "torchtext/datasets/translation.py"}]} | 3,885 | 242 |
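The traceback in the record above comes from argument forwarding rather than a missing download step: `Multi30k.splits` passes `root` positionally into the `path` parameter of `TranslationDataset.splits`, so `path` is never `None`, `cls.download(root)` is never reached, and every later positional argument shifts by one slot (which is how `'val'` ends up joined onto `'.data'`). The patch builds an explicit `path` and forwards both `path` and `root`. A simplified, self-contained sketch of the pitfall, using stub functions rather than the real torchtext API:

```python
import os


def download(root):
    print(f"downloading into {root!r}")
    return os.path.join(root, "multi30k")


def base_splits(exts, fields, path=None, root=".data"):
    if path is None:
        path = download(root)          # only reached when path is really None
    print(f"loading splits from {path!r}")
    return path


def buggy_subclass_splits(exts, fields, root=".data"):
    # BUG: `root` is forwarded positionally, so it fills the `path` parameter
    # of base_splits; path becomes '.data' and download() is silently skipped.
    return base_splits(exts, fields, root)


def fixed_subclass_splits(exts, fields, root=".data"):
    # FIX (mirroring the patch): pass an explicit dataset path and keep `root`
    # in its own slot.
    path = os.path.join("data", "multi30k")
    return base_splits(exts, fields, path, root)


buggy_subclass_splits((".de", ".en"), None)   # loads from '.data', no download
fixed_subclass_splits((".de", ".en"), None)   # loads from 'data/multi30k'
```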
gh_patches_debug_16766 | rasdani/github-patches | git_diff | PokemonGoF__PokemonGo-Bot-5102 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Trying to Incubate Eggs Always
Same issue for me as reported in #4875.
> I was listening to events via websocket. I realized that it always try to incubate eggs. While I'm listening events these logs appear every 5-10 secs.
>
> Attempting to apply incubator EggIncubatorProto7823468502291754353 to egg 14174621514891967477
> Incubator in use.
But restarting the bot is not a solution for me.
</issue>
<code>
[start of pokemongo_bot/cell_workers/incubate_eggs.py]
1 from datetime import datetime, timedelta
2
3 from pokemongo_bot import inventory
4 from pokemongo_bot.human_behaviour import sleep
5 from pokemongo_bot.base_task import BaseTask
6
7
8 class IncubateEggs(BaseTask):
9 SUPPORTED_TASK_API_VERSION = 1
10
11 last_km_walked = 0
12
13 def initialize(self):
14 self.next_update = None
15 self.ready_breakable_incubators = []
16 self.ready_infinite_incubators = []
17 self.used_incubators = []
18 self.eggs = []
19 self.km_walked = 0
20 self.hatching_animation_delay = 4.20
21 self.max_iv = 45.0
22
23 self._process_config()
24
25 def _process_config(self):
26 self.infinite_longer_eggs_first = self.config.get("infinite_longer_eggs_first", False)
27 self.breakable_longer_eggs_first = self.config.get("breakable_longer_eggs_first", True)
28 self.min_interval = self.config.get('min_interval', 120)
29
30 self.breakable_incubator = self.config.get("breakable", [2,5,10])
31 self.infinite_incubator = self.config.get("infinite", [2,5,10])
32
33 def work(self):
34 try:
35 self._check_inventory()
36 except:
37 return
38
39 should_print = self._should_print()
40
41 if self.used_incubators and IncubateEggs.last_km_walked != self.km_walked:
42 self.used_incubators.sort(key=lambda x: x.get("km"))
43 km_left = self.used_incubators[0]['km']-self.km_walked
44 if km_left <= 0:
45 self._hatch_eggs()
46 should_print = False
47 else:
48 self.bot.metrics.next_hatching_km(km_left)
49
50 if should_print:
51 self._print_eggs()
52 self._compute_next_update()
53
54 IncubateEggs.last_km_walked = self.km_walked
55
56 # if there is a ready infinite incubator
57 if self.ready_infinite_incubators:
58 # get available eggs
59 eggs = self._filter_sort_eggs(self.infinite_incubator,
60 self.infinite_longer_eggs_first)
61 self._apply_incubators(eggs, self.ready_infinite_incubators)
62
63 if self.ready_breakable_incubators:
64 # get available eggs
65 eggs = self._filter_sort_eggs(self.breakable_incubator,
66 self.breakable_longer_eggs_first)
67 self._apply_incubators(eggs, self.ready_breakable_incubators)
68
69
70 def _filter_sort_eggs(self, allowed, sorting):
71 eligible_eggs = filter(lambda egg: int(egg["km"]) in allowed, self.eggs)
72 eligible_eggs.sort(key=lambda egg: egg["km"], reverse=sorting)
73
74 return eligible_eggs
75
76
77 def _apply_incubators(self, available_eggs, available_incubators):
78
79 for incubator in available_incubators:
80 for egg in available_eggs:
81 if egg["used"] or egg["km"] == -1:
82 continue
83
84 self.emit_event(
85 'incubate_try',
86 level='debug',
87 formatted="Attempting to apply incubator {incubator_id} to egg {egg_id}",
88 data={
89 'incubator_id': incubator['id'],
90 'egg_id': egg['id']
91 }
92 )
93 ret = self.bot.api.use_item_egg_incubator(
94 item_id=incubator["id"],
95 pokemon_id=egg["id"]
96 )
97 if ret:
98 code = ret.get("responses", {}).get("USE_ITEM_EGG_INCUBATOR", {}).get("result", 0)
99 if code == 1:
100 self.emit_event(
101 'incubate',
102 formatted='Incubating a {distance_in_km} egg.',
103 data={
104 'distance_in_km': str(egg['km'])
105 }
106 )
107 egg["used"] = True
108 incubator["used"] = True
109 break
110 elif code == 5 or code == 7:
111 self.emit_event(
112 'incubator_already_used',
113 level='debug',
114 formatted='Incubator in use.',
115 )
116 incubator["used"] = True
117 break
118 elif code == 6:
119 self.emit_event(
120 'egg_already_incubating',
121 level='debug',
122 formatted='Egg already incubating',
123 )
124 egg["used"] = True
125
126 def _check_inventory(self, lookup_ids=[]):
127 if lookup_ids:
128 inventory.refresh_inventory()
129 matched_pokemon = []
130 temp_eggs = []
131 temp_used_incubators = []
132 temp_ready_breakable_incubators = []
133 temp_ready_infinite_incubators = []
134 inv = inventory.jsonify_inventory()
135 for inv_data in inv:
136 inv_data = inv_data.get("inventory_item_data", {})
137 if "egg_incubators" in inv_data:
138 incubators = inv_data.get("egg_incubators", {}).get("egg_incubator",[])
139 if isinstance(incubators, basestring): # checking for old response
140 incubators = [incubators]
141 for incubator in incubators:
142 if 'pokemon_id' in incubator:
143 start_km = incubator.get('start_km_walked', 0)
144 km_walked = incubator.get('target_km_walked', 0)
145 temp_used_incubators.append({
146 "id": incubator.get('id', -1),
147 "km": km_walked,
148 "km_needed": (km_walked - start_km)
149 })
150 else:
151 if incubator.get('uses_remaining') is not None:
152 temp_ready_breakable_incubators.append({
153 "id": incubator.get('id', -1)
154 })
155 else:
156 temp_ready_infinite_incubators.append({
157 "id": incubator.get('id', -1)
158 })
159 continue
160 if "pokemon_data" in inv_data:
161 pokemon = inv_data.get("pokemon_data", {})
162 if pokemon.get("is_egg", False) and "egg_incubator_id" not in pokemon:
163 temp_eggs.append({
164 "id": pokemon.get("id", -1),
165 "km": pokemon.get("egg_km_walked_target", -1),
166 "used": False
167 })
168 elif 'is_egg' not in pokemon and pokemon['id'] in lookup_ids:
169 pokemon.update({
170 "iv": [
171 pokemon.get('individual_attack', 0),
172 pokemon.get('individual_defense', 0),
173 pokemon.get('individual_stamina', 0)
174 ]})
175 matched_pokemon.append(pokemon)
176 continue
177 if "player_stats" in inv_data:
178 self.km_walked = inv_data.get("player_stats", {}).get("km_walked", 0)
179 if temp_used_incubators:
180 self.used_incubators = temp_used_incubators
181 if temp_ready_breakable_incubators:
182 self.ready_breakable_incubators = temp_ready_breakable_incubators
183 if temp_ready_infinite_incubators:
184 self.ready_infinite_incubators = temp_ready_infinite_incubators
185 if temp_eggs:
186 self.eggs = temp_eggs
187 return matched_pokemon
188
189 def _hatch_eggs(self):
190 response_dict = self.bot.api.get_hatched_eggs()
191 log_color = 'green'
192 try:
193 result = reduce(dict.__getitem__, ["responses", "GET_HATCHED_EGGS"], response_dict)
194 except KeyError:
195 return
196 pokemon_ids = []
197 if 'pokemon_id' in result:
198 pokemon_ids = [id for id in result['pokemon_id']]
199 stardust = result.get('stardust_awarded', "error")
200 candy = result.get('candy_awarded', "error")
201 xp = result.get('experience_awarded', "error")
202 sleep(self.hatching_animation_delay)
203 try:
204 pokemon_data = self._check_inventory(pokemon_ids)
205 for pokemon in pokemon_data:
206 # pokemon ids seem to be offset by one
207 if pokemon['pokemon_id']!=-1:
208 pokemon['name'] = self.bot.pokemon_list[(pokemon.get('pokemon_id')-1)]['Name']
209 #remove as egg and add as pokemon
210 inventory.pokemons().remove(pokemon['id'])
211 inventory.pokemons().add(inventory.Pokemon(pokemon))
212 else:
213 pokemon['name'] = "error"
214 except:
215 pokemon_data = [{"name":"error", "cp":"error", "iv":"error"}]
216 if not pokemon_ids or not pokemon_data or pokemon_data[0]['name'] == "error":
217 self.emit_event(
218 'egg_hatched',
219 data={
220 'pokemon': 'error',
221 'cp': 'error',
222 'iv': 'error',
223 'exp': 'error',
224 'stardust': 'error',
225 'candy': 'error',
226 }
227 )
228 return
229 for i in range(len(pokemon_data)):
230 msg = "Egg hatched with a {pokemon} (CP {cp} - IV {iv}), {exp} exp, {stardust} stardust and {candy} candies."
231 self.bot.metrics.hatched_eggs(1)
232 self.emit_event(
233 'egg_hatched',
234 formatted=msg,
235 data={
236 'pokemon': pokemon_data[i]['name'],
237 'cp': pokemon_data[i]['cp'],
238 'iv': "{} {}".format(
239 "/".join(map(str, pokemon_data[i]['iv'])),
240 round(sum(pokemon_data[i]['iv'])/self.max_iv, 2)
241 ),
242 'exp': xp[i],
243 'stardust': stardust[i],
244 'candy': candy[i],
245 }
246 )
247 # hatching egg gets exp too!
248 inventory.player().exp += xp[i]
249
250 def _print_eggs(self):
251 if not self.used_incubators:
252 return
253
254 self.used_incubators.sort(key=lambda x: x.get("km"))
255
256 eggs = ['{:.2f}/{} km'.format(e['km_needed']-e['km']+self.km_walked, e['km_needed']) for e in self.used_incubators]
257
258 self.emit_event(
259 'next_egg_incubates',
260 formatted='Eggs incubating: [{eggs}] (Eggs left: {eggs_left}, Incubating: {eggs_inc})',
261 data={
262 'eggs_left': len(self.eggs),
263 'eggs_inc': len(self.used_incubators),
264 'eggs': ', '.join(eggs)
265 }
266 )
267
268 def _should_print(self):
269 """
270 Returns a value indicating whether the eggs should be displayed.
271 :return: True if the stats should be displayed; otherwise, False.
272 :rtype: bool
273 """
274 return self.next_update is None or datetime.now() >= self.next_update
275
276 def _compute_next_update(self):
277 """
278 Computes the next update datetime based on the minimum update interval.
279 :return: Nothing.
280 :rtype: None
281 """
282 self.next_update = datetime.now() + timedelta(seconds=self.min_interval)
283
[end of pokemongo_bot/cell_workers/incubate_eggs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pokemongo_bot/cell_workers/incubate_eggs.py b/pokemongo_bot/cell_workers/incubate_eggs.py
--- a/pokemongo_bot/cell_workers/incubate_eggs.py
+++ b/pokemongo_bot/cell_workers/incubate_eggs.py
@@ -176,14 +176,10 @@
continue
if "player_stats" in inv_data:
self.km_walked = inv_data.get("player_stats", {}).get("km_walked", 0)
- if temp_used_incubators:
- self.used_incubators = temp_used_incubators
- if temp_ready_breakable_incubators:
- self.ready_breakable_incubators = temp_ready_breakable_incubators
- if temp_ready_infinite_incubators:
- self.ready_infinite_incubators = temp_ready_infinite_incubators
- if temp_eggs:
- self.eggs = temp_eggs
+ self.used_incubators = temp_used_incubators
+ self.ready_breakable_incubators = temp_ready_breakable_incubators
+ self.ready_infinite_incubators = temp_ready_infinite_incubators
+ self.eggs = temp_eggs
return matched_pokemon
def _hatch_eggs(self):
| {"golden_diff": "diff --git a/pokemongo_bot/cell_workers/incubate_eggs.py b/pokemongo_bot/cell_workers/incubate_eggs.py\n--- a/pokemongo_bot/cell_workers/incubate_eggs.py\n+++ b/pokemongo_bot/cell_workers/incubate_eggs.py\n@@ -176,14 +176,10 @@\n continue\n if \"player_stats\" in inv_data:\n self.km_walked = inv_data.get(\"player_stats\", {}).get(\"km_walked\", 0)\n- if temp_used_incubators:\n- self.used_incubators = temp_used_incubators\n- if temp_ready_breakable_incubators:\n- self.ready_breakable_incubators = temp_ready_breakable_incubators\n- if temp_ready_infinite_incubators:\n- self.ready_infinite_incubators = temp_ready_infinite_incubators\n- if temp_eggs:\n- self.eggs = temp_eggs\n+ self.used_incubators = temp_used_incubators\n+ self.ready_breakable_incubators = temp_ready_breakable_incubators\n+ self.ready_infinite_incubators = temp_ready_infinite_incubators\n+ self.eggs = temp_eggs\n return matched_pokemon\n \n def _hatch_eggs(self):\n", "issue": "Trying to Incubate Eggs Always\nSame issue for me\n #4875 \n\n> I was listening to events via websocket. I realized that it always try to incubate eggs. While I'm listening events these logs appear every 5-10 secs.\n> \n> Attempting to apply incubator EggIncubatorProto7823468502291754353 to egg 14174621514891967477\n> Incubator in use.\n\nBut restarting bot is not a solution for me. \n\n", "before_files": [{"content": "from datetime import datetime, timedelta\n\nfrom pokemongo_bot import inventory\nfrom pokemongo_bot.human_behaviour import sleep\nfrom pokemongo_bot.base_task import BaseTask\n\n\nclass IncubateEggs(BaseTask):\n SUPPORTED_TASK_API_VERSION = 1\n\n last_km_walked = 0\n\n def initialize(self):\n self.next_update = None\n self.ready_breakable_incubators = []\n self.ready_infinite_incubators = []\n self.used_incubators = []\n self.eggs = []\n self.km_walked = 0\n self.hatching_animation_delay = 4.20\n self.max_iv = 45.0\n\n self._process_config()\n\n def _process_config(self):\n self.infinite_longer_eggs_first = self.config.get(\"infinite_longer_eggs_first\", False)\n self.breakable_longer_eggs_first = self.config.get(\"breakable_longer_eggs_first\", True)\n self.min_interval = self.config.get('min_interval', 120)\n\n self.breakable_incubator = self.config.get(\"breakable\", [2,5,10])\n self.infinite_incubator = self.config.get(\"infinite\", [2,5,10])\n\n def work(self):\n try:\n self._check_inventory()\n except:\n return\n\n should_print = self._should_print()\n\n if self.used_incubators and IncubateEggs.last_km_walked != self.km_walked:\n self.used_incubators.sort(key=lambda x: x.get(\"km\"))\n km_left = self.used_incubators[0]['km']-self.km_walked\n if km_left <= 0:\n self._hatch_eggs()\n should_print = False\n else:\n self.bot.metrics.next_hatching_km(km_left)\n\n if should_print:\n self._print_eggs()\n self._compute_next_update()\n\n IncubateEggs.last_km_walked = self.km_walked\n\n # if there is a ready infinite incubator\n if self.ready_infinite_incubators:\n # get available eggs\n eggs = self._filter_sort_eggs(self.infinite_incubator,\n self.infinite_longer_eggs_first)\n self._apply_incubators(eggs, self.ready_infinite_incubators)\n\n if self.ready_breakable_incubators:\n # get available eggs\n eggs = self._filter_sort_eggs(self.breakable_incubator,\n self.breakable_longer_eggs_first)\n self._apply_incubators(eggs, self.ready_breakable_incubators)\n\n\n def _filter_sort_eggs(self, allowed, sorting):\n eligible_eggs = filter(lambda egg: int(egg[\"km\"]) in allowed, self.eggs)\n eligible_eggs.sort(key=lambda egg: 
egg[\"km\"], reverse=sorting)\n\n return eligible_eggs\n\n\n def _apply_incubators(self, available_eggs, available_incubators):\n\n for incubator in available_incubators:\n for egg in available_eggs:\n if egg[\"used\"] or egg[\"km\"] == -1:\n continue\n\n self.emit_event(\n 'incubate_try',\n level='debug',\n formatted=\"Attempting to apply incubator {incubator_id} to egg {egg_id}\",\n data={\n 'incubator_id': incubator['id'],\n 'egg_id': egg['id']\n }\n )\n ret = self.bot.api.use_item_egg_incubator(\n item_id=incubator[\"id\"],\n pokemon_id=egg[\"id\"]\n )\n if ret:\n code = ret.get(\"responses\", {}).get(\"USE_ITEM_EGG_INCUBATOR\", {}).get(\"result\", 0)\n if code == 1:\n self.emit_event(\n 'incubate',\n formatted='Incubating a {distance_in_km} egg.',\n data={\n 'distance_in_km': str(egg['km'])\n }\n )\n egg[\"used\"] = True\n incubator[\"used\"] = True\n break\n elif code == 5 or code == 7:\n self.emit_event(\n 'incubator_already_used',\n level='debug',\n formatted='Incubator in use.',\n )\n incubator[\"used\"] = True\n break\n elif code == 6:\n self.emit_event(\n 'egg_already_incubating',\n level='debug',\n formatted='Egg already incubating',\n )\n egg[\"used\"] = True\n\n def _check_inventory(self, lookup_ids=[]):\n if lookup_ids:\n inventory.refresh_inventory()\n matched_pokemon = []\n temp_eggs = []\n temp_used_incubators = []\n temp_ready_breakable_incubators = []\n temp_ready_infinite_incubators = []\n inv = inventory.jsonify_inventory()\n for inv_data in inv:\n inv_data = inv_data.get(\"inventory_item_data\", {})\n if \"egg_incubators\" in inv_data:\n incubators = inv_data.get(\"egg_incubators\", {}).get(\"egg_incubator\",[])\n if isinstance(incubators, basestring): # checking for old response\n incubators = [incubators]\n for incubator in incubators:\n if 'pokemon_id' in incubator:\n start_km = incubator.get('start_km_walked', 0)\n km_walked = incubator.get('target_km_walked', 0)\n temp_used_incubators.append({\n \"id\": incubator.get('id', -1),\n \"km\": km_walked,\n \"km_needed\": (km_walked - start_km)\n })\n else:\n if incubator.get('uses_remaining') is not None:\n temp_ready_breakable_incubators.append({\n \"id\": incubator.get('id', -1)\n })\n else:\n temp_ready_infinite_incubators.append({\n \"id\": incubator.get('id', -1)\n })\n continue\n if \"pokemon_data\" in inv_data:\n pokemon = inv_data.get(\"pokemon_data\", {})\n if pokemon.get(\"is_egg\", False) and \"egg_incubator_id\" not in pokemon:\n temp_eggs.append({\n \"id\": pokemon.get(\"id\", -1),\n \"km\": pokemon.get(\"egg_km_walked_target\", -1),\n \"used\": False\n })\n elif 'is_egg' not in pokemon and pokemon['id'] in lookup_ids:\n pokemon.update({\n \"iv\": [\n pokemon.get('individual_attack', 0),\n pokemon.get('individual_defense', 0),\n pokemon.get('individual_stamina', 0)\n ]})\n matched_pokemon.append(pokemon)\n continue\n if \"player_stats\" in inv_data:\n self.km_walked = inv_data.get(\"player_stats\", {}).get(\"km_walked\", 0)\n if temp_used_incubators:\n self.used_incubators = temp_used_incubators\n if temp_ready_breakable_incubators:\n self.ready_breakable_incubators = temp_ready_breakable_incubators\n if temp_ready_infinite_incubators:\n self.ready_infinite_incubators = temp_ready_infinite_incubators\n if temp_eggs:\n self.eggs = temp_eggs\n return matched_pokemon\n\n def _hatch_eggs(self):\n response_dict = self.bot.api.get_hatched_eggs()\n log_color = 'green'\n try:\n result = reduce(dict.__getitem__, [\"responses\", \"GET_HATCHED_EGGS\"], response_dict)\n except KeyError:\n return\n pokemon_ids = []\n 
if 'pokemon_id' in result:\n pokemon_ids = [id for id in result['pokemon_id']]\n stardust = result.get('stardust_awarded', \"error\")\n candy = result.get('candy_awarded', \"error\")\n xp = result.get('experience_awarded', \"error\")\n sleep(self.hatching_animation_delay)\n try:\n pokemon_data = self._check_inventory(pokemon_ids)\n for pokemon in pokemon_data:\n # pokemon ids seem to be offset by one\n if pokemon['pokemon_id']!=-1:\n pokemon['name'] = self.bot.pokemon_list[(pokemon.get('pokemon_id')-1)]['Name']\n #remove as egg and add as pokemon\n inventory.pokemons().remove(pokemon['id'])\n inventory.pokemons().add(inventory.Pokemon(pokemon))\n else:\n pokemon['name'] = \"error\"\n except:\n pokemon_data = [{\"name\":\"error\", \"cp\":\"error\", \"iv\":\"error\"}]\n if not pokemon_ids or not pokemon_data or pokemon_data[0]['name'] == \"error\":\n self.emit_event(\n 'egg_hatched',\n data={\n 'pokemon': 'error',\n 'cp': 'error',\n 'iv': 'error',\n 'exp': 'error',\n 'stardust': 'error',\n 'candy': 'error',\n }\n )\n return\n for i in range(len(pokemon_data)):\n msg = \"Egg hatched with a {pokemon} (CP {cp} - IV {iv}), {exp} exp, {stardust} stardust and {candy} candies.\"\n self.bot.metrics.hatched_eggs(1)\n self.emit_event(\n 'egg_hatched',\n formatted=msg,\n data={\n 'pokemon': pokemon_data[i]['name'],\n 'cp': pokemon_data[i]['cp'],\n 'iv': \"{} {}\".format(\n \"/\".join(map(str, pokemon_data[i]['iv'])),\n round(sum(pokemon_data[i]['iv'])/self.max_iv, 2)\n ),\n 'exp': xp[i],\n 'stardust': stardust[i],\n 'candy': candy[i],\n }\n )\n # hatching egg gets exp too!\n inventory.player().exp += xp[i]\n\n def _print_eggs(self):\n if not self.used_incubators:\n return\n\n self.used_incubators.sort(key=lambda x: x.get(\"km\"))\n\n eggs = ['{:.2f}/{} km'.format(e['km_needed']-e['km']+self.km_walked, e['km_needed']) for e in self.used_incubators]\n\n self.emit_event(\n 'next_egg_incubates',\n formatted='Eggs incubating: [{eggs}] (Eggs left: {eggs_left}, Incubating: {eggs_inc})',\n data={\n 'eggs_left': len(self.eggs),\n 'eggs_inc': len(self.used_incubators),\n 'eggs': ', '.join(eggs)\n }\n )\n\n def _should_print(self):\n \"\"\"\n Returns a value indicating whether the eggs should be displayed.\n :return: True if the stats should be displayed; otherwise, False.\n :rtype: bool\n \"\"\"\n return self.next_update is None or datetime.now() >= self.next_update\n\n def _compute_next_update(self):\n \"\"\"\n Computes the next update datetime based on the minimum update interval.\n :return: Nothing.\n :rtype: None\n \"\"\"\n self.next_update = datetime.now() + timedelta(seconds=self.min_interval)\n", "path": "pokemongo_bot/cell_workers/incubate_eggs.py"}]} | 3,884 | 296 |
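The patch in the record above makes `_check_inventory` mirror the freshly computed incubator and egg lists unconditionally. With the original `if temp_...:` guards, a refresh that returns an empty list never clears the previous state, so an incubator that has since been filled stays in `ready_*_incubators` and the task keeps retrying it, which is why the "Incubator in use" event repeats every few seconds. A small self-contained sketch of that stale-state pattern, with names invented for illustration:

```python
class Worker:
    def __init__(self):
        self.ready_incubators = []

    def refresh_buggy(self, fresh_ready):
        # BUG: only overwrite when the fresh list is non-empty, so once an
        # incubator was seen as "ready" it is never cleared again.
        if fresh_ready:
            self.ready_incubators = fresh_ready

    def refresh_fixed(self, fresh_ready):
        # FIX (as in the patch): always mirror the latest server state.
        self.ready_incubators = fresh_ready


w = Worker()
w.refresh_buggy(["incubator-1"])   # incubator becomes ready
w.refresh_buggy([])                # it was used meanwhile, but the state sticks
print(w.ready_incubators)          # ['incubator-1'] -> the bot keeps retrying

w.refresh_fixed([])
print(w.ready_incubators)          # []
```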
gh_patches_debug_14129 | rasdani/github-patches | git_diff | freedomofpress__securedrop-237 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Possible path confusion / traversal via imprecise store.verify()
The method `store.verify()` checks file paths provided via URLs and other inputs and raises an exception if they do not match the validation criteria.
A problem with this validation process was spotted: `os.path.commonprefix()` is not sufficient to check whether a path is inside the configured store path. It only compares the strings character by character, which allows navigating into another folder whenever the two paths share the same prefix string.
```
Example: config.STORE_DIR = '/opt/store'
PoC: store.verify('/opt/store_backup')
```
The mitigation has to make sure that the path is inside the configured store folder. One option is to add another check in `store.verify()` using `os.path.relpath(p, config.STORE_DIR)`: if the absolute path `p` is not inside the store directory, `os.path.relpath()` will return a string starting with `'../'`.
Example:
```
os.path.relpath('/opt/store_backup', config.STORE_DIR) == '../store_backup'
```
**Reported as part of the cure53 audit of 0.2 as: SD-01-006**
</issue>
<code>
[start of securedrop/store.py]
1 # -*- coding: utf-8 -*-
2 import os
3 import re
4 import config
5 import zipfile
6 import crypto_util
7 import uuid
8 import tempfile
9
10 VALIDATE_FILENAME = re.compile(
11 "^(reply-)?[a-f0-9-]+(_msg|_doc\.zip|)\.gpg$").match
12
13
14 class PathException(Exception):
15
16 '''An exception raised by `store.verify` when it encounters a bad path. A path
17 can be bad when it is not absolute, not normalized, not within
18 `config.STORE_DIR`, or doesn't match the filename format.
19 '''
20 pass
21
22
23 def verify(p):
24 '''Assert that the path is absolute, normalized, inside `config.STORE_DIR`, and
25 matches the filename format.
26 '''
27 if not os.path.isabs(config.STORE_DIR):
28 raise PathException("config.STORE_DIR(%s) is not absolute" % (
29 config.STORE_DIR, ))
30
31 # os.path.abspath makes the path absolute and normalizes '/foo/../bar' to
32 # '/bar', etc. We have to check that the path is normalized before checking
33 # that it starts with the `config.STORE_DIR` or else a malicious actor could
34 # append a bunch of '../../..' to access files outside of the store.
35 if not p == os.path.abspath(p):
36 raise PathException("The path is not absolute and/or normalized")
37
38 if os.path.commonprefix([config.STORE_DIR, p]) != config.STORE_DIR:
39 raise PathException("Invalid directory %s" % (p, ))
40
41 filename = os.path.basename(p)
42 ext = os.path.splitext(filename)[-1]
43
44 if os.path.isfile(p):
45 if filename == '_FLAG':
46 return True
47 if ext != '.gpg':
48 # if there's an extension, verify it's a GPG
49 raise PathException("Invalid file extension %s" % (ext, ))
50 if not VALIDATE_FILENAME(filename):
51 raise PathException("Invalid filename %s" % (filename, ))
52
53
54 def path(*s):
55 '''Get the normalized, absolute file path, within `config.STORE_DIR`.'''
56 joined = os.path.join(os.path.abspath(config.STORE_DIR), *s)
57 absolute = os.path.abspath(joined)
58 verify(absolute)
59 return absolute
60
61
62 def get_bulk_archive(filenames):
63 zip_file = tempfile.NamedTemporaryFile(prefix='tmp_securedrop_bulk_dl_')
64 with zipfile.ZipFile(zip_file, 'w') as zip:
65 for filename in filenames:
66 verify(filename)
67 zip.write(filename, arcname=os.path.basename(filename))
68 return zip_file
69
70
71 def log(msg):
72 file(path('NOTES'), 'a').write(msg)
73
[end of securedrop/store.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/securedrop/store.py b/securedrop/store.py
--- a/securedrop/store.py
+++ b/securedrop/store.py
@@ -35,13 +35,13 @@
if not p == os.path.abspath(p):
raise PathException("The path is not absolute and/or normalized")
- if os.path.commonprefix([config.STORE_DIR, p]) != config.STORE_DIR:
+ # Check that the path p is in config.STORE_DIR
+ if os.path.relpath(p, config.STORE_DIR).startswith('..'):
raise PathException("Invalid directory %s" % (p, ))
- filename = os.path.basename(p)
- ext = os.path.splitext(filename)[-1]
-
if os.path.isfile(p):
+ filename = os.path.basename(p)
+ ext = os.path.splitext(filename)[-1]
if filename == '_FLAG':
return True
if ext != '.gpg':
| {"golden_diff": "diff --git a/securedrop/store.py b/securedrop/store.py\n--- a/securedrop/store.py\n+++ b/securedrop/store.py\n@@ -35,13 +35,13 @@\n if not p == os.path.abspath(p):\n raise PathException(\"The path is not absolute and/or normalized\")\n \n- if os.path.commonprefix([config.STORE_DIR, p]) != config.STORE_DIR:\n+ # Check that the path p is in config.STORE_DIR\n+ if os.path.relpath(p, config.STORE_DIR).startswith('..'):\n raise PathException(\"Invalid directory %s\" % (p, ))\n \n- filename = os.path.basename(p)\n- ext = os.path.splitext(filename)[-1]\n-\n if os.path.isfile(p):\n+ filename = os.path.basename(p)\n+ ext = os.path.splitext(filename)[-1]\n if filename == '_FLAG':\n return True\n if ext != '.gpg':\n", "issue": "Possible path confusion / traversal via imprecise store.verify()\nThe method `store.verify()` checks file paths provided via URL and other ways and raises an exception if they are not matching the validation criteria.\n\nA problem with this validation process was spotted: `os.path.commonprefix()` is not sufficient to check if the path is inside the configured store path. It only compares character by character. Thus allows to navigate into another folder when they share the same start string.\n\n```\nExample: config.STORE_DIR = '/opt/store'\nPoC: store.verify('/opt/store_backup')\n```\n\nMitigation has to make sure, that the path is inside the configured store folder. A mitigation could be to add another check in `store.verify()` with `os.path.relpath(p, config.STORE_DIR)`. If the absolute path p is not inside the store directory, `os.path.relpath()` will return a string starting with '../'.\n\nExample:\n\n```\nos.path.relpath('/opt/store_backup', config.STORE_DIR) == '../store_backup'\n```\n\n**Reported as part of the cure53 audit of 0.2 as: SD-01-006**\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nimport re\nimport config\nimport zipfile\nimport crypto_util\nimport uuid\nimport tempfile\n\nVALIDATE_FILENAME = re.compile(\n \"^(reply-)?[a-f0-9-]+(_msg|_doc\\.zip|)\\.gpg$\").match\n\n\nclass PathException(Exception):\n\n '''An exception raised by `store.verify` when it encounters a bad path. A path\n can be bad when it is not absolute, not normalized, not within\n `config.STORE_DIR`, or doesn't match the filename format.\n '''\n pass\n\n\ndef verify(p):\n '''Assert that the path is absolute, normalized, inside `config.STORE_DIR`, and\n matches the filename format.\n '''\n if not os.path.isabs(config.STORE_DIR):\n raise PathException(\"config.STORE_DIR(%s) is not absolute\" % (\n config.STORE_DIR, ))\n\n # os.path.abspath makes the path absolute and normalizes '/foo/../bar' to\n # '/bar', etc. We have to check that the path is normalized before checking\n # that it starts with the `config.STORE_DIR` or else a malicious actor could\n # append a bunch of '../../..' 
to access files outside of the store.\n if not p == os.path.abspath(p):\n raise PathException(\"The path is not absolute and/or normalized\")\n\n if os.path.commonprefix([config.STORE_DIR, p]) != config.STORE_DIR:\n raise PathException(\"Invalid directory %s\" % (p, ))\n\n filename = os.path.basename(p)\n ext = os.path.splitext(filename)[-1]\n\n if os.path.isfile(p):\n if filename == '_FLAG':\n return True\n if ext != '.gpg':\n # if there's an extension, verify it's a GPG\n raise PathException(\"Invalid file extension %s\" % (ext, ))\n if not VALIDATE_FILENAME(filename):\n raise PathException(\"Invalid filename %s\" % (filename, ))\n\n\ndef path(*s):\n '''Get the normalized, absolute file path, within `config.STORE_DIR`.'''\n joined = os.path.join(os.path.abspath(config.STORE_DIR), *s)\n absolute = os.path.abspath(joined)\n verify(absolute)\n return absolute\n\n\ndef get_bulk_archive(filenames):\n zip_file = tempfile.NamedTemporaryFile(prefix='tmp_securedrop_bulk_dl_')\n with zipfile.ZipFile(zip_file, 'w') as zip:\n for filename in filenames:\n verify(filename)\n zip.write(filename, arcname=os.path.basename(filename))\n return zip_file\n\n\ndef log(msg):\n file(path('NOTES'), 'a').write(msg)\n", "path": "securedrop/store.py"}]} | 1,497 | 208 |
gh_patches_debug_26522 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-3741 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Assigning group members: memberlist batch navigation is broken.
## groupmembers listing batch and `showAll` link is broken
### What I did:
Assign members to a group:
- click on "show all" in the user filter.
- if you have lots of users the list is batched
- click on the next batch page
### What I expect to happen:
the next user batch list is shown
### What actually happened:
the user list is empty
### What version of Plone/ Addons I am using:
Plone 6.0.2
### Additional
The "toggle all" checkboxes do not work. This can be solved with `pat-checklist` ...
</issue>
<code>
[start of Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py]
1 from Products.CMFCore.utils import getToolByName
2 from Products.CMFPlone import PloneMessageFactory as _
3 from Products.CMFPlone.controlpanel.browser.usergroups import (
4 UsersGroupsControlPanelView,
5 )
6 from Products.CMFPlone.utils import normalizeString
7 from zExceptions import Forbidden
8
9
10 class GroupMembershipControlPanel(UsersGroupsControlPanelView):
11
12 def update(self):
13 self.groupname = getattr(self.request, 'groupname')
14 self.gtool = getToolByName(self, 'portal_groups')
15 self.mtool = getToolByName(self, 'portal_membership')
16 self.group = self.gtool.getGroupById(self.groupname)
17 if self.group is None:
18 return
19
20 self.grouptitle = self.group.getGroupTitleOrName() or self.groupname
21
22 self.request.set('grouproles', self.group.getRoles()
23 if self.group else [])
24 self.canAddUsers = True
25 if 'Manager' in self.request.get('grouproles') and not self.is_zope_manager:
26 self.canAddUsers = False
27
28 self.groupquery = self.makeQuery(groupname=self.groupname)
29 self.groupkeyquery = self.makeQuery(key=self.groupname)
30
31 form = self.request.form
32 submitted = form.get('form.submitted', False)
33
34 self.searchResults = []
35 self.searchString = ''
36 self.newSearch = False
37
38 if submitted:
39 # add/delete before we search so we don't show stale results
40 toAdd = form.get('add', [])
41 if toAdd:
42 if not self.canAddUsers:
43 raise Forbidden
44
45 for u in toAdd:
46 self.gtool.addPrincipalToGroup(
47 u, self.groupname, self.request)
48 self.context.plone_utils.addPortalMessage(_('Changes made.'))
49
50 toDelete = form.get('delete', [])
51 if toDelete:
52 for u in toDelete:
53 self.gtool.removePrincipalFromGroup(
54 u, self.groupname, self.request)
55 self.context.plone_utils.addPortalMessage(_('Changes made.'))
56
57 search = form.get('form.button.Search', None) is not None
58 edit = form.get('form.button.Edit', None) is not None and toDelete
59 add = form.get('form.button.Add', None) is not None and toAdd
60 findAll = form.get('form.button.FindAll', None) is not None and \
61 not self.many_users
62 # The search string should be cleared when one of the
63 # non-search buttons has been clicked.
64 if findAll or edit or add:
65 form['searchstring'] = ''
66 self.searchString = form.get('searchstring', '')
67 if findAll or bool(self.searchString):
68 self.searchResults = self.getPotentialMembers(
69 self.searchString)
70
71 if search or findAll:
72 self.newSearch = True
73
74 self.groupMembers = self.getMembers()
75
76 def __call__(self):
77 self.update()
78 return self.index()
79
80 def isGroup(self, itemName):
81 return self.gtool.isGroup(itemName)
82
83 def getMembers(self):
84 searchResults = self.gtool.getGroupMembers(self.groupname)
85
86 groupResults = []
87 userResults = []
88 for principal_id in searchResults:
89 principal = self.gtool.getGroupById(principal_id)
90 if principal is not None:
91 groupResults.append(principal)
92 continue
93 principal = self.mtool.getMemberById(principal_id)
94 if principal is not None:
95 userResults.append(principal)
96
97 groupResults.sort(key=lambda x: normalizeString(x.getGroupTitleOrName()))
98 userResults.sort(key=lambda x: normalizeString(x.getProperty('fullname') or ''))
99
100 return groupResults + userResults
101
102 def getPotentialMembers(self, searchString):
103 ignoredUsersGroups = [
104 x.id for x in self.getMembers() + [self.group, ] if x is not None]
105 return self.membershipSearch(searchString, ignore=ignoredUsersGroups)
106
[end of Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py b/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py
--- a/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py
+++ b/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py
@@ -57,14 +57,21 @@
search = form.get('form.button.Search', None) is not None
edit = form.get('form.button.Edit', None) is not None and toDelete
add = form.get('form.button.Add', None) is not None and toAdd
- findAll = form.get('form.button.FindAll', None) is not None and \
- not self.many_users
+ isBatched = form.get("b_start", None) is not None
+ findAll = (
+ form.get('form.button.FindAll', None) is not None
+ and not self.many_users
+ )
+ unbatchedAll = (
+ form.get("showAll", "") == "y"
+ and not self.many_users
+ )
# The search string should be cleared when one of the
# non-search buttons has been clicked.
- if findAll or edit or add:
+ if findAll or unbatchedAll or edit or add:
form['searchstring'] = ''
self.searchString = form.get('searchstring', '')
- if findAll or bool(self.searchString):
+ if findAll or isBatched or unbatchedAll or bool(self.searchString):
self.searchResults = self.getPotentialMembers(
self.searchString)
| {"golden_diff": "diff --git a/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py b/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py\n--- a/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py\n+++ b/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py\n@@ -57,14 +57,21 @@\n search = form.get('form.button.Search', None) is not None\n edit = form.get('form.button.Edit', None) is not None and toDelete\n add = form.get('form.button.Add', None) is not None and toAdd\n- findAll = form.get('form.button.FindAll', None) is not None and \\\n- not self.many_users\n+ isBatched = form.get(\"b_start\", None) is not None\n+ findAll = (\n+ form.get('form.button.FindAll', None) is not None\n+ and not self.many_users\n+ )\n+ unbatchedAll = (\n+ form.get(\"showAll\", \"\") == \"y\"\n+ and not self.many_users\n+ )\n # The search string should be cleared when one of the\n # non-search buttons has been clicked.\n- if findAll or edit or add:\n+ if findAll or unbatchedAll or edit or add:\n form['searchstring'] = ''\n self.searchString = form.get('searchstring', '')\n- if findAll or bool(self.searchString):\n+ if findAll or isBatched or unbatchedAll or bool(self.searchString):\n self.searchResults = self.getPotentialMembers(\n self.searchString)\n", "issue": "Assigning group members: memberlist batch navigation is broken.\n## groupmembers listing batch and `showAll` link is broken\r\n\r\n### What I did:\r\n\r\nAssign members to a group:\r\n\r\n- click on \"show all\" in the user filter.\r\n- if you have lots of users the list is batched\r\n- click on the next batch page\r\n\r\n### What I expect to happen:\r\n\r\nthe next user batch list is shown\r\n\r\n### What actually happened:\r\n\r\nthe user list is empty\r\n\r\n### What version of Plone/ Addons I am using:\r\n\r\nPlone 6.0.2\r\n\r\n\r\n### Additional\r\n\r\nThe \"toggle all\" checkboxes do not work. 
This can be solved with `pat-checklist` ...\n", "before_files": [{"content": "from Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.controlpanel.browser.usergroups import (\n UsersGroupsControlPanelView,\n)\nfrom Products.CMFPlone.utils import normalizeString\nfrom zExceptions import Forbidden\n\n\nclass GroupMembershipControlPanel(UsersGroupsControlPanelView):\n\n def update(self):\n self.groupname = getattr(self.request, 'groupname')\n self.gtool = getToolByName(self, 'portal_groups')\n self.mtool = getToolByName(self, 'portal_membership')\n self.group = self.gtool.getGroupById(self.groupname)\n if self.group is None:\n return\n\n self.grouptitle = self.group.getGroupTitleOrName() or self.groupname\n\n self.request.set('grouproles', self.group.getRoles()\n if self.group else [])\n self.canAddUsers = True\n if 'Manager' in self.request.get('grouproles') and not self.is_zope_manager:\n self.canAddUsers = False\n\n self.groupquery = self.makeQuery(groupname=self.groupname)\n self.groupkeyquery = self.makeQuery(key=self.groupname)\n\n form = self.request.form\n submitted = form.get('form.submitted', False)\n\n self.searchResults = []\n self.searchString = ''\n self.newSearch = False\n\n if submitted:\n # add/delete before we search so we don't show stale results\n toAdd = form.get('add', [])\n if toAdd:\n if not self.canAddUsers:\n raise Forbidden\n\n for u in toAdd:\n self.gtool.addPrincipalToGroup(\n u, self.groupname, self.request)\n self.context.plone_utils.addPortalMessage(_('Changes made.'))\n\n toDelete = form.get('delete', [])\n if toDelete:\n for u in toDelete:\n self.gtool.removePrincipalFromGroup(\n u, self.groupname, self.request)\n self.context.plone_utils.addPortalMessage(_('Changes made.'))\n\n search = form.get('form.button.Search', None) is not None\n edit = form.get('form.button.Edit', None) is not None and toDelete\n add = form.get('form.button.Add', None) is not None and toAdd\n findAll = form.get('form.button.FindAll', None) is not None and \\\n not self.many_users\n # The search string should be cleared when one of the\n # non-search buttons has been clicked.\n if findAll or edit or add:\n form['searchstring'] = ''\n self.searchString = form.get('searchstring', '')\n if findAll or bool(self.searchString):\n self.searchResults = self.getPotentialMembers(\n self.searchString)\n\n if search or findAll:\n self.newSearch = True\n\n self.groupMembers = self.getMembers()\n\n def __call__(self):\n self.update()\n return self.index()\n\n def isGroup(self, itemName):\n return self.gtool.isGroup(itemName)\n\n def getMembers(self):\n searchResults = self.gtool.getGroupMembers(self.groupname)\n\n groupResults = []\n userResults = []\n for principal_id in searchResults:\n principal = self.gtool.getGroupById(principal_id)\n if principal is not None:\n groupResults.append(principal)\n continue\n principal = self.mtool.getMemberById(principal_id)\n if principal is not None:\n userResults.append(principal)\n\n groupResults.sort(key=lambda x: normalizeString(x.getGroupTitleOrName()))\n userResults.sort(key=lambda x: normalizeString(x.getProperty('fullname') or ''))\n\n return groupResults + userResults\n\n def getPotentialMembers(self, searchString):\n ignoredUsersGroups = [\n x.id for x in self.getMembers() + [self.group, ] if x is not None]\n return self.membershipSearch(searchString, ignore=ignoredUsersGroups)\n", "path": "Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py"}]} | 1,756 | 360 |
gh_patches_debug_2548 | rasdani/github-patches | git_diff | biopython__biopython-4545 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ScanProsite no longer working
### Setup
I am reporting a problem with Biopython version, Python version, and operating
system as follows:
```python
import sys; print(sys.version)
import platform; print(platform.python_implementation()); print(platform.platform())
import Bio; print(Bio.__version__)
```
(*Please copy and run the above in your Python, and copy-and-paste the output*)
3.7.16 (default, Mar 10 2023, 03:25:26)
[GCC 7.3.1 20180712 (Red Hat 7.3.1-15)]
CPython
Linux-5.10.177-158.645.amzn2.x86_64-x86_64-with-glibc2.2.5
1.81
### Expected behaviour
Hi, I have been recently having difficulty with the ScanProsite module. I am wondering if the ScanProsite mirror needs to updated. Thank you in advance.
### Actual behaviour
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib64/python3.7/urllib/request.py", line 222, in urlopen
return opener.open(url, data, timeout)
File "/usr/lib64/python3.7/urllib/request.py", line 531, in open
response = meth(req, response)
File "/usr/lib64/python3.7/urllib/request.py", line 641, in http_response
'http', request, response, code, msg, hdrs)
File "/usr/lib64/python3.7/urllib/request.py", line 569, in error
return self._call_chain(*args)
File "/usr/lib64/python3.7/urllib/request.py", line 503, in _call_chain
result = func(*args)
File "/usr/lib64/python3.7/urllib/request.py", line 649, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 308: Permanent Redirect
### Steps to reproduce
from Bio import ExPASy
from Bio.ExPASy import ScanProsite
import pandas as pd
# Run input sequence through ScanProsite
handle = ScanProsite.scan(sig = sequence, output = 'xml', lineage='Homo sapiens', mirror = 'https://prosite.expasy.org/cgi-bin/prosite/scanprosite/PSScan.cgi')
result = ScanProsite.read(handle)
df = pd.DataFrame(handle)
</issue>
<code>
[start of Bio/ExPASy/ScanProsite.py]
1 # Copyright 2009 by Michiel de Hoon. All rights reserved.
2 # This code is part of the Biopython distribution and governed by its
3 # license. Please see the LICENSE file that should have been included
4 # as part of this package.
5
6 """Code for calling and parsing ScanProsite from ExPASy."""
7
8 # Importing these functions with leading underscore as not intended for reuse
9 from urllib.request import urlopen
10 from urllib.parse import urlencode
11
12 from xml.sax import handler
13 from xml.sax.expatreader import ExpatParser
14
15
16 class Record(list):
17 """Represents search results returned by ScanProsite.
18
19 This record is a list containing the search results returned by
20 ScanProsite. The record also contains the data members n_match,
21 n_seq, capped, and warning.
22 """
23
24 def __init__(self):
25 """Initialize the class."""
26 self.n_match = None
27 self.n_seq = None
28 self.capped = None
29 self.warning = None
30
31
32 # October 28th 2020 it was recognised that between October 10th 2020 and October
33 # 28th the main url of prosite changed from https://www.expasy.org to
34 # https://prosite.expasy.org. Thus a change in the mirror was issued from
35 # https://www.expasy.org to https://prosite.expasy.org.
36 def scan(seq="", mirror="https://prosite.expasy.org", output="xml", **keywords):
37 """Execute a ScanProsite search.
38
39 Arguments:
40 - mirror: The ScanProsite mirror to be used
41 (default: https://prosite.expasy.org).
42 - seq: The query sequence, or UniProtKB (Swiss-Prot,
43 TrEMBL) accession
44 - output: Format of the search results
45 (default: xml)
46
47 Further search parameters can be passed as keywords; see the
48 documentation for programmatic access to ScanProsite at
49 https://prosite.expasy.org/scanprosite/scanprosite_doc.html
50 for a description of such parameters.
51
52 This function returns a handle to the search results returned by
53 ScanProsite. Search results in the XML format can be parsed into a
54 Python object, by using the Bio.ExPASy.ScanProsite.read function.
55
56 """
57 parameters = {"seq": seq, "output": output}
58 for key, value in keywords.items():
59 if value is not None:
60 parameters[key] = value
61 command = urlencode(parameters)
62 url = f"{mirror}/cgi-bin/prosite/PSScan.cgi?{command}"
63 handle = urlopen(url)
64 return handle
65
66
67 def read(handle):
68 """Parse search results returned by ScanProsite into a Python object."""
69 content_handler = ContentHandler()
70 saxparser = Parser()
71 saxparser.setContentHandler(content_handler)
72 saxparser.parse(handle)
73 record = content_handler.record
74 return record
75
76
77 # The classes below are considered private
78
79
80 class Parser(ExpatParser):
81 """Process the result from a ScanProsite search (PRIVATE)."""
82
83 def __init__(self):
84 """Initialize the class."""
85 ExpatParser.__init__(self)
86 self.firsttime = True
87
88 def feed(self, data, isFinal=0):
89 """Raise an Error if plain text is received in the data.
90
91 This is to show the Error messages returned by ScanProsite.
92 """
93 # Error messages returned by the ScanProsite server are formatted as
94 # as plain text instead of an XML document. To catch such error
95 # messages, we override the feed method of the Expat parser.
96 # The error message is (hopefully) contained in the data that was just
97 # fed to the parser.
98 if self.firsttime:
99 if data[:5].decode("utf-8") != "<?xml":
100 raise ValueError(data)
101 self.firsttime = False
102 return ExpatParser.feed(self, data, isFinal)
103
104
105 class ContentHandler(handler.ContentHandler):
106 """Process and fill in the records, results of the search (PRIVATE)."""
107
108 integers = ("start", "stop")
109 strings = (
110 "sequence_ac",
111 "sequence_id",
112 "sequence_db",
113 "signature_ac",
114 "level",
115 "level_tag",
116 )
117
118 def __init__(self):
119 """Initialize the class."""
120 self.element = []
121
122 def startElement(self, name, attrs):
123 """Define the beginning of a record and stores the search record."""
124 self.element.append(name)
125 self.content = ""
126 if self.element == ["matchset"]:
127 self.record = Record()
128 self.record.n_match = int(attrs["n_match"])
129 self.record.n_seq = int(attrs["n_seq"])
130 elif self.element == ["matchset", "match"]:
131 match = {}
132 self.record.append(match)
133
134 def endElement(self, name):
135 """Define the end of the search record."""
136 assert name == self.element.pop()
137 if self.element == ["matchset", "match"]:
138 match = self.record[-1]
139 if name in ContentHandler.integers:
140 match[name] = int(self.content)
141 elif name in ContentHandler.strings:
142 match[name] = self.content
143 else:
144 # Unknown type, treat it as a string
145 match[name] = self.content
146
147 def characters(self, content):
148 """Store the record content."""
149 self.content += content
150
[end of Bio/ExPASy/ScanProsite.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Bio/ExPASy/ScanProsite.py b/Bio/ExPASy/ScanProsite.py
--- a/Bio/ExPASy/ScanProsite.py
+++ b/Bio/ExPASy/ScanProsite.py
@@ -59,7 +59,7 @@
if value is not None:
parameters[key] = value
command = urlencode(parameters)
- url = f"{mirror}/cgi-bin/prosite/PSScan.cgi?{command}"
+ url = f"{mirror}/cgi-bin/prosite/scanprosite/PSScan.cgi?{command}"
handle = urlopen(url)
return handle
| {"golden_diff": "diff --git a/Bio/ExPASy/ScanProsite.py b/Bio/ExPASy/ScanProsite.py\n--- a/Bio/ExPASy/ScanProsite.py\n+++ b/Bio/ExPASy/ScanProsite.py\n@@ -59,7 +59,7 @@\n if value is not None:\n parameters[key] = value\n command = urlencode(parameters)\n- url = f\"{mirror}/cgi-bin/prosite/PSScan.cgi?{command}\"\n+ url = f\"{mirror}/cgi-bin/prosite/scanprosite/PSScan.cgi?{command}\"\n handle = urlopen(url)\n return handle\n", "issue": "ScanProsite no longer working\n### Setup\r\n\r\nI am reporting a problem with Biopython version, Python version, and operating\r\nsystem as follows:\r\n\r\n```python\r\nimport sys; print(sys.version)\r\nimport platform; print(platform.python_implementation()); print(platform.platform())\r\nimport Bio; print(Bio.__version__)\r\n```\r\n\r\n(*Please copy and run the above in your Python, and copy-and-paste the output*)\r\n3.7.16 (default, Mar 10 2023, 03:25:26) \r\n[GCC 7.3.1 20180712 (Red Hat 7.3.1-15)]\r\nCPython\r\nLinux-5.10.177-158.645.amzn2.x86_64-x86_64-with-glibc2.2.5\r\n1.81\r\n\r\n### Expected behaviour\r\n\r\nHi, I have been recently having difficulty with the ScanProsite module. I am wondering if the ScanProsite mirror needs to updated. Thank you in advance.\r\n\r\n### Actual behaviour\r\n\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/usr/lib64/python3.7/urllib/request.py\", line 222, in urlopen\r\n return opener.open(url, data, timeout)\r\n File \"/usr/lib64/python3.7/urllib/request.py\", line 531, in open\r\n response = meth(req, response)\r\n File \"/usr/lib64/python3.7/urllib/request.py\", line 641, in http_response\r\n 'http', request, response, code, msg, hdrs)\r\n File \"/usr/lib64/python3.7/urllib/request.py\", line 569, in error\r\n return self._call_chain(*args)\r\n File \"/usr/lib64/python3.7/urllib/request.py\", line 503, in _call_chain\r\n result = func(*args)\r\n File \"/usr/lib64/python3.7/urllib/request.py\", line 649, in http_error_default\r\n raise HTTPError(req.full_url, code, msg, hdrs, fp)\r\nurllib.error.HTTPError: HTTP Error 308: Permanent Redirect\r\n\r\n### Steps to reproduce\r\n\r\n from Bio import ExPASy\r\n from Bio.ExPASy import ScanProsite\r\n import pandas as pd\r\n # Run input sequence through ScanProsite\r\n handle = ScanProsite.scan(sig = sequence, output = 'xml', lineage='Homo sapiens', mirror = 'https://prosite.expasy.org/cgi-bin/prosite/scanprosite/PSScan.cgi')\r\n result = ScanProsite.read(handle)\r\n df = pd.DataFrame(handle)\r\n\n", "before_files": [{"content": "# Copyright 2009 by Michiel de Hoon. All rights reserved.\n# This code is part of the Biopython distribution and governed by its\n# license. Please see the LICENSE file that should have been included\n# as part of this package.\n\n\"\"\"Code for calling and parsing ScanProsite from ExPASy.\"\"\"\n\n# Importing these functions with leading underscore as not intended for reuse\nfrom urllib.request import urlopen\nfrom urllib.parse import urlencode\n\nfrom xml.sax import handler\nfrom xml.sax.expatreader import ExpatParser\n\n\nclass Record(list):\n \"\"\"Represents search results returned by ScanProsite.\n\n This record is a list containing the search results returned by\n ScanProsite. 
The record also contains the data members n_match,\n n_seq, capped, and warning.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n self.n_match = None\n self.n_seq = None\n self.capped = None\n self.warning = None\n\n\n# October 28th 2020 it was recognised that between October 10th 2020 and October\n# 28th the main url of prosite changed from https://www.expasy.org to\n# https://prosite.expasy.org. Thus a change in the mirror was issued from\n# https://www.expasy.org to https://prosite.expasy.org.\ndef scan(seq=\"\", mirror=\"https://prosite.expasy.org\", output=\"xml\", **keywords):\n \"\"\"Execute a ScanProsite search.\n\n Arguments:\n - mirror: The ScanProsite mirror to be used\n (default: https://prosite.expasy.org).\n - seq: The query sequence, or UniProtKB (Swiss-Prot,\n TrEMBL) accession\n - output: Format of the search results\n (default: xml)\n\n Further search parameters can be passed as keywords; see the\n documentation for programmatic access to ScanProsite at\n https://prosite.expasy.org/scanprosite/scanprosite_doc.html\n for a description of such parameters.\n\n This function returns a handle to the search results returned by\n ScanProsite. Search results in the XML format can be parsed into a\n Python object, by using the Bio.ExPASy.ScanProsite.read function.\n\n \"\"\"\n parameters = {\"seq\": seq, \"output\": output}\n for key, value in keywords.items():\n if value is not None:\n parameters[key] = value\n command = urlencode(parameters)\n url = f\"{mirror}/cgi-bin/prosite/PSScan.cgi?{command}\"\n handle = urlopen(url)\n return handle\n\n\ndef read(handle):\n \"\"\"Parse search results returned by ScanProsite into a Python object.\"\"\"\n content_handler = ContentHandler()\n saxparser = Parser()\n saxparser.setContentHandler(content_handler)\n saxparser.parse(handle)\n record = content_handler.record\n return record\n\n\n# The classes below are considered private\n\n\nclass Parser(ExpatParser):\n \"\"\"Process the result from a ScanProsite search (PRIVATE).\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n ExpatParser.__init__(self)\n self.firsttime = True\n\n def feed(self, data, isFinal=0):\n \"\"\"Raise an Error if plain text is received in the data.\n\n This is to show the Error messages returned by ScanProsite.\n \"\"\"\n # Error messages returned by the ScanProsite server are formatted as\n # as plain text instead of an XML document. 
To catch such error\n # messages, we override the feed method of the Expat parser.\n # The error message is (hopefully) contained in the data that was just\n # fed to the parser.\n if self.firsttime:\n if data[:5].decode(\"utf-8\") != \"<?xml\":\n raise ValueError(data)\n self.firsttime = False\n return ExpatParser.feed(self, data, isFinal)\n\n\nclass ContentHandler(handler.ContentHandler):\n \"\"\"Process and fill in the records, results of the search (PRIVATE).\"\"\"\n\n integers = (\"start\", \"stop\")\n strings = (\n \"sequence_ac\",\n \"sequence_id\",\n \"sequence_db\",\n \"signature_ac\",\n \"level\",\n \"level_tag\",\n )\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n self.element = []\n\n def startElement(self, name, attrs):\n \"\"\"Define the beginning of a record and stores the search record.\"\"\"\n self.element.append(name)\n self.content = \"\"\n if self.element == [\"matchset\"]:\n self.record = Record()\n self.record.n_match = int(attrs[\"n_match\"])\n self.record.n_seq = int(attrs[\"n_seq\"])\n elif self.element == [\"matchset\", \"match\"]:\n match = {}\n self.record.append(match)\n\n def endElement(self, name):\n \"\"\"Define the end of the search record.\"\"\"\n assert name == self.element.pop()\n if self.element == [\"matchset\", \"match\"]:\n match = self.record[-1]\n if name in ContentHandler.integers:\n match[name] = int(self.content)\n elif name in ContentHandler.strings:\n match[name] = self.content\n else:\n # Unknown type, treat it as a string\n match[name] = self.content\n\n def characters(self, content):\n \"\"\"Store the record content.\"\"\"\n self.content += content\n", "path": "Bio/ExPASy/ScanProsite.py"}]} | 2,677 | 147 |
gh_patches_debug_3110 | rasdani/github-patches | git_diff | kserve__kserve-2018 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
KServe 0.8 release tracking
/kind feature
**Describe the solution you'd like**
KServe 0.8 release tracking:
RC release Date: 12/30/2021
Release Date: 1/14/2021
KServe Model Serving:
- [x] torchserve v2 protocol
- https://github.com/kserve/kserve/pull/1870 @jagadeeshi2i
- [X] Transformer -> Predictor gRPC support
- https://github.com/kserve/kserve/pull/1933
- [X] MLServer 0.5 update
- https://github.com/kserve/kserve/pull/1853 @adriangonz
- [X] Scikit-Learn 1.0.1 and XGBoost 1.5.0 upgrade
- https://github.com/kserve/kserve/pull/1954 @yuzisun
- [X] Introduce ServingRuntime to single model serving @pvaneck @Suresh-Nakkeran
- https://github.com/kserve/kserve/pull/1901
- https://github.com/kserve/kserve/pull/1926
- [ ] Introduce new storage spec @Tomcli
- https://github.com/kserve/kserve/pull/1899
- [X] Storage initializer fixes
- https://github.com/kserve/kserve/pull/1883
- https://github.com/kserve/kserve/pull/1940
- [X] Helm chart for KServe and ModelMesh @yuzisun
- https://github.com/kserve/kserve/pull/1878
- [X] KServe SDK features and fixes
- https://github.com/kserve/kserve/pull/1949 @markwinter
- https://github.com/kserve/kserve/pull/1934 @markwinter
- https://github.com/kserve/kserve/pull/1918 @markwinter
ModelMesh:
- [X] Multi-namespace support for ModelMesh
- [X] Improve rest proxy support
- https://github.com/kserve/rest-proxy/pull/6
Models UI:
- [ ] Models Web App KServe migration @kimwnasptd
Website:
- [ ] Website doc update
**Anything else you would like to add:**
[Miscellaneous information that will assist in solving the issue.]
</issue>
<code>
[start of python/kserve/setup.py]
1 # Copyright 2021 The KServe Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import setuptools
16
17 TESTS_REQUIRES = [
18 'pytest',
19 'pytest-xdist',
20 'pytest-cov',
21 'pytest-asyncio',
22 'pytest-tornasync',
23 'mypy'
24 ]
25
26 with open('requirements.txt') as f:
27 REQUIRES = f.readlines()
28
29 setuptools.setup(
30 name='kserve',
31 version='0.8.0rc0',
32 author="The KServe Authors",
33 author_email='[email protected], [email protected], [email protected]',
34 license="Apache License Version 2.0",
35 url="https://github.com/kserve/kserve/tree/master/python/kserve",
36 description="KServe Python SDK",
37 long_description="Python SDK for KServe Server and Client.",
38 python_requires='>=3.6',
39 packages=[
40 'kserve',
41 'kserve.api',
42 'kserve.constants',
43 'kserve.models',
44 'kserve.handlers',
45 'kserve.utils',
46 ],
47 package_data={'': ['requirements.txt']},
48 include_package_data=True,
49 zip_safe=False,
50 classifiers=[
51 'Intended Audience :: Developers',
52 'Intended Audience :: Education',
53 'Intended Audience :: Science/Research',
54 'Programming Language :: Python :: 3',
55 'Programming Language :: Python :: 3.6',
56 'Programming Language :: Python :: 3.7',
57 "License :: OSI Approved :: Apache Software License",
58 "Operating System :: OS Independent",
59 'Topic :: Scientific/Engineering',
60 'Topic :: Scientific/Engineering :: Artificial Intelligence',
61 'Topic :: Software Development',
62 'Topic :: Software Development :: Libraries',
63 'Topic :: Software Development :: Libraries :: Python Modules',
64 ],
65 install_requires=REQUIRES,
66 tests_require=TESTS_REQUIRES,
67 extras_require={'test': TESTS_REQUIRES}
68 )
69
[end of python/kserve/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/kserve/setup.py b/python/kserve/setup.py
--- a/python/kserve/setup.py
+++ b/python/kserve/setup.py
@@ -28,7 +28,7 @@
setuptools.setup(
name='kserve',
- version='0.8.0rc0',
+ version='0.8.0',
author="The KServe Authors",
author_email='[email protected], [email protected], [email protected]',
license="Apache License Version 2.0",
| {"golden_diff": "diff --git a/python/kserve/setup.py b/python/kserve/setup.py\n--- a/python/kserve/setup.py\n+++ b/python/kserve/setup.py\n@@ -28,7 +28,7 @@\n \n setuptools.setup(\n name='kserve',\n- version='0.8.0rc0',\n+ version='0.8.0',\n author=\"The KServe Authors\",\n author_email='[email protected], [email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n", "issue": "KServe 0.8 release tracking\n/kind feature\r\n\r\n**Describe the solution you'd like**\r\nKServe 0.8 release tracking:\r\nRC release Date: 12/30/2021\r\nRelease Date: 1/14/2021\r\n\r\nKServe Model Serving:\r\n- [x] torchserve v2 protocol\r\n - https://github.com/kserve/kserve/pull/1870 @jagadeeshi2i \r\n- [X] Transformer -> Predictor gRPC support\r\n - https://github.com/kserve/kserve/pull/1933\r\n- [X] MLServer 0.5 update\r\n - https://github.com/kserve/kserve/pull/1853 @adriangonz \r\n- [X] Scikit-Learn 1.0.1 and XGBoost 1.5.0 upgrade\r\n - https://github.com/kserve/kserve/pull/1954 @yuzisun \r\n- [X] Introduce ServingRuntime to single model serving @pvaneck @Suresh-Nakkeran \r\n - https://github.com/kserve/kserve/pull/1901\r\n - https://github.com/kserve/kserve/pull/1926\r\n- [ ] Introduce new storage spec @Tomcli \r\n - https://github.com/kserve/kserve/pull/1899\r\n- [X] Storage initializer fixes\r\n - https://github.com/kserve/kserve/pull/1883\r\n - https://github.com/kserve/kserve/pull/1940\r\n- [X] Helm chart for KServe and ModelMesh @yuzisun \r\n - https://github.com/kserve/kserve/pull/1878\r\n- [X] KServe SDK features and fixes\r\n - https://github.com/kserve/kserve/pull/1949 @markwinter \r\n - https://github.com/kserve/kserve/pull/1934 @markwinter \r\n - https://github.com/kserve/kserve/pull/1918 @markwinter \r\n\r\nModelMesh:\r\n- [X] Multi-namespace support for ModelMesh\r\n- [X] Improve rest proxy support\r\n - https://github.com/kserve/rest-proxy/pull/6\r\n\r\nModels UI:\r\n- [ ] Models Web App KServe migration @kimwnasptd \r\n \r\n \r\nWebsite: \r\n- [ ] Website doc update\r\n\r\n\r\n**Anything else you would like to add:**\r\n[Miscellaneous information that will assist in solving the issue.]\r\n\n", "before_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport setuptools\n\nTESTS_REQUIRES = [\n 'pytest',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-asyncio',\n 'pytest-tornasync',\n 'mypy'\n]\n\nwith open('requirements.txt') as f:\n REQUIRES = f.readlines()\n\nsetuptools.setup(\n name='kserve',\n version='0.8.0rc0',\n author=\"The KServe Authors\",\n author_email='[email protected], [email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n url=\"https://github.com/kserve/kserve/tree/master/python/kserve\",\n description=\"KServe Python SDK\",\n long_description=\"Python SDK for KServe Server and Client.\",\n python_requires='>=3.6',\n packages=[\n 'kserve',\n 'kserve.api',\n 'kserve.constants',\n 'kserve.models',\n 'kserve.handlers',\n 'kserve.utils',\n ],\n package_data={'': 
['requirements.txt']},\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRES,\n extras_require={'test': TESTS_REQUIRES}\n)\n", "path": "python/kserve/setup.py"}]} | 1,748 | 124 |
gh_patches_debug_22124 | rasdani/github-patches | git_diff | fossasia__open-event-server-5566 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Session Export CSV does not include all data
The Session Export should export all data sets that are available e.g. including:
* Submission time
* All speakers
* Proposed length
* Type (Workshop, Talk)
* Level (e.g. Intermediate)
* Status (e.g. pending, accepted etc.)

</issue>
<code>
[start of app/api/helpers/csv_jobs_util.py]
1 from app.models.helpers.versioning import strip_tags
2
3
4 def export_orders_csv(orders):
5 headers = ['Order#', 'Order Date', 'Status', 'Payment Type', 'Total Amount', 'Quantity',
6 'Discount Code', 'First Name', 'Last Name', 'Email']
7
8 rows = [headers]
9 for order in orders:
10 if order.status != "deleted":
11 column = [str(order.get_invoice_number()), str(order.created_at) if order.created_at else '',
12 str(order.status) if order.status else '', str(order.paid_via) if order.paid_via else '',
13 str(order.amount) if order.amount else '', str(order.tickets_count),
14 str(order.discount_code.code) if order.discount_code else '',
15 str(order.user.first_name)
16 if order.user and order.user.first_name else '',
17 str(order.user.last_name)
18 if order.user and order.user.last_name else '',
19 str(order.user.email) if order.user and order.user.email else '']
20 rows.append(column)
21
22 return rows
23
24
25 def export_attendees_csv(attendees):
26 headers = ['Order#', 'Order Date', 'Status', 'First Name', 'Last Name', 'Email',
27 'Country', 'Payment Type', 'Ticket Name', 'Ticket Price', 'Ticket Type']
28
29 rows = [headers]
30 for attendee in attendees:
31 column = [str(attendee.order.get_invoice_number()) if attendee.order else '-',
32 str(attendee.order.created_at) if attendee.order and attendee.order.created_at else '-',
33 str(attendee.order.status) if attendee.order and attendee.order.status else '-',
34 str(attendee.firstname) if attendee.firstname else '',
35 str(attendee.lastname) if attendee.lastname else '',
36 str(attendee.email) if attendee.email else '',
37 str(attendee.country) if attendee.country else '',
38 str(attendee.order.payment_mode) if attendee.order and attendee.order.payment_mode else '',
39 str(attendee.ticket.name) if attendee.ticket and attendee.ticket.name else '',
40 str(attendee.ticket.price) if attendee.ticket and attendee.ticket.price else '0',
41 str(attendee.ticket.type) if attendee.ticket and attendee.ticket.type else '']
42
43 rows.append(column)
44
45 return rows
46
47
48 def export_sessions_csv(sessions):
49 headers = ['Session Title', 'Session Speakers',
50 'Session Track', 'Session Abstract', 'Created At', 'Email Sent']
51 rows = [headers]
52 for session in sessions:
53 if not session.deleted_at:
54 column = [session.title + ' (' + session.state + ')' if session.title else '']
55 if session.speakers:
56 in_session = ''
57 for speaker in session.speakers:
58 if speaker.name:
59 in_session += (speaker.name + '; ')
60 column.append(in_session[:-2])
61 else:
62 column.append('')
63 column.append(session.track.name if session.track and session.track.name else '')
64 column.append(strip_tags(session.short_abstract) if session.short_abstract else '')
65 column.append(session.created_at if session.created_at else '')
66 column.append('Yes' if session.is_mail_sent else 'No')
67 rows.append(column)
68
69 return rows
70
71
72 def export_speakers_csv(speakers):
73 headers = ['Speaker Name', 'Speaker Email', 'Speaker Session(s)',
74 'Speaker Mobile', 'Speaker Bio', 'Speaker Organisation', 'Speaker Position']
75 rows = [headers]
76 for speaker in speakers:
77 column = [speaker.name if speaker.name else '', speaker.email if speaker.email else '']
78 if speaker.sessions:
79 session_details = ''
80 for session in speaker.sessions:
81 if not session.deleted_at:
82 session_details += session.title + ' (' + session.state + '); '
83 column.append(session_details[:-2])
84 else:
85 column.append('')
86 column.append(speaker.mobile if speaker.mobile else '')
87 column.append(speaker.short_biography if speaker.short_biography else '')
88 column.append(speaker.organisation if speaker.organisation else '')
89 column.append(speaker.position if speaker.position else '')
90 rows.append(column)
91
92 return rows
93
[end of app/api/helpers/csv_jobs_util.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/helpers/csv_jobs_util.py b/app/api/helpers/csv_jobs_util.py
--- a/app/api/helpers/csv_jobs_util.py
+++ b/app/api/helpers/csv_jobs_util.py
@@ -47,7 +47,8 @@
def export_sessions_csv(sessions):
headers = ['Session Title', 'Session Speakers',
- 'Session Track', 'Session Abstract', 'Created At', 'Email Sent']
+ 'Session Track', 'Session Abstract', 'Created At', 'Email Sent',
+ 'Level', 'Status', 'Session Type', 'Talk Length']
rows = [headers]
for session in sessions:
if not session.deleted_at:
@@ -64,6 +65,10 @@
column.append(strip_tags(session.short_abstract) if session.short_abstract else '')
column.append(session.created_at if session.created_at else '')
column.append('Yes' if session.is_mail_sent else 'No')
+ column.append(session.level)
+ column.append(session.state)
+ column.append(session.type)
+ column.append(len(session.long_abstract))
rows.append(column)
return rows
| {"golden_diff": "diff --git a/app/api/helpers/csv_jobs_util.py b/app/api/helpers/csv_jobs_util.py\n--- a/app/api/helpers/csv_jobs_util.py\n+++ b/app/api/helpers/csv_jobs_util.py\n@@ -47,7 +47,8 @@\n \n def export_sessions_csv(sessions):\n headers = ['Session Title', 'Session Speakers',\n- 'Session Track', 'Session Abstract', 'Created At', 'Email Sent']\n+ 'Session Track', 'Session Abstract', 'Created At', 'Email Sent',\n+ 'Level', 'Status', 'Session Type', 'Talk Length']\n rows = [headers]\n for session in sessions:\n if not session.deleted_at:\n@@ -64,6 +65,10 @@\n column.append(strip_tags(session.short_abstract) if session.short_abstract else '')\n column.append(session.created_at if session.created_at else '')\n column.append('Yes' if session.is_mail_sent else 'No')\n+ column.append(session.level)\n+ column.append(session.state)\n+ column.append(session.type)\n+ column.append(len(session.long_abstract))\n rows.append(column)\n \n return rows\n", "issue": "Session Export CSV does not include all data \nThe Session Export should export all data sets that are available e.g. including:\r\n* Submission time\r\n* All speakers\r\n* Proposed length\r\n* Type (Workshop, Talk)\r\n* Level (e.g. Intermediate)\r\n* Status (e.g. pending, accepted etc.)\r\n\r\n\n", "before_files": [{"content": "from app.models.helpers.versioning import strip_tags\n\n\ndef export_orders_csv(orders):\n headers = ['Order#', 'Order Date', 'Status', 'Payment Type', 'Total Amount', 'Quantity',\n 'Discount Code', 'First Name', 'Last Name', 'Email']\n\n rows = [headers]\n for order in orders:\n if order.status != \"deleted\":\n column = [str(order.get_invoice_number()), str(order.created_at) if order.created_at else '',\n str(order.status) if order.status else '', str(order.paid_via) if order.paid_via else '',\n str(order.amount) if order.amount else '', str(order.tickets_count),\n str(order.discount_code.code) if order.discount_code else '',\n str(order.user.first_name)\n if order.user and order.user.first_name else '',\n str(order.user.last_name)\n if order.user and order.user.last_name else '',\n str(order.user.email) if order.user and order.user.email else '']\n rows.append(column)\n\n return rows\n\n\ndef export_attendees_csv(attendees):\n headers = ['Order#', 'Order Date', 'Status', 'First Name', 'Last Name', 'Email',\n 'Country', 'Payment Type', 'Ticket Name', 'Ticket Price', 'Ticket Type']\n\n rows = [headers]\n for attendee in attendees:\n column = [str(attendee.order.get_invoice_number()) if attendee.order else '-',\n str(attendee.order.created_at) if attendee.order and attendee.order.created_at else '-',\n str(attendee.order.status) if attendee.order and attendee.order.status else '-',\n str(attendee.firstname) if attendee.firstname else '',\n str(attendee.lastname) if attendee.lastname else '',\n str(attendee.email) if attendee.email else '',\n str(attendee.country) if attendee.country else '',\n str(attendee.order.payment_mode) if attendee.order and attendee.order.payment_mode else '',\n str(attendee.ticket.name) if attendee.ticket and attendee.ticket.name else '',\n str(attendee.ticket.price) if attendee.ticket and attendee.ticket.price else '0',\n str(attendee.ticket.type) if attendee.ticket and attendee.ticket.type else '']\n\n rows.append(column)\n\n return rows\n\n\ndef export_sessions_csv(sessions):\n headers = ['Session Title', 'Session Speakers',\n 'Session Track', 'Session Abstract', 'Created At', 'Email Sent']\n rows = [headers]\n for session in sessions:\n if not session.deleted_at:\n column = 
[session.title + ' (' + session.state + ')' if session.title else '']\n if session.speakers:\n in_session = ''\n for speaker in session.speakers:\n if speaker.name:\n in_session += (speaker.name + '; ')\n column.append(in_session[:-2])\n else:\n column.append('')\n column.append(session.track.name if session.track and session.track.name else '')\n column.append(strip_tags(session.short_abstract) if session.short_abstract else '')\n column.append(session.created_at if session.created_at else '')\n column.append('Yes' if session.is_mail_sent else 'No')\n rows.append(column)\n\n return rows\n\n\ndef export_speakers_csv(speakers):\n headers = ['Speaker Name', 'Speaker Email', 'Speaker Session(s)',\n 'Speaker Mobile', 'Speaker Bio', 'Speaker Organisation', 'Speaker Position']\n rows = [headers]\n for speaker in speakers:\n column = [speaker.name if speaker.name else '', speaker.email if speaker.email else '']\n if speaker.sessions:\n session_details = ''\n for session in speaker.sessions:\n if not session.deleted_at:\n session_details += session.title + ' (' + session.state + '); '\n column.append(session_details[:-2])\n else:\n column.append('')\n column.append(speaker.mobile if speaker.mobile else '')\n column.append(speaker.short_biography if speaker.short_biography else '')\n column.append(speaker.organisation if speaker.organisation else '')\n column.append(speaker.position if speaker.position else '')\n rows.append(column)\n\n return rows\n", "path": "app/api/helpers/csv_jobs_util.py"}]} | 1,722 | 237 |
gh_patches_debug_10825 | rasdani/github-patches | git_diff | chainer__chainer-601 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
chainer.functions.Parameter cannot accept cupy.ndarray
```
In [1]: import numpy, chainer, cupy
In [2]: p = chainer.functions.Parameter(numpy.arange(12, dtype=numpy.float32))
In [3]: p = chainer.functions.Parameter(cupy.arange(12, dtype=numpy.float32))
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-3-3bee41ef9fca> in <module>()
----> 1 p = chainer.functions.Parameter(cupy.arange(12, dtype=numpy.float32))
/home/delta/dev/chainer2/chainer/functions/connection/parameter.py in __init__(self, array)
21 def __init__(self, array):
22 self.W = array
---> 23 self.gW = numpy.full_like(array, numpy.nan)
24
25 def __call__(self, volatile=False):
/home/delta/.pyenv/versions/pyenv-2.7.9/lib/python2.7/site-packages/numpy/core/numeric.pyc in full_like(a, fill_value, dtype, order, subok)
344
345 """
--> 346 res = empty_like(a, dtype=dtype, order=order, subok=subok)
347 multiarray.copyto(res, fill_value, casting='unsafe')
348 return res
ValueError: object __array__ method not producing an array
```
</issue>
<code>
[start of chainer/functions/connection/parameter.py]
1 import numpy
2
3 from chainer import function
4 from chainer.utils import type_check
5
6
7 class Parameter(function.Function):
8
9 """Function that outputs its weight array.
10
11 This is a parameterized function that takes no input and returns a variable
12 holding a shallow copy of the parameter array.
13
14 Args:
15 array: Initial parameter array.
16
17 """
18 parameter_names = 'W',
19 gradient_names = 'gW',
20
21 def __init__(self, array):
22 self.W = array
23 self.gW = numpy.full_like(array, numpy.nan)
24
25 def __call__(self, volatile=False):
26 ret = super(Parameter, self).__call__()
27 if volatile:
28 ret.unchain_backward()
29 ret.volatile = volatile
30 return ret
31
32 def check_type_forward(self, in_types):
33 type_check.expect(in_types.size() == 0)
34
35 def forward(self, x):
36 return self.W,
37
38 def backward(self, x, gy):
39 self.gW += gy[0]
40 return ()
41
[end of chainer/functions/connection/parameter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/functions/connection/parameter.py b/chainer/functions/connection/parameter.py
--- a/chainer/functions/connection/parameter.py
+++ b/chainer/functions/connection/parameter.py
@@ -1,5 +1,6 @@
import numpy
+from chainer import cuda
from chainer import function
from chainer.utils import type_check
@@ -20,7 +21,8 @@
def __init__(self, array):
self.W = array
- self.gW = numpy.full_like(array, numpy.nan)
+ xp = cuda.get_array_module(array)
+ self.gW = xp.full_like(self.W, numpy.nan)
def __call__(self, volatile=False):
ret = super(Parameter, self).__call__()
| {"golden_diff": "diff --git a/chainer/functions/connection/parameter.py b/chainer/functions/connection/parameter.py\n--- a/chainer/functions/connection/parameter.py\n+++ b/chainer/functions/connection/parameter.py\n@@ -1,5 +1,6 @@\n import numpy\n \n+from chainer import cuda\n from chainer import function\n from chainer.utils import type_check\n \n@@ -20,7 +21,8 @@\n \n def __init__(self, array):\n self.W = array\n- self.gW = numpy.full_like(array, numpy.nan)\n+ xp = cuda.get_array_module(array)\n+ self.gW = xp.full_like(self.W, numpy.nan)\n \n def __call__(self, volatile=False):\n ret = super(Parameter, self).__call__()\n", "issue": "chainer.functions.Parameter cannot accept cupy.ndarray\n```\nIn [1]: import numpy, chainer, cupy\nIn [2]: p = chainer.functions.Parameter(numpy.arange(12, dtype=numpy.float32))\nIn [3]: p = chainer.functions.Parameter(cupy.arange(12, dtype=numpy.float32))\n---------------------------------------------------------------------------\nValueError Traceback (most recent call last)\n<ipython-input-3-3bee41ef9fca> in <module>()\n----> 1 p = chainer.functions.Parameter(cupy.arange(12, dtype=numpy.float32))\n\n/home/delta/dev/chainer2/chainer/functions/connection/parameter.py in __init__(self, array)\n 21 def __init__(self, array):\n 22 self.W = array\n---> 23 self.gW = numpy.full_like(array, numpy.nan)\n 24 \n 25 def __call__(self, volatile=False):\n\n/home/delta/.pyenv/versions/pyenv-2.7.9/lib/python2.7/site-packages/numpy/core/numeric.pyc in full_like(a, fill_value, dtype, order, subok)\n 344 \n 345 \"\"\"\n--> 346 res = empty_like(a, dtype=dtype, order=order, subok=subok)\n 347 multiarray.copyto(res, fill_value, casting='unsafe')\n 348 return res\n\nValueError: object __array__ method not producing an array\n```\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass Parameter(function.Function):\n\n \"\"\"Function that outputs its weight array.\n\n This is a parameterized function that takes no input and returns a variable\n holding a shallow copy of the parameter array.\n\n Args:\n array: Initial parameter array.\n\n \"\"\"\n parameter_names = 'W',\n gradient_names = 'gW',\n\n def __init__(self, array):\n self.W = array\n self.gW = numpy.full_like(array, numpy.nan)\n\n def __call__(self, volatile=False):\n ret = super(Parameter, self).__call__()\n if volatile:\n ret.unchain_backward()\n ret.volatile = volatile\n return ret\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 0)\n\n def forward(self, x):\n return self.W,\n\n def backward(self, x, gy):\n self.gW += gy[0]\n return ()\n", "path": "chainer/functions/connection/parameter.py"}]} | 1,170 | 162 |
gh_patches_debug_17745 | rasdani/github-patches | git_diff | mindsdb__mindsdb-1675 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add new method to return the columns for MySQL datasources :electric_plug: :1234:
When MindsDB creates a new MySQL datasource we get information for columns by fetching all datasources. The problem here is that if the datasource is big this takes a lot of time. We need a new get_columns method to return the column names per datasource. The PR should include this method inside the MySQL class.
## Steps :male_detective: :female_detective:
- Implement in https://github.com/mindsdb/mindsdb/blob/stable/mindsdb/integrations/mysql/mysql.py#L51
- Push to staging branch
## Additional rewards :1st_place_medal:
Each code PR brings :three: point for entry into the draw for a :computer: Deep Learning Laptop powered by the NVIDIA RTX 3080 Max-Q GPU or other swag :shirt: :bear: . For more info check out https://mindsdb.com/hacktoberfest/
</issue>
<code>
[start of mindsdb/integrations/mysql/mysql.py]
1 import os
2 import shutil
3 import tempfile
4
5 from contextlib import closing
6 import mysql.connector
7
8 from lightwood.api import dtype
9 from mindsdb.integrations.base import Integration
10 from mindsdb.utilities.log import log
11
12
13 class MySQLConnectionChecker:
14 def __init__(self, **kwargs):
15 self.host = kwargs.get('host')
16 self.port = kwargs.get('port')
17 self.user = kwargs.get('user')
18 self.password = kwargs.get('password')
19 self.ssl = kwargs.get('ssl')
20 self.ssl_ca = kwargs.get('ssl_ca')
21 self.ssl_cert = kwargs.get('ssl_cert')
22 self.ssl_key = kwargs.get('ssl_key')
23
24 def _get_connnection(self):
25 config = {
26 "host": self.host,
27 "port": self.port,
28 "user": self.user,
29 "password": self.password
30 }
31 if self.ssl is True:
32 config['client_flags'] = [mysql.connector.constants.ClientFlag.SSL]
33 if self.ssl_ca is not None:
34 config["ssl_ca"] = self.ssl_ca
35 if self.ssl_cert is not None:
36 config["ssl_cert"] = self.ssl_cert
37 if self.ssl_key is not None:
38 config["ssl_key"] = self.ssl_key
39 return mysql.connector.connect(**config)
40
41 def check_connection(self):
42 try:
43 con = self._get_connnection()
44 with closing(con) as con:
45 connected = con.is_connected()
46 except Exception:
47 connected = False
48 return connected
49
50
51 class MySQL(Integration, MySQLConnectionChecker):
52 def __init__(self, config, name, db_info):
53 super().__init__(config, name)
54 self.user = db_info.get('user')
55 self.password = db_info.get('password')
56 self.host = db_info.get('host')
57 self.port = db_info.get('port')
58 self.ssl = db_info.get('ssl')
59 self.ssl_ca = db_info.get('ssl_ca')
60 self.ssl_cert = db_info.get('ssl_cert')
61 self.ssl_key = db_info.get('ssl_key')
62
63 def _to_mysql_table(self, dtype_dict, predicted_cols, columns):
64 subtype_map = {
65 dtype.integer: 'int',
66 dtype.float: 'double',
67 dtype.binary: 'bool',
68 dtype.date: 'Date',
69 dtype.datetime: 'Datetime',
70 dtype.binary: 'VARCHAR(500)',
71 dtype.categorical: 'VARCHAR(500)',
72 dtype.tags: 'VARCHAR(500)',
73 dtype.image: 'VARCHAR(500)',
74 dtype.video: 'VARCHAR(500)',
75 dtype.audio: 'VARCHAR(500)',
76 dtype.short_text: 'VARCHAR(500)',
77 dtype.rich_text: 'VARCHAR(500)',
78 dtype.array: 'VARCHAR(500)'
79 }
80
81 column_declaration = []
82 for name in columns:
83 try:
84 col_subtype = dtype_dict[name]
85 new_type = subtype_map[col_subtype]
86 column_declaration.append(f' `{name}` {new_type} ')
87 if name in predicted_cols:
88 column_declaration.append(f' `{name}_original` {new_type} ')
89 except Exception as e:
90 log.error(f'Error: can not determine mysql data type for column {name}: {e}')
91
92 return column_declaration
93
94 def _escape_table_name(self, name):
95 return '`' + name.replace('`', '``') + '`'
96
97 def _query(self, query):
98 con = self._get_connnection()
99 with closing(con) as con:
100 cur = con.cursor(dictionary=True, buffered=True)
101 cur.execute(query)
102 res = True
103 try:
104 res = cur.fetchall()
105 except Exception:
106 pass
107 con.commit()
108
109 return res
110
111 def _get_connect_string(self, table):
112 user = f"{self.config['api']['mysql']['user']}_{self.name}"
113 password = self.config['api']['mysql']['password']
114 host = self.config['api']['mysql']['host']
115 port = self.config['api']['mysql']['port']
116
117 if password is None or password == '':
118 connect = f'mysql://{user}@{host}:{port}/mindsdb/{table}'
119 else:
120 connect = f'mysql://{user}:{password}@{host}:{port}/mindsdb/{table}'
121
122 return connect
123
124 def setup(self):
125 self._query(f'DROP DATABASE IF EXISTS {self.mindsdb_database}')
126 self._query(f'CREATE DATABASE IF NOT EXISTS {self.mindsdb_database}')
127
128 connect = self._get_connect_string('predictors')
129
130 q = f"""
131 CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.predictors (
132 name VARCHAR(500),
133 status VARCHAR(500),
134 accuracy VARCHAR(500),
135 predict VARCHAR(500),
136 select_data_query VARCHAR(500),
137 external_datasource VARCHAR(500),
138 training_options VARCHAR(500),
139 key name_key (name)
140 ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';
141 """
142 self._query(q)
143
144 connect = self._get_connect_string('commands')
145
146 q = f"""
147 CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.commands (
148 command VARCHAR(500),
149 key command_key (command)
150 ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';
151 """
152 self._query(q)
153
154 def register_predictors(self, model_data_arr):
155 for model_meta in model_data_arr:
156 name = model_meta['name']
157 predict = model_meta['predict']
158 if not isinstance(predict, list):
159 predict = [predict]
160 columns_sql = ','.join(self._to_mysql_table(
161 model_meta['dtype_dict'],
162 predict,
163 list(model_meta['dtype_dict'].keys())
164 ))
165 columns_sql += ',`when_data` varchar(500)'
166 columns_sql += ',`select_data_query` varchar(500)'
167 columns_sql += ',`external_datasource` varchar(500)'
168 for col in predict:
169 columns_sql += f',`{col}_confidence` double'
170 if model_meta['dtype_dict'][col] in (dtype.integer, dtype.float):
171 columns_sql += f',`{col}_min` double'
172 columns_sql += f',`{col}_max` double'
173 columns_sql += f',`{col}_explain` varchar(500)'
174
175 connect = self._get_connect_string(name)
176
177 self.unregister_predictor(name)
178 q = f"""
179 CREATE TABLE {self.mindsdb_database}.{self._escape_table_name(name)} (
180 {columns_sql},
181 index when_data_index (when_data),
182 index select_data_query_index (select_data_query),
183 index external_datasource_index (external_datasource)
184 ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';
185 """
186 self._query(q)
187
188 def unregister_predictor(self, name):
189 q = f"""
190 drop table if exists {self.mindsdb_database}.{self._escape_table_name(name)};
191 """
192 self._query(q)
193
194 def get_row_count(self, query):
195 q = f"""
196 SELECT COUNT(*) as count
197 FROM ({query}) as query;"""
198 result = self._query(q)
199 return result[0]['count']
200
201 def get_columns(self):
202 q = f"""SELECT COLUMN_NAME ,TABLE_NAME
203 FROM INFORMATION_SCHEMA.COLUMNS
204 WHERE TABLE_SCHEMA = database()
205 ORDER BY COLUMN_NAME, TABLE_NAME;"""
206 columns_list = self._query(q)
207 columns = [f"{columns[0]}.{columns[1]}" for columns in columns_list]
208 return columns
209
210 def get_tables_list(self):
211 q= f"""
212 SHOW TABLES;
213 """
214 result = self._query(q)
215 return result
[end of mindsdb/integrations/mysql/mysql.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mindsdb/integrations/mysql/mysql.py b/mindsdb/integrations/mysql/mysql.py
--- a/mindsdb/integrations/mysql/mysql.py
+++ b/mindsdb/integrations/mysql/mysql.py
@@ -198,18 +198,18 @@
result = self._query(q)
return result[0]['count']
- def get_columns(self):
- q = f"""SELECT COLUMN_NAME ,TABLE_NAME
- FROM INFORMATION_SCHEMA.COLUMNS
- WHERE TABLE_SCHEMA = database()
- ORDER BY COLUMN_NAME, TABLE_NAME;"""
- columns_list = self._query(q)
- columns = [f"{columns[0]}.{columns[1]}" for columns in columns_list]
- return columns
+ def get_columns(self,query):
+ q = f"""SELECT * from ({query}) LIMIT 1;"""
+ query_response = self._query(q)
+ if len(query_response) > 0:
+ columns = list(query_response[0].keys())
+ return columns
+ else:
+ return []
def get_tables_list(self):
q= f"""
SHOW TABLES;
"""
result = self._query(q)
- return result
\ No newline at end of file
+ return result
| {"golden_diff": "diff --git a/mindsdb/integrations/mysql/mysql.py b/mindsdb/integrations/mysql/mysql.py\n--- a/mindsdb/integrations/mysql/mysql.py\n+++ b/mindsdb/integrations/mysql/mysql.py\n@@ -198,18 +198,18 @@\n result = self._query(q)\n return result[0]['count']\n \n- def get_columns(self):\n- q = f\"\"\"SELECT COLUMN_NAME ,TABLE_NAME\n- FROM INFORMATION_SCHEMA.COLUMNS \n- WHERE TABLE_SCHEMA = database()\n- ORDER BY COLUMN_NAME, TABLE_NAME;\"\"\"\n- columns_list = self._query(q)\n- columns = [f\"{columns[0]}.{columns[1]}\" for columns in columns_list]\n- return columns\n+ def get_columns(self,query):\n+ q = f\"\"\"SELECT * from ({query}) LIMIT 1;\"\"\"\n+ query_response = self._query(q)\n+ if len(query_response) > 0:\n+ columns = list(query_response[0].keys())\n+ return columns\n+ else:\n+ return []\n \n def get_tables_list(self):\n q= f\"\"\"\n SHOW TABLES;\n \"\"\"\n result = self._query(q)\n- return result\n\\ No newline at end of file\n+ return result\n", "issue": "Add new method to return the columns for MySQL datasources :electric_plug: :1234: \nWhen MindsDB creates a new MySQL datasource we get information for columns by fetching all datasources. The problem here is that if datasource is big it takes a lot of time. We need a new get_columns method to return the columns name per datasource. The PR should include this method inside the MySQL class .\r\n\r\n## Steps :male_detective: :female_detective: \r\n\r\n- Implement in https://github.com/mindsdb/mindsdb/blob/stable/mindsdb/integrations/mysql/mysql.py#L51\r\n- Push to staging branch\r\n\r\n## Additional rewards :1st_place_medal: \r\n\r\nEach code PR brings :three: point for entry into the draw for a :computer: Deep Learning Laptop powered by the NVIDIA RTX 3080 Max-Q GPU or other swag :shirt: :bear: . 
For more info check out https://mindsdb.com/hacktoberfest/\r\n \r\n\r\n\n", "before_files": [{"content": "import os\nimport shutil\nimport tempfile\n\nfrom contextlib import closing\nimport mysql.connector\n\nfrom lightwood.api import dtype\nfrom mindsdb.integrations.base import Integration\nfrom mindsdb.utilities.log import log\n\n\nclass MySQLConnectionChecker:\n def __init__(self, **kwargs):\n self.host = kwargs.get('host')\n self.port = kwargs.get('port')\n self.user = kwargs.get('user')\n self.password = kwargs.get('password')\n self.ssl = kwargs.get('ssl')\n self.ssl_ca = kwargs.get('ssl_ca')\n self.ssl_cert = kwargs.get('ssl_cert')\n self.ssl_key = kwargs.get('ssl_key')\n\n def _get_connnection(self):\n config = {\n \"host\": self.host,\n \"port\": self.port,\n \"user\": self.user,\n \"password\": self.password\n }\n if self.ssl is True:\n config['client_flags'] = [mysql.connector.constants.ClientFlag.SSL]\n if self.ssl_ca is not None:\n config[\"ssl_ca\"] = self.ssl_ca\n if self.ssl_cert is not None:\n config[\"ssl_cert\"] = self.ssl_cert\n if self.ssl_key is not None:\n config[\"ssl_key\"] = self.ssl_key\n return mysql.connector.connect(**config)\n\n def check_connection(self):\n try:\n con = self._get_connnection()\n with closing(con) as con:\n connected = con.is_connected()\n except Exception:\n connected = False\n return connected\n\n\nclass MySQL(Integration, MySQLConnectionChecker):\n def __init__(self, config, name, db_info):\n super().__init__(config, name)\n self.user = db_info.get('user')\n self.password = db_info.get('password')\n self.host = db_info.get('host')\n self.port = db_info.get('port')\n self.ssl = db_info.get('ssl')\n self.ssl_ca = db_info.get('ssl_ca')\n self.ssl_cert = db_info.get('ssl_cert')\n self.ssl_key = db_info.get('ssl_key')\n\n def _to_mysql_table(self, dtype_dict, predicted_cols, columns):\n subtype_map = {\n dtype.integer: 'int',\n dtype.float: 'double',\n dtype.binary: 'bool',\n dtype.date: 'Date',\n dtype.datetime: 'Datetime',\n dtype.binary: 'VARCHAR(500)',\n dtype.categorical: 'VARCHAR(500)',\n dtype.tags: 'VARCHAR(500)',\n dtype.image: 'VARCHAR(500)',\n dtype.video: 'VARCHAR(500)',\n dtype.audio: 'VARCHAR(500)',\n dtype.short_text: 'VARCHAR(500)',\n dtype.rich_text: 'VARCHAR(500)',\n dtype.array: 'VARCHAR(500)'\n }\n\n column_declaration = []\n for name in columns:\n try:\n col_subtype = dtype_dict[name]\n new_type = subtype_map[col_subtype]\n column_declaration.append(f' `{name}` {new_type} ')\n if name in predicted_cols:\n column_declaration.append(f' `{name}_original` {new_type} ')\n except Exception as e:\n log.error(f'Error: can not determine mysql data type for column {name}: {e}')\n\n return column_declaration\n\n def _escape_table_name(self, name):\n return '`' + name.replace('`', '``') + '`'\n\n def _query(self, query):\n con = self._get_connnection()\n with closing(con) as con:\n cur = con.cursor(dictionary=True, buffered=True)\n cur.execute(query)\n res = True\n try:\n res = cur.fetchall()\n except Exception:\n pass\n con.commit()\n\n return res\n\n def _get_connect_string(self, table):\n user = f\"{self.config['api']['mysql']['user']}_{self.name}\"\n password = self.config['api']['mysql']['password']\n host = self.config['api']['mysql']['host']\n port = self.config['api']['mysql']['port']\n\n if password is None or password == '':\n connect = f'mysql://{user}@{host}:{port}/mindsdb/{table}'\n else:\n connect = f'mysql://{user}:{password}@{host}:{port}/mindsdb/{table}'\n\n return connect\n\n def setup(self):\n self._query(f'DROP 
DATABASE IF EXISTS {self.mindsdb_database}')\n self._query(f'CREATE DATABASE IF NOT EXISTS {self.mindsdb_database}')\n\n connect = self._get_connect_string('predictors')\n\n q = f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.predictors (\n name VARCHAR(500),\n status VARCHAR(500),\n accuracy VARCHAR(500),\n predict VARCHAR(500),\n select_data_query VARCHAR(500),\n external_datasource VARCHAR(500),\n training_options VARCHAR(500),\n key name_key (name)\n ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';\n \"\"\"\n self._query(q)\n\n connect = self._get_connect_string('commands')\n\n q = f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.commands (\n command VARCHAR(500),\n key command_key (command)\n ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';\n \"\"\"\n self._query(q)\n\n def register_predictors(self, model_data_arr):\n for model_meta in model_data_arr:\n name = model_meta['name']\n predict = model_meta['predict']\n if not isinstance(predict, list):\n predict = [predict]\n columns_sql = ','.join(self._to_mysql_table(\n model_meta['dtype_dict'],\n predict,\n list(model_meta['dtype_dict'].keys())\n ))\n columns_sql += ',`when_data` varchar(500)'\n columns_sql += ',`select_data_query` varchar(500)'\n columns_sql += ',`external_datasource` varchar(500)'\n for col in predict:\n columns_sql += f',`{col}_confidence` double'\n if model_meta['dtype_dict'][col] in (dtype.integer, dtype.float):\n columns_sql += f',`{col}_min` double'\n columns_sql += f',`{col}_max` double'\n columns_sql += f',`{col}_explain` varchar(500)'\n\n connect = self._get_connect_string(name)\n\n self.unregister_predictor(name)\n q = f\"\"\"\n CREATE TABLE {self.mindsdb_database}.{self._escape_table_name(name)} (\n {columns_sql},\n index when_data_index (when_data),\n index select_data_query_index (select_data_query),\n index external_datasource_index (external_datasource)\n ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';\n \"\"\"\n self._query(q)\n\n def unregister_predictor(self, name):\n q = f\"\"\"\n drop table if exists {self.mindsdb_database}.{self._escape_table_name(name)};\n \"\"\"\n self._query(q)\n\n def get_row_count(self, query):\n q = f\"\"\" \n SELECT COUNT(*) as count\n FROM ({query}) as query;\"\"\"\n result = self._query(q)\n return result[0]['count']\n\n def get_columns(self):\n q = f\"\"\"SELECT COLUMN_NAME ,TABLE_NAME\n FROM INFORMATION_SCHEMA.COLUMNS \n WHERE TABLE_SCHEMA = database()\n ORDER BY COLUMN_NAME, TABLE_NAME;\"\"\"\n columns_list = self._query(q)\n columns = [f\"{columns[0]}.{columns[1]}\" for columns in columns_list]\n return columns\n \n def get_tables_list(self):\n q= f\"\"\"\n SHOW TABLES;\n \"\"\"\n result = self._query(q)\n return result", "path": "mindsdb/integrations/mysql/mysql.py"}]} | 2,972 | 283 |
gh_patches_debug_19977 | rasdani/github-patches | git_diff | uclapi__uclapi-1219 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Data exposed by webhooks, not shown by /bookings
An example is Gordon St (22) 4.01, which is provided by webhooks when bookings change but which we do not usually return from /bookings.
</issue>
<code>
[start of backend/uclapi/roombookings/management/commands/trigger_webhooks.py]
1 from django.core.management.base import BaseCommand
2 from roombookings.models import BookingA, BookingB
3 from timetable.models import Lock
4 from roombookings.helpers import _serialize_bookings
5 from dashboard.models import Webhook, WebhookTriggerHistory
6 from datetime import datetime
7 from deepdiff import DeepDiff
8 from django.utils import timezone
9 from requests_futures.sessions import FuturesSession
10
11
12 class Command(BaseCommand):
13
14 help = 'Diff roombooking result sets and notify relevant webhooks'
15
16 def add_arguments(self, parser):
17 parser.add_argument(
18 '--debug',
19 action='store_true',
20 dest='debug',
21 help='Print webhook responses',
22 )
23
24 def handle(self, *args, **options):
25 self.stdout.write("Triggering webhooks")
26 session = FuturesSession()
27
28 # currently not locked table is the old one, more recent one is locked
29 lock = Lock.objects.all()[0] # there is only ever one lock
30
31 if not lock.a:
32 old_booking_table = BookingA
33 new_booking_table = BookingB
34 else:
35 old_booking_table = BookingB
36 new_booking_table = BookingA
37
38 now = datetime.now()
39
40 old_bookings = _serialize_bookings(
41 old_booking_table.objects.filter(
42 startdatetime__gt=now
43 )
44 )
45 new_bookings = _serialize_bookings(
46 new_booking_table.objects.filter(
47 startdatetime__gt=now
48 )
49 )
50
51 ddiff = DeepDiff(old_bookings, new_bookings, ignore_order=True)
52
53 webhooks = Webhook.objects.filter(app__deleted=False)
54 # assumption: list of webhooks will be longer than ddiff
55
56 num_bookings_added = 0
57 num_bookings_removed = 0
58 if "iterable_item_added" in ddiff:
59 num_bookings_added = len(
60 ddiff["iterable_item_added"].values()
61 )
62
63 if "iterable_item_removed" in ddiff:
64 num_bookings_removed = len(
65 ddiff["iterable_item_removed"].values()
66 )
67
68 self.stdout.write(
69 "{} bookings added\n{} bookings removed.".format(
70 num_bookings_added,
71 num_bookings_removed
72 )
73 )
74
75 def webhook_map(webhook):
76 def webhook_filter(booking):
77 return (
78 (
79 webhook.siteid == '' or
80 booking["siteid"] == webhook.siteid
81 ) and
82 (
83 webhook.roomid == '' or
84 booking["roomid"] == webhook.roomid
85 ) and
86 (
87 webhook.contact == '' or
88 # mimick SQL 'like'
89 webhook.contact in str(booking["contact"])
90 )
91 )
92 output = {
93 "webhook_in_db": webhook,
94 "url": webhook.url,
95 "verification_secret": webhook.verification_secret
96 }
97 if "iterable_item_added" in ddiff:
98 bookings_added = list(filter(
99 webhook_filter, ddiff["iterable_item_added"].values()
100 ))
101 if bookings_added != []:
102 output["bookings_added"] = bookings_added
103 if "iterable_item_removed" in ddiff:
104 bookings_removed = list(filter(
105 webhook_filter, ddiff["iterable_item_removed"].values()
106 ))
107 if bookings_removed != []:
108 output["bookings_removed"] = bookings_removed
109
110 return output
111
112 webhooks_to_enact = list(map(webhook_map, webhooks))
113
114 unsent_requests = []
115 for idx, webhook in enumerate(webhooks_to_enact):
116 payload = {
117 "service": "roombookings",
118 "name": "bookings_changed",
119 "verification_secret": webhook["verification_secret"],
120 "content": {}
121 }
122
123 if "bookings_added" in webhook:
124 payload["content"]["bookings_added"] = (
125 webhook["bookings_added"]
126 )
127 if "bookings_removed" in webhook:
128 payload["content"]["bookings_removed"] = (
129 webhook["bookings_removed"]
130 )
131
132 webhooks_to_enact[idx]["payload"] = payload
133
134 if payload["content"] != {} and webhook["url"] != "":
135 unsent_requests.append(
136 session.post(
137 webhook["url"], json=payload,
138 headers={
139 "User-Agent": "uclapi-bot/1"
140 }
141 )
142 )
143 self.stdout.write(
144 "Triggering {} webhooks.".format(len(unsent_requests))
145 )
146 if("debug" in options):
147 for i in unsent_requests:
148 self.stdout.write(
149 'response status {0}'.format(i.result().status_code)
150 )
151
152 for webhook in webhooks_to_enact:
153 if webhook["payload"]["content"] != {}:
154 webhook_in_db = webhook["webhook_in_db"]
155 webhook_in_db.last_fired = timezone.now()
156 webhook_in_db.save()
157
158 new_webhook_history_entry = WebhookTriggerHistory(
159 webhook=webhook_in_db,
160 payload=webhook["payload"]
161 )
162 new_webhook_history_entry.save()
163
164 self.stdout.write("Webhooks triggered.")
165
[end of backend/uclapi/roombookings/management/commands/trigger_webhooks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py
--- a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py
+++ b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py
@@ -7,6 +7,7 @@
from deepdiff import DeepDiff
from django.utils import timezone
from requests_futures.sessions import FuturesSession
+from django.db.models import Q
class Command(BaseCommand):
@@ -39,11 +40,13 @@
old_bookings = _serialize_bookings(
old_booking_table.objects.filter(
+ Q(bookabletype='CB') | Q(siteid='238') | Q(siteid='240'),
startdatetime__gt=now
)
)
new_bookings = _serialize_bookings(
new_booking_table.objects.filter(
+ Q(bookabletype='CB') | Q(siteid='238') | Q(siteid='240'),
startdatetime__gt=now
)
)
| {"golden_diff": "diff --git a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py\n--- a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py\n+++ b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py\n@@ -7,6 +7,7 @@\n from deepdiff import DeepDiff\n from django.utils import timezone\n from requests_futures.sessions import FuturesSession\n+from django.db.models import Q\n \n \n class Command(BaseCommand):\n@@ -39,11 +40,13 @@\n \n old_bookings = _serialize_bookings(\n old_booking_table.objects.filter(\n+ Q(bookabletype='CB') | Q(siteid='238') | Q(siteid='240'),\n startdatetime__gt=now\n )\n )\n new_bookings = _serialize_bookings(\n new_booking_table.objects.filter(\n+ Q(bookabletype='CB') | Q(siteid='238') | Q(siteid='240'),\n startdatetime__gt=now\n )\n )\n", "issue": "Data exposed by webhooks, not shown by /bookings\nAn example is Gordon St (22) 4.01 . which is provided by webhooks when bookings change but we do not return it for bookings usually.\n", "before_files": [{"content": "from django.core.management.base import BaseCommand\nfrom roombookings.models import BookingA, BookingB\nfrom timetable.models import Lock\nfrom roombookings.helpers import _serialize_bookings\nfrom dashboard.models import Webhook, WebhookTriggerHistory\nfrom datetime import datetime\nfrom deepdiff import DeepDiff\nfrom django.utils import timezone\nfrom requests_futures.sessions import FuturesSession\n\n\nclass Command(BaseCommand):\n\n help = 'Diff roombooking result sets and notify relevant webhooks'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--debug',\n action='store_true',\n dest='debug',\n help='Print webhook responses',\n )\n\n def handle(self, *args, **options):\n self.stdout.write(\"Triggering webhooks\")\n session = FuturesSession()\n\n # currently not locked table is the old one, more recent one is locked\n lock = Lock.objects.all()[0] # there is only ever one lock\n\n if not lock.a:\n old_booking_table = BookingA\n new_booking_table = BookingB\n else:\n old_booking_table = BookingB\n new_booking_table = BookingA\n\n now = datetime.now()\n\n old_bookings = _serialize_bookings(\n old_booking_table.objects.filter(\n startdatetime__gt=now\n )\n )\n new_bookings = _serialize_bookings(\n new_booking_table.objects.filter(\n startdatetime__gt=now\n )\n )\n\n ddiff = DeepDiff(old_bookings, new_bookings, ignore_order=True)\n\n webhooks = Webhook.objects.filter(app__deleted=False)\n # assumption: list of webhooks will be longer than ddiff\n\n num_bookings_added = 0\n num_bookings_removed = 0\n if \"iterable_item_added\" in ddiff:\n num_bookings_added = len(\n ddiff[\"iterable_item_added\"].values()\n )\n\n if \"iterable_item_removed\" in ddiff:\n num_bookings_removed = len(\n ddiff[\"iterable_item_removed\"].values()\n )\n\n self.stdout.write(\n \"{} bookings added\\n{} bookings removed.\".format(\n num_bookings_added,\n num_bookings_removed\n )\n )\n\n def webhook_map(webhook):\n def webhook_filter(booking):\n return (\n (\n webhook.siteid == '' or\n booking[\"siteid\"] == webhook.siteid\n ) and\n (\n webhook.roomid == '' or\n booking[\"roomid\"] == webhook.roomid\n ) and\n (\n webhook.contact == '' or\n # mimick SQL 'like'\n webhook.contact in str(booking[\"contact\"])\n )\n )\n output = {\n \"webhook_in_db\": webhook,\n \"url\": webhook.url,\n \"verification_secret\": webhook.verification_secret\n }\n if \"iterable_item_added\" in ddiff:\n bookings_added = 
list(filter(\n webhook_filter, ddiff[\"iterable_item_added\"].values()\n ))\n if bookings_added != []:\n output[\"bookings_added\"] = bookings_added\n if \"iterable_item_removed\" in ddiff:\n bookings_removed = list(filter(\n webhook_filter, ddiff[\"iterable_item_removed\"].values()\n ))\n if bookings_removed != []:\n output[\"bookings_removed\"] = bookings_removed\n\n return output\n\n webhooks_to_enact = list(map(webhook_map, webhooks))\n\n unsent_requests = []\n for idx, webhook in enumerate(webhooks_to_enact):\n payload = {\n \"service\": \"roombookings\",\n \"name\": \"bookings_changed\",\n \"verification_secret\": webhook[\"verification_secret\"],\n \"content\": {}\n }\n\n if \"bookings_added\" in webhook:\n payload[\"content\"][\"bookings_added\"] = (\n webhook[\"bookings_added\"]\n )\n if \"bookings_removed\" in webhook:\n payload[\"content\"][\"bookings_removed\"] = (\n webhook[\"bookings_removed\"]\n )\n\n webhooks_to_enact[idx][\"payload\"] = payload\n\n if payload[\"content\"] != {} and webhook[\"url\"] != \"\":\n unsent_requests.append(\n session.post(\n webhook[\"url\"], json=payload,\n headers={\n \"User-Agent\": \"uclapi-bot/1\"\n }\n )\n )\n self.stdout.write(\n \"Triggering {} webhooks.\".format(len(unsent_requests))\n )\n if(\"debug\" in options):\n for i in unsent_requests:\n self.stdout.write(\n 'response status {0}'.format(i.result().status_code)\n )\n\n for webhook in webhooks_to_enact:\n if webhook[\"payload\"][\"content\"] != {}:\n webhook_in_db = webhook[\"webhook_in_db\"]\n webhook_in_db.last_fired = timezone.now()\n webhook_in_db.save()\n\n new_webhook_history_entry = WebhookTriggerHistory(\n webhook=webhook_in_db,\n payload=webhook[\"payload\"]\n )\n new_webhook_history_entry.save()\n\n self.stdout.write(\"Webhooks triggered.\")\n", "path": "backend/uclapi/roombookings/management/commands/trigger_webhooks.py"}]} | 2,078 | 253 |
gh_patches_debug_18966 | rasdani/github-patches | git_diff | celery__celery-7609 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
make REVOKES_MAX and REVOKE_EXPIRES configurable
Values of REVOKE_EXPIRES and REVOKES_MAX in worker/state.py are hardcoded.
This should be configurable. Some of us really needed to change this.
</issue>
<code>
[start of celery/worker/state.py]
1 """Internal worker state (global).
2
3 This includes the currently active and reserved tasks,
4 statistics, and revoked tasks.
5 """
6 import os
7 import platform
8 import shelve
9 import sys
10 import weakref
11 import zlib
12 from collections import Counter
13
14 from kombu.serialization import pickle, pickle_protocol
15 from kombu.utils.objects import cached_property
16
17 from celery import __version__
18 from celery.exceptions import WorkerShutdown, WorkerTerminate
19 from celery.utils.collections import LimitedSet
20
21 __all__ = (
22 'SOFTWARE_INFO', 'reserved_requests', 'active_requests',
23 'total_count', 'revoked', 'task_reserved', 'maybe_shutdown',
24 'task_accepted', 'task_ready', 'Persistent',
25 )
26
27 #: Worker software/platform information.
28 SOFTWARE_INFO = {
29 'sw_ident': 'py-celery',
30 'sw_ver': __version__,
31 'sw_sys': platform.system(),
32 }
33
34 #: maximum number of revokes to keep in memory.
35 REVOKES_MAX = 50000
36
37 #: maximum number of successful tasks to keep in memory.
38 SUCCESSFUL_MAX = 1000
39
40 #: how many seconds a revoke will be active before
41 #: being expired when the max limit has been exceeded.
42 REVOKE_EXPIRES = 10800
43
44 #: how many seconds a successful task will be cached in memory
45 #: before being expired when the max limit has been exceeded.
46 SUCCESSFUL_EXPIRES = 10800
47
48 #: Mapping of reserved task_id->Request.
49 requests = {}
50
51 #: set of all reserved :class:`~celery.worker.request.Request`'s.
52 reserved_requests = weakref.WeakSet()
53
54 #: set of currently active :class:`~celery.worker.request.Request`'s.
55 active_requests = weakref.WeakSet()
56
57 #: A limited set of successful :class:`~celery.worker.request.Request`'s.
58 successful_requests = LimitedSet(maxlen=SUCCESSFUL_MAX,
59 expires=SUCCESSFUL_EXPIRES)
60
61 #: count of tasks accepted by the worker, sorted by type.
62 total_count = Counter()
63
64 #: count of all tasks accepted by the worker
65 all_total_count = [0]
66
67 #: the list of currently revoked tasks. Persistent if ``statedb`` set.
68 revoked = LimitedSet(maxlen=REVOKES_MAX, expires=REVOKE_EXPIRES)
69
70 should_stop = None
71 should_terminate = None
72
73
74 def reset_state():
75 requests.clear()
76 reserved_requests.clear()
77 active_requests.clear()
78 successful_requests.clear()
79 total_count.clear()
80 all_total_count[:] = [0]
81 revoked.clear()
82
83
84 def maybe_shutdown():
85 """Shutdown if flags have been set."""
86 if should_terminate is not None and should_terminate is not False:
87 raise WorkerTerminate(should_terminate)
88 elif should_stop is not None and should_stop is not False:
89 raise WorkerShutdown(should_stop)
90
91
92 def task_reserved(request,
93 add_request=requests.__setitem__,
94 add_reserved_request=reserved_requests.add):
95 """Update global state when a task has been reserved."""
96 add_request(request.id, request)
97 add_reserved_request(request)
98
99
100 def task_accepted(request,
101 _all_total_count=None,
102 add_active_request=active_requests.add,
103 add_to_total_count=total_count.update):
104 """Update global state when a task has been accepted."""
105 if not _all_total_count:
106 _all_total_count = all_total_count
107 add_active_request(request)
108 add_to_total_count({request.name: 1})
109 all_total_count[0] += 1
110
111
112 def task_ready(request,
113 successful=False,
114 remove_request=requests.pop,
115 discard_active_request=active_requests.discard,
116 discard_reserved_request=reserved_requests.discard):
117 """Update global state when a task is ready."""
118 if successful:
119 successful_requests.add(request.id)
120
121 remove_request(request.id, None)
122 discard_active_request(request)
123 discard_reserved_request(request)
124
125
126 C_BENCH = os.environ.get('C_BENCH') or os.environ.get('CELERY_BENCH')
127 C_BENCH_EVERY = int(os.environ.get('C_BENCH_EVERY') or
128 os.environ.get('CELERY_BENCH_EVERY') or 1000)
129 if C_BENCH: # pragma: no cover
130 import atexit
131 from time import monotonic
132
133 from billiard.process import current_process
134
135 from celery.utils.debug import memdump, sample_mem
136
137 all_count = 0
138 bench_first = None
139 bench_start = None
140 bench_last = None
141 bench_every = C_BENCH_EVERY
142 bench_sample = []
143 __reserved = task_reserved
144 __ready = task_ready
145
146 if current_process()._name == 'MainProcess':
147 @atexit.register
148 def on_shutdown():
149 if bench_first is not None and bench_last is not None:
150 print('- Time spent in benchmark: {!r}'.format(
151 bench_last - bench_first))
152 print('- Avg: {}'.format(
153 sum(bench_sample) / len(bench_sample)))
154 memdump()
155
156 def task_reserved(request):
157 """Called when a task is reserved by the worker."""
158 global bench_start
159 global bench_first
160 now = None
161 if bench_start is None:
162 bench_start = now = monotonic()
163 if bench_first is None:
164 bench_first = now
165
166 return __reserved(request)
167
168 def task_ready(request):
169 """Called when a task is completed."""
170 global all_count
171 global bench_start
172 global bench_last
173 all_count += 1
174 if not all_count % bench_every:
175 now = monotonic()
176 diff = now - bench_start
177 print('- Time spent processing {} tasks (since first '
178 'task received): ~{:.4f}s\n'.format(bench_every, diff))
179 sys.stdout.flush()
180 bench_start = bench_last = now
181 bench_sample.append(diff)
182 sample_mem()
183 return __ready(request)
184
185
186 class Persistent:
187 """Stores worker state between restarts.
188
189 This is the persistent data stored by the worker when
190 :option:`celery worker --statedb` is enabled.
191
192 Currently only stores revoked task id's.
193 """
194
195 storage = shelve
196 protocol = pickle_protocol
197 compress = zlib.compress
198 decompress = zlib.decompress
199 _is_open = False
200
201 def __init__(self, state, filename, clock=None):
202 self.state = state
203 self.filename = filename
204 self.clock = clock
205 self.merge()
206
207 def open(self):
208 return self.storage.open(
209 self.filename, protocol=self.protocol, writeback=True,
210 )
211
212 def merge(self):
213 self._merge_with(self.db)
214
215 def sync(self):
216 self._sync_with(self.db)
217 self.db.sync()
218
219 def close(self):
220 if self._is_open:
221 self.db.close()
222 self._is_open = False
223
224 def save(self):
225 self.sync()
226 self.close()
227
228 def _merge_with(self, d):
229 self._merge_revoked(d)
230 self._merge_clock(d)
231 return d
232
233 def _sync_with(self, d):
234 self._revoked_tasks.purge()
235 d.update({
236 '__proto__': 3,
237 'zrevoked': self.compress(self._dumps(self._revoked_tasks)),
238 'clock': self.clock.forward() if self.clock else 0,
239 })
240 return d
241
242 def _merge_clock(self, d):
243 if self.clock:
244 d['clock'] = self.clock.adjust(d.get('clock') or 0)
245
246 def _merge_revoked(self, d):
247 try:
248 self._merge_revoked_v3(d['zrevoked'])
249 except KeyError:
250 try:
251 self._merge_revoked_v2(d.pop('revoked'))
252 except KeyError:
253 pass
254 # purge expired items at boot
255 self._revoked_tasks.purge()
256
257 def _merge_revoked_v3(self, zrevoked):
258 if zrevoked:
259 self._revoked_tasks.update(pickle.loads(self.decompress(zrevoked)))
260
261 def _merge_revoked_v2(self, saved):
262 if not isinstance(saved, LimitedSet):
263 # (pre 3.0.18) used to be stored as a dict
264 return self._merge_revoked_v1(saved)
265 self._revoked_tasks.update(saved)
266
267 def _merge_revoked_v1(self, saved):
268 add = self._revoked_tasks.add
269 for item in saved:
270 add(item)
271
272 def _dumps(self, obj):
273 return pickle.dumps(obj, protocol=self.protocol)
274
275 @property
276 def _revoked_tasks(self):
277 return self.state.revoked
278
279 @cached_property
280 def db(self):
281 self._is_open = True
282 return self.open()
283
[end of celery/worker/state.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/celery/worker/state.py b/celery/worker/state.py
--- a/celery/worker/state.py
+++ b/celery/worker/state.py
@@ -32,18 +32,18 @@
}
#: maximum number of revokes to keep in memory.
-REVOKES_MAX = 50000
+REVOKES_MAX = int(os.environ.get('CELERY_WORKER_REVOKES_MAX', 50000))
#: maximum number of successful tasks to keep in memory.
-SUCCESSFUL_MAX = 1000
+SUCCESSFUL_MAX = int(os.environ.get('CELERY_WORKER_SUCCESSFUL_MAX', 1000))
#: how many seconds a revoke will be active before
#: being expired when the max limit has been exceeded.
-REVOKE_EXPIRES = 10800
+REVOKE_EXPIRES = float(os.environ.get('CELERY_WORKER_REVOKE_EXPIRES', 10800))
#: how many seconds a successful task will be cached in memory
#: before being expired when the max limit has been exceeded.
-SUCCESSFUL_EXPIRES = 10800
+SUCCESSFUL_EXPIRES = float(os.environ.get('CELERY_WORKER_SUCCESSFUL_EXPIRES', 10800))
#: Mapping of reserved task_id->Request.
requests = {}
| {"golden_diff": "diff --git a/celery/worker/state.py b/celery/worker/state.py\n--- a/celery/worker/state.py\n+++ b/celery/worker/state.py\n@@ -32,18 +32,18 @@\n }\n \n #: maximum number of revokes to keep in memory.\n-REVOKES_MAX = 50000\n+REVOKES_MAX = int(os.environ.get('CELERY_WORKER_REVOKES_MAX', 50000))\n \n #: maximum number of successful tasks to keep in memory.\n-SUCCESSFUL_MAX = 1000\n+SUCCESSFUL_MAX = int(os.environ.get('CELERY_WORKER_SUCCESSFUL_MAX', 1000))\n \n #: how many seconds a revoke will be active before\n #: being expired when the max limit has been exceeded.\n-REVOKE_EXPIRES = 10800\n+REVOKE_EXPIRES = float(os.environ.get('CELERY_WORKER_REVOKE_EXPIRES', 10800))\n \n #: how many seconds a successful task will be cached in memory\n #: before being expired when the max limit has been exceeded.\n-SUCCESSFUL_EXPIRES = 10800\n+SUCCESSFUL_EXPIRES = float(os.environ.get('CELERY_WORKER_SUCCESSFUL_EXPIRES', 10800))\n \n #: Mapping of reserved task_id->Request.\n requests = {}\n", "issue": "make REVOKES_MAX and REVOKE_EXPIRES configurable\nValues of REVOKE_EXPIRES and REVOKES_MAX in worker/state.py are hardcoded.\n\nThis should be configurable. Some of us really needed to change this.\n\n\n", "before_files": [{"content": "\"\"\"Internal worker state (global).\n\nThis includes the currently active and reserved tasks,\nstatistics, and revoked tasks.\n\"\"\"\nimport os\nimport platform\nimport shelve\nimport sys\nimport weakref\nimport zlib\nfrom collections import Counter\n\nfrom kombu.serialization import pickle, pickle_protocol\nfrom kombu.utils.objects import cached_property\n\nfrom celery import __version__\nfrom celery.exceptions import WorkerShutdown, WorkerTerminate\nfrom celery.utils.collections import LimitedSet\n\n__all__ = (\n 'SOFTWARE_INFO', 'reserved_requests', 'active_requests',\n 'total_count', 'revoked', 'task_reserved', 'maybe_shutdown',\n 'task_accepted', 'task_ready', 'Persistent',\n)\n\n#: Worker software/platform information.\nSOFTWARE_INFO = {\n 'sw_ident': 'py-celery',\n 'sw_ver': __version__,\n 'sw_sys': platform.system(),\n}\n\n#: maximum number of revokes to keep in memory.\nREVOKES_MAX = 50000\n\n#: maximum number of successful tasks to keep in memory.\nSUCCESSFUL_MAX = 1000\n\n#: how many seconds a revoke will be active before\n#: being expired when the max limit has been exceeded.\nREVOKE_EXPIRES = 10800\n\n#: how many seconds a successful task will be cached in memory\n#: before being expired when the max limit has been exceeded.\nSUCCESSFUL_EXPIRES = 10800\n\n#: Mapping of reserved task_id->Request.\nrequests = {}\n\n#: set of all reserved :class:`~celery.worker.request.Request`'s.\nreserved_requests = weakref.WeakSet()\n\n#: set of currently active :class:`~celery.worker.request.Request`'s.\nactive_requests = weakref.WeakSet()\n\n#: A limited set of successful :class:`~celery.worker.request.Request`'s.\nsuccessful_requests = LimitedSet(maxlen=SUCCESSFUL_MAX,\n expires=SUCCESSFUL_EXPIRES)\n\n#: count of tasks accepted by the worker, sorted by type.\ntotal_count = Counter()\n\n#: count of all tasks accepted by the worker\nall_total_count = [0]\n\n#: the list of currently revoked tasks. 
Persistent if ``statedb`` set.\nrevoked = LimitedSet(maxlen=REVOKES_MAX, expires=REVOKE_EXPIRES)\n\nshould_stop = None\nshould_terminate = None\n\n\ndef reset_state():\n requests.clear()\n reserved_requests.clear()\n active_requests.clear()\n successful_requests.clear()\n total_count.clear()\n all_total_count[:] = [0]\n revoked.clear()\n\n\ndef maybe_shutdown():\n \"\"\"Shutdown if flags have been set.\"\"\"\n if should_terminate is not None and should_terminate is not False:\n raise WorkerTerminate(should_terminate)\n elif should_stop is not None and should_stop is not False:\n raise WorkerShutdown(should_stop)\n\n\ndef task_reserved(request,\n add_request=requests.__setitem__,\n add_reserved_request=reserved_requests.add):\n \"\"\"Update global state when a task has been reserved.\"\"\"\n add_request(request.id, request)\n add_reserved_request(request)\n\n\ndef task_accepted(request,\n _all_total_count=None,\n add_active_request=active_requests.add,\n add_to_total_count=total_count.update):\n \"\"\"Update global state when a task has been accepted.\"\"\"\n if not _all_total_count:\n _all_total_count = all_total_count\n add_active_request(request)\n add_to_total_count({request.name: 1})\n all_total_count[0] += 1\n\n\ndef task_ready(request,\n successful=False,\n remove_request=requests.pop,\n discard_active_request=active_requests.discard,\n discard_reserved_request=reserved_requests.discard):\n \"\"\"Update global state when a task is ready.\"\"\"\n if successful:\n successful_requests.add(request.id)\n\n remove_request(request.id, None)\n discard_active_request(request)\n discard_reserved_request(request)\n\n\nC_BENCH = os.environ.get('C_BENCH') or os.environ.get('CELERY_BENCH')\nC_BENCH_EVERY = int(os.environ.get('C_BENCH_EVERY') or\n os.environ.get('CELERY_BENCH_EVERY') or 1000)\nif C_BENCH: # pragma: no cover\n import atexit\n from time import monotonic\n\n from billiard.process import current_process\n\n from celery.utils.debug import memdump, sample_mem\n\n all_count = 0\n bench_first = None\n bench_start = None\n bench_last = None\n bench_every = C_BENCH_EVERY\n bench_sample = []\n __reserved = task_reserved\n __ready = task_ready\n\n if current_process()._name == 'MainProcess':\n @atexit.register\n def on_shutdown():\n if bench_first is not None and bench_last is not None:\n print('- Time spent in benchmark: {!r}'.format(\n bench_last - bench_first))\n print('- Avg: {}'.format(\n sum(bench_sample) / len(bench_sample)))\n memdump()\n\n def task_reserved(request):\n \"\"\"Called when a task is reserved by the worker.\"\"\"\n global bench_start\n global bench_first\n now = None\n if bench_start is None:\n bench_start = now = monotonic()\n if bench_first is None:\n bench_first = now\n\n return __reserved(request)\n\n def task_ready(request):\n \"\"\"Called when a task is completed.\"\"\"\n global all_count\n global bench_start\n global bench_last\n all_count += 1\n if not all_count % bench_every:\n now = monotonic()\n diff = now - bench_start\n print('- Time spent processing {} tasks (since first '\n 'task received): ~{:.4f}s\\n'.format(bench_every, diff))\n sys.stdout.flush()\n bench_start = bench_last = now\n bench_sample.append(diff)\n sample_mem()\n return __ready(request)\n\n\nclass Persistent:\n \"\"\"Stores worker state between restarts.\n\n This is the persistent data stored by the worker when\n :option:`celery worker --statedb` is enabled.\n\n Currently only stores revoked task id's.\n \"\"\"\n\n storage = shelve\n protocol = pickle_protocol\n compress = zlib.compress\n 
decompress = zlib.decompress\n _is_open = False\n\n def __init__(self, state, filename, clock=None):\n self.state = state\n self.filename = filename\n self.clock = clock\n self.merge()\n\n def open(self):\n return self.storage.open(\n self.filename, protocol=self.protocol, writeback=True,\n )\n\n def merge(self):\n self._merge_with(self.db)\n\n def sync(self):\n self._sync_with(self.db)\n self.db.sync()\n\n def close(self):\n if self._is_open:\n self.db.close()\n self._is_open = False\n\n def save(self):\n self.sync()\n self.close()\n\n def _merge_with(self, d):\n self._merge_revoked(d)\n self._merge_clock(d)\n return d\n\n def _sync_with(self, d):\n self._revoked_tasks.purge()\n d.update({\n '__proto__': 3,\n 'zrevoked': self.compress(self._dumps(self._revoked_tasks)),\n 'clock': self.clock.forward() if self.clock else 0,\n })\n return d\n\n def _merge_clock(self, d):\n if self.clock:\n d['clock'] = self.clock.adjust(d.get('clock') or 0)\n\n def _merge_revoked(self, d):\n try:\n self._merge_revoked_v3(d['zrevoked'])\n except KeyError:\n try:\n self._merge_revoked_v2(d.pop('revoked'))\n except KeyError:\n pass\n # purge expired items at boot\n self._revoked_tasks.purge()\n\n def _merge_revoked_v3(self, zrevoked):\n if zrevoked:\n self._revoked_tasks.update(pickle.loads(self.decompress(zrevoked)))\n\n def _merge_revoked_v2(self, saved):\n if not isinstance(saved, LimitedSet):\n # (pre 3.0.18) used to be stored as a dict\n return self._merge_revoked_v1(saved)\n self._revoked_tasks.update(saved)\n\n def _merge_revoked_v1(self, saved):\n add = self._revoked_tasks.add\n for item in saved:\n add(item)\n\n def _dumps(self, obj):\n return pickle.dumps(obj, protocol=self.protocol)\n\n @property\n def _revoked_tasks(self):\n return self.state.revoked\n\n @cached_property\n def db(self):\n self._is_open = True\n return self.open()\n", "path": "celery/worker/state.py"}]} | 3,208 | 296 |
gh_patches_debug_24551 | rasdani/github-patches | git_diff | opsdroid__opsdroid-41 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Generate default config
It should be possible to generate some basic config with a command line flag to opsdroid. It should cause opsdroid to print out the config so that it can be piped into a file.
e.g
```
opsdroid --gen-config > configuration.yaml
```
</issue>
<code>
[start of opsdroid/__main__.py]
1 """Starts opsdroid."""
2
3 import logging
4
5 from opsdroid.loader import Loader
6 from opsdroid.core import OpsDroid
7 from opsdroid.helper import set_logging_level
8 from opsdroid.const import LOG_FILENAME
9
10
11 def main():
12 """The main function."""
13 logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO)
14 logging.info("="*40)
15 logging.info("Stated application")
16 with OpsDroid() as opsdroid:
17 loader = Loader(opsdroid)
18 opsdroid.config = loader.load_config_file([
19 "./configuration.yaml",
20 "~/.opsdroid/configuration.yaml",
21 "/etc/opsdroid/configuration.yaml"
22 ])
23 if "logging" in opsdroid.config:
24 set_logging_level(opsdroid.config['logging'])
25 loader.load_config(opsdroid.config)
26 opsdroid.exit()
27
28 if __name__ == "__main__":
29 main()
30
[end of opsdroid/__main__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py
--- a/opsdroid/__main__.py
+++ b/opsdroid/__main__.py
@@ -1,6 +1,9 @@
"""Starts opsdroid."""
+import sys
+import os
import logging
+import argparse
from opsdroid.loader import Loader
from opsdroid.core import OpsDroid
@@ -8,11 +11,30 @@
from opsdroid.const import LOG_FILENAME
+def parse_args(args):
+ """Parse command line arguments."""
+ parser = argparse.ArgumentParser(description='Run opsdroid.')
+ parser.add_argument('--gen-config', action="store_true",
+ help='prints out an example configuration file')
+ return parser.parse_args(args)
+
+
def main():
"""The main function."""
logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO)
logging.info("="*40)
logging.info("Stated application")
+
+ args = parse_args(sys.argv[1:])
+
+ if args.gen_config:
+ path = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ "configuration/example_configuration.yaml")
+ with open(path, 'r') as conf:
+ print(conf.read())
+ sys.exit(0)
+
with OpsDroid() as opsdroid:
loader = Loader(opsdroid)
opsdroid.config = loader.load_config_file([
| {"golden_diff": "diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py\n--- a/opsdroid/__main__.py\n+++ b/opsdroid/__main__.py\n@@ -1,6 +1,9 @@\n \"\"\"Starts opsdroid.\"\"\"\n \n+import sys\n+import os\n import logging\n+import argparse\n \n from opsdroid.loader import Loader\n from opsdroid.core import OpsDroid\n@@ -8,11 +11,30 @@\n from opsdroid.const import LOG_FILENAME\n \n \n+def parse_args(args):\n+ \"\"\"Parse command line arguments.\"\"\"\n+ parser = argparse.ArgumentParser(description='Run opsdroid.')\n+ parser.add_argument('--gen-config', action=\"store_true\",\n+ help='prints out an example configuration file')\n+ return parser.parse_args(args)\n+\n+\n def main():\n \"\"\"The main function.\"\"\"\n logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO)\n logging.info(\"=\"*40)\n logging.info(\"Stated application\")\n+\n+ args = parse_args(sys.argv[1:])\n+\n+ if args.gen_config:\n+ path = os.path.join(\n+ os.path.dirname(os.path.abspath(__file__)),\n+ \"configuration/example_configuration.yaml\")\n+ with open(path, 'r') as conf:\n+ print(conf.read())\n+ sys.exit(0)\n+\n with OpsDroid() as opsdroid:\n loader = Loader(opsdroid)\n opsdroid.config = loader.load_config_file([\n", "issue": "Generate default config\nIt should be possible to generate some basic config with a command line flag to opsdroid. It should cause opsdroid to print out the config so that is can be piped into a file.\n\ne.g\n\n```\nopsdroid --gen-config > configuration.yaml\n```\n\n", "before_files": [{"content": "\"\"\"Starts opsdroid.\"\"\"\n\nimport logging\n\nfrom opsdroid.loader import Loader\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.helper import set_logging_level\nfrom opsdroid.const import LOG_FILENAME\n\n\ndef main():\n \"\"\"The main function.\"\"\"\n logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO)\n logging.info(\"=\"*40)\n logging.info(\"Stated application\")\n with OpsDroid() as opsdroid:\n loader = Loader(opsdroid)\n opsdroid.config = loader.load_config_file([\n \"./configuration.yaml\",\n \"~/.opsdroid/configuration.yaml\",\n \"/etc/opsdroid/configuration.yaml\"\n ])\n if \"logging\" in opsdroid.config:\n set_logging_level(opsdroid.config['logging'])\n loader.load_config(opsdroid.config)\n opsdroid.exit()\n\nif __name__ == \"__main__\":\n main()\n", "path": "opsdroid/__main__.py"}]} | 848 | 322 |
gh_patches_debug_6868 | rasdani/github-patches | git_diff | google__clusterfuzz-1785 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Local clusterfuzz instance >=v1.9.0 fails to install properly on Ubuntu 18.04
As requested by @inferno-chromium :)
## Faulty behavior
Local clusterfuzz instance >=v1.9.0 crashes when starting the server via `python butler.py run_server --bootstrap`
## Error
```
google.auth.exceptions.DefaultCredentialsError: Could not automatically determine credentials. Please set GOOGLE_APPLICATION_CREDENTIALS or explicitly create credentials and re-run the application. For more information, please see https://cloud.google.com/docs/authentication/getting-started
```
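
For context, the generic remedy this message refers to is pointing `GOOGLE_APPLICATION_CREDENTIALS` at a service-account key before starting the server. This is only the stock Google Cloud workaround, not a proper fix for a local instance, and the key path below is hypothetical:

```
# Hypothetical key path - a local instance should not actually need this:
export GOOGLE_APPLICATION_CREDENTIALS="$HOME/keys/clusterfuzz-sa.json"
python butler.py run_server --bootstrap
```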
## Related
#1328
## Reproduction
Following the *'Prerequisites'* steps [here](https://google.github.io/clusterfuzz/getting-started/prerequisites/) with:
* Host: stock Ubuntu 18.04
* Clusterfuzz: `git clone https://github.com/google/clusterfuzz && git checkout tags/v1.9.0`
* Python: 3.7 via `sudo add-apt-repository ppa:deadsnakes/ppa && sudo apt install -y python3.7`
* Golang: v1.14.2, installed as described in the [docs](https://golang.org/doc/install?download=go1.14.2.linux-amd64.tar.gz)
Next we run `local/install_deps.bash`. This runs into a couple of errors, as shown below:
### Missing dependencies:
* curl (because Ubuntu...)
* pipenv (install_deps.bash tries to install it via *apt* but there is no such package on 18.04)
* The workaround here is to fix the installer (`local/install_deps_linux.bash`) to use pip instead, e.g. replace the `apt install pipenv` call with `sudo -EH pip3 install -U pipenv` (see the sketch just below)
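
For reference, a minimal sketch of that workaround (the exact wording of the pipenv line in `local/install_deps_linux.bash` may differ between releases, so the commented-out `apt` line below is an assumption):

```
# In local/install_deps_linux.bash, swap the apt-based pipenv install for pip:
# sudo apt-get install -y pipenv      # original line (assumed) - no such package on Ubuntu 18.04
sudo -EH pip3 install -U pipenv       # install pipenv via pip instead
```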
### Failed package installs
* psutil from Pipfile.lock fails to build/install due to missing `#include <Python.h>`
* The fix here is to also install `libpython3.7` and `libpython3.7-dev` (see the sketch below)
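
A minimal sketch of that fix, assuming the deadsnakes Python 3.7 packages from the prerequisites step are already installed:

```
# Provide Python.h so that psutil (and other C extensions) can build:
sudo apt install -y libpython3.7 libpython3.7-dev
# Then re-run the dependency installer (hypothetical follow-up step):
local/install_deps.bash
```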
With these fixes applied the installer finishes, even though it still prints a bunch of errors about incompatible requirements:
```
| ERROR: grpcio-tools 1.17.0 has requirement grpcio>=1.17.0, but you'll have grpcio 1.15.0 which is incompatible.
| ERROR: astroid 2.3.3 has requirement wrapt==1.11.*, but you'll have wrapt 1.12.1 which is incompatible.
| ERROR: google-api-core 1.17.0 has requirement google-auth<2.0dev,>=1.14.0, but you'll have google-auth 1.8.1 which is incompatible.
| ERROR: google-cloud-datastore 1.7.0 has requirement google-cloud-core<0.29dev,>=0.28.0, but you'll have google-cloud-core 1.3.0 which is incompatible.
| ERROR: google-cloud-storage 1.13.2 has requirement google-cloud-core<0.30dev,>=0.29.0, but you'll have google-cloud-core 1.3.0 which is incompatible.
| ERROR: google-cloud-storage 1.13.2 has requirement google-cloud-core<0.30dev,>=0.29.0, but you'll have google-cloud-core 1.3.0 which is incompatible.
| ERROR: google-cloud-datastore 1.7.0 has requirement google-cloud-core<0.29dev,>=0.28.0, but you'll have google-cloud-core 1.3.0 which is incompatible.
| ERROR: google-api-core 1.17.0 has requirement google-auth<2.0dev,>=1.14.0, but you'll have google-auth 1.8.1 which is incompatible.
| ERROR: grpcio-tools 1.17.0 has requirement grpcio>=1.17.0, but you'll have grpcio 1.15.0 which is incompatible.
| ERROR: astroid 2.3.3 has requirement wrapt==1.11.*, but you'll have wrapt 1.12.1 which is incompatible.
```
### Server bootstrap
Once the dependency installer is done we can run:
```
pipenv shell
# python butler.py --help
python butler.py run_server --bootstrap
```
This crashes right at the end.
```
Running: python polymer_bundler.py (cwd='local')
| Building templates for App Engine...
| App Engine templates built successfully.
Created symlink: source: /home/toor/clusterfuzz/local/storage/local_gcs, target /home/toor/clusterfuzz/src/appengine/local_gcs.
Running: gunicorn -b :9000 main:app (cwd='src/appengine')
| [2020-05-14 21:56:30 +0200] [15318] [INFO] Starting gunicorn 20.0.4
| [2020-05-14 21:56:30 +0200] [15318] [INFO] Listening at: http://0.0.0.0:9000 (15318)
| [2020-05-14 21:56:30 +0200] [15318] [INFO] Using worker: sync
| [2020-05-14 21:56:30 +0200] [15321] [INFO] Booting worker with pid: 15321
| [2020-05-14 21:56:36 +0200] [15321] [ERROR] Exception in worker process
| Traceback (most recent call last):
| File "/home/toor/.local/share/virtualenvs/clusterfuzz-2lmaEd3m/lib/python3.7/site-packages/gunicorn/arbiter.py", line 583, in spawn_worker
| worker.init_process()
| File "/home/toor/.local/share/virtualenvs/clusterfuzz-2lmaEd3m/lib/python3.7/site-packages/gunicorn/workers/base.py", line 119, in init_process
| self.load_wsgi()
| File "/home/toor/.local/share/virtualenvs/clusterfuzz-2lmaEd3m/lib/python3.7/site-packages/gunicorn/workers/base.py", line 144, in load_wsgi
| self.wsgi = self.app.wsgi()
| File "/home/toor/.local/share/virtualenvs/clusterfuzz-2lmaEd3m/lib/python3.7/site-packages/gunicorn/app/base.py", line 67, in wsgi
| self.callable = self.load()
| File "/home/toor/.local/share/virtualenvs/clusterfuzz-2lmaEd3m/lib/python3.7/site-packages/gunicorn/app/wsgiapp.py", line 49, in load
| return self.load_wsgiapp()
| File "/home/toor/.local/share/virtualenvs/clusterfuzz-2lmaEd3m/lib/python3.7/site-packages/gunicorn/app/wsgiapp.py", line 39, in load_wsgiapp
| return util.import_app(self.app_uri)
| File "/home/toor/.local/share/virtualenvs/clusterfuzz-2lmaEd3m/lib/python3.7/site-packages/gunicorn/util.py", line 358, in import_app
| mod = importlib.import_module(module)
| File "/usr/lib/python3.7/importlib/__init__.py", line 127, in import_module
| return _bootstrap._gcd_import(name[level:], package, level)
| File "<frozen importlib._bootstrap>", line 1006, in _gcd_import
| File "<frozen importlib._bootstrap>", line 983, in _find_and_load
| File "<frozen importlib._bootstrap>", line 967, in _find_and_load_unlocked
| File "<frozen importlib._bootstrap>", line 677, in _load_unlocked
| File "<frozen importlib._bootstrap_external>", line 728, in exec_module
| File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
| File "/home/toor/clusterfuzz/src/appengine/main.py", line 32, in <module>
| firebase_admin.initialize_app()
| File "third_party/firebase_admin/__init__.py", line 65, in initialize_app
| credential = credentials.ApplicationDefault()
| File "third_party/firebase_admin/credentials.py", line 133, in __init__
| self._g_credential, self._project_id = google.auth.default(scopes=_scopes)
| File "/home/toor/clusterfuzz/src/third_party/google/auth/_default.py", line 321, in default
| raise exceptions.DefaultCredentialsError(_HELP_MESSAGE)
| google.auth.exceptions.DefaultCredentialsError: Could not automatically determine credentials. Please set GOOGLE_APPLICATION_CREDENTIALS or explicitly create credentials and re-run the application. For more information, please see https://cloud.google.com/docs/authentication/getting-started
| [2020-05-14 21:56:36 +0200] [15321] [INFO] Worker exiting (pid: 15321)
| [2020-05-14 21:56:36 +0200] [15318] [INFO] Shutting down: Master
| [2020-05-14 21:56:36 +0200] [15318] [INFO] Reason: Worker failed to boot.
| Return code is non-zero (3).
| Exit.
Bootstrapping datastore...
Running: python butler.py run setup --non-dry-run --local --config-dir=configs/test
| Creating config
| Creating fuzzer afl
| Creating fuzzer libFuzzer
| Creating fuzzer honggfuzz
| Creating fuzzer syzkaller
| Creating template afl
| Creating template engine_asan
| Creating template engine_msan
| Creating template engine_ubsan
| Creating template honggfuzz
| Creating template libfuzzer
| Creating template syzkaller
| Creating template prune
| Done
```
## Expected behavior
Local instance runs fine when following the steps in the docs as they explicitly state that there is no need for gauth.
## Scope
This is not host OS related but must have been introduced post `tag v1.8.0` as this one runs fine. However, tag v1.9.0+ shows the above behavior.
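
From the traceback, the crash comes from `src/appengine/main.py` calling `firebase_admin.initialize_app()` whenever `GAE_ENV` is set, even for a purely local run. A rough sketch of a guard (the `'dev'` value for the local server is an assumption on our side, not something we verified):

```python
# src/appengine/main.py -- sketch only, not a tested fix
gae_env = os.environ.get('GAE_ENV')
if gae_env:
    import pkg_resources
    importlib.reload(pkg_resources)

    if gae_env != 'dev':  # assumed: the local run_server marks the env as 'dev'
        import firebase_admin
        firebase_admin.initialize_app()
```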
</issue>
<code>
[start of src/appengine/main.py]
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Python 3 entrypoint."""
15 import importlib
16 import os
17 import sys
18
19 # Add necessary directories to path.
20 sys.path.append('python')
21 sys.path.append('third_party')
22
23 config_modules_path = os.path.join('config', 'modules')
24 if os.path.exists(config_modules_path):
25 sys.path.append(config_modules_path)
26
27 if os.environ.get('GAE_ENV'):
28 import pkg_resources
29 importlib.reload(pkg_resources)
30
31 import firebase_admin
32 firebase_admin.initialize_app()
33
34 try:
35 # Run any module initialization code.
36 import module_init
37 module_init.appengine()
38 except ImportError:
39 pass
40
41 import server
42 app = server.app
43
[end of src/appengine/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/appengine/main.py b/src/appengine/main.py
--- a/src/appengine/main.py
+++ b/src/appengine/main.py
@@ -24,12 +24,14 @@
if os.path.exists(config_modules_path):
sys.path.append(config_modules_path)
-if os.environ.get('GAE_ENV'):
+gae_env = os.environ.get('GAE_ENV')
+if gae_env:
import pkg_resources
importlib.reload(pkg_resources)
- import firebase_admin
- firebase_admin.initialize_app()
+ if gae_env != 'dev':
+ import firebase_admin
+ firebase_admin.initialize_app()
try:
# Run any module initialization code.
| {"golden_diff": "diff --git a/src/appengine/main.py b/src/appengine/main.py\n--- a/src/appengine/main.py\n+++ b/src/appengine/main.py\n@@ -24,12 +24,14 @@\n if os.path.exists(config_modules_path):\n sys.path.append(config_modules_path)\n \n-if os.environ.get('GAE_ENV'):\n+gae_env = os.environ.get('GAE_ENV')\n+if gae_env:\n import pkg_resources\n importlib.reload(pkg_resources)\n \n- import firebase_admin\n- firebase_admin.initialize_app()\n+ if gae_env != 'dev':\n+ import firebase_admin\n+ firebase_admin.initialize_app()\n \n try:\n # Run any module initialization code.\n", "issue": "Local clusterfuzz instance >=v1.9.0 fails to install properly on Ubuntu 18.04\nAs requested by @inferno-chromium :)\r\n\r\n## Faulty behavior\r\nLocal clusterfuzz instance >=v1.9.0 crashes when starting the server via `python butler.py run_server --bootstrap`\r\n\r\n## Error\r\n```\r\ngoogle.auth.exceptions.DefaultCredentialsError: Could not automatically determine credentials. Please set GOOGLE_APPLICATION_CREDENTIALS or explicitly create credentials and re-run the application. For more information, please see https://cloud.google.com/docs/authentication/getting-started\r\n```\r\n\r\n## Related\r\n\r\n#1328 \r\n\r\n## Reproduction\r\nFollowing the *'Prerequisites'* steps [here](https://google.github.io/clusterfuzz/getting-started/prerequisites/) with:\r\n\r\n * Host: stock Ubuntu 18.04\r\n * Clusterfuzz: `git clone https://github.com/google/clusterfuzz && git checkout tags/v1.9.0`\r\n * Python: 3.7 via `sudo add-apt-repository ppa:deadsnakes/ppa && sudo apt install -y python3.7`\r\n * Golang: v1.14.2, Install as described in [docs](https://golang.org/doc/install?download=go1.14.2.linux-amd64.tar.gz)\r\n\r\nNext we run `local/install_deps.bash`. This runs in a couple of errors as shown below:\r\n\r\n\r\n### Missing dependencies:\r\n\r\n* curl (because Ubuntu...)\r\n* pipenv (install_deps.bash tries to install it via *apt* but there is no such package on 18.04)\r\n * Workaround here is fix the installer (`local/install_deps_linux.bash`) using pip insead. E.g. 
replace the `apt install pipenv` with `sudo -EH pip3 install -U pipenv`\r\n\r\n### Failed package installs\r\n\r\n* psutil from Pipfile.lock fails to build/install due to missing `#include <Python.h>`\r\n * Fix here is to also install `libpython3.7` and `libpython3.7-dev`\r\n\r\nWhen doing these fixes the installer finishes even though it still prints a bunch of errors regarding incompatible requirements:\r\n\r\n```\r\n| ERROR: grpcio-tools 1.17.0 has requirement grpcio>=1.17.0, but you'll have grpcio 1.15.0 which is incompatible.\r\n| ERROR: astroid 2.3.3 has requirement wrapt==1.11.*, but you'll have wrapt 1.12.1 which is incompatible.\r\n| ERROR: google-api-core 1.17.0 has requirement google-auth<2.0dev,>=1.14.0, but you'll have google-auth 1.8.1 which is incompatible.\r\n| ERROR: google-cloud-datastore 1.7.0 has requirement google-cloud-core<0.29dev,>=0.28.0, but you'll have google-cloud-core 1.3.0 which is incompatible.\r\n| ERROR: google-cloud-storage 1.13.2 has requirement google-cloud-core<0.30dev,>=0.29.0, but you'll have google-cloud-core 1.3.0 which is incompatible.\r\n| ERROR: google-cloud-storage 1.13.2 has requirement google-cloud-core<0.30dev,>=0.29.0, but you'll have google-cloud-core 1.3.0 which is incompatible.\r\n| ERROR: google-cloud-datastore 1.7.0 has requirement google-cloud-core<0.29dev,>=0.28.0, but you'll have google-cloud-core 1.3.0 which is incompatible.\r\n| ERROR: google-api-core 1.17.0 has requirement google-auth<2.0dev,>=1.14.0, but you'll have google-auth 1.8.1 which is incompatible.\r\n| ERROR: grpcio-tools 1.17.0 has requirement grpcio>=1.17.0, but you'll have grpcio 1.15.0 which is incompatible.\r\n| ERROR: astroid 2.3.3 has requirement wrapt==1.11.*, but you'll have wrapt 1.12.1 which is incompatible.\r\n```\r\n\r\n### Server bootstrap\r\n\r\nOnce the dependency installer is done we can run:\r\n\r\n```\r\npipenv shell\r\n# python butler.py --help\r\npython butler.py run_server --bootstrap\r\n```\r\n\r\nThis crashes right at the end.\r\n\r\n```\r\nRunning: python polymer_bundler.py (cwd='local')\r\n| Building templates for App Engine...\r\n| App Engine templates built successfully.\r\nCreated symlink: source: /home/toor/clusterfuzz/local/storage/local_gcs, target /home/toor/clusterfuzz/src/appengine/local_gcs.\r\nRunning: gunicorn -b :9000 main:app (cwd='src/appengine')\r\n| [2020-05-14 21:56:30 +0200] [15318] [INFO] Starting gunicorn 20.0.4\r\n| [2020-05-14 21:56:30 +0200] [15318] [INFO] Listening at: http://0.0.0.0:9000 (15318)\r\n| [2020-05-14 21:56:30 +0200] [15318] [INFO] Using worker: sync\r\n| [2020-05-14 21:56:30 +0200] [15321] [INFO] Booting worker with pid: 15321\r\n| [2020-05-14 21:56:36 +0200] [15321] [ERROR] Exception in worker process\r\n| Traceback (most recent call last):\r\n| File \"/home/toor/.local/share/virtualenvs/clusterfuzz-2lmaEd3m/lib/python3.7/site-packages/gunicorn/arbiter.py\", line 583, in spawn_worker\r\n| worker.init_process()\r\n| File \"/home/toor/.local/share/virtualenvs/clusterfuzz-2lmaEd3m/lib/python3.7/site-packages/gunicorn/workers/base.py\", line 119, in init_process\r\n| self.load_wsgi()\r\n| File \"/home/toor/.local/share/virtualenvs/clusterfuzz-2lmaEd3m/lib/python3.7/site-packages/gunicorn/workers/base.py\", line 144, in load_wsgi\r\n| self.wsgi = self.app.wsgi()\r\n| File \"/home/toor/.local/share/virtualenvs/clusterfuzz-2lmaEd3m/lib/python3.7/site-packages/gunicorn/app/base.py\", line 67, in wsgi\r\n| self.callable = self.load()\r\n| File 
\"/home/toor/.local/share/virtualenvs/clusterfuzz-2lmaEd3m/lib/python3.7/site-packages/gunicorn/app/wsgiapp.py\", line 49, in load\r\n| return self.load_wsgiapp()\r\n| File \"/home/toor/.local/share/virtualenvs/clusterfuzz-2lmaEd3m/lib/python3.7/site-packages/gunicorn/app/wsgiapp.py\", line 39, in load_wsgiapp\r\n| return util.import_app(self.app_uri)\r\n| File \"/home/toor/.local/share/virtualenvs/clusterfuzz-2lmaEd3m/lib/python3.7/site-packages/gunicorn/util.py\", line 358, in import_app\r\n| mod = importlib.import_module(module)\r\n| File \"/usr/lib/python3.7/importlib/__init__.py\", line 127, in import_module\r\n| return _bootstrap._gcd_import(name[level:], package, level)\r\n| File \"<frozen importlib._bootstrap>\", line 1006, in _gcd_import\r\n| File \"<frozen importlib._bootstrap>\", line 983, in _find_and_load\r\n| File \"<frozen importlib._bootstrap>\", line 967, in _find_and_load_unlocked\r\n| File \"<frozen importlib._bootstrap>\", line 677, in _load_unlocked\r\n| File \"<frozen importlib._bootstrap_external>\", line 728, in exec_module\r\n| File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n| File \"/home/toor/clusterfuzz/src/appengine/main.py\", line 32, in <module>\r\n| firebase_admin.initialize_app()\r\n| File \"third_party/firebase_admin/__init__.py\", line 65, in initialize_app\r\n| credential = credentials.ApplicationDefault()\r\n| File \"third_party/firebase_admin/credentials.py\", line 133, in __init__\r\n| self._g_credential, self._project_id = google.auth.default(scopes=_scopes)\r\n| File \"/home/toor/clusterfuzz/src/third_party/google/auth/_default.py\", line 321, in default\r\n| raise exceptions.DefaultCredentialsError(_HELP_MESSAGE)\r\n| google.auth.exceptions.DefaultCredentialsError: Could not automatically determine credentials. Please set GOOGLE_APPLICATION_CREDENTIALS or explicitly create credentials and re-run the application. For more information, please see https://cloud.google.com/docs/authentication/getting-started\r\n| [2020-05-14 21:56:36 +0200] [15321] [INFO] Worker exiting (pid: 15321)\r\n| [2020-05-14 21:56:36 +0200] [15318] [INFO] Shutting down: Master\r\n| [2020-05-14 21:56:36 +0200] [15318] [INFO] Reason: Worker failed to boot.\r\n| Return code is non-zero (3).\r\n| Exit.\r\nBootstrapping datastore...\r\nRunning: python butler.py run setup --non-dry-run --local --config-dir=configs/test\r\n| Creating config\r\n| Creating fuzzer afl\r\n| Creating fuzzer libFuzzer\r\n| Creating fuzzer honggfuzz\r\n| Creating fuzzer syzkaller\r\n| Creating template afl\r\n| Creating template engine_asan\r\n| Creating template engine_msan\r\n| Creating template engine_ubsan\r\n| Creating template honggfuzz\r\n| Creating template libfuzzer\r\n| Creating template syzkaller\r\n| Creating template prune\r\n| Done\r\n\r\n```\r\n\r\n## Expected behavior\r\n\r\nLocal instance runs fine when following the steps in the docs as they explicitly state that there is no need for gauth.\r\n\r\n## Scope\r\nThis is not host OS related but must have been introduced post `tag v1.8.0` as this one runs fine. 
However, tag v1.9.0+ shows the above behavior.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Python 3 entrypoint.\"\"\"\nimport importlib\nimport os\nimport sys\n\n# Add necessary directories to path.\nsys.path.append('python')\nsys.path.append('third_party')\n\nconfig_modules_path = os.path.join('config', 'modules')\nif os.path.exists(config_modules_path):\n sys.path.append(config_modules_path)\n\nif os.environ.get('GAE_ENV'):\n import pkg_resources\n importlib.reload(pkg_resources)\n\n import firebase_admin\n firebase_admin.initialize_app()\n\ntry:\n # Run any module initialization code.\n import module_init\n module_init.appengine()\nexcept ImportError:\n pass\n\nimport server\napp = server.app\n", "path": "src/appengine/main.py"}]} | 3,314 | 150 |
gh_patches_debug_38073 | rasdani/github-patches | git_diff | pallets__werkzeug-1647 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SharedDataMiddleware fails with IsADirectory
Minimal example:
```python
from flask import Flask
from werkzeug.middleware.shared_data import SharedDataMiddleware
app = Flask(__name__)
app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {'/': ('flask', 'json')})
```
This only seems to happen when using `/` with the tuple syntax to reference a package. When using another mapping like `/test/` it works fine.
```
Traceback (most recent call last):
File "/home/adrian/dev/flask-react-example/.venv/lib/python3.7/site-packages/flask/app.py", line 2328, in __call__
return self.wsgi_app(environ, start_response)
File "/home/adrian/dev/flask-react-example/.venv/lib/python3.7/site-packages/werkzeug/middleware/shared_data.py", line 231, in __call__
f, mtime, file_size = file_loader()
File "/home/adrian/dev/flask-react-example/.venv/lib/python3.7/site-packages/werkzeug/middleware/shared_data.py", line 132, in <lambda>
open(filename, "rb"),
IsADirectoryError: [Errno 21] Is a directory: '/home/adrian/dev/flask-react-example/.venv/lib/python3.7/site-packages/flask/json/'
```
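
Until this is fixed, one untested workaround sketch is to mount the package data by filesystem path rather than the `(package, path)` tuple, so the directory loader (which skips non-files) is used instead:

```python
import os

import flask
from werkzeug.middleware.shared_data import SharedDataMiddleware

app = flask.Flask(__name__)
# Workaround sketch: resolve the package directory ourselves and pass a plain path.
json_dir = os.path.join(os.path.dirname(flask.__file__), 'json')
app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {'/': json_dir})
```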
</issue>
<code>
[start of src/werkzeug/middleware/shared_data.py]
1 """
2 Serve Shared Static Files
3 =========================
4
5 .. autoclass:: SharedDataMiddleware
6 :members: is_allowed
7
8 :copyright: 2007 Pallets
9 :license: BSD-3-Clause
10 """
11 import mimetypes
12 import os
13 import posixpath
14 from datetime import datetime
15 from io import BytesIO
16 from time import mktime
17 from time import time
18 from zlib import adler32
19
20 from .._compat import PY2
21 from .._compat import string_types
22 from ..filesystem import get_filesystem_encoding
23 from ..http import http_date
24 from ..http import is_resource_modified
25 from ..security import safe_join
26 from ..wsgi import get_path_info
27 from ..wsgi import wrap_file
28
29
30 class SharedDataMiddleware(object):
31
32 """A WSGI middleware that provides static content for development
33 environments or simple server setups. Usage is quite simple::
34
35 import os
36 from werkzeug.wsgi import SharedDataMiddleware
37
38 app = SharedDataMiddleware(app, {
39 '/static': os.path.join(os.path.dirname(__file__), 'static')
40 })
41
42 The contents of the folder ``./shared`` will now be available on
43 ``http://example.com/shared/``. This is pretty useful during development
44 because a standalone media server is not required. One can also mount
45 files on the root folder and still continue to use the application because
46 the shared data middleware forwards all unhandled requests to the
47 application, even if the requests are below one of the shared folders.
48
49 If `pkg_resources` is available you can also tell the middleware to serve
50 files from package data::
51
52 app = SharedDataMiddleware(app, {
53 '/static': ('myapplication', 'static')
54 })
55
56 This will then serve the ``static`` folder in the `myapplication`
57 Python package.
58
59 The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
60 rules for files that are not accessible from the web. If `cache` is set to
61 `False` no caching headers are sent.
62
63 Currently the middleware does not support non ASCII filenames. If the
64 encoding on the file system happens to be the encoding of the URI it may
65 work but this could also be by accident. We strongly suggest using ASCII
66 only file names for static files.
67
68 The middleware will guess the mimetype using the Python `mimetype`
69 module. If it's unable to figure out the charset it will fall back
70 to `fallback_mimetype`.
71
72 .. versionchanged:: 0.5
73 The cache timeout is configurable now.
74
75 .. versionadded:: 0.6
76 The `fallback_mimetype` parameter was added.
77
78 :param app: the application to wrap. If you don't want to wrap an
79 application you can pass it :exc:`NotFound`.
80 :param exports: a list or dict of exported files and folders.
81 :param disallow: a list of :func:`~fnmatch.fnmatch` rules.
82 :param fallback_mimetype: the fallback mimetype for unknown files.
83 :param cache: enable or disable caching headers.
84 :param cache_timeout: the cache timeout in seconds for the headers.
85 """
86
87 def __init__(
88 self,
89 app,
90 exports,
91 disallow=None,
92 cache=True,
93 cache_timeout=60 * 60 * 12,
94 fallback_mimetype="text/plain",
95 ):
96 self.app = app
97 self.exports = []
98 self.cache = cache
99 self.cache_timeout = cache_timeout
100
101 if hasattr(exports, "items"):
102 exports = exports.items()
103
104 for key, value in exports:
105 if isinstance(value, tuple):
106 loader = self.get_package_loader(*value)
107 elif isinstance(value, string_types):
108 if os.path.isfile(value):
109 loader = self.get_file_loader(value)
110 else:
111 loader = self.get_directory_loader(value)
112 else:
113 raise TypeError("unknown def %r" % value)
114
115 self.exports.append((key, loader))
116
117 if disallow is not None:
118 from fnmatch import fnmatch
119
120 self.is_allowed = lambda x: not fnmatch(x, disallow)
121
122 self.fallback_mimetype = fallback_mimetype
123
124 def is_allowed(self, filename):
125 """Subclasses can override this method to disallow the access to
126 certain files. However by providing `disallow` in the constructor
127 this method is overwritten.
128 """
129 return True
130
131 def _opener(self, filename):
132 return lambda: (
133 open(filename, "rb"),
134 datetime.utcfromtimestamp(os.path.getmtime(filename)),
135 int(os.path.getsize(filename)),
136 )
137
138 def get_file_loader(self, filename):
139 return lambda x: (os.path.basename(filename), self._opener(filename))
140
141 def get_package_loader(self, package, package_path):
142 from pkg_resources import DefaultProvider, ResourceManager, get_provider
143
144 loadtime = datetime.utcnow()
145 provider = get_provider(package)
146 manager = ResourceManager()
147 filesystem_bound = isinstance(provider, DefaultProvider)
148
149 def loader(path):
150 if path is None:
151 return None, None
152
153 path = safe_join(package_path, path)
154
155 if not provider.has_resource(path):
156 return None, None
157
158 basename = posixpath.basename(path)
159
160 if filesystem_bound:
161 return (
162 basename,
163 self._opener(provider.get_resource_filename(manager, path)),
164 )
165
166 s = provider.get_resource_string(manager, path)
167 return basename, lambda: (BytesIO(s), loadtime, len(s))
168
169 return loader
170
171 def get_directory_loader(self, directory):
172 def loader(path):
173 if path is not None:
174 path = safe_join(directory, path)
175 else:
176 path = directory
177
178 if os.path.isfile(path):
179 return os.path.basename(path), self._opener(path)
180
181 return None, None
182
183 return loader
184
185 def generate_etag(self, mtime, file_size, real_filename):
186 if not isinstance(real_filename, bytes):
187 real_filename = real_filename.encode(get_filesystem_encoding())
188
189 return "wzsdm-%d-%s-%s" % (
190 mktime(mtime.timetuple()),
191 file_size,
192 adler32(real_filename) & 0xFFFFFFFF,
193 )
194
195 def __call__(self, environ, start_response):
196 path = get_path_info(environ)
197
198 if PY2:
199 path = path.encode(get_filesystem_encoding())
200
201 file_loader = None
202
203 for search_path, loader in self.exports:
204 if search_path == path:
205 real_filename, file_loader = loader(None)
206
207 if file_loader is not None:
208 break
209
210 if not search_path.endswith("/"):
211 search_path += "/"
212
213 if path.startswith(search_path):
214 real_filename, file_loader = loader(path[len(search_path) :])
215
216 if file_loader is not None:
217 break
218
219 if file_loader is None or not self.is_allowed(real_filename):
220 return self.app(environ, start_response)
221
222 guessed_type = mimetypes.guess_type(real_filename)
223 mime_type = guessed_type[0] or self.fallback_mimetype
224 f, mtime, file_size = file_loader()
225
226 headers = [("Date", http_date())]
227
228 if self.cache:
229 timeout = self.cache_timeout
230 etag = self.generate_etag(mtime, file_size, real_filename)
231 headers += [
232 ("Etag", '"%s"' % etag),
233 ("Cache-Control", "max-age=%d, public" % timeout),
234 ]
235
236 if not is_resource_modified(environ, etag, last_modified=mtime):
237 f.close()
238 start_response("304 Not Modified", headers)
239 return []
240
241 headers.append(("Expires", http_date(time() + timeout)))
242 else:
243 headers.append(("Cache-Control", "public"))
244
245 headers.extend(
246 (
247 ("Content-Type", mime_type),
248 ("Content-Length", str(file_size)),
249 ("Last-Modified", http_date(mtime)),
250 )
251 )
252 start_response("200 OK", headers)
253 return wrap_file(environ, f)
254
[end of src/werkzeug/middleware/shared_data.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/werkzeug/middleware/shared_data.py b/src/werkzeug/middleware/shared_data.py
--- a/src/werkzeug/middleware/shared_data.py
+++ b/src/werkzeug/middleware/shared_data.py
@@ -10,6 +10,7 @@
"""
import mimetypes
import os
+import pkgutil
import posixpath
from datetime import datetime
from io import BytesIO
@@ -139,32 +140,65 @@
return lambda x: (os.path.basename(filename), self._opener(filename))
def get_package_loader(self, package, package_path):
- from pkg_resources import DefaultProvider, ResourceManager, get_provider
-
loadtime = datetime.utcnow()
- provider = get_provider(package)
- manager = ResourceManager()
- filesystem_bound = isinstance(provider, DefaultProvider)
+ provider = pkgutil.get_loader(package)
- def loader(path):
- if path is None:
- return None, None
+ if hasattr(provider, "get_resource_reader"):
+ # Python 3
+ reader = provider.get_resource_reader(package)
+
+ def loader(path):
+ if path is None:
+ return None, None
- path = safe_join(package_path, path)
+ path = safe_join(package_path, path)
+ basename = posixpath.basename(path)
- if not provider.has_resource(path):
- return None, None
+ try:
+ resource = reader.open_resource(path)
+ except IOError:
+ return None, None
- basename = posixpath.basename(path)
+ if isinstance(resource, BytesIO):
+ return (
+ basename,
+ lambda: (resource, loadtime, len(resource.getvalue())),
+ )
- if filesystem_bound:
return (
basename,
- self._opener(provider.get_resource_filename(manager, path)),
+ lambda: (
+ resource,
+ datetime.utcfromtimestamp(os.path.getmtime(resource.name)),
+ os.path.getsize(resource.name),
+ ),
)
- s = provider.get_resource_string(manager, path)
- return basename, lambda: (BytesIO(s), loadtime, len(s))
+ else:
+ # Python 2
+ package_filename = provider.get_filename(package)
+ is_filesystem = os.path.exists(package_filename)
+ root = os.path.join(os.path.dirname(package_filename), package_path)
+
+ def loader(path):
+ if path is None:
+ return None, None
+
+ path = safe_join(root, path)
+ basename = posixpath.basename(path)
+
+ if is_filesystem:
+ if not os.path.isfile(path):
+ return None, None
+
+ return basename, self._opener(path)
+
+ try:
+ data = provider.get_data(path)
+ except IOError:
+ return None, None
+
+ return basename, lambda: (BytesIO(data), loadtime, len(data))
return loader
| {"golden_diff": "diff --git a/src/werkzeug/middleware/shared_data.py b/src/werkzeug/middleware/shared_data.py\n--- a/src/werkzeug/middleware/shared_data.py\n+++ b/src/werkzeug/middleware/shared_data.py\n@@ -10,6 +10,7 @@\n \"\"\"\n import mimetypes\n import os\n+import pkgutil\n import posixpath\n from datetime import datetime\n from io import BytesIO\n@@ -139,32 +140,65 @@\n return lambda x: (os.path.basename(filename), self._opener(filename))\n \n def get_package_loader(self, package, package_path):\n- from pkg_resources import DefaultProvider, ResourceManager, get_provider\n-\n loadtime = datetime.utcnow()\n- provider = get_provider(package)\n- manager = ResourceManager()\n- filesystem_bound = isinstance(provider, DefaultProvider)\n+ provider = pkgutil.get_loader(package)\n \n- def loader(path):\n- if path is None:\n- return None, None\n+ if hasattr(provider, \"get_resource_reader\"):\n+ # Python 3\n+ reader = provider.get_resource_reader(package)\n+\n+ def loader(path):\n+ if path is None:\n+ return None, None\n \n- path = safe_join(package_path, path)\n+ path = safe_join(package_path, path)\n+ basename = posixpath.basename(path)\n \n- if not provider.has_resource(path):\n- return None, None\n+ try:\n+ resource = reader.open_resource(path)\n+ except IOError:\n+ return None, None\n \n- basename = posixpath.basename(path)\n+ if isinstance(resource, BytesIO):\n+ return (\n+ basename,\n+ lambda: (resource, loadtime, len(resource.getvalue())),\n+ )\n \n- if filesystem_bound:\n return (\n basename,\n- self._opener(provider.get_resource_filename(manager, path)),\n+ lambda: (\n+ resource,\n+ datetime.utcfromtimestamp(os.path.getmtime(resource.name)),\n+ os.path.getsize(resource.name),\n+ ),\n )\n \n- s = provider.get_resource_string(manager, path)\n- return basename, lambda: (BytesIO(s), loadtime, len(s))\n+ else:\n+ # Python 2\n+ package_filename = provider.get_filename(package)\n+ is_filesystem = os.path.exists(package_filename)\n+ root = os.path.join(os.path.dirname(package_filename), package_path)\n+\n+ def loader(path):\n+ if path is None:\n+ return None, None\n+\n+ path = safe_join(root, path)\n+ basename = posixpath.basename(path)\n+\n+ if is_filesystem:\n+ if not os.path.isfile(path):\n+ return None, None\n+\n+ return basename, self._opener(path)\n+\n+ try:\n+ data = provider.get_data(path)\n+ except IOError:\n+ return None, None\n+\n+ return basename, lambda: (BytesIO(data), loadtime, len(data))\n \n return loader\n", "issue": "SharedDataMiddleware fails with IsADirectory\nMinimal example:\r\n\r\n```python\r\nfrom flask import Flask\r\nfrom werkzeug.middleware.shared_data import SharedDataMiddleware\r\n\r\napp = Flask(__name__)\r\napp.wsgi_app = SharedDataMiddleware(app.wsgi_app, {'/': ('flask', 'json')})\r\n```\r\n\r\nThis only seems to happen when using `/` with the tuple syntax to reference a package. 
When using another mapping like `/test/` it works fine.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/adrian/dev/flask-react-example/.venv/lib/python3.7/site-packages/flask/app.py\", line 2328, in __call__\r\n return self.wsgi_app(environ, start_response)\r\n File \"/home/adrian/dev/flask-react-example/.venv/lib/python3.7/site-packages/werkzeug/middleware/shared_data.py\", line 231, in __call__\r\n f, mtime, file_size = file_loader()\r\n File \"/home/adrian/dev/flask-react-example/.venv/lib/python3.7/site-packages/werkzeug/middleware/shared_data.py\", line 132, in <lambda>\r\n open(filename, \"rb\"),\r\nIsADirectoryError: [Errno 21] Is a directory: '/home/adrian/dev/flask-react-example/.venv/lib/python3.7/site-packages/flask/json/'\r\n```\n", "before_files": [{"content": "\"\"\"\nServe Shared Static Files\n=========================\n\n.. autoclass:: SharedDataMiddleware\n :members: is_allowed\n\n:copyright: 2007 Pallets\n:license: BSD-3-Clause\n\"\"\"\nimport mimetypes\nimport os\nimport posixpath\nfrom datetime import datetime\nfrom io import BytesIO\nfrom time import mktime\nfrom time import time\nfrom zlib import adler32\n\nfrom .._compat import PY2\nfrom .._compat import string_types\nfrom ..filesystem import get_filesystem_encoding\nfrom ..http import http_date\nfrom ..http import is_resource_modified\nfrom ..security import safe_join\nfrom ..wsgi import get_path_info\nfrom ..wsgi import wrap_file\n\n\nclass SharedDataMiddleware(object):\n\n \"\"\"A WSGI middleware that provides static content for development\n environments or simple server setups. Usage is quite simple::\n\n import os\n from werkzeug.wsgi import SharedDataMiddleware\n\n app = SharedDataMiddleware(app, {\n '/static': os.path.join(os.path.dirname(__file__), 'static')\n })\n\n The contents of the folder ``./shared`` will now be available on\n ``http://example.com/shared/``. This is pretty useful during development\n because a standalone media server is not required. One can also mount\n files on the root folder and still continue to use the application because\n the shared data middleware forwards all unhandled requests to the\n application, even if the requests are below one of the shared folders.\n\n If `pkg_resources` is available you can also tell the middleware to serve\n files from package data::\n\n app = SharedDataMiddleware(app, {\n '/static': ('myapplication', 'static')\n })\n\n This will then serve the ``static`` folder in the `myapplication`\n Python package.\n\n The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`\n rules for files that are not accessible from the web. If `cache` is set to\n `False` no caching headers are sent.\n\n Currently the middleware does not support non ASCII filenames. If the\n encoding on the file system happens to be the encoding of the URI it may\n work but this could also be by accident. We strongly suggest using ASCII\n only file names for static files.\n\n The middleware will guess the mimetype using the Python `mimetype`\n module. If it's unable to figure out the charset it will fall back\n to `fallback_mimetype`.\n\n .. versionchanged:: 0.5\n The cache timeout is configurable now.\n\n .. versionadded:: 0.6\n The `fallback_mimetype` parameter was added.\n\n :param app: the application to wrap. 
If you don't want to wrap an\n application you can pass it :exc:`NotFound`.\n :param exports: a list or dict of exported files and folders.\n :param disallow: a list of :func:`~fnmatch.fnmatch` rules.\n :param fallback_mimetype: the fallback mimetype for unknown files.\n :param cache: enable or disable caching headers.\n :param cache_timeout: the cache timeout in seconds for the headers.\n \"\"\"\n\n def __init__(\n self,\n app,\n exports,\n disallow=None,\n cache=True,\n cache_timeout=60 * 60 * 12,\n fallback_mimetype=\"text/plain\",\n ):\n self.app = app\n self.exports = []\n self.cache = cache\n self.cache_timeout = cache_timeout\n\n if hasattr(exports, \"items\"):\n exports = exports.items()\n\n for key, value in exports:\n if isinstance(value, tuple):\n loader = self.get_package_loader(*value)\n elif isinstance(value, string_types):\n if os.path.isfile(value):\n loader = self.get_file_loader(value)\n else:\n loader = self.get_directory_loader(value)\n else:\n raise TypeError(\"unknown def %r\" % value)\n\n self.exports.append((key, loader))\n\n if disallow is not None:\n from fnmatch import fnmatch\n\n self.is_allowed = lambda x: not fnmatch(x, disallow)\n\n self.fallback_mimetype = fallback_mimetype\n\n def is_allowed(self, filename):\n \"\"\"Subclasses can override this method to disallow the access to\n certain files. However by providing `disallow` in the constructor\n this method is overwritten.\n \"\"\"\n return True\n\n def _opener(self, filename):\n return lambda: (\n open(filename, \"rb\"),\n datetime.utcfromtimestamp(os.path.getmtime(filename)),\n int(os.path.getsize(filename)),\n )\n\n def get_file_loader(self, filename):\n return lambda x: (os.path.basename(filename), self._opener(filename))\n\n def get_package_loader(self, package, package_path):\n from pkg_resources import DefaultProvider, ResourceManager, get_provider\n\n loadtime = datetime.utcnow()\n provider = get_provider(package)\n manager = ResourceManager()\n filesystem_bound = isinstance(provider, DefaultProvider)\n\n def loader(path):\n if path is None:\n return None, None\n\n path = safe_join(package_path, path)\n\n if not provider.has_resource(path):\n return None, None\n\n basename = posixpath.basename(path)\n\n if filesystem_bound:\n return (\n basename,\n self._opener(provider.get_resource_filename(manager, path)),\n )\n\n s = provider.get_resource_string(manager, path)\n return basename, lambda: (BytesIO(s), loadtime, len(s))\n\n return loader\n\n def get_directory_loader(self, directory):\n def loader(path):\n if path is not None:\n path = safe_join(directory, path)\n else:\n path = directory\n\n if os.path.isfile(path):\n return os.path.basename(path), self._opener(path)\n\n return None, None\n\n return loader\n\n def generate_etag(self, mtime, file_size, real_filename):\n if not isinstance(real_filename, bytes):\n real_filename = real_filename.encode(get_filesystem_encoding())\n\n return \"wzsdm-%d-%s-%s\" % (\n mktime(mtime.timetuple()),\n file_size,\n adler32(real_filename) & 0xFFFFFFFF,\n )\n\n def __call__(self, environ, start_response):\n path = get_path_info(environ)\n\n if PY2:\n path = path.encode(get_filesystem_encoding())\n\n file_loader = None\n\n for search_path, loader in self.exports:\n if search_path == path:\n real_filename, file_loader = loader(None)\n\n if file_loader is not None:\n break\n\n if not search_path.endswith(\"/\"):\n search_path += \"/\"\n\n if path.startswith(search_path):\n real_filename, file_loader = loader(path[len(search_path) :])\n\n if file_loader is not None:\n 
break\n\n if file_loader is None or not self.is_allowed(real_filename):\n return self.app(environ, start_response)\n\n guessed_type = mimetypes.guess_type(real_filename)\n mime_type = guessed_type[0] or self.fallback_mimetype\n f, mtime, file_size = file_loader()\n\n headers = [(\"Date\", http_date())]\n\n if self.cache:\n timeout = self.cache_timeout\n etag = self.generate_etag(mtime, file_size, real_filename)\n headers += [\n (\"Etag\", '\"%s\"' % etag),\n (\"Cache-Control\", \"max-age=%d, public\" % timeout),\n ]\n\n if not is_resource_modified(environ, etag, last_modified=mtime):\n f.close()\n start_response(\"304 Not Modified\", headers)\n return []\n\n headers.append((\"Expires\", http_date(time() + timeout)))\n else:\n headers.append((\"Cache-Control\", \"public\"))\n\n headers.extend(\n (\n (\"Content-Type\", mime_type),\n (\"Content-Length\", str(file_size)),\n (\"Last-Modified\", http_date(mtime)),\n )\n )\n start_response(\"200 OK\", headers)\n return wrap_file(environ, f)\n", "path": "src/werkzeug/middleware/shared_data.py"}]} | 3,251 | 647 |
gh_patches_debug_35552 | rasdani/github-patches | git_diff | pfnet__pytorch-pfn-extras-385 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Profiler: Automatically fill `tag` in `record`?
Maybe we can use the caller's function name (`inspect.stack()`) if tag is not given.
TODO: Need to measure overhead.
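
One possible sketch of that fallback (the helper name and frame depth are illustrative, not a settled API; walking `currentframe()`/`f_back` with `getframeinfo` should be cheaper than a full `inspect.stack()` call):

```python
import inspect
import types
from typing import Optional


def _infer_tag_name(frame: Optional[types.FrameType], depth: int) -> str:
    # Walk up `depth` frames from record() to reach the user's call site.
    for _ in range(depth):
        assert frame is not None
        frame = frame.f_back
    assert frame is not None
    info = inspect.getframeinfo(frame, context=0)
    return f"{inspect.getmodulename(info.filename)}:{info.lineno}:{info.function}"

# In record(): tag = tag if tag is not None else _infer_tag_name(inspect.currentframe(), depth=2)
```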
</issue>
<code>
[start of pytorch_pfn_extras/profiler/_record.py]
1 from contextlib import contextmanager
2 from typing import Any, Callable, Generator, Iterable, Optional, TypeVar
3
4 import torch
5
6 from pytorch_pfn_extras.profiler._time_summary import time_summary, _ReportNotification
7
8
9 @contextmanager
10 def record(
11 tag: str,
12 metric: Optional[str] = None,
13 use_cuda: bool = False,
14 ) -> Generator[_ReportNotification, None, None]:
15 if metric is None:
16 metric = tag
17
18 if use_cuda:
19 torch.cuda.nvtx.range_push(tag) # type: ignore[no-untyped-call]
20 try:
21 with torch.autograd.profiler.record_function(tag):
22 with time_summary.report(metric, use_cuda) as ntf:
23 yield ntf
24 finally:
25 if use_cuda:
26 torch.cuda.nvtx.range_pop() # type: ignore[no-untyped-call]
27
28
29 _T = TypeVar('_T')
30
31
32 def record_function(
33 tag: str,
34 use_cuda: bool = False,
35 ) -> Callable[[Callable[..., _T]], Callable[..., _T]]:
36 def wrapper(f: Callable[..., _T]) -> Callable[..., _T]:
37 def wrapped(*args: Any, **kwargs: Any) -> _T:
38 with record(tag, use_cuda=use_cuda):
39 return f(*args, **kwargs)
40
41 return wrapped
42
43 return wrapper
44
45
46 def record_iterable(
47 tag: str,
48 iter: Iterable[_T],
49 divide_metric: bool = False,
50 use_cuda: bool = False,
51 ) -> Iterable[_T]:
52 def wrapped() -> Iterable[_T]:
53 for i, x in enumerate(iter):
54 name = f"{tag}-{i}"
55 metric = name if divide_metric else tag
56 with record(name, metric, use_cuda=use_cuda):
57 yield x
58
59 return wrapped()
60
[end of pytorch_pfn_extras/profiler/_record.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytorch_pfn_extras/profiler/_record.py b/pytorch_pfn_extras/profiler/_record.py
--- a/pytorch_pfn_extras/profiler/_record.py
+++ b/pytorch_pfn_extras/profiler/_record.py
@@ -1,17 +1,35 @@
from contextlib import contextmanager
+import inspect
from typing import Any, Callable, Generator, Iterable, Optional, TypeVar
+import types
import torch
from pytorch_pfn_extras.profiler._time_summary import time_summary, _ReportNotification
+def _infer_tag_name(frame: Optional[types.FrameType], depth: int) -> str:
+ for _ in range(depth):
+ assert frame is not None
+ frame = frame.f_back
+ assert frame is not None
+ frame_info = inspect.getframeinfo(frame, context=0)
+ return '{}:{}:{}'.format(
+ inspect.getmodulename(frame_info.filename),
+ frame_info.lineno,
+ frame_info.function,
+ )
+
+
@contextmanager
def record(
- tag: str,
+ tag: Optional[str],
metric: Optional[str] = None,
use_cuda: bool = False,
) -> Generator[_ReportNotification, None, None]:
+ if tag is None:
+ tag = _infer_tag_name(inspect.currentframe(), depth=2)
+
if metric is None:
metric = tag
@@ -30,12 +48,12 @@
def record_function(
- tag: str,
+ tag: Optional[str],
use_cuda: bool = False,
) -> Callable[[Callable[..., _T]], Callable[..., _T]]:
def wrapper(f: Callable[..., _T]) -> Callable[..., _T]:
def wrapped(*args: Any, **kwargs: Any) -> _T:
- with record(tag, use_cuda=use_cuda):
+ with record(tag or f.__name__, use_cuda=use_cuda):
return f(*args, **kwargs)
return wrapped
@@ -44,11 +62,14 @@
def record_iterable(
- tag: str,
- iter: Iterable[_T],
- divide_metric: bool = False,
- use_cuda: bool = False,
+ tag: Optional[str],
+ iter: Iterable[_T],
+ divide_metric: bool = False,
+ use_cuda: bool = False,
) -> Iterable[_T]:
+ if tag is None:
+ tag = _infer_tag_name(inspect.currentframe(), depth=1)
+
def wrapped() -> Iterable[_T]:
for i, x in enumerate(iter):
name = f"{tag}-{i}"
| {"golden_diff": "diff --git a/pytorch_pfn_extras/profiler/_record.py b/pytorch_pfn_extras/profiler/_record.py\n--- a/pytorch_pfn_extras/profiler/_record.py\n+++ b/pytorch_pfn_extras/profiler/_record.py\n@@ -1,17 +1,35 @@\n from contextlib import contextmanager\n+import inspect\n from typing import Any, Callable, Generator, Iterable, Optional, TypeVar\n+import types\n \n import torch\n \n from pytorch_pfn_extras.profiler._time_summary import time_summary, _ReportNotification\n \n \n+def _infer_tag_name(frame: Optional[types.FrameType], depth: int) -> str:\n+ for _ in range(depth):\n+ assert frame is not None\n+ frame = frame.f_back\n+ assert frame is not None\n+ frame_info = inspect.getframeinfo(frame, context=0)\n+ return '{}:{}:{}'.format(\n+ inspect.getmodulename(frame_info.filename),\n+ frame_info.lineno,\n+ frame_info.function,\n+ )\n+\n+\n @contextmanager\n def record(\n- tag: str,\n+ tag: Optional[str],\n metric: Optional[str] = None,\n use_cuda: bool = False,\n ) -> Generator[_ReportNotification, None, None]:\n+ if tag is None:\n+ tag = _infer_tag_name(inspect.currentframe(), depth=2)\n+\n if metric is None:\n metric = tag\n \n@@ -30,12 +48,12 @@\n \n \n def record_function(\n- tag: str,\n+ tag: Optional[str],\n use_cuda: bool = False,\n ) -> Callable[[Callable[..., _T]], Callable[..., _T]]:\n def wrapper(f: Callable[..., _T]) -> Callable[..., _T]:\n def wrapped(*args: Any, **kwargs: Any) -> _T:\n- with record(tag, use_cuda=use_cuda):\n+ with record(tag or f.__name__, use_cuda=use_cuda):\n return f(*args, **kwargs)\n \n return wrapped\n@@ -44,11 +62,14 @@\n \n \n def record_iterable(\n- tag: str,\n- iter: Iterable[_T],\n- divide_metric: bool = False,\n- use_cuda: bool = False,\n+ tag: Optional[str],\n+ iter: Iterable[_T],\n+ divide_metric: bool = False,\n+ use_cuda: bool = False,\n ) -> Iterable[_T]:\n+ if tag is None:\n+ tag = _infer_tag_name(inspect.currentframe(), depth=1)\n+\n def wrapped() -> Iterable[_T]:\n for i, x in enumerate(iter):\n name = f\"{tag}-{i}\"\n", "issue": "Profiler: Automatically fill `tag` in `record`?\nMaybe we can use the caller's function name (`inspect.stack()`) if tag is not given.\r\n\r\nTODO: Need to measure overhead.\n", "before_files": [{"content": "from contextlib import contextmanager\nfrom typing import Any, Callable, Generator, Iterable, Optional, TypeVar\n\nimport torch\n\nfrom pytorch_pfn_extras.profiler._time_summary import time_summary, _ReportNotification\n\n\n@contextmanager\ndef record(\n tag: str,\n metric: Optional[str] = None,\n use_cuda: bool = False,\n) -> Generator[_ReportNotification, None, None]:\n if metric is None:\n metric = tag\n\n if use_cuda:\n torch.cuda.nvtx.range_push(tag) # type: ignore[no-untyped-call]\n try:\n with torch.autograd.profiler.record_function(tag):\n with time_summary.report(metric, use_cuda) as ntf:\n yield ntf\n finally:\n if use_cuda:\n torch.cuda.nvtx.range_pop() # type: ignore[no-untyped-call]\n\n\n_T = TypeVar('_T')\n\n\ndef record_function(\n tag: str,\n use_cuda: bool = False,\n) -> Callable[[Callable[..., _T]], Callable[..., _T]]:\n def wrapper(f: Callable[..., _T]) -> Callable[..., _T]:\n def wrapped(*args: Any, **kwargs: Any) -> _T:\n with record(tag, use_cuda=use_cuda):\n return f(*args, **kwargs)\n\n return wrapped\n\n return wrapper\n\n\ndef record_iterable(\n tag: str,\n iter: Iterable[_T],\n divide_metric: bool = False,\n use_cuda: bool = False,\n) -> Iterable[_T]:\n def wrapped() -> Iterable[_T]:\n for i, x in enumerate(iter):\n name = f\"{tag}-{i}\"\n metric = name if divide_metric else 
tag\n with record(name, metric, use_cuda=use_cuda):\n yield x\n\n return wrapped()\n", "path": "pytorch_pfn_extras/profiler/_record.py"}]} | 1,092 | 585 |
gh_patches_debug_35945 | rasdani/github-patches | git_diff | zigpy__zha-device-handlers-980 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Device Support Request] Tuya Smart Zigbee Curtain Switch Module (_TZ3000_mowcrwsa)
**Is your feature request related to a problem? Please describe.**
Device can be paired in ZHA but does not work properly in HA.
Manufacturer ID _TZ3000_mowcrwsa is not added as MODELS_INFO in ts130f.py
**Describe the solution you'd like**
Add manufacturer ID of the below device signature to class TuyaZemismartTS130F(CustomDevice): of zha-device-handlers/zhaquirks/tuya/ts130f.py
**Device signature - this can be acquired by removing the device from ZHA and pairing it again from the add devices screen. Be sure to add the entire content of the log panel after pairing the device to a code block below this line.**
{
"node_descriptor": "NodeDescriptor(logical_type=<LogicalType.Router: 1>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress|RxOnWhenIdle|MainsPowered|FullFunctionDevice: 142>, manufacturer_code=4098, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.0: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=False, *is_full_function_device=True, *is_mains_powered=True, *is_receiver_on_when_idle=True, *is_router=True, *is_security_capable=False)",
"endpoints": {
"1": {
"profile_id": 260,
"device_type": "0x0202",
"in_clusters": [
"0x0000",
"0x0004",
"0x0005",
"0x0006",
"0x0102"
],
"out_clusters": [
"0x000a",
"0x0019"
]
}
},
"manufacturer": "_TZ3000_mowcrwsa",
"model": "TS130F",
"class": "zigpy.device.Device"
}
**Additional context**
Add any other context or screenshots about the feature request here.
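
For illustration only: the requested change would be to list this manufacturer in the matching quirk's `MODELS_INFO`, roughly as sketched below (the maintainers may instead prefer matching on the model alone):

```python
# zhaquirks/tuya/ts130f.py -- sketch, inside TuyaZemismartTS130F.signature
MODELS_INFO: [
    ("_TZ3000_ltiqubue", "TS130F"),
    ("_TZ3000_mowcrwsa", "TS130F"),  # new device from the signature above
],
```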
</issue>
<code>
[start of zhaquirks/tuya/ts130f.py]
1 """Device handler for loratap TS130F smart curtain switch."""
2 from zigpy.profiles import zha
3 from zigpy.quirks import CustomCluster, CustomDevice
4 import zigpy.types as t
5 from zigpy.zcl.clusters.closures import WindowCovering
6 from zigpy.zcl.clusters.general import Basic, Groups, OnOff, Ota, Scenes, Time
7
8 from zhaquirks.const import (
9 DEVICE_TYPE,
10 ENDPOINTS,
11 INPUT_CLUSTERS,
12 MODELS_INFO,
13 OUTPUT_CLUSTERS,
14 PROFILE_ID,
15 )
16
17 ATTR_CURRENT_POSITION_LIFT_PERCENTAGE = 0x0008
18 CMD_GO_TO_LIFT_PERCENTAGE = 0x0005
19
20
21 class TuyaWithBacklightOnOffCluster(CustomCluster):
22 """TuyaSmartCurtainOnOffCluster: fire events corresponding to press type."""
23
24 cluster_id = OnOff.cluster_id
25
26 LIGHT_MODE_1 = {0x8001: 0}
27 LIGHT_MODE_2 = {0x8001: 1}
28 LIGHT_MODE_3 = {0x8001: 2}
29
30 attributes = {0x8001: ("backlight_mode", t.enum8)}
31
32
33 class TuyaCoveringCluster(CustomCluster, WindowCovering):
34 """TuyaSmartCurtainWindowCoveringCluster: Allow to setup Window covering tuya devices."""
35
36 attributes = WindowCovering.attributes.copy()
37 attributes.update({0xF000: ("tuya_moving_state", t.enum8)})
38 attributes.update({0xF001: ("calibration", t.enum8)})
39 attributes.update({0xF002: ("motor_reversal", t.enum8)})
40
41 def _update_attribute(self, attrid, value):
42 if attrid == ATTR_CURRENT_POSITION_LIFT_PERCENTAGE:
43 # Invert the percentage value (cf https://github.com/dresden-elektronik/deconz-rest-plugin/issues/3757)
44 value = 100 - value
45 super()._update_attribute(attrid, value)
46
47 async def command(
48 self, command_id, *args, manufacturer=None, expect_reply=True, tsn=None
49 ):
50 """Override default command to invert percent lift value."""
51 if command_id == CMD_GO_TO_LIFT_PERCENTAGE:
52 percent = args[0]
53 # Invert the percentage value (cf https://github.com/dresden-elektronik/deconz-rest-plugin/issues/3757)
54 percent = 100 - percent
55 v = (percent,)
56 return await super().command(command_id, *v)
57 return await super().command(
58 command_id,
59 *args,
60 manufacturer=manufacturer,
61 expect_reply=expect_reply,
62 tsn=tsn
63 )
64
65
66 class TuyaTS130F(CustomDevice):
67 """Tuya smart curtain roller shutter."""
68
69 signature = {
70 # SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=0x0202, device_version=1, input_clusters=[0, 4, 5, 6, 10, 0x0102], output_clusters=[25]))
71 MODELS_INFO: [
72 ("_TZ3000_8kzqqzu4", "TS130F"),
73 ("_TZ3000_vd43bbfq", "TS130F"),
74 ("_TZ3000_egq7y6pr", "TS130F"),
75 ],
76 ENDPOINTS: {
77 1: {
78 PROFILE_ID: zha.PROFILE_ID,
79 DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,
80 INPUT_CLUSTERS: [
81 Basic.cluster_id,
82 Groups.cluster_id,
83 Scenes.cluster_id,
84 Time.cluster_id,
85 OnOff.cluster_id,
86 WindowCovering.cluster_id,
87 ],
88 OUTPUT_CLUSTERS: [Ota.cluster_id],
89 },
90 },
91 }
92 replacement = {
93 ENDPOINTS: {
94 1: {
95 PROFILE_ID: zha.PROFILE_ID,
96 DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,
97 INPUT_CLUSTERS: [
98 Basic.cluster_id,
99 Groups.cluster_id,
100 Scenes.cluster_id,
101 Time.cluster_id,
102 TuyaWithBacklightOnOffCluster,
103 TuyaCoveringCluster,
104 ],
105 OUTPUT_CLUSTERS: [Ota.cluster_id],
106 },
107 },
108 }
109
110
111 class TuyaZemismartTS130F(CustomDevice):
112 """Tuya ZemiSmart smart curtain roller shutter."""
113
114 signature = {
115 # SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=0x0202, device_version=1, input_clusters=[0x0000, 0x0004, 0x0005, 0x0006, 0x0102], output_clusters=[0x000a, 0x0019]))
116 MODELS_INFO: [("_TZ3000_ltiqubue", "TS130F")],
117 ENDPOINTS: {
118 1: {
119 PROFILE_ID: zha.PROFILE_ID,
120 DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,
121 INPUT_CLUSTERS: [
122 Basic.cluster_id,
123 Groups.cluster_id,
124 Scenes.cluster_id,
125 OnOff.cluster_id,
126 WindowCovering.cluster_id,
127 ],
128 OUTPUT_CLUSTERS: [
129 Time.cluster_id,
130 Ota.cluster_id,
131 ],
132 },
133 },
134 }
135 replacement = {
136 ENDPOINTS: {
137 1: {
138 PROFILE_ID: zha.PROFILE_ID,
139 DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,
140 INPUT_CLUSTERS: [
141 Basic.cluster_id,
142 Groups.cluster_id,
143 Scenes.cluster_id,
144 TuyaWithBacklightOnOffCluster,
145 TuyaCoveringCluster,
146 ],
147 OUTPUT_CLUSTERS: [
148 Time.cluster_id,
149 Ota.cluster_id,
150 ],
151 },
152 },
153 }
154
155
156 class TuyaTS130F_Module(CustomDevice):
157 """Tuya smart curtain roller shutter."""
158
159 signature = {
160 # SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=0x0202, device_version=1, input_clusters=[0, 4, 5, 6, 10, 0x0102], output_clusters=[25]))
161 MODELS_INFO: [("_TZ3000_vd43bbfq", "TS130F")],
162 ENDPOINTS: {
163 1: {
164 PROFILE_ID: zha.PROFILE_ID,
165 DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,
166 INPUT_CLUSTERS: [
167 Basic.cluster_id,
168 Groups.cluster_id,
169 Scenes.cluster_id,
170 WindowCovering.cluster_id,
171 ],
172 OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
173 },
174 },
175 }
176 replacement = {
177 ENDPOINTS: {
178 1: {
179 PROFILE_ID: zha.PROFILE_ID,
180 DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,
181 INPUT_CLUSTERS: [
182 Basic.cluster_id,
183 Groups.cluster_id,
184 Scenes.cluster_id,
185 TuyaCoveringCluster,
186 ],
187 OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
188 },
189 },
190 }
191
[end of zhaquirks/tuya/ts130f.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zhaquirks/tuya/ts130f.py b/zhaquirks/tuya/ts130f.py
--- a/zhaquirks/tuya/ts130f.py
+++ b/zhaquirks/tuya/ts130f.py
@@ -9,7 +9,7 @@
DEVICE_TYPE,
ENDPOINTS,
INPUT_CLUSTERS,
- MODELS_INFO,
+ MODEL,
OUTPUT_CLUSTERS,
PROFILE_ID,
)
@@ -63,16 +63,12 @@
)
-class TuyaTS130F(CustomDevice):
- """Tuya smart curtain roller shutter."""
+class TuyaTS130FTI(CustomDevice):
+ """Tuya smart curtain roller shutter Time In."""
signature = {
# SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=0x0202, device_version=1, input_clusters=[0, 4, 5, 6, 10, 0x0102], output_clusters=[25]))
- MODELS_INFO: [
- ("_TZ3000_8kzqqzu4", "TS130F"),
- ("_TZ3000_vd43bbfq", "TS130F"),
- ("_TZ3000_egq7y6pr", "TS130F"),
- ],
+ MODEL: "TS130F",
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
@@ -113,7 +109,7 @@
signature = {
# SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=0x0202, device_version=1, input_clusters=[0x0000, 0x0004, 0x0005, 0x0006, 0x0102], output_clusters=[0x000a, 0x0019]))
- MODELS_INFO: [("_TZ3000_ltiqubue", "TS130F")],
+ MODEL: "TS130F",
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
@@ -153,12 +149,13 @@
}
-class TuyaTS130F_Module(CustomDevice):
- """Tuya smart curtain roller shutter."""
+class TuyaTS130FTO(CustomDevice):
+ """Tuya smart curtain roller shutter Time Out."""
signature = {
# SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=0x0202, device_version=1, input_clusters=[0, 4, 5, 6, 10, 0x0102], output_clusters=[25]))
- MODELS_INFO: [("_TZ3000_vd43bbfq", "TS130F")],
+ # This singnature is not correct is one copy of the first one and the cluster is not inline with the device.
+ MODEL: "TS130F",
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
| {"golden_diff": "diff --git a/zhaquirks/tuya/ts130f.py b/zhaquirks/tuya/ts130f.py\n--- a/zhaquirks/tuya/ts130f.py\n+++ b/zhaquirks/tuya/ts130f.py\n@@ -9,7 +9,7 @@\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n- MODELS_INFO,\n+ MODEL,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n )\n@@ -63,16 +63,12 @@\n )\n \n \n-class TuyaTS130F(CustomDevice):\n- \"\"\"Tuya smart curtain roller shutter.\"\"\"\n+class TuyaTS130FTI(CustomDevice):\n+ \"\"\"Tuya smart curtain roller shutter Time In.\"\"\"\n \n signature = {\n # SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=0x0202, device_version=1, input_clusters=[0, 4, 5, 6, 10, 0x0102], output_clusters=[25]))\n- MODELS_INFO: [\n- (\"_TZ3000_8kzqqzu4\", \"TS130F\"),\n- (\"_TZ3000_vd43bbfq\", \"TS130F\"),\n- (\"_TZ3000_egq7y6pr\", \"TS130F\"),\n- ],\n+ MODEL: \"TS130F\",\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n@@ -113,7 +109,7 @@\n \n signature = {\n # SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=0x0202, device_version=1, input_clusters=[0x0000, 0x0004, 0x0005, 0x0006, 0x0102], output_clusters=[0x000a, 0x0019]))\n- MODELS_INFO: [(\"_TZ3000_ltiqubue\", \"TS130F\")],\n+ MODEL: \"TS130F\",\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n@@ -153,12 +149,13 @@\n }\n \n \n-class TuyaTS130F_Module(CustomDevice):\n- \"\"\"Tuya smart curtain roller shutter.\"\"\"\n+class TuyaTS130FTO(CustomDevice):\n+ \"\"\"Tuya smart curtain roller shutter Time Out.\"\"\"\n \n signature = {\n # SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=0x0202, device_version=1, input_clusters=[0, 4, 5, 6, 10, 0x0102], output_clusters=[25]))\n- MODELS_INFO: [(\"_TZ3000_vd43bbfq\", \"TS130F\")],\n+ # This singnature is not correct is one copy of the first one and the cluster is not inline with the device.\n+ MODEL: \"TS130F\",\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n", "issue": "[Device Support Request] Tuya Smart Zigbee Curtain Switch Module (_TZ3000_mowcrwsa)\n**Is your feature request related to a problem? Please describe.**\r\nDevice can be paired in ZHA but not working properly in HA.\r\nManufacturer ID _TZ3000_mowcrwsa is not added as MODELS_INFO in ts130f.py\r\n\r\n**Describe the solution you'd like**\r\nAdd manufacturer ID of the below device signature to class TuyaZemismartTS130F(CustomDevice): of zha-device-handlers/zhaquirks/tuya/ts130f.py\r\n\r\n**Device signature - this can be acquired by removing the device from ZHA and pairing it again from the add devices screen. 
Be sure to add the entire content of the log panel after pairing the device to a code block below this line.**\r\n{\r\n \"node_descriptor\": \"NodeDescriptor(logical_type=<LogicalType.Router: 1>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress|RxOnWhenIdle|MainsPowered|FullFunctionDevice: 142>, manufacturer_code=4098, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.0: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=False, *is_full_function_device=True, *is_mains_powered=True, *is_receiver_on_when_idle=True, *is_router=True, *is_security_capable=False)\",\r\n \"endpoints\": {\r\n \"1\": {\r\n \"profile_id\": 260,\r\n \"device_type\": \"0x0202\",\r\n \"in_clusters\": [\r\n \"0x0000\",\r\n \"0x0004\",\r\n \"0x0005\",\r\n \"0x0006\",\r\n \"0x0102\"\r\n ],\r\n \"out_clusters\": [\r\n \"0x000a\",\r\n \"0x0019\"\r\n ]\r\n }\r\n },\r\n \"manufacturer\": \"_TZ3000_mowcrwsa\",\r\n \"model\": \"TS130F\",\r\n \"class\": \"zigpy.device.Device\"\r\n}\r\n\r\n**Additional context**\r\nAdd any other context or screenshots about the feature request here.\r\n\n", "before_files": [{"content": "\"\"\"Device handler for loratap TS130F smart curtain switch.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomCluster, CustomDevice\nimport zigpy.types as t\nfrom zigpy.zcl.clusters.closures import WindowCovering\nfrom zigpy.zcl.clusters.general import Basic, Groups, OnOff, Ota, Scenes, Time\n\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\nATTR_CURRENT_POSITION_LIFT_PERCENTAGE = 0x0008\nCMD_GO_TO_LIFT_PERCENTAGE = 0x0005\n\n\nclass TuyaWithBacklightOnOffCluster(CustomCluster):\n \"\"\"TuyaSmartCurtainOnOffCluster: fire events corresponding to press type.\"\"\"\n\n cluster_id = OnOff.cluster_id\n\n LIGHT_MODE_1 = {0x8001: 0}\n LIGHT_MODE_2 = {0x8001: 1}\n LIGHT_MODE_3 = {0x8001: 2}\n\n attributes = {0x8001: (\"backlight_mode\", t.enum8)}\n\n\nclass TuyaCoveringCluster(CustomCluster, WindowCovering):\n \"\"\"TuyaSmartCurtainWindowCoveringCluster: Allow to setup Window covering tuya devices.\"\"\"\n\n attributes = WindowCovering.attributes.copy()\n attributes.update({0xF000: (\"tuya_moving_state\", t.enum8)})\n attributes.update({0xF001: (\"calibration\", t.enum8)})\n attributes.update({0xF002: (\"motor_reversal\", t.enum8)})\n\n def _update_attribute(self, attrid, value):\n if attrid == ATTR_CURRENT_POSITION_LIFT_PERCENTAGE:\n # Invert the percentage value (cf https://github.com/dresden-elektronik/deconz-rest-plugin/issues/3757)\n value = 100 - value\n super()._update_attribute(attrid, value)\n\n async def command(\n self, command_id, *args, manufacturer=None, expect_reply=True, tsn=None\n ):\n \"\"\"Override default command to invert percent lift value.\"\"\"\n if command_id == CMD_GO_TO_LIFT_PERCENTAGE:\n percent = args[0]\n # Invert the percentage value (cf https://github.com/dresden-elektronik/deconz-rest-plugin/issues/3757)\n percent = 100 - percent\n v = (percent,)\n return await super().command(command_id, *v)\n return await super().command(\n command_id,\n *args,\n manufacturer=manufacturer,\n expect_reply=expect_reply,\n tsn=tsn\n )\n\n\nclass TuyaTS130F(CustomDevice):\n \"\"\"Tuya smart curtain roller 
shutter.\"\"\"\n\n signature = {\n # SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=0x0202, device_version=1, input_clusters=[0, 4, 5, 6, 10, 0x0102], output_clusters=[25]))\n MODELS_INFO: [\n (\"_TZ3000_8kzqqzu4\", \"TS130F\"),\n (\"_TZ3000_vd43bbfq\", \"TS130F\"),\n (\"_TZ3000_egq7y6pr\", \"TS130F\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n Time.cluster_id,\n OnOff.cluster_id,\n WindowCovering.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id],\n },\n },\n }\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n Time.cluster_id,\n TuyaWithBacklightOnOffCluster,\n TuyaCoveringCluster,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id],\n },\n },\n }\n\n\nclass TuyaZemismartTS130F(CustomDevice):\n \"\"\"Tuya ZemiSmart smart curtain roller shutter.\"\"\"\n\n signature = {\n # SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=0x0202, device_version=1, input_clusters=[0x0000, 0x0004, 0x0005, 0x0006, 0x0102], output_clusters=[0x000a, 0x0019]))\n MODELS_INFO: [(\"_TZ3000_ltiqubue\", \"TS130F\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n WindowCovering.cluster_id,\n ],\n OUTPUT_CLUSTERS: [\n Time.cluster_id,\n Ota.cluster_id,\n ],\n },\n },\n }\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TuyaWithBacklightOnOffCluster,\n TuyaCoveringCluster,\n ],\n OUTPUT_CLUSTERS: [\n Time.cluster_id,\n Ota.cluster_id,\n ],\n },\n },\n }\n\n\nclass TuyaTS130F_Module(CustomDevice):\n \"\"\"Tuya smart curtain roller shutter.\"\"\"\n\n signature = {\n # SizePrefixedSimpleDescriptor(endpoint=1, profile=260, device_type=0x0202, device_version=1, input_clusters=[0, 4, 5, 6, 10, 0x0102], output_clusters=[25]))\n MODELS_INFO: [(\"_TZ3000_vd43bbfq\", \"TS130F\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n WindowCovering.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n },\n }\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n TuyaCoveringCluster,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n },\n }\n", "path": "zhaquirks/tuya/ts130f.py"}]} | 3,201 | 741 |
gh_patches_debug_35093 | rasdani/github-patches | git_diff | hydroshare__hydroshare-4819 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
rewrite author order test
**Description of the bug**
This test fails occasionally. Rewrite it removing 2 assertions:
[https://github.com/hydroshare/hydroshare/blob/4372-communities-and-groups-2.0/hs_c[…]ore/tests/api/native/test_reorder_authors_management_command.py](https://github.com/hydroshare/hydroshare/blob/4372-communities-and-groups-2.0/hs_core/tests/api/native/test_reorder_authors_management_command.py#L180)
[https://github.com/hydroshare/hydroshare/blob/4372-communities-and-groups-2.0/hs_c[…]ore/tests/api/native/test_reorder_authors_management_command.py](https://github.com/hydroshare/hydroshare/blob/4372-communities-and-groups-2.0/hs_core/tests/api/native/test_reorder_authors_management_command.py#L152)
Also: rewrite this management command so that it takes a res ID as a param:
https://github.com/hydroshare/hydroshare/blob/4372-communities-and-groups-2.0/hs_core/management/commands/reorder_authors.py#L24
Steps to reproduce the bug:
http://ci.hydroshare.org:8080/job/hydroshare-pull-requests/5750/testReport/junit/hs_core.tests.api.native.test_reorder_authors_management_command/TestReorderAuthorsCommand/test_command_fixes_triplicate_authors/
**Expected behavior**
Test should not be dependent on Django's .get() order
</issue>
<code>
[start of hs_core/management/commands/reorder_authors.py]
1 # -*- coding: utf-8 -*-
2
3 """
4 Fix duplicate author "order" values
5
6 Related to https://github.com/hydroshare/hydroshare/issues/4695
7 """
8
9 from django.core.management.base import BaseCommand
10 from hs_core.models import BaseResource
11 from hs_core.hydroshare.utils import set_dirty_bag_flag
12
13
14 class Command(BaseCommand):
15 help = "Fix duplicate author 'order' values"
16
17 def handle(self, *args, **options):
18 resources = BaseResource.objects.filter(raccess__published=False).only('object_id', 'short_id')
19 for res in resources:
20 if res.metadata is not None:
21 creators = res.metadata.creators.all()
22 is_dirty = False
23 for index, creator in enumerate(creators, start=1):
24 if creator.order != index:
25 print("*" * 100)
26 print(f"Author out of order.\nR:{res.short_id}"
27 f"\nExpected: {index}, got: {creator.order}")
28 creator.order = index
29 creator.save()
30 is_dirty = True
31 if is_dirty:
32 set_dirty_bag_flag(res)
33
[end of hs_core/management/commands/reorder_authors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hs_core/management/commands/reorder_authors.py b/hs_core/management/commands/reorder_authors.py
--- a/hs_core/management/commands/reorder_authors.py
+++ b/hs_core/management/commands/reorder_authors.py
@@ -6,7 +6,7 @@
Related to https://github.com/hydroshare/hydroshare/issues/4695
"""
-from django.core.management.base import BaseCommand
+from django.core.management.base import BaseCommand, CommandError
from hs_core.models import BaseResource
from hs_core.hydroshare.utils import set_dirty_bag_flag
@@ -14,19 +14,30 @@
class Command(BaseCommand):
help = "Fix duplicate author 'order' values"
+ def add_arguments(self, parser):
+ # ID of a resource for which users should be re-ordered
+ parser.add_argument('--resource_id', type=str, help=('Required. The id (short_id) of'
+ ' the resource'))
+
def handle(self, *args, **options):
- resources = BaseResource.objects.filter(raccess__published=False).only('object_id', 'short_id')
- for res in resources:
- if res.metadata is not None:
- creators = res.metadata.creators.all()
- is_dirty = False
- for index, creator in enumerate(creators, start=1):
- if creator.order != index:
- print("*" * 100)
- print(f"Author out of order.\nR:{res.short_id}"
- f"\nExpected: {index}, got: {creator.order}")
- creator.order = index
- creator.save()
- is_dirty = True
- if is_dirty:
- set_dirty_bag_flag(res)
+ if not options['resource_id']:
+ raise CommandError('resource_id argument is required')
+ res_id = options['resource_id']
+ res = BaseResource.objects.filter(short_id=res_id).first()
+ if not res:
+ raise CommandError('No resource found for the provided resource_id')
+ if res.raccess.published:
+ raise CommandError(f"Resource id: {res_id} is already published--can't update author order.")
+ if res.metadata is not None:
+ creators = res.metadata.creators.all()
+ is_dirty = False
+ for index, creator in enumerate(creators, start=1):
+ if creator.order != index:
+ print("*" * 100)
+ print(f"Author out of order.\nR:{res.short_id}"
+ f"\nExpected: {index}, got: {creator.order}")
+ creator.order = index
+ creator.save()
+ is_dirty = True
+ if is_dirty:
+ set_dirty_bag_flag(res)
| {"golden_diff": "diff --git a/hs_core/management/commands/reorder_authors.py b/hs_core/management/commands/reorder_authors.py\n--- a/hs_core/management/commands/reorder_authors.py\n+++ b/hs_core/management/commands/reorder_authors.py\n@@ -6,7 +6,7 @@\n Related to https://github.com/hydroshare/hydroshare/issues/4695\n \"\"\"\n \n-from django.core.management.base import BaseCommand\n+from django.core.management.base import BaseCommand, CommandError\n from hs_core.models import BaseResource\n from hs_core.hydroshare.utils import set_dirty_bag_flag\n \n@@ -14,19 +14,30 @@\n class Command(BaseCommand):\n help = \"Fix duplicate author 'order' values\"\n \n+ def add_arguments(self, parser):\n+ # ID of a resource for which users should be re-ordered\n+ parser.add_argument('--resource_id', type=str, help=('Required. The id (short_id) of'\n+ ' the resource'))\n+\n def handle(self, *args, **options):\n- resources = BaseResource.objects.filter(raccess__published=False).only('object_id', 'short_id')\n- for res in resources:\n- if res.metadata is not None:\n- creators = res.metadata.creators.all()\n- is_dirty = False\n- for index, creator in enumerate(creators, start=1):\n- if creator.order != index:\n- print(\"*\" * 100)\n- print(f\"Author out of order.\\nR:{res.short_id}\"\n- f\"\\nExpected: {index}, got: {creator.order}\")\n- creator.order = index\n- creator.save()\n- is_dirty = True\n- if is_dirty:\n- set_dirty_bag_flag(res)\n+ if not options['resource_id']:\n+ raise CommandError('resource_id argument is required')\n+ res_id = options['resource_id']\n+ res = BaseResource.objects.filter(short_id=res_id).first()\n+ if not res:\n+ raise CommandError('No resource found for the provided resource_id')\n+ if res.raccess.published:\n+ raise CommandError(f\"Resource id: {res_id} is already published--can't update author order.\")\n+ if res.metadata is not None:\n+ creators = res.metadata.creators.all()\n+ is_dirty = False\n+ for index, creator in enumerate(creators, start=1):\n+ if creator.order != index:\n+ print(\"*\" * 100)\n+ print(f\"Author out of order.\\nR:{res.short_id}\"\n+ f\"\\nExpected: {index}, got: {creator.order}\")\n+ creator.order = index\n+ creator.save()\n+ is_dirty = True\n+ if is_dirty:\n+ set_dirty_bag_flag(res)\n", "issue": "rewrite author order test\n**Description of the bug**\r\nThis test fails occasionally. 
Rewrite it removing 2 assertions:\r\n[https://github.com/hydroshare/hydroshare/blob/4372-communities-and-groups-2.0/hs_c[\u2026]ore/tests/api/native/test_reorder_authors_management_command.py](https://github.com/hydroshare/hydroshare/blob/4372-communities-and-groups-2.0/hs_core/tests/api/native/test_reorder_authors_management_command.py#L180)\r\n\r\n[https://github.com/hydroshare/hydroshare/blob/4372-communities-and-groups-2.0/hs_c[\u2026]ore/tests/api/native/test_reorder_authors_management_command.py](https://github.com/hydroshare/hydroshare/blob/4372-communities-and-groups-2.0/hs_core/tests/api/native/test_reorder_authors_management_command.py#L152)\r\n\r\nAlso: rewrite this management command so that it takes a res ID as a param:\r\nhttps://github.com/hydroshare/hydroshare/blob/4372-communities-and-groups-2.0/hs_core/management/commands/reorder_authors.py#L24\r\n\r\nSteps to reproduce the bug:\r\nhttp://ci.hydroshare.org:8080/job/hydroshare-pull-requests/5750/testReport/junit/hs_core.tests.api.native.test_reorder_authors_management_command/TestReorderAuthorsCommand/test_command_fixes_triplicate_authors/\r\n\r\n**Expected behavior**\r\nTest should not be dependent on django .get() order\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nFix duplicate author \"order\" values\n\nRelated to https://github.com/hydroshare/hydroshare/issues/4695\n\"\"\"\n\nfrom django.core.management.base import BaseCommand\nfrom hs_core.models import BaseResource\nfrom hs_core.hydroshare.utils import set_dirty_bag_flag\n\n\nclass Command(BaseCommand):\n help = \"Fix duplicate author 'order' values\"\n\n def handle(self, *args, **options):\n resources = BaseResource.objects.filter(raccess__published=False).only('object_id', 'short_id')\n for res in resources:\n if res.metadata is not None:\n creators = res.metadata.creators.all()\n is_dirty = False\n for index, creator in enumerate(creators, start=1):\n if creator.order != index:\n print(\"*\" * 100)\n print(f\"Author out of order.\\nR:{res.short_id}\"\n f\"\\nExpected: {index}, got: {creator.order}\")\n creator.order = index\n creator.save()\n is_dirty = True\n if is_dirty:\n set_dirty_bag_flag(res)\n", "path": "hs_core/management/commands/reorder_authors.py"}]} | 1,181 | 618 |
gh_patches_debug_3501 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-1068 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
mail controlpanel: doesn't keep password field
Saving the mail settings in the controlpanel doesn't keep the password field value, as it is obviously never shown in the ESMTP password field.
Steps to reproduce:
1. Fill in ESMTP username and ESMTP password. Save settings. They are correctly stored.
2. Apply save settings again. ESMTP password is incorrectly stored as None.
</issue>
<code>
[start of Products/CMFPlone/controlpanel/browser/mail.py]
1 from Products.CMFCore.utils import getToolByName
2 from Products.CMFPlone import PloneMessageFactory as _
3 from Products.CMFPlone.interfaces.controlpanel import IMailSchema
4 from Products.MailHost.MailHost import MailHostError
5 from Products.statusmessages.interfaces import IStatusMessage
6 from logging import getLogger
7 from plone.app.registry.browser import controlpanel
8 from plone.registry.interfaces import IRegistry
9 from z3c.form import button
10 from zope.component import getUtility
11
12 import smtplib
13 import socket
14 import sys
15
16 log = getLogger('Plone')
17
18
19 class MailControlPanelForm(controlpanel.RegistryEditForm):
20
21 id = "MailControlPanel"
22 label = _(u"Mail Settings")
23 schema = IMailSchema
24 schema_prefix = "plone"
25
26 @button.buttonAndHandler(_('Save'), name=None)
27 def handleSave(self, action):
28 self.save()
29
30 @button.buttonAndHandler(_('Cancel'), name='cancel')
31 def handleCancel(self, action):
32 super(MailControlPanelForm, self).handleCancel(self, action)
33
34 def save(self):
35 data, errors = self.extractData()
36 if errors:
37 self.status = self.formErrorsMessage
38 return False
39 self.applyChanges(data)
40 return True
41
42 @button.buttonAndHandler(
43 _('label_smtp_test', default='Save and send test e-mail'),
44 name='test')
45 def handle_test_action(self, action):
46 # Save data first
47 if not self.save():
48 return
49 mailhost = getToolByName(self.context, 'MailHost')
50
51 registry = getUtility(IRegistry)
52 mail_settings = registry.forInterface(IMailSchema, prefix='plone')
53 fromaddr = mail_settings.email_from_address
54 fromname = mail_settings.email_from_name
55
56 message = ("Hi,\n\nThis is a test message sent from the Plone "
57 "'Mail settings' control panel. Your receipt of this "
58 "message (at the address specified in the Site 'From' "
59 "address field) indicates that your e-mail server is "
60 "working!\n\n"
61 "Have a nice day.\n\n"
62 "Love,\n\nPlone")
63 email_charset = mail_settings.email_charset
64 subject = "Test e-mail from Plone"
65
66 # Make the timeout incredibly short. This is enough time for most mail
67 # servers, wherever they may be in the world, to respond to the
68 # connection request. Make sure we save the current value
69 # and restore it afterward.
70 timeout = socket.getdefaulttimeout()
71 try:
72 socket.setdefaulttimeout(3)
73 try:
74 mailhost.send(message,
75 mto=fromaddr,
76 mfrom=fromaddr,
77 subject=subject,
78 charset=email_charset,
79 immediate=True)
80
81 except (socket.error, MailHostError, smtplib.SMTPException):
82 # Connection refused or timeout.
83 log.exception('Unable to send test e-mail.')
84 value = sys.exc_info()[1]
85 msg = _(u'Unable to send test e-mail ${error}.',
86 mapping={'error': unicode(value)})
87 IStatusMessage(self.request).addStatusMessage(
88 msg, type='error')
89 else:
90 IStatusMessage(self.request).addStatusMessage(
91 _(u'Success! Check your mailbox for the test message.'),
92 type='info')
93 finally:
94 # Restore timeout to default value
95 socket.setdefaulttimeout(timeout)
96
97
98 class MailControlPanel(controlpanel.ControlPanelFormWrapper):
99 form = MailControlPanelForm
100
[end of Products/CMFPlone/controlpanel/browser/mail.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Products/CMFPlone/controlpanel/browser/mail.py b/Products/CMFPlone/controlpanel/browser/mail.py
--- a/Products/CMFPlone/controlpanel/browser/mail.py
+++ b/Products/CMFPlone/controlpanel/browser/mail.py
@@ -36,6 +36,10 @@
if errors:
self.status = self.formErrorsMessage
return False
+ #keep password field
+ if (data.get('smtp_userid') is not None
+ and data.get('smtp_pass') is None):
+ del data['smtp_pass']
self.applyChanges(data)
return True
| {"golden_diff": "diff --git a/Products/CMFPlone/controlpanel/browser/mail.py b/Products/CMFPlone/controlpanel/browser/mail.py\n--- a/Products/CMFPlone/controlpanel/browser/mail.py\n+++ b/Products/CMFPlone/controlpanel/browser/mail.py\n@@ -36,6 +36,10 @@\n if errors:\n self.status = self.formErrorsMessage\n return False\n+ #keep password field\n+ if (data.get('smtp_userid') is not None\n+ and data.get('smtp_pass') is None):\n+ del data['smtp_pass']\n self.applyChanges(data)\n return True\n", "issue": "mail controlpanel: doesn't keep password field\nSaving the mail settings in the controlpanel doesn't keep the password field value, as it is obviously never shown in ESMTP password.\n\nSteps to reproduce:\n1. Fill in ESMTP username and ESMTP password. Save settings. They are correctly stored.\n2. Apply save settings again. ESMTP password is incorrectly stored as None.\n\n", "before_files": [{"content": "from Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.interfaces.controlpanel import IMailSchema\nfrom Products.MailHost.MailHost import MailHostError\nfrom Products.statusmessages.interfaces import IStatusMessage\nfrom logging import getLogger\nfrom plone.app.registry.browser import controlpanel\nfrom plone.registry.interfaces import IRegistry\nfrom z3c.form import button\nfrom zope.component import getUtility\n\nimport smtplib\nimport socket\nimport sys\n\nlog = getLogger('Plone')\n\n\nclass MailControlPanelForm(controlpanel.RegistryEditForm):\n\n id = \"MailControlPanel\"\n label = _(u\"Mail Settings\")\n schema = IMailSchema\n schema_prefix = \"plone\"\n\n @button.buttonAndHandler(_('Save'), name=None)\n def handleSave(self, action):\n self.save()\n\n @button.buttonAndHandler(_('Cancel'), name='cancel')\n def handleCancel(self, action):\n super(MailControlPanelForm, self).handleCancel(self, action)\n\n def save(self):\n data, errors = self.extractData()\n if errors:\n self.status = self.formErrorsMessage\n return False\n self.applyChanges(data)\n return True\n\n @button.buttonAndHandler(\n _('label_smtp_test', default='Save and send test e-mail'),\n name='test')\n def handle_test_action(self, action):\n # Save data first\n if not self.save():\n return\n mailhost = getToolByName(self.context, 'MailHost')\n\n registry = getUtility(IRegistry)\n mail_settings = registry.forInterface(IMailSchema, prefix='plone')\n fromaddr = mail_settings.email_from_address\n fromname = mail_settings.email_from_name\n\n message = (\"Hi,\\n\\nThis is a test message sent from the Plone \"\n \"'Mail settings' control panel. Your receipt of this \"\n \"message (at the address specified in the Site 'From' \"\n \"address field) indicates that your e-mail server is \"\n \"working!\\n\\n\"\n \"Have a nice day.\\n\\n\"\n \"Love,\\n\\nPlone\")\n email_charset = mail_settings.email_charset\n subject = \"Test e-mail from Plone\"\n\n # Make the timeout incredibly short. This is enough time for most mail\n # servers, wherever they may be in the world, to respond to the\n # connection request. 
Make sure we save the current value\n # and restore it afterward.\n timeout = socket.getdefaulttimeout()\n try:\n socket.setdefaulttimeout(3)\n try:\n mailhost.send(message,\n mto=fromaddr,\n mfrom=fromaddr,\n subject=subject,\n charset=email_charset,\n immediate=True)\n\n except (socket.error, MailHostError, smtplib.SMTPException):\n # Connection refused or timeout.\n log.exception('Unable to send test e-mail.')\n value = sys.exc_info()[1]\n msg = _(u'Unable to send test e-mail ${error}.',\n mapping={'error': unicode(value)})\n IStatusMessage(self.request).addStatusMessage(\n msg, type='error')\n else:\n IStatusMessage(self.request).addStatusMessage(\n _(u'Success! Check your mailbox for the test message.'),\n type='info')\n finally:\n # Restore timeout to default value\n socket.setdefaulttimeout(timeout)\n\n\nclass MailControlPanel(controlpanel.ControlPanelFormWrapper):\n form = MailControlPanelForm\n", "path": "Products/CMFPlone/controlpanel/browser/mail.py"}]} | 1,570 | 141 |
gh_patches_debug_32239 | rasdani/github-patches | git_diff | Textualize__textual-2112 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`)` cannot appear as part of the parameter passed to an action
Reproduction:
```python
from textual.app import App
class ActionBugApp(App):
BINDINGS = [("a", "test(')')", "Test")]
def action_test(self, _: str) -> None:
pass
if __name__ == '__main__':
app = ActionBugApp()
app.run()
```
Omitting the full stack trace (since it's fairly easy to reproduce), the key error message is:
```
ActionError: unable to parse "(')" in action "test(')')"
```
Seems that [this regex](https://github.com/Textualize/textual/blob/2a6368754a8b3a11f1772b52298b5d3b50ceebaa/src/textual/actions.py#L20) is not general enough.
</issue>
<code>
[start of src/textual/actions.py]
1 from __future__ import annotations
2
3 import ast
4 import re
5
6 from typing_extensions import Any, TypeAlias
7
8 ActionParseResult: TypeAlias = "tuple[str, tuple[Any, ...]]"
9 """An action is its name and the arbitrary tuple of its parameters."""
10
11
12 class SkipAction(Exception):
13 """Raise in an action to skip the action (and allow any parent bindings to run)."""
14
15
16 class ActionError(Exception):
17 pass
18
19
20 re_action_params = re.compile(r"([\w\.]+)(\(.*?\))")
21
22
23 def parse(action: str) -> ActionParseResult:
24 """Parses an action string.
25
26 Args:
27 action: String containing action.
28
29 Raises:
30 ActionError: If the action has invalid syntax.
31
32 Returns:
33 Action name and parameters
34 """
35 params_match = re_action_params.match(action)
36 if params_match is not None:
37 action_name, action_params_str = params_match.groups()
38 try:
39 action_params = ast.literal_eval(action_params_str)
40 except Exception:
41 raise ActionError(
42 f"unable to parse {action_params_str!r} in action {action!r}"
43 )
44 else:
45 action_name = action
46 action_params = ()
47
48 return (
49 action_name,
50 action_params if isinstance(action_params, tuple) else (action_params,),
51 )
52
[end of src/textual/actions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/textual/actions.py b/src/textual/actions.py
--- a/src/textual/actions.py
+++ b/src/textual/actions.py
@@ -6,7 +6,7 @@
from typing_extensions import Any, TypeAlias
ActionParseResult: TypeAlias = "tuple[str, tuple[Any, ...]]"
-"""An action is its name and the arbitrary tuple of its parameters."""
+"""An action is its name and the arbitrary tuple of its arguments."""
class SkipAction(Exception):
@@ -17,7 +17,7 @@
pass
-re_action_params = re.compile(r"([\w\.]+)(\(.*?\))")
+re_action_args = re.compile(r"([\w\.]+)\((.*)\)")
def parse(action: str) -> ActionParseResult:
@@ -30,22 +30,25 @@
ActionError: If the action has invalid syntax.
Returns:
- Action name and parameters
+ Action name and arguments.
"""
- params_match = re_action_params.match(action)
- if params_match is not None:
- action_name, action_params_str = params_match.groups()
- try:
- action_params = ast.literal_eval(action_params_str)
- except Exception:
- raise ActionError(
- f"unable to parse {action_params_str!r} in action {action!r}"
- )
+ args_match = re_action_args.match(action)
+ if args_match is not None:
+ action_name, action_args_str = args_match.groups()
+ if action_args_str:
+ try:
+ # We wrap `action_args_str` to be able to disambiguate the cases where
+ # the list of arguments is a comma-separated list of values from the
+ # case where the argument is a single tuple.
+ action_args: tuple[Any, ...] = ast.literal_eval(f"({action_args_str},)")
+ except Exception:
+ raise ActionError(
+ f"unable to parse {action_args_str!r} in action {action!r}"
+ )
+ else:
+ action_args = ()
else:
action_name = action
- action_params = ()
+ action_args = ()
- return (
- action_name,
- action_params if isinstance(action_params, tuple) else (action_params,),
- )
+ return action_name, action_args
| {"golden_diff": "diff --git a/src/textual/actions.py b/src/textual/actions.py\n--- a/src/textual/actions.py\n+++ b/src/textual/actions.py\n@@ -6,7 +6,7 @@\n from typing_extensions import Any, TypeAlias\n \n ActionParseResult: TypeAlias = \"tuple[str, tuple[Any, ...]]\"\n-\"\"\"An action is its name and the arbitrary tuple of its parameters.\"\"\"\n+\"\"\"An action is its name and the arbitrary tuple of its arguments.\"\"\"\n \n \n class SkipAction(Exception):\n@@ -17,7 +17,7 @@\n pass\n \n \n-re_action_params = re.compile(r\"([\\w\\.]+)(\\(.*?\\))\")\n+re_action_args = re.compile(r\"([\\w\\.]+)\\((.*)\\)\")\n \n \n def parse(action: str) -> ActionParseResult:\n@@ -30,22 +30,25 @@\n ActionError: If the action has invalid syntax.\n \n Returns:\n- Action name and parameters\n+ Action name and arguments.\n \"\"\"\n- params_match = re_action_params.match(action)\n- if params_match is not None:\n- action_name, action_params_str = params_match.groups()\n- try:\n- action_params = ast.literal_eval(action_params_str)\n- except Exception:\n- raise ActionError(\n- f\"unable to parse {action_params_str!r} in action {action!r}\"\n- )\n+ args_match = re_action_args.match(action)\n+ if args_match is not None:\n+ action_name, action_args_str = args_match.groups()\n+ if action_args_str:\n+ try:\n+ # We wrap `action_args_str` to be able to disambiguate the cases where\n+ # the list of arguments is a comma-separated list of values from the\n+ # case where the argument is a single tuple.\n+ action_args: tuple[Any, ...] = ast.literal_eval(f\"({action_args_str},)\")\n+ except Exception:\n+ raise ActionError(\n+ f\"unable to parse {action_args_str!r} in action {action!r}\"\n+ )\n+ else:\n+ action_args = ()\n else:\n action_name = action\n- action_params = ()\n+ action_args = ()\n \n- return (\n- action_name,\n- action_params if isinstance(action_params, tuple) else (action_params,),\n- )\n+ return action_name, action_args\n", "issue": "`)` cannot appear as part of the parameter passed to an action\nReproduction:\r\n\r\n```python\r\nfrom textual.app import App\r\n\r\n\r\nclass ActionBugApp(App):\r\n BINDINGS = [(\"a\", \"test(')')\", \"Test\")]\r\n\r\n def action_test(self, _: str) -> None:\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n app = ActionBugApp()\r\n app.run()\r\n```\r\n\r\nOmitting the full stack trace (since it's fairly easy to reproduce), the key error message is:\r\n\r\n```\r\nActionError: unable to parse \"(')\" in action \"test(')')\"\r\n```\r\n\r\nSeems that [this regex](https://github.com/Textualize/textual/blob/2a6368754a8b3a11f1772b52298b5d3b50ceebaa/src/textual/actions.py#L20) is not general enough.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport ast\nimport re\n\nfrom typing_extensions import Any, TypeAlias\n\nActionParseResult: TypeAlias = \"tuple[str, tuple[Any, ...]]\"\n\"\"\"An action is its name and the arbitrary tuple of its parameters.\"\"\"\n\n\nclass SkipAction(Exception):\n \"\"\"Raise in an action to skip the action (and allow any parent bindings to run).\"\"\"\n\n\nclass ActionError(Exception):\n pass\n\n\nre_action_params = re.compile(r\"([\\w\\.]+)(\\(.*?\\))\")\n\n\ndef parse(action: str) -> ActionParseResult:\n \"\"\"Parses an action string.\n\n Args:\n action: String containing action.\n\n Raises:\n ActionError: If the action has invalid syntax.\n\n Returns:\n Action name and parameters\n \"\"\"\n params_match = re_action_params.match(action)\n if params_match is not None:\n action_name, action_params_str = params_match.groups()\n try:\n 
action_params = ast.literal_eval(action_params_str)\n except Exception:\n raise ActionError(\n f\"unable to parse {action_params_str!r} in action {action!r}\"\n )\n else:\n action_name = action\n action_params = ()\n\n return (\n action_name,\n action_params if isinstance(action_params, tuple) else (action_params,),\n )\n", "path": "src/textual/actions.py"}]} | 1,115 | 518 |
gh_patches_debug_7276 | rasdani/github-patches | git_diff | pyodide__pyodide-3013 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Relative URLs in pyodide.loadPackage
## 🐛 Bug
<!-- A clear and concise description of what the bug is. -->
The documentation states that [pyodide.loadPackage](https://pyodide.org/en/stable/usage/api/js-api.html#pyodide.loadPackage) supports relative URLs. I'm trying to load an out-of-tree wheel from my local webserver, but this doesn't seem to work out well.
### To Reproduce
<!-- Minimal code example to reproduce the bug. -->
```js
await pyodide.loadPackage("dist/igraph-0.9.11-cp310-cp310-emscripten_3_1_14_wasm32.whl");
```
or
```js
await pyodide.loadPackage("./dist/igraph-0.9.11-cp310-cp310-emscripten_3_1_14_wasm32.whl");
```
Pyodide tries to load the wheel from `https://cdn.jsdelivr.net/pyodide/v0.21.1/full/dist/igraph-0.9.11-cp310-cp310-emscripten_3_1_14_wasm32.whl`.
### Expected behavior
<!-- FILL IN -->
Load the wheel from the relative URL.
### Environment
- Pyodide Version<!-- (e.g. 1.8.1) -->: 0.21.1
- Browser version<!-- (e.g. Chrome 95.0.4638.54) -->: Firefox ESR 91.12.0, Chromium 104.0.5112.101
- Any other relevant information:
<!-- If you are building Pyodide by yourself, please also include this information: -->
<!--
- Commit hash of Pyodide git repository:
- Build environment<!--(e.g. Ubuntu 18.04, pyodide/pyodide-env:19 docker)-->:
-->
### Additional context
<!-- Add any other context about the problem here. -->
</issue>
<code>
[start of packages/micropip/src/micropip/_compat_in_pyodide.py]
1 from io import BytesIO
2 from typing import IO
3 from urllib.parse import urlparse
4
5 from pyodide._core import IN_BROWSER
6 from pyodide.http import pyfetch
7
8 try:
9 import pyodide_js
10 from pyodide_js import loadedPackages, loadPackage
11 from pyodide_js._api import loadBinaryFile, loadDynlib # type: ignore[import]
12
13 REPODATA_PACKAGES = pyodide_js._api.repodata_packages.to_py()
14 REPODATA_INFO = pyodide_js._api.repodata_info.to_py()
15 except ImportError:
16 if IN_BROWSER:
17 raise
18 # Otherwise, this is pytest test collection so let it go.
19
20
21 async def fetch_bytes(url: str, kwargs: dict[str, str]) -> IO[bytes]:
22 parsed_url = urlparse(url)
23 if parsed_url.scheme == "emfs":
24 return open(parsed_url.path, "rb")
25 if parsed_url.scheme == "file":
26 result_bytes = (await loadBinaryFile("", parsed_url.path)).to_bytes()
27 else:
28 result_bytes = await (await pyfetch(url, **kwargs)).bytes()
29 return BytesIO(result_bytes)
30
31
32 async def fetch_string(url: str, kwargs: dict[str, str]) -> str:
33 return await (await pyfetch(url, **kwargs)).string()
34
35
36 __all__ = [
37 "fetch_bytes",
38 "fetch_string",
39 "REPODATA_INFO",
40 "REPODATA_PACKAGES",
41 "loadedPackages",
42 "loadDynlib",
43 "loadPackage",
44 ]
45
[end of packages/micropip/src/micropip/_compat_in_pyodide.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/packages/micropip/src/micropip/_compat_in_pyodide.py b/packages/micropip/src/micropip/_compat_in_pyodide.py
--- a/packages/micropip/src/micropip/_compat_in_pyodide.py
+++ b/packages/micropip/src/micropip/_compat_in_pyodide.py
@@ -23,7 +23,7 @@
if parsed_url.scheme == "emfs":
return open(parsed_url.path, "rb")
if parsed_url.scheme == "file":
- result_bytes = (await loadBinaryFile("", parsed_url.path)).to_bytes()
+ result_bytes = (await loadBinaryFile(parsed_url.path)).to_bytes()
else:
result_bytes = await (await pyfetch(url, **kwargs)).bytes()
return BytesIO(result_bytes)
| {"golden_diff": "diff --git a/packages/micropip/src/micropip/_compat_in_pyodide.py b/packages/micropip/src/micropip/_compat_in_pyodide.py\n--- a/packages/micropip/src/micropip/_compat_in_pyodide.py\n+++ b/packages/micropip/src/micropip/_compat_in_pyodide.py\n@@ -23,7 +23,7 @@\n if parsed_url.scheme == \"emfs\":\n return open(parsed_url.path, \"rb\")\n if parsed_url.scheme == \"file\":\n- result_bytes = (await loadBinaryFile(\"\", parsed_url.path)).to_bytes()\n+ result_bytes = (await loadBinaryFile(parsed_url.path)).to_bytes()\n else:\n result_bytes = await (await pyfetch(url, **kwargs)).bytes()\n return BytesIO(result_bytes)\n", "issue": "Relative URLs in pyodide.loadPackage\n## \ud83d\udc1b Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\nThe documentation states that [pyodide.loadPackage](https://pyodide.org/en/stable/usage/api/js-api.html#pyodide.loadPackage) supports relative URLs. I'm trying to load an out-of-tree wheel from my local webserver, but this doesn't seem to work out well.\r\n\r\n### To Reproduce\r\n\r\n<!-- Minimal code example to reproduce the bug. -->\r\n```js\r\nawait pyodide.loadPackage(\"dist/igraph-0.9.11-cp310-cp310-emscripten_3_1_14_wasm32.whl\");\r\n```\r\nor\r\n```js\r\nawait pyodide.loadPackage(\"./dist/igraph-0.9.11-cp310-cp310-emscripten_3_1_14_wasm32.whl\");\r\n```\r\nPyodide tries to load the wheel from `https://cdn.jsdelivr.net/pyodide/v0.21.1/full/dist/igraph-0.9.11-cp310-cp310-emscripten_3_1_14_wasm32.whl`.\r\n\r\n### Expected behavior\r\n\r\n<!-- FILL IN -->\r\nLoad the wheel from the relative URL.\r\n\r\n### Environment\r\n\r\n- Pyodide Version<!-- (e.g. 1.8.1) -->: 0.21.1\r\n- Browser version<!-- (e.g. Chrome 95.0.4638.54) -->: Firefox ESR 91.12.0, Chromium 104.0.5112.101\r\n- Any other relevant information:\r\n\r\n<!-- If you are building Pyodide by yourself, please also include these information: -->\r\n\r\n<!--\r\n- Commit hash of Pyodide git repository:\r\n- Build environment<!--(e.g. Ubuntu 18.04, pyodide/pyodide-env:19 docker)- ->:\r\n-->\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "from io import BytesIO\nfrom typing import IO\nfrom urllib.parse import urlparse\n\nfrom pyodide._core import IN_BROWSER\nfrom pyodide.http import pyfetch\n\ntry:\n import pyodide_js\n from pyodide_js import loadedPackages, loadPackage\n from pyodide_js._api import loadBinaryFile, loadDynlib # type: ignore[import]\n\n REPODATA_PACKAGES = pyodide_js._api.repodata_packages.to_py()\n REPODATA_INFO = pyodide_js._api.repodata_info.to_py()\nexcept ImportError:\n if IN_BROWSER:\n raise\n # Otherwise, this is pytest test collection so let it go.\n\n\nasync def fetch_bytes(url: str, kwargs: dict[str, str]) -> IO[bytes]:\n parsed_url = urlparse(url)\n if parsed_url.scheme == \"emfs\":\n return open(parsed_url.path, \"rb\")\n if parsed_url.scheme == \"file\":\n result_bytes = (await loadBinaryFile(\"\", parsed_url.path)).to_bytes()\n else:\n result_bytes = await (await pyfetch(url, **kwargs)).bytes()\n return BytesIO(result_bytes)\n\n\nasync def fetch_string(url: str, kwargs: dict[str, str]) -> str:\n return await (await pyfetch(url, **kwargs)).string()\n\n\n__all__ = [\n \"fetch_bytes\",\n \"fetch_string\",\n \"REPODATA_INFO\",\n \"REPODATA_PACKAGES\",\n \"loadedPackages\",\n \"loadDynlib\",\n \"loadPackage\",\n]\n", "path": "packages/micropip/src/micropip/_compat_in_pyodide.py"}]} | 1,429 | 182 |
gh_patches_debug_35885 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-264 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[master]Use tf.ResourceVariable to store model
Currently we store the model as a <string, ndarray> map. When using tf.optimizer.apply_gradient() to update the model, we need to convert the map to ResourceVariable and back. It is better to change the model to a <string, ResourceVariable> map to avoid copying and conversion.
</issue>
<code>
[start of elasticdl/master/servicer.py]
1 import threading
2
3 from proto import master_pb2
4 from proto import master_pb2_grpc
5 from util.converter import NdarrayToTensor, TensorToNdarray
6
7
8 class MasterServicer(master_pb2_grpc.MasterServicer):
9 """Master service implementation"""
10
11 def __init__(self, logger, grads_to_wait):
12 self.logger = logger
13 self._lock = threading.Lock()
14 # TODO: random initialization
15 self._model = {}
16 self._version = 0
17 self._gradient_sum = {}
18 self._grad_to_wait = grads_to_wait
19 self._grad_n = 0
20
21 def GetTask(self, request, context):
22 # TODO: implent task queues. Return an empty task for now.
23 res = master_pb2.Task()
24 res.shard_file_name = ""
25 res.model_version = self._version
26 return res
27
28 def GetModel(self, request, context):
29 if request.min_version > self._version:
30 err_msg = (
31 "Requested version %d not available yet, current version: %d"
32 % (request.min_version, self._version)
33 )
34 self.logger.warning(err_msg)
35 raise ValueError(err_msg)
36
37 res = master_pb2.Model()
38 with self._lock:
39 res.version = self._version
40 for k, v in self._model.items():
41 res.param[k].CopyFrom(NdarrayToTensor(v))
42 return res
43
44 def ReportTaskResult(self, request, context):
45 if request.model_version > self._version:
46 err_msg = "Model version %d out of range, current version: %d" % (
47 request.model_version,
48 self._version,
49 )
50 self.logger.warning(err_msg)
51 raise ValueError(err_msg)
52
53 res = master_pb2.ReportTaskResultReply()
54 if request.model_version < self._version:
55 self.logger.warning(
56 "Task result for outdated version %d dropped",
57 request.model_version,
58 )
59 res.accepted = False
60 res.model_version = self._version
61 return res
62
63 if request.err_message:
64 self.logger.warning("Worker error: %s" % request.err_message)
65 res.accepted = False
66 res.model_version = self._version
67 return res
68
69 # TODO: Update task queue with task_id
70 with self._lock:
71 tmp = {}
72 # Do sanity check before accumulating gradients.
73 for k, v in request.gradient.items():
74 if k not in self._model:
75 raise ValueError(
76 "Gradient key: %s is not part of model", k
77 )
78 arr = TensorToNdarray(v)
79 if arr.shape != self._model[k].shape:
80 raise ValueError(
81 "Gradient key: %s has incompatible dimension", k
82 )
83 tmp[k] = arr
84
85 for k, v in tmp.items():
86 if k in self._gradient_sum:
87 self._gradient_sum[k] = self._gradient_sum[k] + v
88 else:
89 self._gradient_sum[k] = v
90
91 self._grad_n += 1
92 if self._grad_n >= self._grad_to_wait:
93 # TODO: update model
94 self._version += 1
95 self._gradient_sum.clear()
96 self._grad_n = 0
97 res.accepted = True
98 res.model_version = self._version
99 return res
100
[end of elasticdl/master/servicer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticdl/master/servicer.py b/elasticdl/master/servicer.py
--- a/elasticdl/master/servicer.py
+++ b/elasticdl/master/servicer.py
@@ -1,5 +1,7 @@
import threading
+import numpy as np
+import tensorflow as tf
from proto import master_pb2
from proto import master_pb2_grpc
from util.converter import NdarrayToTensor, TensorToNdarray
@@ -12,12 +14,21 @@
self.logger = logger
self._lock = threading.Lock()
# TODO: random initialization
+ # A <string, tf.ResourceVariable> map. We use tf.ResourceVariable
+ # instead ndarray to avoid copying and conversion when calling
+ # optimizer's apply_gradients() function.
self._model = {}
self._version = 0
self._gradient_sum = {}
self._grad_to_wait = grads_to_wait
self._grad_n = 0
+ def _set_model_var(self, name, value):
+ """Add or set model variable. Value should be a float32 ndarray"""
+ if value.dtype != np.float32:
+ raise ValueError("Value should be a float32 numpy array")
+ self._model[name] = tf.Variable(value, name=name, use_resource=True)
+
def GetTask(self, request, context):
# TODO: implent task queues. Return an empty task for now.
res = master_pb2.Task()
@@ -38,7 +49,7 @@
with self._lock:
res.version = self._version
for k, v in self._model.items():
- res.param[k].CopyFrom(NdarrayToTensor(v))
+ res.param[k].CopyFrom(NdarrayToTensor(v.numpy()))
return res
def ReportTaskResult(self, request, context):
@@ -76,7 +87,7 @@
"Gradient key: %s is not part of model", k
)
arr = TensorToNdarray(v)
- if arr.shape != self._model[k].shape:
+ if arr.shape != self._model[k].numpy().shape:
raise ValueError(
"Gradient key: %s has incompatible dimension", k
)
| {"golden_diff": "diff --git a/elasticdl/master/servicer.py b/elasticdl/master/servicer.py\n--- a/elasticdl/master/servicer.py\n+++ b/elasticdl/master/servicer.py\n@@ -1,5 +1,7 @@\n import threading\n+import numpy as np\n \n+import tensorflow as tf\n from proto import master_pb2\n from proto import master_pb2_grpc\n from util.converter import NdarrayToTensor, TensorToNdarray\n@@ -12,12 +14,21 @@\n self.logger = logger\n self._lock = threading.Lock()\n # TODO: random initialization\n+ # A <string, tf.ResourceVariable> map. We use tf.ResourceVariable\n+ # instead ndarray to avoid copying and conversion when calling\n+ # optimizer's apply_gradients() function.\n self._model = {}\n self._version = 0\n self._gradient_sum = {}\n self._grad_to_wait = grads_to_wait\n self._grad_n = 0\n \n+ def _set_model_var(self, name, value):\n+ \"\"\"Add or set model variable. Value should be a float32 ndarray\"\"\"\n+ if value.dtype != np.float32:\n+ raise ValueError(\"Value should be a float32 numpy array\")\n+ self._model[name] = tf.Variable(value, name=name, use_resource=True)\n+\n def GetTask(self, request, context):\n # TODO: implent task queues. Return an empty task for now.\n res = master_pb2.Task()\n@@ -38,7 +49,7 @@\n with self._lock:\n res.version = self._version\n for k, v in self._model.items():\n- res.param[k].CopyFrom(NdarrayToTensor(v))\n+ res.param[k].CopyFrom(NdarrayToTensor(v.numpy()))\n return res\n \n def ReportTaskResult(self, request, context):\n@@ -76,7 +87,7 @@\n \"Gradient key: %s is not part of model\", k\n )\n arr = TensorToNdarray(v)\n- if arr.shape != self._model[k].shape:\n+ if arr.shape != self._model[k].numpy().shape:\n raise ValueError(\n \"Gradient key: %s has incompatible dimension\", k\n )\n", "issue": "[master]Use tf.ResourceVariable to store model\nCurrently we store model as a <string, ndarray> map. when using tf.optimizer.apply_gradient() to update model, we need to convert the map to ResourceVariable and back. It is better to change model to a <string, ResourceVariable> map to avoid copy and conversion.\n", "before_files": [{"content": "import threading\n\nfrom proto import master_pb2\nfrom proto import master_pb2_grpc\nfrom util.converter import NdarrayToTensor, TensorToNdarray\n\n\nclass MasterServicer(master_pb2_grpc.MasterServicer):\n \"\"\"Master service implementation\"\"\"\n\n def __init__(self, logger, grads_to_wait):\n self.logger = logger\n self._lock = threading.Lock()\n # TODO: random initialization\n self._model = {}\n self._version = 0\n self._gradient_sum = {}\n self._grad_to_wait = grads_to_wait\n self._grad_n = 0\n\n def GetTask(self, request, context):\n # TODO: implent task queues. 
Return an empty task for now.\n res = master_pb2.Task()\n res.shard_file_name = \"\"\n res.model_version = self._version\n return res\n\n def GetModel(self, request, context):\n if request.min_version > self._version:\n err_msg = (\n \"Requested version %d not available yet, current version: %d\"\n % (request.min_version, self._version)\n )\n self.logger.warning(err_msg)\n raise ValueError(err_msg)\n\n res = master_pb2.Model()\n with self._lock:\n res.version = self._version\n for k, v in self._model.items():\n res.param[k].CopyFrom(NdarrayToTensor(v))\n return res\n\n def ReportTaskResult(self, request, context):\n if request.model_version > self._version:\n err_msg = \"Model version %d out of range, current version: %d\" % (\n request.model_version,\n self._version,\n )\n self.logger.warning(err_msg)\n raise ValueError(err_msg)\n\n res = master_pb2.ReportTaskResultReply()\n if request.model_version < self._version:\n self.logger.warning(\n \"Task result for outdated version %d dropped\",\n request.model_version,\n )\n res.accepted = False\n res.model_version = self._version\n return res\n\n if request.err_message:\n self.logger.warning(\"Worker error: %s\" % request.err_message)\n res.accepted = False\n res.model_version = self._version\n return res\n\n # TODO: Update task queue with task_id\n with self._lock:\n tmp = {}\n # Do sanity check before accumulating gradients.\n for k, v in request.gradient.items():\n if k not in self._model:\n raise ValueError(\n \"Gradient key: %s is not part of model\", k\n )\n arr = TensorToNdarray(v)\n if arr.shape != self._model[k].shape:\n raise ValueError(\n \"Gradient key: %s has incompatible dimension\", k\n )\n tmp[k] = arr\n\n for k, v in tmp.items():\n if k in self._gradient_sum:\n self._gradient_sum[k] = self._gradient_sum[k] + v\n else:\n self._gradient_sum[k] = v\n\n self._grad_n += 1\n if self._grad_n >= self._grad_to_wait:\n # TODO: update model\n self._version += 1\n self._gradient_sum.clear()\n self._grad_n = 0\n res.accepted = True\n res.model_version = self._version\n return res\n", "path": "elasticdl/master/servicer.py"}]} | 1,523 | 496 |
gh_patches_debug_189 | rasdani/github-patches | git_diff | qtile__qtile-1837 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
0.16.0: impossible to build from github sources (to run tests)
<!--
Please do not ask general questions here! There are [community
contact](https://github.com/qtile/qtile#community) options for that.
-->
# Issue description
Hi! I package qtile for Arch Linux. I'm currently trying to build 0.16.0.
Usually I also run the test suite against the release (although there are still problems: #1352 and #1130) to be able to at least ensure some kind of compatibility with the Arch Linux provided python3 ecosystem.
However, running tests is only possible with the github source tarballs (because the test files are included), which unfortunately is not the case for the pypi tarballs.
When running `python setup.py build` for 0.16.0 I am now getting this:
```
Traceback (most recent call last):
File "setup.py", line 91, in <module>
setup(
File "/usr/lib/python3.8/site-packages/setuptools/__init__.py", line 165, in setup
return distutils.core.setup(**attrs)
File "/usr/lib/python3.8/distutils/core.py", line 108, in setup
_setup_distribution = dist = klass(attrs)
File "/usr/lib/python3.8/site-packages/setuptools/dist.py", line 429, in __init__
_Distribution.__init__(self, {
File "/usr/lib/python3.8/distutils/dist.py", line 292, in __init__
self.finalize_options()
File "/usr/lib/python3.8/site-packages/setuptools/dist.py", line 721, in finalize_options
ep(self)
File "/usr/lib/python3.8/site-packages/setuptools/dist.py", line 728, in _finalize_setup_keywords
ep.load()(self, ep.name, value)
File "/usr/lib/python3.8/site-packages/setuptools_scm/integration.py", line 17, in version_keyword
dist.metadata.version = _get_version(config)
File "/usr/lib/python3.8/site-packages/setuptools_scm/__init__.py", line 148, in _get_version
parsed_version = _do_parse(config)
File "/usr/lib/python3.8/site-packages/setuptools_scm/__init__.py", line 110, in _do_parse
raise LookupError(
LookupError: setuptools-scm was unable to detect version for '/build/qtile/src/qtile-0.16.0'.
Make sure you're either building from a fully intact git repository or PyPI tarballs. Most other sources (such as GitHub's tarballs, a git checkout without the .git folder) don't contain the necessary metadata and will not work.
For example, if you're using pip, instead of https://github.com/user/proj/archive/master.zip use git+https://github.com/user/proj.git#egg=proj
```
It seems that setuptools_scm has been introduced. Unfortunately, this breaks the build for me.
It would be great to either include the tests in the pypi sdist tarballs or to start using [signed tags](https://github.com/qtile/qtile/tags) again, as then I can rely upon signed tags and a git repository (note: the latter might not help other distributions, as they have different policies).
If you choose the latter (both would be great too), please make sure to have @flacjacket sign the key of @tych0 so that a clear chain of trust can be established.
# Qtile version
0.16.0
# Stack traces
n/a
# Configuration
n/a
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) 2008 Aldo Cortesi
4 # Copyright (c) 2011 Mounier Florian
5 # Copyright (c) 2012 dmpayton
6 # Copyright (c) 2014 Sean Vig
7 # Copyright (c) 2014 roger
8 # Copyright (c) 2014 Pedro Algarvio
9 # Copyright (c) 2014-2015 Tycho Andersen
10 #
11 # Permission is hereby granted, free of charge, to any person obtaining a copy
12 # of this software and associated documentation files (the "Software"), to deal
13 # in the Software without restriction, including without limitation the rights
14 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15 # copies of the Software, and to permit persons to whom the Software is
16 # furnished to do so, subject to the following conditions:
17 #
18 # The above copyright notice and this permission notice shall be included in
19 # all copies or substantial portions of the Software.
20 #
21 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
24 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
26 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
27 # SOFTWARE.
28
29 import sys
30 import textwrap
31
32 from setuptools import setup
33 from setuptools.command.install import install
34
35
36 class CheckCairoXcb(install):
37 def cairo_xcb_check(self):
38 try:
39 from cairocffi import cairo
40 cairo.cairo_xcb_surface_create
41 return True
42 except AttributeError:
43 return False
44
45 def finalize_options(self):
46 if not self.cairo_xcb_check():
47
48 print(textwrap.dedent("""
49
50 It looks like your cairocffi was not built with xcffib support. To fix this:
51
52 - Ensure a recent xcffib is installed (pip install 'xcffib>=0.5.0')
53 - The pip cache is cleared (remove ~/.cache/pip, if it exists)
54 - Reinstall cairocffi, either:
55
56 pip install --no-deps --ignore-installed cairocffi
57
58 or
59
60 pip uninstall cairocffi && pip install cairocffi
61 """))
62
63 sys.exit(1)
64 install.finalize_options(self)
65
66
67 def get_cffi_modules():
68 cffi_modules = [
69 'libqtile/pango_ffi_build.py:pango_ffi',
70 'libqtile/backend/x11/xcursors_ffi_build.py:xcursors_ffi',
71 ]
72 try:
73 from cffi.error import PkgConfigError
74 from cffi.pkgconfig import call
75 except ImportError:
76 # technically all ffi defined above wont be built
77 print('CFFI package is missing')
78 else:
79 try:
80 call('libpulse', '--libs')
81 except PkgConfigError:
82 print('Failed to find pulseaudio headers. '
83 'PulseVolume widget will be unavailable')
84 else:
85 cffi_modules.append(
86 'libqtile/widget/pulseaudio_ffi.py:pulseaudio_ffi'
87 )
88 return cffi_modules
89
90
91 setup(
92 cmdclass={'install': CheckCairoXcb},
93 use_scm_version=True,
94 cffi_modules=get_cffi_modules(),
95 install_requires=["cffi>=1.0.0"],
96 )
97
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -93,4 +93,5 @@
use_scm_version=True,
cffi_modules=get_cffi_modules(),
install_requires=["cffi>=1.0.0"],
+ include_package_data=True,
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -93,4 +93,5 @@\n use_scm_version=True,\n cffi_modules=get_cffi_modules(),\n install_requires=[\"cffi>=1.0.0\"],\n+ include_package_data=True,\n )\n", "issue": "0.16.0: impossible to build from github sources (to run tests)\n<!--\r\nPlease do not ask general questions here! There are [community\r\ncontact](https://github.com/qtile/qtile#community) options for that.\r\n-->\r\n\r\n# Issue description\r\n\r\nHi! I package qtile for Arch Linux. I'm currently trying to build 0.16.0.\r\nUsually I also run the test suite against the release (although there are still problems: #1352 and #1130) to be able to at least ensure some kind of compatibility with the Arch Linux provided python3 ecosystem.\r\nHowever, running tests is only possible with the github source tarballs (because the test files are included), which unfortunately is not the case for the pypi tarballs.\r\n\r\nWhen running `python setup.py build` for 0.16.0 I am now getting this:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"setup.py\", line 91, in <module>\r\n setup(\r\n File \"/usr/lib/python3.8/site-packages/setuptools/__init__.py\", line 165, in setup\r\n return distutils.core.setup(**attrs)\r\n File \"/usr/lib/python3.8/distutils/core.py\", line 108, in setup\r\n _setup_distribution = dist = klass(attrs)\r\n File \"/usr/lib/python3.8/site-packages/setuptools/dist.py\", line 429, in __init__\r\n _Distribution.__init__(self, {\r\n File \"/usr/lib/python3.8/distutils/dist.py\", line 292, in __init__\r\n self.finalize_options()\r\n File \"/usr/lib/python3.8/site-packages/setuptools/dist.py\", line 721, in finalize_options\r\n ep(self)\r\n File \"/usr/lib/python3.8/site-packages/setuptools/dist.py\", line 728, in _finalize_setup_keywords\r\n ep.load()(self, ep.name, value)\r\n File \"/usr/lib/python3.8/site-packages/setuptools_scm/integration.py\", line 17, in version_keyword\r\n dist.metadata.version = _get_version(config)\r\n File \"/usr/lib/python3.8/site-packages/setuptools_scm/__init__.py\", line 148, in _get_version\r\n parsed_version = _do_parse(config)\r\n File \"/usr/lib/python3.8/site-packages/setuptools_scm/__init__.py\", line 110, in _do_parse\r\n raise LookupError(\r\nLookupError: setuptools-scm was unable to detect version for '/build/qtile/src/qtile-0.16.0'.\r\n\r\nMake sure you're either building from a fully intact git repository or PyPI tarballs. Most other sources (such as GitHub's tarballs, a git checkout without the .git folder) don't contain the necessary metadata and will not work.\r\n\r\nFor example, if you're using pip, instead of https://github.com/user/proj/archive/master.zip use git+https://github.com/user/proj.git#egg=proj\r\n```\r\n\r\nIt seems that setuptools_scm has been introduced. 
Unfortunately, this breaks the build for me.\r\n\r\nIt would be great to either include the tests in the pypi sdist tarballs or to start using [signed tags](https://github.com/qtile/qtile/tags) again, as then I can rely upon signed tags and a git repository (note: the latter might not help other distributions, as they have different policies).\r\nIf you choose the latter (both would be great too), please make sure to have @flacjacket sign the key of @tych0 so that a clear chain of trust can be established.\r\n\r\n# Qtile version\r\n\r\n0.16.0\r\n\r\n# Stack traces\r\n\r\nn/a\r\n\r\n# Configuration\r\n\r\nn/a\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) 2008 Aldo Cortesi\n# Copyright (c) 2011 Mounier Florian\n# Copyright (c) 2012 dmpayton\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 roger\n# Copyright (c) 2014 Pedro Algarvio\n# Copyright (c) 2014-2015 Tycho Andersen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport sys\nimport textwrap\n\nfrom setuptools import setup\nfrom setuptools.command.install import install\n\n\nclass CheckCairoXcb(install):\n def cairo_xcb_check(self):\n try:\n from cairocffi import cairo\n cairo.cairo_xcb_surface_create\n return True\n except AttributeError:\n return False\n\n def finalize_options(self):\n if not self.cairo_xcb_check():\n\n print(textwrap.dedent(\"\"\"\n\n It looks like your cairocffi was not built with xcffib support. To fix this:\n\n - Ensure a recent xcffib is installed (pip install 'xcffib>=0.5.0')\n - The pip cache is cleared (remove ~/.cache/pip, if it exists)\n - Reinstall cairocffi, either:\n\n pip install --no-deps --ignore-installed cairocffi\n\n or\n\n pip uninstall cairocffi && pip install cairocffi\n \"\"\"))\n\n sys.exit(1)\n install.finalize_options(self)\n\n\ndef get_cffi_modules():\n cffi_modules = [\n 'libqtile/pango_ffi_build.py:pango_ffi',\n 'libqtile/backend/x11/xcursors_ffi_build.py:xcursors_ffi',\n ]\n try:\n from cffi.error import PkgConfigError\n from cffi.pkgconfig import call\n except ImportError:\n # technically all ffi defined above wont be built\n print('CFFI package is missing')\n else:\n try:\n call('libpulse', '--libs')\n except PkgConfigError:\n print('Failed to find pulseaudio headers. 
'\n 'PulseVolume widget will be unavailable')\n else:\n cffi_modules.append(\n 'libqtile/widget/pulseaudio_ffi.py:pulseaudio_ffi'\n )\n return cffi_modules\n\n\nsetup(\n cmdclass={'install': CheckCairoXcb},\n use_scm_version=True,\n cffi_modules=get_cffi_modules(),\n install_requires=[\"cffi>=1.0.0\"],\n)\n", "path": "setup.py"}]} | 2,296 | 69 |
gh_patches_debug_37751 | rasdani/github-patches | git_diff | mozilla__pontoon-2553 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
When editing existing strings in rich editor cursor jumps to the end
As reported on Matrix:
"When editing existing strings which have translations for "One" and "More", after each input, the cursor jumps to the end of the string."
The problem no longer appears after reverting back to f765fa994d71f6ea21c99dcd527ead4d0c4e1ea0.
</issue>
<code>
[start of pontoon/machinery/views.py]
1 import json
2 import logging
3 import requests
4 import xml.etree.ElementTree as ET
5
6 from sacremoses import MosesDetokenizer
7 from urllib.parse import quote
8
9 from django.conf import settings
10 from django.contrib.auth.decorators import login_required
11 from django.core.paginator import EmptyPage, Paginator
12 from django.http import JsonResponse
13 from django.shortcuts import render
14 from django.template.loader import get_template
15 from django.utils.datastructures import MultiValueDictKeyError
16
17 from pontoon.base import utils
18 from pontoon.base.models import Entity, Locale, Project, Translation
19 from pontoon.machinery.utils import (
20 get_concordance_search_data,
21 get_google_translate_data,
22 get_translation_memory_data,
23 )
24
25
26 log = logging.getLogger(__name__)
27
28
29 def machinery(request):
30 locale = utils.get_project_locale_from_request(request, Locale.objects) or "en-GB"
31
32 return render(
33 request,
34 "machinery/machinery.html",
35 {
36 "locale": Locale.objects.get(code=locale),
37 "locales": Locale.objects.all(),
38 "is_google_translate_supported": bool(settings.GOOGLE_TRANSLATE_API_KEY),
39 "is_microsoft_translator_supported": bool(
40 settings.MICROSOFT_TRANSLATOR_API_KEY
41 ),
42 "is_systran_translate_supported": bool(settings.SYSTRAN_TRANSLATE_API_KEY),
43 },
44 )
45
46
47 def translation_memory(request):
48 """Get translations from internal translations memory."""
49 try:
50 text = request.GET["text"]
51 locale = Locale.objects.get(code=request.GET["locale"])
52 pk = request.GET.get("pk", None)
53
54 if pk is not None:
55 pk = int(pk)
56
57 except (Locale.DoesNotExist, MultiValueDictKeyError, ValueError) as e:
58 return JsonResponse(
59 {"status": False, "message": f"Bad Request: {e}"},
60 status=400,
61 )
62
63 data = get_translation_memory_data(text, locale, pk)
64 return JsonResponse(data, safe=False)
65
66
67 def concordance_search(request):
68 """Search for translations in the internal translations memory."""
69 try:
70 text = request.GET["text"]
71 locale = Locale.objects.get(code=request.GET["locale"])
72 page_results_limit = int(request.GET.get("limit", 100))
73 page = int(request.GET.get("page", 1))
74 except (Locale.DoesNotExist, MultiValueDictKeyError, ValueError) as e:
75 return JsonResponse(
76 {"status": False, "message": f"Bad Request: {e}"},
77 status=400,
78 )
79
80 paginator = Paginator(get_concordance_search_data(text, locale), page_results_limit)
81
82 try:
83 data = paginator.page(page)
84 except EmptyPage:
85 return JsonResponse({"results": [], "has_next": False})
86
87 # ArrayAgg (used in get_concordance_search_data()) does not support using
88 # distinct=True in combination with ordering, so we need to do one of them
89 # manually - after pagination, to reduce the number of rows processed.
90 projects = Project.objects.order_by("disabled", "-priority").values_list(
91 "name", flat=True
92 )
93 for r in data.object_list:
94 r["project_names"] = [p for p in projects if p in r["project_names"]]
95
96 return JsonResponse(
97 {"results": data.object_list, "has_next": data.has_next()}, safe=False
98 )
99
100
101 @login_required(redirect_field_name="", login_url="/403")
102 def microsoft_translator(request):
103 """Get translation from Microsoft machine translation service."""
104 try:
105 text = request.GET["text"]
106 locale_code = request.GET["locale"]
107
108 if not locale_code:
109 raise ValueError("Locale code is empty")
110
111 api_key = settings.MICROSOFT_TRANSLATOR_API_KEY
112 if not api_key:
113 raise ValueError("Missing api key")
114
115 except (MultiValueDictKeyError, ValueError) as e:
116 return JsonResponse(
117 {"status": False, "message": f"Bad Request: {e}"},
118 status=400,
119 )
120
121 url = "https://api.cognitive.microsofttranslator.com/translate"
122 headers = {"Ocp-Apim-Subscription-Key": api_key, "Content-Type": "application/json"}
123 payload = {
124 "api-version": "3.0",
125 "from": "en",
126 "to": locale_code,
127 "textType": "html",
128 }
129 body = [{"Text": text}]
130
131 try:
132 r = requests.post(url, params=payload, headers=headers, json=body)
133 r.raise_for_status()
134
135 root = json.loads(r.content)
136
137 if "error" in root:
138 log.error(f"Microsoft Translator error: {root}")
139 return JsonResponse(
140 {"status": False, "message": f"Bad Request: {root}"},
141 status=400,
142 )
143
144 return JsonResponse({"translation": root[0]["translations"][0]["text"]})
145
146 except requests.exceptions.RequestException as e:
147 return JsonResponse(
148 {"status": False, "message": f"{e}"},
149 status=r.status_code,
150 )
151
152
153 @login_required(redirect_field_name="", login_url="/403")
154 def google_translate(request):
155 """Get translation from Google machine translation service."""
156 try:
157 text = request.GET["text"]
158 locale_code = request.GET["locale"]
159
160 if not locale_code:
161 raise ValueError("Locale code is empty")
162
163 except (MultiValueDictKeyError, ValueError) as e:
164 return JsonResponse(
165 {"status": False, "message": f"Bad Request: {e}"},
166 status=400,
167 )
168
169 data = get_google_translate_data(text, locale_code)
170
171 if not data["status"]:
172 return JsonResponse(data, status=400)
173
174 return JsonResponse(data)
175
176
177 @login_required(redirect_field_name="", login_url="/403")
178 def systran_translate(request):
179 """Get translations from SYSTRAN machine translation service."""
180 try:
181 text = request.GET["text"]
182 locale_code = request.GET["locale"]
183
184 if not locale_code:
185 raise ValueError("Locale code is empty")
186
187 locale = Locale.objects.filter(systran_translate_code=locale_code).first()
188
189 api_key = settings.SYSTRAN_TRANSLATE_API_KEY
190 if not api_key:
191 raise ValueError("Missing api key")
192
193 except (Locale.DoesNotExist, MultiValueDictKeyError, ValueError) as e:
194 return JsonResponse(
195 {"status": False, "message": f"Bad Request: {e}"},
196 status=400,
197 )
198
199 url = "https://api-translate.systran.net/translation/text/translate"
200
201 payload = {
202 "key": api_key,
203 "input": text,
204 "source": "en",
205 "target": locale_code,
206 "profile": locale.systran_translate_profile,
207 "format": "text",
208 }
209
210 try:
211 r = requests.post(url, params=payload)
212 r.raise_for_status()
213
214 root = json.loads(r.content)
215
216 if "error" in root:
217 log.error(f"SYSTRAN error: {root}")
218 return JsonResponse(
219 {"status": False, "message": f"Bad Request: {root}"},
220 status=400,
221 )
222
223 return JsonResponse({"translation": root["outputs"][0]["output"]})
224
225 except requests.exceptions.RequestException as e:
226 return JsonResponse(
227 {"status": False, "message": f"{e}"},
228 status=r.status_code,
229 )
230
231
232 def caighdean(request):
233 """Get translation from Caighdean machine translation service."""
234 try:
235 entityid = int(request.GET["id"])
236 entity = Entity.objects.get(id=entityid)
237 except (Entity.DoesNotExist, MultiValueDictKeyError, ValueError) as e:
238 return JsonResponse(
239 {"status": False, "message": f"Bad Request: {e}"},
240 status=400,
241 )
242
243 try:
244 text = entity.translation_set.get(
245 locale__code="gd",
246 plural_form=None if entity.string_plural == "" else 0,
247 approved=True,
248 ).string
249 except Translation.DoesNotExist:
250 return JsonResponse({})
251
252 url = "https://cadhan.com/api/intergaelic/3.0"
253
254 data = {
255 "teacs": text,
256 "foinse": "gd",
257 }
258
259 try:
260 r = requests.post(url, data=data)
261 r.raise_for_status()
262
263 root = json.loads(r.content)
264 tokens = [x[1] for x in root]
265 translation = (
266 MosesDetokenizer().detokenize(tokens, return_str=True).replace("\\n", "\n")
267 )
268
269 return JsonResponse({"original": text, "translation": translation})
270
271 except requests.exceptions.RequestException as e:
272 return JsonResponse(
273 {"status": False, "message": f"{e}"},
274 status=r.status_code,
275 )
276
277
278 def microsoft_terminology(request):
279 """Get translations from Microsoft Terminology Service."""
280 try:
281 text = request.GET["text"]
282 locale_code = request.GET["locale"]
283
284 if not locale_code:
285 raise ValueError("Locale code is empty")
286
287 except (MultiValueDictKeyError, ValueError) as e:
288 return JsonResponse(
289 {"status": False, "message": f"Bad Request: {e}"},
290 status=400,
291 )
292
293 obj = {}
294 url = "https://api.terminology.microsoft.com/Terminology.svc"
295 headers = {
296 "SOAPAction": (
297 '"http://api.terminology.microsoft.com/terminology/Terminology/GetTranslations"'
298 ),
299 "Content-Type": "text/xml; charset=utf-8",
300 }
301 payload = {
302 "text": quote(text.encode("utf-8")),
303 "to": locale_code,
304 "max_result": 5,
305 }
306 template = get_template("machinery/microsoft_terminology.jinja")
307
308 payload = template.render(payload)
309
310 try:
311 r = requests.post(url, data=payload, headers=headers)
312 r.raise_for_status()
313
314 translations = []
315 namespaces = {"a": "https://api.terminology.microsoft.com/terminology"}
316 root = ET.fromstring(r.content)
317 results = root.find(
318 ".//{http://api.terminology.microsoft.com/terminology}GetTranslationsResult"
319 )
320
321 if results is not None:
322 for translation in results:
323 translations.append(
324 {
325 "source": translation.find("a:OriginalText", namespaces).text,
326 "target": translation.find(
327 ".//a:TranslatedText", namespaces
328 ).text,
329 }
330 )
331
332 obj["translations"] = translations
333 return JsonResponse(obj)
334
335 except requests.exceptions.RequestException as e:
336 return JsonResponse(
337 {"status": False, "message": f"{e}"},
338 status=r.status_code,
339 )
340
[end of pontoon/machinery/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pontoon/machinery/views.py b/pontoon/machinery/views.py
--- a/pontoon/machinery/views.py
+++ b/pontoon/machinery/views.py
@@ -128,6 +128,7 @@
}
body = [{"Text": text}]
+ r = None
try:
r = requests.post(url, params=payload, headers=headers, json=body)
r.raise_for_status()
@@ -146,7 +147,7 @@
except requests.exceptions.RequestException as e:
return JsonResponse(
{"status": False, "message": f"{e}"},
- status=r.status_code,
+ status=r.status_code if r is not None else 500,
)
@@ -207,6 +208,7 @@
"format": "text",
}
+ r = None
try:
r = requests.post(url, params=payload)
r.raise_for_status()
@@ -225,7 +227,7 @@
except requests.exceptions.RequestException as e:
return JsonResponse(
{"status": False, "message": f"{e}"},
- status=r.status_code,
+ status=r.status_code if r is not None else 500,
)
@@ -256,6 +258,7 @@
"foinse": "gd",
}
+ r = None
try:
r = requests.post(url, data=data)
r.raise_for_status()
@@ -271,7 +274,7 @@
except requests.exceptions.RequestException as e:
return JsonResponse(
{"status": False, "message": f"{e}"},
- status=r.status_code,
+ status=r.status_code if r is not None else 500,
)
@@ -307,6 +310,7 @@
payload = template.render(payload)
+ r = None
try:
r = requests.post(url, data=payload, headers=headers)
r.raise_for_status()
@@ -335,5 +339,5 @@
except requests.exceptions.RequestException as e:
return JsonResponse(
{"status": False, "message": f"{e}"},
- status=r.status_code,
+ status=r.status_code if r is not None else 500,
)
| {"golden_diff": "diff --git a/pontoon/machinery/views.py b/pontoon/machinery/views.py\n--- a/pontoon/machinery/views.py\n+++ b/pontoon/machinery/views.py\n@@ -128,6 +128,7 @@\n }\n body = [{\"Text\": text}]\n \n+ r = None\n try:\n r = requests.post(url, params=payload, headers=headers, json=body)\n r.raise_for_status()\n@@ -146,7 +147,7 @@\n except requests.exceptions.RequestException as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"{e}\"},\n- status=r.status_code,\n+ status=r.status_code if r is not None else 500,\n )\n \n \n@@ -207,6 +208,7 @@\n \"format\": \"text\",\n }\n \n+ r = None\n try:\n r = requests.post(url, params=payload)\n r.raise_for_status()\n@@ -225,7 +227,7 @@\n except requests.exceptions.RequestException as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"{e}\"},\n- status=r.status_code,\n+ status=r.status_code if r is not None else 500,\n )\n \n \n@@ -256,6 +258,7 @@\n \"foinse\": \"gd\",\n }\n \n+ r = None\n try:\n r = requests.post(url, data=data)\n r.raise_for_status()\n@@ -271,7 +274,7 @@\n except requests.exceptions.RequestException as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"{e}\"},\n- status=r.status_code,\n+ status=r.status_code if r is not None else 500,\n )\n \n \n@@ -307,6 +310,7 @@\n \n payload = template.render(payload)\n \n+ r = None\n try:\n r = requests.post(url, data=payload, headers=headers)\n r.raise_for_status()\n@@ -335,5 +339,5 @@\n except requests.exceptions.RequestException as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"{e}\"},\n- status=r.status_code,\n+ status=r.status_code if r is not None else 500,\n )\n", "issue": "When editing existing strings in rich editor cursor jumps to the end\nAs reported on Matrix:\r\n\r\n\"When editing existing strings which have translations for \"One\" and \"More\", after each input, the cursor jumps to the end of the string.\"\r\n\r\nThe problem no longer appears after reverting back to f765fa994d71f6ea21c99dcd527ead4d0c4e1ea0.\n", "before_files": [{"content": "import json\nimport logging\nimport requests\nimport xml.etree.ElementTree as ET\n\nfrom sacremoses import MosesDetokenizer\nfrom urllib.parse import quote\n\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import EmptyPage, Paginator\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\nfrom django.template.loader import get_template\nfrom django.utils.datastructures import MultiValueDictKeyError\n\nfrom pontoon.base import utils\nfrom pontoon.base.models import Entity, Locale, Project, Translation\nfrom pontoon.machinery.utils import (\n get_concordance_search_data,\n get_google_translate_data,\n get_translation_memory_data,\n)\n\n\nlog = logging.getLogger(__name__)\n\n\ndef machinery(request):\n locale = utils.get_project_locale_from_request(request, Locale.objects) or \"en-GB\"\n\n return render(\n request,\n \"machinery/machinery.html\",\n {\n \"locale\": Locale.objects.get(code=locale),\n \"locales\": Locale.objects.all(),\n \"is_google_translate_supported\": bool(settings.GOOGLE_TRANSLATE_API_KEY),\n \"is_microsoft_translator_supported\": bool(\n settings.MICROSOFT_TRANSLATOR_API_KEY\n ),\n \"is_systran_translate_supported\": bool(settings.SYSTRAN_TRANSLATE_API_KEY),\n },\n )\n\n\ndef translation_memory(request):\n \"\"\"Get translations from internal translations memory.\"\"\"\n try:\n text = request.GET[\"text\"]\n locale = Locale.objects.get(code=request.GET[\"locale\"])\n pk = 
request.GET.get(\"pk\", None)\n\n if pk is not None:\n pk = int(pk)\n\n except (Locale.DoesNotExist, MultiValueDictKeyError, ValueError) as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"Bad Request: {e}\"},\n status=400,\n )\n\n data = get_translation_memory_data(text, locale, pk)\n return JsonResponse(data, safe=False)\n\n\ndef concordance_search(request):\n \"\"\"Search for translations in the internal translations memory.\"\"\"\n try:\n text = request.GET[\"text\"]\n locale = Locale.objects.get(code=request.GET[\"locale\"])\n page_results_limit = int(request.GET.get(\"limit\", 100))\n page = int(request.GET.get(\"page\", 1))\n except (Locale.DoesNotExist, MultiValueDictKeyError, ValueError) as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"Bad Request: {e}\"},\n status=400,\n )\n\n paginator = Paginator(get_concordance_search_data(text, locale), page_results_limit)\n\n try:\n data = paginator.page(page)\n except EmptyPage:\n return JsonResponse({\"results\": [], \"has_next\": False})\n\n # ArrayAgg (used in get_concordance_search_data()) does not support using\n # distinct=True in combination with ordering, so we need to do one of them\n # manually - after pagination, to reduce the number of rows processed.\n projects = Project.objects.order_by(\"disabled\", \"-priority\").values_list(\n \"name\", flat=True\n )\n for r in data.object_list:\n r[\"project_names\"] = [p for p in projects if p in r[\"project_names\"]]\n\n return JsonResponse(\n {\"results\": data.object_list, \"has_next\": data.has_next()}, safe=False\n )\n\n\n@login_required(redirect_field_name=\"\", login_url=\"/403\")\ndef microsoft_translator(request):\n \"\"\"Get translation from Microsoft machine translation service.\"\"\"\n try:\n text = request.GET[\"text\"]\n locale_code = request.GET[\"locale\"]\n\n if not locale_code:\n raise ValueError(\"Locale code is empty\")\n\n api_key = settings.MICROSOFT_TRANSLATOR_API_KEY\n if not api_key:\n raise ValueError(\"Missing api key\")\n\n except (MultiValueDictKeyError, ValueError) as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"Bad Request: {e}\"},\n status=400,\n )\n\n url = \"https://api.cognitive.microsofttranslator.com/translate\"\n headers = {\"Ocp-Apim-Subscription-Key\": api_key, \"Content-Type\": \"application/json\"}\n payload = {\n \"api-version\": \"3.0\",\n \"from\": \"en\",\n \"to\": locale_code,\n \"textType\": \"html\",\n }\n body = [{\"Text\": text}]\n\n try:\n r = requests.post(url, params=payload, headers=headers, json=body)\n r.raise_for_status()\n\n root = json.loads(r.content)\n\n if \"error\" in root:\n log.error(f\"Microsoft Translator error: {root}\")\n return JsonResponse(\n {\"status\": False, \"message\": f\"Bad Request: {root}\"},\n status=400,\n )\n\n return JsonResponse({\"translation\": root[0][\"translations\"][0][\"text\"]})\n\n except requests.exceptions.RequestException as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"{e}\"},\n status=r.status_code,\n )\n\n\n@login_required(redirect_field_name=\"\", login_url=\"/403\")\ndef google_translate(request):\n \"\"\"Get translation from Google machine translation service.\"\"\"\n try:\n text = request.GET[\"text\"]\n locale_code = request.GET[\"locale\"]\n\n if not locale_code:\n raise ValueError(\"Locale code is empty\")\n\n except (MultiValueDictKeyError, ValueError) as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"Bad Request: {e}\"},\n status=400,\n )\n\n data = get_google_translate_data(text, 
locale_code)\n\n if not data[\"status\"]:\n return JsonResponse(data, status=400)\n\n return JsonResponse(data)\n\n\n@login_required(redirect_field_name=\"\", login_url=\"/403\")\ndef systran_translate(request):\n \"\"\"Get translations from SYSTRAN machine translation service.\"\"\"\n try:\n text = request.GET[\"text\"]\n locale_code = request.GET[\"locale\"]\n\n if not locale_code:\n raise ValueError(\"Locale code is empty\")\n\n locale = Locale.objects.filter(systran_translate_code=locale_code).first()\n\n api_key = settings.SYSTRAN_TRANSLATE_API_KEY\n if not api_key:\n raise ValueError(\"Missing api key\")\n\n except (Locale.DoesNotExist, MultiValueDictKeyError, ValueError) as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"Bad Request: {e}\"},\n status=400,\n )\n\n url = \"https://api-translate.systran.net/translation/text/translate\"\n\n payload = {\n \"key\": api_key,\n \"input\": text,\n \"source\": \"en\",\n \"target\": locale_code,\n \"profile\": locale.systran_translate_profile,\n \"format\": \"text\",\n }\n\n try:\n r = requests.post(url, params=payload)\n r.raise_for_status()\n\n root = json.loads(r.content)\n\n if \"error\" in root:\n log.error(f\"SYSTRAN error: {root}\")\n return JsonResponse(\n {\"status\": False, \"message\": f\"Bad Request: {root}\"},\n status=400,\n )\n\n return JsonResponse({\"translation\": root[\"outputs\"][0][\"output\"]})\n\n except requests.exceptions.RequestException as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"{e}\"},\n status=r.status_code,\n )\n\n\ndef caighdean(request):\n \"\"\"Get translation from Caighdean machine translation service.\"\"\"\n try:\n entityid = int(request.GET[\"id\"])\n entity = Entity.objects.get(id=entityid)\n except (Entity.DoesNotExist, MultiValueDictKeyError, ValueError) as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"Bad Request: {e}\"},\n status=400,\n )\n\n try:\n text = entity.translation_set.get(\n locale__code=\"gd\",\n plural_form=None if entity.string_plural == \"\" else 0,\n approved=True,\n ).string\n except Translation.DoesNotExist:\n return JsonResponse({})\n\n url = \"https://cadhan.com/api/intergaelic/3.0\"\n\n data = {\n \"teacs\": text,\n \"foinse\": \"gd\",\n }\n\n try:\n r = requests.post(url, data=data)\n r.raise_for_status()\n\n root = json.loads(r.content)\n tokens = [x[1] for x in root]\n translation = (\n MosesDetokenizer().detokenize(tokens, return_str=True).replace(\"\\\\n\", \"\\n\")\n )\n\n return JsonResponse({\"original\": text, \"translation\": translation})\n\n except requests.exceptions.RequestException as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"{e}\"},\n status=r.status_code,\n )\n\n\ndef microsoft_terminology(request):\n \"\"\"Get translations from Microsoft Terminology Service.\"\"\"\n try:\n text = request.GET[\"text\"]\n locale_code = request.GET[\"locale\"]\n\n if not locale_code:\n raise ValueError(\"Locale code is empty\")\n\n except (MultiValueDictKeyError, ValueError) as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"Bad Request: {e}\"},\n status=400,\n )\n\n obj = {}\n url = \"https://api.terminology.microsoft.com/Terminology.svc\"\n headers = {\n \"SOAPAction\": (\n '\"http://api.terminology.microsoft.com/terminology/Terminology/GetTranslations\"'\n ),\n \"Content-Type\": \"text/xml; charset=utf-8\",\n }\n payload = {\n \"text\": quote(text.encode(\"utf-8\")),\n \"to\": locale_code,\n \"max_result\": 5,\n }\n template = get_template(\"machinery/microsoft_terminology.jinja\")\n\n 
payload = template.render(payload)\n\n try:\n r = requests.post(url, data=payload, headers=headers)\n r.raise_for_status()\n\n translations = []\n namespaces = {\"a\": \"https://api.terminology.microsoft.com/terminology\"}\n root = ET.fromstring(r.content)\n results = root.find(\n \".//{http://api.terminology.microsoft.com/terminology}GetTranslationsResult\"\n )\n\n if results is not None:\n for translation in results:\n translations.append(\n {\n \"source\": translation.find(\"a:OriginalText\", namespaces).text,\n \"target\": translation.find(\n \".//a:TranslatedText\", namespaces\n ).text,\n }\n )\n\n obj[\"translations\"] = translations\n return JsonResponse(obj)\n\n except requests.exceptions.RequestException as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"{e}\"},\n status=r.status_code,\n )\n", "path": "pontoon/machinery/views.py"}]} | 3,897 | 522 |
gh_patches_debug_27358 | rasdani/github-patches | git_diff | modoboa__modoboa-759 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Passwords complexity
We must ensure passwords respect a minimum complexity.
See https://github.com/modoboa/modoboa-admin/issues/27
</issue>
<code>
[start of modoboa/core/forms.py]
1 # coding: utf-8
2
3 """Core forms."""
4
5 from django import forms
6 from django.utils.translation import ugettext as _, ugettext_lazy
7
8 from modoboa.core.models import User
9 from modoboa.lib import parameters
10
11
12 class LoginForm(forms.Form):
13 username = forms.CharField(
14 label=ugettext_lazy("Username"),
15 widget=forms.TextInput(attrs={"class": "form-control"})
16 )
17 password = forms.CharField(
18 label=ugettext_lazy("Password"),
19 widget=forms.PasswordInput(attrs={"class": "form-control"})
20 )
21 rememberme = forms.BooleanField(
22 initial=False,
23 required=False
24 )
25
26
27 class ProfileForm(forms.ModelForm):
28 oldpassword = forms.CharField(
29 label=ugettext_lazy("Old password"), required=False,
30 widget=forms.PasswordInput(attrs={"class": "form-control"})
31 )
32 newpassword = forms.CharField(
33 label=ugettext_lazy("New password"), required=False,
34 widget=forms.PasswordInput(attrs={"class": "form-control"})
35 )
36 confirmation = forms.CharField(
37 label=ugettext_lazy("Confirmation"), required=False,
38 widget=forms.PasswordInput(attrs={"class": "form-control"})
39 )
40
41 class Meta:
42 model = User
43 fields = ("first_name", "last_name")
44 widgets = {
45 'first_name': forms.TextInput(attrs={'class': 'form-control'}),
46 'last_name': forms.TextInput(attrs={'class': 'form-control'})
47 }
48
49 def __init__(self, update_password, *args, **kwargs):
50 super(ProfileForm, self).__init__(*args, **kwargs)
51 if not update_password:
52 del self.fields["oldpassword"]
53 del self.fields["newpassword"]
54 del self.fields["confirmation"]
55
56 def clean_oldpassword(self):
57 if self.cleaned_data["oldpassword"] == "":
58 return self.cleaned_data["oldpassword"]
59
60 if parameters.get_admin("AUTHENTICATION_TYPE") != "local":
61 return self.cleaned_data["oldpassword"]
62
63 if not self.instance.check_password(self.cleaned_data["oldpassword"]):
64 raise forms.ValidationError(_("Old password mismatchs"))
65 return self.cleaned_data["oldpassword"]
66
67 def clean_confirmation(self):
68 newpassword = self.cleaned_data["newpassword"]
69 confirmation = self.cleaned_data["confirmation"]
70 if newpassword != confirmation:
71 raise forms.ValidationError(_("Passwords mismatch"))
72 return self.cleaned_data["confirmation"]
73
74 def save(self, commit=True):
75 user = super(ProfileForm, self).save(commit=False)
76 if commit:
77 if self.cleaned_data.get("confirmation", "") != "":
78 user.set_password(
79 self.cleaned_data["confirmation"],
80 self.cleaned_data["oldpassword"]
81 )
82 user.save()
83 return user
84
[end of modoboa/core/forms.py]
[start of modoboa/core/dev_settings.py]
1 # Development settings
2 import os
3
4 BOWER_COMPONENTS_ROOT = os.path.join(
5 os.path.dirname(__file__), ".."
6 )
7
8 BOWER_INSTALLED_APPS = (
9 "jquery#1.9",
10 "jquery-ui#1.11",
11 "bootstrap#3.3.1",
12 "bootstrap-select#1.6",
13 "d3#3.5.0",
14 "eonasdan-bootstrap-datetimepicker#3.1.3",
15 "font-awesome#4.2.0",
16 "c3#0.4.10",
17 )
18
[end of modoboa/core/dev_settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modoboa/core/dev_settings.py b/modoboa/core/dev_settings.py
--- a/modoboa/core/dev_settings.py
+++ b/modoboa/core/dev_settings.py
@@ -8,7 +8,7 @@
BOWER_INSTALLED_APPS = (
"jquery#1.9",
"jquery-ui#1.11",
- "bootstrap#3.3.1",
+ "bootstrap#3.3.5",
"bootstrap-select#1.6",
"d3#3.5.0",
"eonasdan-bootstrap-datetimepicker#3.1.3",
diff --git a/modoboa/core/forms.py b/modoboa/core/forms.py
--- a/modoboa/core/forms.py
+++ b/modoboa/core/forms.py
@@ -5,6 +5,8 @@
from django import forms
from django.utils.translation import ugettext as _, ugettext_lazy
+from passwords.fields import PasswordField
+
from modoboa.core.models import User
from modoboa.lib import parameters
@@ -29,11 +31,11 @@
label=ugettext_lazy("Old password"), required=False,
widget=forms.PasswordInput(attrs={"class": "form-control"})
)
- newpassword = forms.CharField(
+ newpassword = PasswordField(
label=ugettext_lazy("New password"), required=False,
widget=forms.PasswordInput(attrs={"class": "form-control"})
)
- confirmation = forms.CharField(
+ confirmation = PasswordField(
label=ugettext_lazy("Confirmation"), required=False,
widget=forms.PasswordInput(attrs={"class": "form-control"})
)
| {"golden_diff": "diff --git a/modoboa/core/dev_settings.py b/modoboa/core/dev_settings.py\n--- a/modoboa/core/dev_settings.py\n+++ b/modoboa/core/dev_settings.py\n@@ -8,7 +8,7 @@\n BOWER_INSTALLED_APPS = (\n \"jquery#1.9\",\n \"jquery-ui#1.11\",\n- \"bootstrap#3.3.1\",\n+ \"bootstrap#3.3.5\",\n \"bootstrap-select#1.6\",\n \"d3#3.5.0\",\n \"eonasdan-bootstrap-datetimepicker#3.1.3\",\ndiff --git a/modoboa/core/forms.py b/modoboa/core/forms.py\n--- a/modoboa/core/forms.py\n+++ b/modoboa/core/forms.py\n@@ -5,6 +5,8 @@\n from django import forms\n from django.utils.translation import ugettext as _, ugettext_lazy\n \n+from passwords.fields import PasswordField\n+\n from modoboa.core.models import User\n from modoboa.lib import parameters\n \n@@ -29,11 +31,11 @@\n label=ugettext_lazy(\"Old password\"), required=False,\n widget=forms.PasswordInput(attrs={\"class\": \"form-control\"})\n )\n- newpassword = forms.CharField(\n+ newpassword = PasswordField(\n label=ugettext_lazy(\"New password\"), required=False,\n widget=forms.PasswordInput(attrs={\"class\": \"form-control\"})\n )\n- confirmation = forms.CharField(\n+ confirmation = PasswordField(\n label=ugettext_lazy(\"Confirmation\"), required=False,\n widget=forms.PasswordInput(attrs={\"class\": \"form-control\"})\n )\n", "issue": "Passwords complexity\nWe must ensure passwords respect a minimum complexity.\n\nSee https://github.com/modoboa/modoboa-admin/issues/27\n\n", "before_files": [{"content": "# coding: utf-8\n\n\"\"\"Core forms.\"\"\"\n\nfrom django import forms\nfrom django.utils.translation import ugettext as _, ugettext_lazy\n\nfrom modoboa.core.models import User\nfrom modoboa.lib import parameters\n\n\nclass LoginForm(forms.Form):\n username = forms.CharField(\n label=ugettext_lazy(\"Username\"),\n widget=forms.TextInput(attrs={\"class\": \"form-control\"})\n )\n password = forms.CharField(\n label=ugettext_lazy(\"Password\"),\n widget=forms.PasswordInput(attrs={\"class\": \"form-control\"})\n )\n rememberme = forms.BooleanField(\n initial=False,\n required=False\n )\n\n\nclass ProfileForm(forms.ModelForm):\n oldpassword = forms.CharField(\n label=ugettext_lazy(\"Old password\"), required=False,\n widget=forms.PasswordInput(attrs={\"class\": \"form-control\"})\n )\n newpassword = forms.CharField(\n label=ugettext_lazy(\"New password\"), required=False,\n widget=forms.PasswordInput(attrs={\"class\": \"form-control\"})\n )\n confirmation = forms.CharField(\n label=ugettext_lazy(\"Confirmation\"), required=False,\n widget=forms.PasswordInput(attrs={\"class\": \"form-control\"})\n )\n\n class Meta:\n model = User\n fields = (\"first_name\", \"last_name\")\n widgets = {\n 'first_name': forms.TextInput(attrs={'class': 'form-control'}),\n 'last_name': forms.TextInput(attrs={'class': 'form-control'})\n }\n\n def __init__(self, update_password, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs)\n if not update_password:\n del self.fields[\"oldpassword\"]\n del self.fields[\"newpassword\"]\n del self.fields[\"confirmation\"]\n\n def clean_oldpassword(self):\n if self.cleaned_data[\"oldpassword\"] == \"\":\n return self.cleaned_data[\"oldpassword\"]\n\n if parameters.get_admin(\"AUTHENTICATION_TYPE\") != \"local\":\n return self.cleaned_data[\"oldpassword\"]\n\n if not self.instance.check_password(self.cleaned_data[\"oldpassword\"]):\n raise forms.ValidationError(_(\"Old password mismatchs\"))\n return self.cleaned_data[\"oldpassword\"]\n\n def clean_confirmation(self):\n newpassword = self.cleaned_data[\"newpassword\"]\n 
confirmation = self.cleaned_data[\"confirmation\"]\n if newpassword != confirmation:\n raise forms.ValidationError(_(\"Passwords mismatch\"))\n return self.cleaned_data[\"confirmation\"]\n\n def save(self, commit=True):\n user = super(ProfileForm, self).save(commit=False)\n if commit:\n if self.cleaned_data.get(\"confirmation\", \"\") != \"\":\n user.set_password(\n self.cleaned_data[\"confirmation\"],\n self.cleaned_data[\"oldpassword\"]\n )\n user.save()\n return user\n", "path": "modoboa/core/forms.py"}, {"content": "# Development settings\nimport os\n\nBOWER_COMPONENTS_ROOT = os.path.join(\n os.path.dirname(__file__), \"..\"\n)\n\nBOWER_INSTALLED_APPS = (\n \"jquery#1.9\",\n \"jquery-ui#1.11\",\n \"bootstrap#3.3.1\",\n \"bootstrap-select#1.6\",\n \"d3#3.5.0\",\n \"eonasdan-bootstrap-datetimepicker#3.1.3\",\n \"font-awesome#4.2.0\",\n \"c3#0.4.10\",\n)\n", "path": "modoboa/core/dev_settings.py"}]} | 1,457 | 347 |
gh_patches_debug_29209 | rasdani/github-patches | git_diff | ciudadanointeligente__votainteligente-portal-electoral-573 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Candidate backend: there is a question that has a blank answer option.
</issue>
<code>
[start of backend_candidate/views.py]
1 from backend_candidate.models import is_candidate, CandidacyContact, Candidacy
2 from django.http import Http404
3 from django.views.generic.base import TemplateView, RedirectView
4 from django.views.generic import View
5 from django.utils.decorators import method_decorator
6 from django.contrib.auth.decorators import login_required
7 from django.views.generic.edit import FormView
8 from django.views.generic.list import ListView
9 from django.shortcuts import get_object_or_404
10 from backend_candidate.forms import get_form_for_election
11 from elections.models import Candidate, Election, PersonalData
12 from django.core.urlresolvers import reverse
13 from django.http import HttpResponseRedirect
14 from backend_candidate.forms import get_candidate_profile_form_class
15 from popular_proposal.models import Commitment, PopularProposal
16 from django.contrib import messages
17 from django.utils.translation import ugettext as _
18
19
20 class BackendCandidateBase(View):
21 @method_decorator(login_required)
22 def dispatch(self, request, *args, **kwargs):
23 if not is_candidate(request.user):
24 raise Http404
25 self.user = request.user
26 candidacy_objects = CandidacyContact.objects.filter(candidacy__user=self.user)
27 used_by_candidate = True
28 for candidacy_object in candidacy_objects:
29
30 if not candidacy_object.used_by_candidate:
31 used_by_candidate = False
32 candidacy_object.used_by_candidate = True
33 candidacy_object.save()
34 if not used_by_candidate:
35 return HttpResponseRedirect(reverse('password_reset'))
36 return super(BackendCandidateBase, self).dispatch(request,
37 *args,
38 **kwargs)
39
40
41 class HomeView(BackendCandidateBase, TemplateView):
42 template_name = "backend_candidate/home.html"
43
44 def get_context_data(self, *args, **kwargs):
45 context = super(HomeView, self).get_context_data(*args, **kwargs)
46 context['candidacies'] = self.user.candidacies.all()
47 return context
48
49
50 class CompleteMediaNaranjaView(FormView):
51 template_name = 'backend_candidate/complete_12_naranja.html'
52
53 @method_decorator(login_required)
54 def dispatch(self, request, *args, **kwargs):
55 if not is_candidate(request.user):
56 raise Http404
57 self.user = request.user
58 self.election = get_object_or_404(Election, slug=self.kwargs['slug'])
59 self.candidate = get_object_or_404(Candidate,
60 id=self.kwargs['candidate_id'])
61 return super(CompleteMediaNaranjaView, self).dispatch(request,
62 *args,
63 **kwargs)
64
65 def get_form_class(self):
66 return get_form_for_election(self.election)
67
68 def get_form_kwargs(self):
69 kwargs = super(CompleteMediaNaranjaView, self).get_form_kwargs()
70 kwargs['candidate'] = self.candidate
71 return kwargs
72
73 def get_context_data(self, **kwargs):
74 context = (super(CompleteMediaNaranjaView, self)
75 .get_context_data(**kwargs))
76 context['candidate'] = self.candidate
77 context['election'] = self.election
78 return context
79
80 def form_valid(self, form):
81 form.save()
82 messages.add_message(self.request, messages.INFO, _('Hemos guardado tus respuestas'))
83 return super(CompleteMediaNaranjaView, self).form_valid(form)
84
85 def get_success_url(self):
86 url = reverse('backend_candidate:complete_12_naranja',
87 kwargs={'slug': self.election.slug,
88 'candidate_id': self.candidate.id})
89 return url
90
91
92 class CandidacyJoinView(RedirectView):
93 permanent = False
94 query_string = True
95
96 @method_decorator(login_required)
97 def dispatch(self, *args, **kwargs):
98 self.contact = get_object_or_404(CandidacyContact,
99 identifier=self.kwargs['identifier'])
100 return super(CandidacyJoinView, self).dispatch(*args, **kwargs)
101
102 def get_redirect_url(self, *args, **kwargs):
103 candidacy, created = Candidacy.objects.get_or_create(candidate=self.contact.candidate,
104 user=self.request.user
105 )
106 self.contact.candidacy = candidacy
107 self.contact.used_by_candidate = True
108 self.contact.save()
109 return reverse('backend_candidate:home')
110
111
112 form_class = get_candidate_profile_form_class()
113
114
115 class ProfileView(FormView):
116 form_class = form_class
117 template_name = 'backend_candidate/complete_profile.html'
118
119 @method_decorator(login_required)
120 def dispatch(self, request, *args, **kwargs):
121 if not is_candidate(request.user):
122 raise Http404
123 self.user = request.user
124 self.election = get_object_or_404(Election, slug=self.kwargs['slug'])
125 self.candidate = get_object_or_404(Candidate,
126 id=self.kwargs['candidate_id'])
127 return super(ProfileView, self).dispatch(request, *args, **kwargs)
128
129 def get_form_kwargs(self):
130 kwargs = super(ProfileView, self).get_form_kwargs()
131 kwargs['candidate'] = self.candidate
132 return kwargs
133
134 def form_valid(self, form):
135 form.save()
136 messages.add_message(self.request, messages.INFO, _('Hemos actualizado tu perfil'))
137 return super(ProfileView, self).form_valid(form)
138
139 def get_success_url(self):
140 url = reverse('backend_candidate:complete_profile',
141 kwargs={'slug': self.election.slug,
142 'candidate_id': self.candidate.id}
143 )
144 return url
145
146 def get_initial(self):
147 initial = super(ProfileView, self).get_initial()
148 labels = []
149 for field in self.form_class.base_fields:
150 labels.append(field)
151 personal_datas = PersonalData.objects.filter(candidate=self.candidate,
152 label__in=labels)
153 for personal_data in personal_datas:
154 initial[str(personal_data.label)] = personal_data.value
155 return initial
156
157 def get_context_data(self, **kwargs):
158 context = (super(ProfileView, self)
159 .get_context_data(**kwargs))
160 context['candidate'] = self.candidate
161 context['election'] = self.election
162 return context
163
164
165 class MyCommitments(BackendCandidateBase, ListView):
166 model = Commitment
167 template_name = 'backend_candidate/i_have_commited.html'
168 context_object_name = 'commitments'
169
170 @method_decorator(login_required)
171 def dispatch(self, request, *args, **kwargs):
172 if not is_candidate(request.user):
173 raise Http404
174 self.user = request.user
175 self.election = get_object_or_404(Election, slug=self.kwargs['slug'])
176 self.candidate = get_object_or_404(Candidate,
177 id=self.kwargs['candidate_id'])
178 return super(MyCommitments, self).dispatch(request, *args, **kwargs)
179
180 def get_queryset(self):
181 qs = super(MyCommitments, self).get_queryset()
182 return qs.filter(candidate=self.candidate)
183
184 def get_context_data(self, **kwargs):
185 context = (super(MyCommitments, self)
186 .get_context_data(**kwargs))
187 context['candidate'] = self.candidate
188 context['election'] = self.election
189 return context
190
191
192 class ProposalsForMe(BackendCandidateBase, ListView):
193 model = PopularProposal
194 template_name = 'backend_candidate/proposals_for_me.html'
195 context_object_name = 'proposals'
196
197 @method_decorator(login_required)
198 def dispatch(self, request, *args, **kwargs):
199 if not is_candidate(request.user):
200 raise Http404
201 self.user = request.user
202 self.election = get_object_or_404(Election, slug=self.kwargs['slug'])
203 self.candidate = get_object_or_404(Candidate,
204 id=self.kwargs['candidate_id'])
205 return super(ProposalsForMe, self).dispatch(request, *args, **kwargs)
206
207 def get_queryset(self):
208 qs = super(ProposalsForMe, self).get_queryset()
209 proposals_ids = []
210 for commitment in self.candidate.commitments.all():
211 proposals_ids.append(commitment.proposal.id)
212 qs = qs.filter(area=self.election.area).exclude(id__in=proposals_ids)
213 return qs
214
215 def get_context_data(self, **kwargs):
216 context = (super(ProposalsForMe, self)
217 .get_context_data(**kwargs))
218 context['candidate'] = self.candidate
219 context['election'] = self.election
220 return context
[end of backend_candidate/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend_candidate/views.py b/backend_candidate/views.py
--- a/backend_candidate/views.py
+++ b/backend_candidate/views.py
@@ -38,7 +38,7 @@
**kwargs)
-class HomeView(BackendCandidateBase, TemplateView):
+class HomeView(BackendCandidateBase, RedirectView):
template_name = "backend_candidate/home.html"
def get_context_data(self, *args, **kwargs):
@@ -46,6 +46,13 @@
context['candidacies'] = self.user.candidacies.all()
return context
+ def get_redirect_url(self, *args, **kwargs):
+ candidacy = self.user.candidacies.first()
+ profile_url = reverse('backend_candidate:complete_profile',
+ kwargs={'slug': candidacy.candidate.election.slug,
+ 'candidate_id': candidacy.candidate.id})
+ return profile_url
+
class CompleteMediaNaranjaView(FormView):
template_name = 'backend_candidate/complete_12_naranja.html'
@@ -106,7 +113,11 @@
self.contact.candidacy = candidacy
self.contact.used_by_candidate = True
self.contact.save()
- return reverse('backend_candidate:home')
+ candidacy = self.request.user.candidacies.first()
+ profile_url = reverse('backend_candidate:complete_profile',
+ kwargs={'slug': candidacy.candidate.election.slug,
+ 'candidate_id': candidacy.candidate.id})
+ return profile_url
form_class = get_candidate_profile_form_class()
| {"golden_diff": "diff --git a/backend_candidate/views.py b/backend_candidate/views.py\n--- a/backend_candidate/views.py\n+++ b/backend_candidate/views.py\n@@ -38,7 +38,7 @@\n **kwargs)\n \n \n-class HomeView(BackendCandidateBase, TemplateView):\n+class HomeView(BackendCandidateBase, RedirectView):\n template_name = \"backend_candidate/home.html\"\n \n def get_context_data(self, *args, **kwargs):\n@@ -46,6 +46,13 @@\n context['candidacies'] = self.user.candidacies.all()\n return context\n \n+ def get_redirect_url(self, *args, **kwargs):\n+ candidacy = self.user.candidacies.first()\n+ profile_url = reverse('backend_candidate:complete_profile',\n+ kwargs={'slug': candidacy.candidate.election.slug,\n+ 'candidate_id': candidacy.candidate.id})\n+ return profile_url\n+\n \n class CompleteMediaNaranjaView(FormView):\n template_name = 'backend_candidate/complete_12_naranja.html'\n@@ -106,7 +113,11 @@\n self.contact.candidacy = candidacy\n self.contact.used_by_candidate = True\n self.contact.save()\n- return reverse('backend_candidate:home')\n+ candidacy = self.request.user.candidacies.first()\n+ profile_url = reverse('backend_candidate:complete_profile',\n+ kwargs={'slug': candidacy.candidate.election.slug,\n+ 'candidate_id': candidacy.candidate.id})\n+ return profile_url\n \n \n form_class = get_candidate_profile_form_class()\n", "issue": "Backend candidato: hay una pregunta que tiene una alternativa en blanco.\n\n\n", "before_files": [{"content": "from backend_candidate.models import is_candidate, CandidacyContact, Candidacy\nfrom django.http import Http404\nfrom django.views.generic.base import TemplateView, RedirectView\nfrom django.views.generic import View\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic.edit import FormView\nfrom django.views.generic.list import ListView\nfrom django.shortcuts import get_object_or_404\nfrom backend_candidate.forms import get_form_for_election\nfrom elections.models import Candidate, Election, PersonalData\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom backend_candidate.forms import get_candidate_profile_form_class\nfrom popular_proposal.models import Commitment, PopularProposal\nfrom django.contrib import messages\nfrom django.utils.translation import ugettext as _\n\n\nclass BackendCandidateBase(View):\n @method_decorator(login_required)\n def dispatch(self, request, *args, **kwargs):\n if not is_candidate(request.user):\n raise Http404\n self.user = request.user\n candidacy_objects = CandidacyContact.objects.filter(candidacy__user=self.user)\n used_by_candidate = True\n for candidacy_object in candidacy_objects:\n\n if not candidacy_object.used_by_candidate:\n used_by_candidate = False\n candidacy_object.used_by_candidate = True\n candidacy_object.save()\n if not used_by_candidate:\n return HttpResponseRedirect(reverse('password_reset'))\n return super(BackendCandidateBase, self).dispatch(request,\n *args,\n **kwargs)\n\n\nclass HomeView(BackendCandidateBase, TemplateView):\n template_name = \"backend_candidate/home.html\"\n\n def get_context_data(self, *args, **kwargs):\n context = super(HomeView, self).get_context_data(*args, **kwargs)\n context['candidacies'] = self.user.candidacies.all()\n return context\n\n\nclass CompleteMediaNaranjaView(FormView):\n template_name = 'backend_candidate/complete_12_naranja.html'\n\n @method_decorator(login_required)\n def dispatch(self, request, *args, **kwargs):\n if not 
is_candidate(request.user):\n raise Http404\n self.user = request.user\n self.election = get_object_or_404(Election, slug=self.kwargs['slug'])\n self.candidate = get_object_or_404(Candidate,\n id=self.kwargs['candidate_id'])\n return super(CompleteMediaNaranjaView, self).dispatch(request,\n *args,\n **kwargs)\n\n def get_form_class(self):\n return get_form_for_election(self.election)\n\n def get_form_kwargs(self):\n kwargs = super(CompleteMediaNaranjaView, self).get_form_kwargs()\n kwargs['candidate'] = self.candidate\n return kwargs\n\n def get_context_data(self, **kwargs):\n context = (super(CompleteMediaNaranjaView, self)\n .get_context_data(**kwargs))\n context['candidate'] = self.candidate\n context['election'] = self.election\n return context\n\n def form_valid(self, form):\n form.save()\n messages.add_message(self.request, messages.INFO, _('Hemos guardado tus respuestas'))\n return super(CompleteMediaNaranjaView, self).form_valid(form)\n\n def get_success_url(self):\n url = reverse('backend_candidate:complete_12_naranja',\n kwargs={'slug': self.election.slug,\n 'candidate_id': self.candidate.id})\n return url\n\n\nclass CandidacyJoinView(RedirectView):\n permanent = False\n query_string = True\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n self.contact = get_object_or_404(CandidacyContact,\n identifier=self.kwargs['identifier'])\n return super(CandidacyJoinView, self).dispatch(*args, **kwargs)\n\n def get_redirect_url(self, *args, **kwargs):\n candidacy, created = Candidacy.objects.get_or_create(candidate=self.contact.candidate,\n user=self.request.user\n )\n self.contact.candidacy = candidacy\n self.contact.used_by_candidate = True\n self.contact.save()\n return reverse('backend_candidate:home')\n\n\nform_class = get_candidate_profile_form_class()\n\n\nclass ProfileView(FormView):\n form_class = form_class\n template_name = 'backend_candidate/complete_profile.html'\n\n @method_decorator(login_required)\n def dispatch(self, request, *args, **kwargs):\n if not is_candidate(request.user):\n raise Http404\n self.user = request.user\n self.election = get_object_or_404(Election, slug=self.kwargs['slug'])\n self.candidate = get_object_or_404(Candidate,\n id=self.kwargs['candidate_id'])\n return super(ProfileView, self).dispatch(request, *args, **kwargs)\n\n def get_form_kwargs(self):\n kwargs = super(ProfileView, self).get_form_kwargs()\n kwargs['candidate'] = self.candidate\n return kwargs\n\n def form_valid(self, form):\n form.save()\n messages.add_message(self.request, messages.INFO, _('Hemos actualizado tu perfil'))\n return super(ProfileView, self).form_valid(form)\n\n def get_success_url(self):\n url = reverse('backend_candidate:complete_profile',\n kwargs={'slug': self.election.slug,\n 'candidate_id': self.candidate.id}\n )\n return url\n\n def get_initial(self):\n initial = super(ProfileView, self).get_initial()\n labels = []\n for field in self.form_class.base_fields:\n labels.append(field)\n personal_datas = PersonalData.objects.filter(candidate=self.candidate,\n label__in=labels)\n for personal_data in personal_datas:\n initial[str(personal_data.label)] = personal_data.value\n return initial\n\n def get_context_data(self, **kwargs):\n context = (super(ProfileView, self)\n .get_context_data(**kwargs))\n context['candidate'] = self.candidate\n context['election'] = self.election\n return context\n\n\nclass MyCommitments(BackendCandidateBase, ListView):\n model = Commitment\n template_name = 'backend_candidate/i_have_commited.html'\n 
context_object_name = 'commitments'\n\n @method_decorator(login_required)\n def dispatch(self, request, *args, **kwargs):\n if not is_candidate(request.user):\n raise Http404\n self.user = request.user\n self.election = get_object_or_404(Election, slug=self.kwargs['slug'])\n self.candidate = get_object_or_404(Candidate,\n id=self.kwargs['candidate_id'])\n return super(MyCommitments, self).dispatch(request, *args, **kwargs)\n\n def get_queryset(self):\n qs = super(MyCommitments, self).get_queryset()\n return qs.filter(candidate=self.candidate)\n\n def get_context_data(self, **kwargs):\n context = (super(MyCommitments, self)\n .get_context_data(**kwargs))\n context['candidate'] = self.candidate\n context['election'] = self.election\n return context\n\n\nclass ProposalsForMe(BackendCandidateBase, ListView):\n model = PopularProposal\n template_name = 'backend_candidate/proposals_for_me.html'\n context_object_name = 'proposals'\n\n @method_decorator(login_required)\n def dispatch(self, request, *args, **kwargs):\n if not is_candidate(request.user):\n raise Http404\n self.user = request.user\n self.election = get_object_or_404(Election, slug=self.kwargs['slug'])\n self.candidate = get_object_or_404(Candidate,\n id=self.kwargs['candidate_id'])\n return super(ProposalsForMe, self).dispatch(request, *args, **kwargs)\n\n def get_queryset(self):\n qs = super(ProposalsForMe, self).get_queryset()\n proposals_ids = []\n for commitment in self.candidate.commitments.all():\n proposals_ids.append(commitment.proposal.id)\n qs = qs.filter(area=self.election.area).exclude(id__in=proposals_ids)\n return qs\n\n def get_context_data(self, **kwargs):\n context = (super(ProposalsForMe, self)\n .get_context_data(**kwargs))\n context['candidate'] = self.candidate\n context['election'] = self.election\n return context", "path": "backend_candidate/views.py"}]} | 2,988 | 334 |
gh_patches_debug_29775 | rasdani/github-patches | git_diff | liqd__adhocracy4-476 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
adding multiple answer text to answer page
</issue>
<code>
[start of adhocracy4/comments_async/serializers.py]
1 from django.conf import settings
2 from django.utils.translation import ugettext as _
3 from easy_thumbnails.files import get_thumbnailer
4 from rest_framework import serializers
5
6 from adhocracy4.comments.models import Comment
7
8
9 class CommentSerializer(serializers.ModelSerializer):
10 """Default Serializer for the comments."""
11
12 user_name = serializers.SerializerMethodField()
13 user_pk = serializers.SerializerMethodField()
14 user_profile_url = serializers.SerializerMethodField()
15 user_image = serializers.SerializerMethodField()
16 is_deleted = serializers.SerializerMethodField()
17 ratings = serializers.SerializerMethodField()
18 is_moderator = serializers.SerializerMethodField()
19
20 class Meta:
21 model = Comment
22 read_only_fields = ('modified', 'created', 'id',
23 'user_name', 'user_pk', 'user_image',
24 'ratings', 'content_type', 'object_pk')
25 exclude = ('creator', 'is_censored', 'is_removed')
26
27 def to_representation(self, instance):
28 """
29 Create a dictionary form categories.
30
31 Gets the categories and adds them along with their values
32 to a dictionary.
33 """
34 ret = super().to_representation(instance)
35 categories = {}
36 if ret['comment_categories']:
37 category_choices = getattr(settings,
38 'A4_COMMENT_CATEGORIES', '')
39 if category_choices:
40 category_choices = dict((x, str(y)) for x, y
41 in category_choices)
42 category_list = ret['comment_categories'].strip('[]').split(',')
43 for category in category_list:
44 if category in category_choices:
45 categories[category] = category_choices[category]
46 else:
47 categories[category] = category
48 ret['comment_categories'] = categories
49 return ret
50
51 def to_internal_value(self, data):
52 data = super().to_internal_value(data)
53 if 'comment_categories' in data:
54 value = data.get('comment_categories')
55 if value == '' or value == '[]':
56 raise serializers.ValidationError({
57 'comment_categories': _('Please choose a category')
58 })
59 return data
60
61 def get_user_pk(self, obj):
62 if (obj.is_censored or obj.is_removed):
63 return -1
64 return str(obj.creator.id)
65
66 def get_user_profile_url(self, obj):
67 if obj.is_censored or obj.is_removed:
68 return ''
69 try:
70 return obj.creator.get_absolute_url()
71 except AttributeError:
72 return ''
73
74 def get_user_name(self, obj):
75 """Don't show username if comment is marked removed or censored."""
76 if(obj.is_censored or obj.is_removed):
77 return _('unknown user')
78 return obj.creator.get_short_name()
79
80 def get_user_image(self, obj):
81 """Load small thumbnail images for user images."""
82 if(obj.is_censored or obj.is_removed):
83 return None
84 try:
85 if obj.creator.avatar:
86 avatar = get_thumbnailer(obj.creator.avatar)['avatar']
87 return avatar.url
88 except AttributeError:
89 pass
90 return None
91
92 def get_is_moderator(self, obj):
93 return obj.project.has_moderator(obj.creator)
94
95 def get_is_deleted(self, obj):
96 """Return true if one of the flags is set."""
97 return (obj.is_censored or obj.is_removed)
98
99 def get_ratings(self, comment):
100 """
101 Get positive and negative rating count.
102
103 As well as info on the request users rating
104 """
105 user = self.context['request'].user
106 positive_ratings = comment.ratings.filter(value=1).count()
107 negative_ratings = comment.ratings.filter(value=-1).count()
108
109 if user.is_authenticated:
110 user_rating = comment.ratings.filter(creator=user).first()
111 else:
112 user_rating = None
113
114 if user_rating:
115 user_rating_value = user_rating.value
116 user_rating_id = user_rating.pk
117 else:
118 user_rating_value = None
119 user_rating_id = None
120
121 result = {
122 'positive_ratings': positive_ratings,
123 'negative_ratings': negative_ratings,
124 'current_user_rating_value': user_rating_value,
125 'current_user_rating_id': user_rating_id
126 }
127
128 return result
129
130
131 class CommentListSerializer(CommentSerializer):
132 """Serializer for the comments to be used when viewed as list."""
133
134 comment = serializers.SerializerMethodField()
135
136 def get_comment(self, obj):
137 if obj.is_removed:
138 return _('deleted by creator')
139 if obj.is_censored:
140 return _('deleted by moderator')
141 return obj.comment
142
143
144 class ThreadSerializer(CommentSerializer):
145 """Serializes a comment including child comment (replies)."""
146
147 child_comments = CommentSerializer(many=True, read_only=True)
148
149
150 class ThreadListSerializer(CommentListSerializer):
151 """
152 Serializes comments when viewed.
153
154 As list including child comment (replies).
155 """
156
157 child_comments = CommentListSerializer(many=True, read_only=True)
158
[end of adhocracy4/comments_async/serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/adhocracy4/comments_async/serializers.py b/adhocracy4/comments_async/serializers.py
--- a/adhocracy4/comments_async/serializers.py
+++ b/adhocracy4/comments_async/serializers.py
@@ -21,7 +21,8 @@
model = Comment
read_only_fields = ('modified', 'created', 'id',
'user_name', 'user_pk', 'user_image',
- 'ratings', 'content_type', 'object_pk')
+ 'user_image_fallback', 'ratings',
+ 'content_type', 'object_pk')
exclude = ('creator', 'is_censored', 'is_removed')
def to_representation(self, instance):
@@ -77,6 +78,17 @@
return _('unknown user')
return obj.creator.get_short_name()
+ def get_user_image_fallback(self, obj):
+ """Load small thumbnail images for default user images."""
+ if(obj.is_censored or obj.is_removed):
+ return None
+ try:
+ if obj.creator.avatar_fallback:
+ return obj.creator.avatar_fallback
+ except AttributeError:
+ pass
+ return None
+
def get_user_image(self, obj):
"""Load small thumbnail images for user images."""
if(obj.is_censored or obj.is_removed):
@@ -87,7 +99,7 @@
return avatar.url
except AttributeError:
pass
- return None
+ return self.get_user_image_fallback(obj)
def get_is_moderator(self, obj):
return obj.project.has_moderator(obj.creator)
| {"golden_diff": "diff --git a/adhocracy4/comments_async/serializers.py b/adhocracy4/comments_async/serializers.py\n--- a/adhocracy4/comments_async/serializers.py\n+++ b/adhocracy4/comments_async/serializers.py\n@@ -21,7 +21,8 @@\n model = Comment\n read_only_fields = ('modified', 'created', 'id',\n 'user_name', 'user_pk', 'user_image',\n- 'ratings', 'content_type', 'object_pk')\n+ 'user_image_fallback', 'ratings',\n+ 'content_type', 'object_pk')\n exclude = ('creator', 'is_censored', 'is_removed')\n \n def to_representation(self, instance):\n@@ -77,6 +78,17 @@\n return _('unknown user')\n return obj.creator.get_short_name()\n \n+ def get_user_image_fallback(self, obj):\n+ \"\"\"Load small thumbnail images for default user images.\"\"\"\n+ if(obj.is_censored or obj.is_removed):\n+ return None\n+ try:\n+ if obj.creator.avatar_fallback:\n+ return obj.creator.avatar_fallback\n+ except AttributeError:\n+ pass\n+ return None\n+\n def get_user_image(self, obj):\n \"\"\"Load small thumbnail images for user images.\"\"\"\n if(obj.is_censored or obj.is_removed):\n@@ -87,7 +99,7 @@\n return avatar.url\n except AttributeError:\n pass\n- return None\n+ return self.get_user_image_fallback(obj)\n \n def get_is_moderator(self, obj):\n return obj.project.has_moderator(obj.creator)\n", "issue": "adding multiple answer text to answer page\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.utils.translation import ugettext as _\nfrom easy_thumbnails.files import get_thumbnailer\nfrom rest_framework import serializers\n\nfrom adhocracy4.comments.models import Comment\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n \"\"\"Default Serializer for the comments.\"\"\"\n\n user_name = serializers.SerializerMethodField()\n user_pk = serializers.SerializerMethodField()\n user_profile_url = serializers.SerializerMethodField()\n user_image = serializers.SerializerMethodField()\n is_deleted = serializers.SerializerMethodField()\n ratings = serializers.SerializerMethodField()\n is_moderator = serializers.SerializerMethodField()\n\n class Meta:\n model = Comment\n read_only_fields = ('modified', 'created', 'id',\n 'user_name', 'user_pk', 'user_image',\n 'ratings', 'content_type', 'object_pk')\n exclude = ('creator', 'is_censored', 'is_removed')\n\n def to_representation(self, instance):\n \"\"\"\n Create a dictionary form categories.\n\n Gets the categories and adds them along with their values\n to a dictionary.\n \"\"\"\n ret = super().to_representation(instance)\n categories = {}\n if ret['comment_categories']:\n category_choices = getattr(settings,\n 'A4_COMMENT_CATEGORIES', '')\n if category_choices:\n category_choices = dict((x, str(y)) for x, y\n in category_choices)\n category_list = ret['comment_categories'].strip('[]').split(',')\n for category in category_list:\n if category in category_choices:\n categories[category] = category_choices[category]\n else:\n categories[category] = category\n ret['comment_categories'] = categories\n return ret\n\n def to_internal_value(self, data):\n data = super().to_internal_value(data)\n if 'comment_categories' in data:\n value = data.get('comment_categories')\n if value == '' or value == '[]':\n raise serializers.ValidationError({\n 'comment_categories': _('Please choose a category')\n })\n return data\n\n def get_user_pk(self, obj):\n if (obj.is_censored or obj.is_removed):\n return -1\n return str(obj.creator.id)\n\n def get_user_profile_url(self, obj):\n if obj.is_censored or obj.is_removed:\n return ''\n try:\n return 
obj.creator.get_absolute_url()\n except AttributeError:\n return ''\n\n def get_user_name(self, obj):\n \"\"\"Don't show username if comment is marked removed or censored.\"\"\"\n if(obj.is_censored or obj.is_removed):\n return _('unknown user')\n return obj.creator.get_short_name()\n\n def get_user_image(self, obj):\n \"\"\"Load small thumbnail images for user images.\"\"\"\n if(obj.is_censored or obj.is_removed):\n return None\n try:\n if obj.creator.avatar:\n avatar = get_thumbnailer(obj.creator.avatar)['avatar']\n return avatar.url\n except AttributeError:\n pass\n return None\n\n def get_is_moderator(self, obj):\n return obj.project.has_moderator(obj.creator)\n\n def get_is_deleted(self, obj):\n \"\"\"Return true if one of the flags is set.\"\"\"\n return (obj.is_censored or obj.is_removed)\n\n def get_ratings(self, comment):\n \"\"\"\n Get positive and negative rating count.\n\n As well as info on the request users rating\n \"\"\"\n user = self.context['request'].user\n positive_ratings = comment.ratings.filter(value=1).count()\n negative_ratings = comment.ratings.filter(value=-1).count()\n\n if user.is_authenticated:\n user_rating = comment.ratings.filter(creator=user).first()\n else:\n user_rating = None\n\n if user_rating:\n user_rating_value = user_rating.value\n user_rating_id = user_rating.pk\n else:\n user_rating_value = None\n user_rating_id = None\n\n result = {\n 'positive_ratings': positive_ratings,\n 'negative_ratings': negative_ratings,\n 'current_user_rating_value': user_rating_value,\n 'current_user_rating_id': user_rating_id\n }\n\n return result\n\n\nclass CommentListSerializer(CommentSerializer):\n \"\"\"Serializer for the comments to be used when viewed as list.\"\"\"\n\n comment = serializers.SerializerMethodField()\n\n def get_comment(self, obj):\n if obj.is_removed:\n return _('deleted by creator')\n if obj.is_censored:\n return _('deleted by moderator')\n return obj.comment\n\n\nclass ThreadSerializer(CommentSerializer):\n \"\"\"Serializes a comment including child comment (replies).\"\"\"\n\n child_comments = CommentSerializer(many=True, read_only=True)\n\n\nclass ThreadListSerializer(CommentListSerializer):\n \"\"\"\n Serializes comments when viewed.\n\n As list including child comment (replies).\n \"\"\"\n\n child_comments = CommentListSerializer(many=True, read_only=True)\n", "path": "adhocracy4/comments_async/serializers.py"}]} | 1,951 | 353 |
gh_patches_debug_18594 | rasdani/github-patches | git_diff | modoboa__modoboa-973 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Test failure when using tox
This is because tox use sqlite and has no user set
```
======================================================================
ERROR: test_map_upgrade (modoboa.admin.tests.test_mapfiles.MapFilesTestCase)
Check that map content is used.
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/gawel/bear/modoboa/.tox/py27/lib/python2.7/site-packages/modoboa/admin/tests/test_mapfiles.py", line 45, in test_map_upgrade
self.assertEqual(mapcontent["user"], dbsettings["USER"])
KeyError: 'user'
----------------------------------------------------------------------
Ran 178 tests in 44.553s
```
</issue>
<code>
[start of modoboa/core/management/commands/generate_postfix_maps.py]
1 """Management command to generate/update postfix map files."""
2
3 import copy
4 import hashlib
5 import os
6 import sys
7
8 from django.conf import settings
9 from django.core.management.base import BaseCommand
10 from django.template import Context, Template
11 from django.utils import timezone
12
13 import dj_database_url
14
15 from ... import signals
16 from ... import utils
17
18 MAP_FILE_TEMPLATE = """# This file was generated on {{ date }} by running:
19 # {{ commandline }}
20 # DO NOT EDIT!
21 """
22
23
24 class Command(BaseCommand):
25 """Command class."""
26
27 help = "Generate/update postfix map files."
28
29 def add_arguments(self, parser):
30 """Add extra arguments."""
31 parser.add_argument(
32 "--dburl", help="Custom database url")
33 parser.add_argument(
34 "--destdir", default=".",
35 help="Directory where files will be created")
36 parser.add_argument(
37 "--force-overwrite", action="store_true", default=False,
38 help="Force overwrite of existing map files")
39
40 def __load_checksums(self, destdir):
41 """Load existing checksums if possible."""
42 self.__checksums_file = os.path.join(
43 destdir, "modoboa-postfix-maps.chk")
44 self.__checksums = {}
45 if not os.path.exists(self.__checksums_file):
46 return
47 with open(self.__checksums_file) as fp:
48 for line in fp:
49 fname, dbtype, checksum = line.split(":")
50 self.__checksums[fname.strip()] = {
51 "dbtype": dbtype, "checksum": checksum.strip()
52 }
53
54 def __register_map_files(self):
55 """Load specified applications."""
56 responses = signals.register_postfix_maps.send(sender=self.__class__)
57 mapfiles = []
58 for response in responses:
59 mapfiles += response[1]
60 return mapfiles
61
62 def __check_file(self, path):
63 """Check if map file has been modified."""
64 fname = os.path.basename(path)
65 condition = (
66 not self.__checksums or
67 fname not in self.__checksums)
68 if condition:
69 return True
70 with open(path) as fp:
71 checksum = hashlib.md5(fp.read()).hexdigest()
72 return checksum == self.__checksums[fname]["checksum"]
73
74 def get_template(self, dbtype):
75 """Return map file template."""
76 tplcontent = MAP_FILE_TEMPLATE
77 if dbtype == "sqlite":
78 tplcontent += """dbpath = {{ dbname }}
79 query = {{ query|safe }}
80 """
81 else:
82 tplcontent += """user = {{ dbuser }}
83 password = {{ dbpass }}
84 dbname = {{ dbname }}
85 hosts = {{ dbhost }}
86 query = {{ query|safe }}
87 """
88 return Template(tplcontent)
89
90 def get_template_context(self, options):
91 """Build the context used to render templates."""
92 dburl = options.get("dburl")
93 db_settings = (
94 dj_database_url.config(default=dburl)
95 if dburl else settings.DATABASES["default"])
96 if "sqlite" in db_settings["ENGINE"]:
97 dbtype = "sqlite"
98 elif "psycopg2" in db_settings["ENGINE"]:
99 dbtype = "postgres"
100 else:
101 dbtype = "mysql"
102 commandline = "{} {}".format(
103 os.path.basename(sys.argv[0]), " ".join(sys.argv[1:]))
104 context = {
105 "date": timezone.now(),
106 "commandline": commandline,
107 "dbtype": dbtype,
108 "dbuser": db_settings["USER"],
109 "dbpass": db_settings["PASSWORD"],
110 "dbname": db_settings["NAME"],
111 "dbhost": db_settings.get("HOST", "127.0.0.1"),
112 }
113 return context
114
115 def __render_map_file(
116 self, mapobject, destdir, context, force_overwrite=False):
117 """Render a map file."""
118 fullpath = os.path.join(destdir, mapobject.filename)
119 if os.path.exists(fullpath) and not force_overwrite:
120 if not self.__check_file(fullpath):
121 print(
122 "Cannot upgrade '{}' map because it has been modified."
123 .format(mapobject.filename))
124 return self.__checksums[mapobject.filename]
125 mapcontent = utils.parse_map_file(fullpath)
126 context = copy.deepcopy(context)
127 context["dbtype"] = self.__checksums[mapobject.filename]["dbtype"]
128 context["dbuser"] = mapcontent["user"]
129 context["dbpass"] = mapcontent["password"]
130 context["dbname"] = mapcontent["dbname"]
131 context["dbhost"] = mapcontent["hosts"]
132 content = self.get_template(context["dbtype"]).render(
133 Context(
134 dict(context.items(),
135 query=getattr(mapobject, context["dbtype"]))
136 )
137 )
138 fullpath = os.path.join(destdir, mapobject.filename)
139 with open(fullpath, "w") as fp:
140 fp.write(content)
141 return hashlib.md5(content).hexdigest()
142
143 def handle(self, *args, **options):
144 """Command entry point."""
145 mapfiles = self.__register_map_files()
146 destdir = os.path.realpath(options["destdir"])
147 try:
148 os.mkdir(destdir)
149 except OSError:
150 pass
151 self.__load_checksums(destdir)
152 context = self.get_template_context(options)
153 checksums = {}
154 for mapobject in mapfiles:
155 checksum = self.__render_map_file(
156 mapobject, destdir, context,
157 force_overwrite=options["force_overwrite"])
158 checksums[mapobject.filename] = checksum
159 with open(self.__checksums_file, "w") as fp:
160 for fname, checksum in checksums.items():
161 fp.write("{}:{}:{}\n".format(
162 fname, context["dbtype"], checksum))
163
[end of modoboa/core/management/commands/generate_postfix_maps.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modoboa/core/management/commands/generate_postfix_maps.py b/modoboa/core/management/commands/generate_postfix_maps.py
--- a/modoboa/core/management/commands/generate_postfix_maps.py
+++ b/modoboa/core/management/commands/generate_postfix_maps.py
@@ -125,10 +125,13 @@
mapcontent = utils.parse_map_file(fullpath)
context = copy.deepcopy(context)
context["dbtype"] = self.__checksums[mapobject.filename]["dbtype"]
- context["dbuser"] = mapcontent["user"]
- context["dbpass"] = mapcontent["password"]
- context["dbname"] = mapcontent["dbname"]
- context["dbhost"] = mapcontent["hosts"]
+ if context["dbtype"] == "sqlite":
+ context["dbname"] = mapcontent["dbpath"]
+ else:
+ context["dbuser"] = mapcontent["user"]
+ context["dbpass"] = mapcontent["password"]
+ context["dbname"] = mapcontent["dbname"]
+ context["dbhost"] = mapcontent["hosts"]
content = self.get_template(context["dbtype"]).render(
Context(
dict(context.items(),
| {"golden_diff": "diff --git a/modoboa/core/management/commands/generate_postfix_maps.py b/modoboa/core/management/commands/generate_postfix_maps.py\n--- a/modoboa/core/management/commands/generate_postfix_maps.py\n+++ b/modoboa/core/management/commands/generate_postfix_maps.py\n@@ -125,10 +125,13 @@\n mapcontent = utils.parse_map_file(fullpath)\n context = copy.deepcopy(context)\n context[\"dbtype\"] = self.__checksums[mapobject.filename][\"dbtype\"]\n- context[\"dbuser\"] = mapcontent[\"user\"]\n- context[\"dbpass\"] = mapcontent[\"password\"]\n- context[\"dbname\"] = mapcontent[\"dbname\"]\n- context[\"dbhost\"] = mapcontent[\"hosts\"]\n+ if context[\"dbtype\"] == \"sqlite\":\n+ context[\"dbname\"] = mapcontent[\"dbpath\"]\n+ else:\n+ context[\"dbuser\"] = mapcontent[\"user\"]\n+ context[\"dbpass\"] = mapcontent[\"password\"]\n+ context[\"dbname\"] = mapcontent[\"dbname\"]\n+ context[\"dbhost\"] = mapcontent[\"hosts\"]\n content = self.get_template(context[\"dbtype\"]).render(\n Context(\n dict(context.items(),\n", "issue": "Test failure when using tox\nThis is because tox use sqlite and has no user set\n\n```\n======================================================================\nERROR: test_map_upgrade (modoboa.admin.tests.test_mapfiles.MapFilesTestCase)\nCheck that map content is used.\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"/home/gawel/bear/modoboa/.tox/py27/lib/python2.7/site-packages/modoboa/admin/tests/test_mapfiles.py\", line 45, in test_map_upgrade\n self.assertEqual(mapcontent[\"user\"], dbsettings[\"USER\"])\nKeyError: 'user'\n\n----------------------------------------------------------------------\nRan 178 tests in 44.553s\n```\n\n", "before_files": [{"content": "\"\"\"Management command to generate/update postfix map files.\"\"\"\n\nimport copy\nimport hashlib\nimport os\nimport sys\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.template import Context, Template\nfrom django.utils import timezone\n\nimport dj_database_url\n\nfrom ... import signals\nfrom ... 
import utils\n\nMAP_FILE_TEMPLATE = \"\"\"# This file was generated on {{ date }} by running:\n# {{ commandline }}\n# DO NOT EDIT!\n\"\"\"\n\n\nclass Command(BaseCommand):\n \"\"\"Command class.\"\"\"\n\n help = \"Generate/update postfix map files.\"\n\n def add_arguments(self, parser):\n \"\"\"Add extra arguments.\"\"\"\n parser.add_argument(\n \"--dburl\", help=\"Custom database url\")\n parser.add_argument(\n \"--destdir\", default=\".\",\n help=\"Directory where files will be created\")\n parser.add_argument(\n \"--force-overwrite\", action=\"store_true\", default=False,\n help=\"Force overwrite of existing map files\")\n\n def __load_checksums(self, destdir):\n \"\"\"Load existing checksums if possible.\"\"\"\n self.__checksums_file = os.path.join(\n destdir, \"modoboa-postfix-maps.chk\")\n self.__checksums = {}\n if not os.path.exists(self.__checksums_file):\n return\n with open(self.__checksums_file) as fp:\n for line in fp:\n fname, dbtype, checksum = line.split(\":\")\n self.__checksums[fname.strip()] = {\n \"dbtype\": dbtype, \"checksum\": checksum.strip()\n }\n\n def __register_map_files(self):\n \"\"\"Load specified applications.\"\"\"\n responses = signals.register_postfix_maps.send(sender=self.__class__)\n mapfiles = []\n for response in responses:\n mapfiles += response[1]\n return mapfiles\n\n def __check_file(self, path):\n \"\"\"Check if map file has been modified.\"\"\"\n fname = os.path.basename(path)\n condition = (\n not self.__checksums or\n fname not in self.__checksums)\n if condition:\n return True\n with open(path) as fp:\n checksum = hashlib.md5(fp.read()).hexdigest()\n return checksum == self.__checksums[fname][\"checksum\"]\n\n def get_template(self, dbtype):\n \"\"\"Return map file template.\"\"\"\n tplcontent = MAP_FILE_TEMPLATE\n if dbtype == \"sqlite\":\n tplcontent += \"\"\"dbpath = {{ dbname }}\nquery = {{ query|safe }}\n\"\"\"\n else:\n tplcontent += \"\"\"user = {{ dbuser }}\npassword = {{ dbpass }}\ndbname = {{ dbname }}\nhosts = {{ dbhost }}\nquery = {{ query|safe }}\n\"\"\"\n return Template(tplcontent)\n\n def get_template_context(self, options):\n \"\"\"Build the context used to render templates.\"\"\"\n dburl = options.get(\"dburl\")\n db_settings = (\n dj_database_url.config(default=dburl)\n if dburl else settings.DATABASES[\"default\"])\n if \"sqlite\" in db_settings[\"ENGINE\"]:\n dbtype = \"sqlite\"\n elif \"psycopg2\" in db_settings[\"ENGINE\"]:\n dbtype = \"postgres\"\n else:\n dbtype = \"mysql\"\n commandline = \"{} {}\".format(\n os.path.basename(sys.argv[0]), \" \".join(sys.argv[1:]))\n context = {\n \"date\": timezone.now(),\n \"commandline\": commandline,\n \"dbtype\": dbtype,\n \"dbuser\": db_settings[\"USER\"],\n \"dbpass\": db_settings[\"PASSWORD\"],\n \"dbname\": db_settings[\"NAME\"],\n \"dbhost\": db_settings.get(\"HOST\", \"127.0.0.1\"),\n }\n return context\n\n def __render_map_file(\n self, mapobject, destdir, context, force_overwrite=False):\n \"\"\"Render a map file.\"\"\"\n fullpath = os.path.join(destdir, mapobject.filename)\n if os.path.exists(fullpath) and not force_overwrite:\n if not self.__check_file(fullpath):\n print(\n \"Cannot upgrade '{}' map because it has been modified.\"\n .format(mapobject.filename))\n return self.__checksums[mapobject.filename]\n mapcontent = utils.parse_map_file(fullpath)\n context = copy.deepcopy(context)\n context[\"dbtype\"] = self.__checksums[mapobject.filename][\"dbtype\"]\n context[\"dbuser\"] = mapcontent[\"user\"]\n context[\"dbpass\"] = mapcontent[\"password\"]\n 
context[\"dbname\"] = mapcontent[\"dbname\"]\n context[\"dbhost\"] = mapcontent[\"hosts\"]\n content = self.get_template(context[\"dbtype\"]).render(\n Context(\n dict(context.items(),\n query=getattr(mapobject, context[\"dbtype\"]))\n )\n )\n fullpath = os.path.join(destdir, mapobject.filename)\n with open(fullpath, \"w\") as fp:\n fp.write(content)\n return hashlib.md5(content).hexdigest()\n\n def handle(self, *args, **options):\n \"\"\"Command entry point.\"\"\"\n mapfiles = self.__register_map_files()\n destdir = os.path.realpath(options[\"destdir\"])\n try:\n os.mkdir(destdir)\n except OSError:\n pass\n self.__load_checksums(destdir)\n context = self.get_template_context(options)\n checksums = {}\n for mapobject in mapfiles:\n checksum = self.__render_map_file(\n mapobject, destdir, context,\n force_overwrite=options[\"force_overwrite\"])\n checksums[mapobject.filename] = checksum\n with open(self.__checksums_file, \"w\") as fp:\n for fname, checksum in checksums.items():\n fp.write(\"{}:{}:{}\\n\".format(\n fname, context[\"dbtype\"], checksum))\n", "path": "modoboa/core/management/commands/generate_postfix_maps.py"}]} | 2,304 | 273 |
gh_patches_debug_37302 | rasdani/github-patches | git_diff | Parsl__parsl-1321 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"ZMQError: Operation cannot be accomplished in current state" triggers shutdown
## Brief description and request
I've been seeing an error crop up in longer running Parsl DFK's which trigger a graceful, but unwanted shutdown. I can't find a reliable/minimal example to cause this trigger other than "run a while." So, I'm going to post as much debug info I can about our setup and am seeking advice on things to test or other information which would be helpful in debugging this error.
I'm happy to test whatever is needed, I can also make direct code modifications
## Backtrace of error thrown
The final error is in the title but here is the backtrace, it reliably fails on this line:
```
... (there are higher functions in this stack from our program)
File "/home/lnaden/github/qcfractal/qcfractal/queue/parsl_adapter.py", line 94, in count_running_workers
if hasattr(executor, 'connected_workers'):
File "/home/lnaden/miniconda3/envs/qca/lib/python3.7/site-packages/parsl/executors/high_throughput/executor.py", line 463, in connected_workers
workers = self.command_client.run("WORKERS")
File "/home/lnaden/miniconda3/envs/qca/lib/python3.7/site-packages/parsl/executors/high_throughput/zmq_pipes.py", line 40, in run
self.zmq_socket.send_pyobj(message, copy=True)
File "/home/lnaden/miniconda3/envs/qca/lib/python3.7/site-packages/zmq/sugar/socket.py", line 606, in send_pyobj
return self.send(msg, flags=flags, **kwargs)
File "/home/lnaden/miniconda3/envs/qca/lib/python3.7/site-packages/zmq/sugar/socket.py", line 395, in send
return super(Socket, self).send(data, flags=flags, copy=copy, track=track)
File "zmq/backend/cython/socket.pyx", line 725, in zmq.backend.cython.socket.Socket.send
File "zmq/backend/cython/socket.pyx", line 772, in zmq.backend.cython.socket.Socket.send
File "zmq/backend/cython/socket.pyx", line 247, in zmq.backend.cython.socket._send_copy
File "zmq/backend/cython/socket.pyx", line 242, in zmq.backend.cython.socket._send_copy
File "zmq/backend/cython/checkrc.pxd", line 25, in zmq.backend.cython.checkrc._check_rc
zmq.error.ZMQError: Operation cannot be accomplished in current state
```
Below this it behaves as though a standard shutdown signal and the next log item is `[I 190716 15:38:08 dflow:821] DFK cleanup initiated`
## Additional info
`if hasattr(executor, 'connected_workers'):` is inside a loop over the values: `parsl.dataflow.dflow.DataFlowKernel.executors.values()`
Near as I can tell, this error is only thrown and logged at the higher level programs which are running Parsl and this does not appear in any of the logs or files from the `runinfo` folder.
Leading up to the crash, there are multiple "loss of Manager" errors. The first set of lost managers happened ~6 hours after the processes were started, which is expected given the wall time of the cluster would have stopped them. However, after that initial set, we started loosing managers every 5-30 minutes seemingly randomly, all due to "too many missed heartbeats." This behavior is the same on 2 separate clusters (similar hardware, same SLURM scheduler). Not sure if this is related, but it's an observation.
Config file:
```python
Config(
app_cache=True,
checkpoint_files=None,
checkpoint_mode=None,
checkpoint_period=None,
data_management_max_threads=10,
executors=[HighThroughputExecutor(
address='calogin1',
cores_per_worker=16,
heartbeat_period=30,
heartbeat_threshold=120,
interchange_port_range=(55000, 56000),
label='QCFractal_Parsl_Slurm_Executor',
launch_cmd='process_worker_pool.py {debug} {max_workers} -p {prefetch_capacity} -c {cores_per_worker} -m {mem_per_worker} --poll {poll_period} --task_url={task_url} --result_url={result_url} --logdir={logdir} --block_id={{block_id}} --hb_period={heartbeat_period} --hb_threshold={heartbeat_threshold} ',
managed=True,
max_workers=48,
mem_per_worker=None,
poll_period=10,
prefetch_capacity=0,
provider=SlurmProvider(
'normal_q',
channel=LocalChannel(envs={}, script_dir=None, userhome='/home/lnaden/qcarchive/run_cc'),
cmd_timeout=10,
exclusive=True,
init_blocks=0,
launcher=SingleNodeLauncher(),
max_blocks=24,
min_blocks=0,
move_files=True,
nodes_per_block=1,
parallelism=1,
scheduler_options='#SBATCH --exclusive\n#SBATCH -A themolssi\n',
walltime='06:00:00',
worker_init='source /home/lnaden/qcarchive/boot_qcarchive_env.sh'
),
storage_access=[],
suppress_failure=False,
worker_debug=False,
worker_logdir_root=None,
worker_port_range=(54000, 55000),
worker_ports=None,
working_dir=None
)],
lazy_errors=True,
monitoring=None,
retries=0,
run_dir='runinfo',
strategy='simple',
usage_tracking=False
)
```
Setup info: SLURM Cluster, executing locally on the head node in a background process. Directly calling and manipulating the `DataFlowKernel` after providing it a `Config`
</issue>
<code>
[start of parsl/executors/high_throughput/zmq_pipes.py]
1 #!/usr/bin/env python3
2
3 import zmq
4 import time
5 import pickle
6 import logging
7
8 logger = logging.getLogger(__name__)
9
10
11 class CommandClient(object):
12 """ CommandClient
13 """
14 def __init__(self, ip_address, port_range):
15 """
16 Parameters
17 ----------
18
19 ip_address: str
20 IP address of the client (where Parsl runs)
21 port_range: tuple(int, int)
22 Port range for the comms between client and interchange
23
24 """
25 self.context = zmq.Context()
26 self.zmq_socket = self.context.socket(zmq.REQ)
27 self.port = self.zmq_socket.bind_to_random_port("tcp://{}".format(ip_address),
28 min_port=port_range[0],
29 max_port=port_range[1])
30
31 def run(self, message):
32 """ This function needs to be fast at the same time aware of the possibility of
33 ZMQ pipes overflowing.
34
35 The timeout increases slowly if contention is detected on ZMQ pipes.
36 We could set copy=False and get slightly better latency but this results
37 in ZMQ sockets reaching a broken state once there are ~10k tasks in flight.
38 This issue can be magnified if each the serialized buffer itself is larger.
39 """
40 self.zmq_socket.send_pyobj(message, copy=True)
41 reply = self.zmq_socket.recv_pyobj()
42 return reply
43
44 def close(self):
45 self.zmq_socket.close()
46 self.context.term()
47
48
49 class TasksOutgoing(object):
50 """ Outgoing task queue from the executor to the Interchange
51 """
52 def __init__(self, ip_address, port_range):
53 """
54 Parameters
55 ----------
56
57 ip_address: str
58 IP address of the client (where Parsl runs)
59 port_range: tuple(int, int)
60 Port range for the comms between client and interchange
61
62 """
63 self.context = zmq.Context()
64 self.zmq_socket = self.context.socket(zmq.DEALER)
65 self.zmq_socket.set_hwm(0)
66 self.port = self.zmq_socket.bind_to_random_port("tcp://{}".format(ip_address),
67 min_port=port_range[0],
68 max_port=port_range[1])
69 self.poller = zmq.Poller()
70 self.poller.register(self.zmq_socket, zmq.POLLOUT)
71
72 def put(self, message):
73 """ This function needs to be fast at the same time aware of the possibility of
74 ZMQ pipes overflowing.
75
76 The timeout increases slowly if contention is detected on ZMQ pipes.
77 We could set copy=False and get slightly better latency but this results
78 in ZMQ sockets reaching a broken state once there are ~10k tasks in flight.
79 This issue can be magnified if each the serialized buffer itself is larger.
80 """
81 timeout_ms = 0
82 while True:
83 socks = dict(self.poller.poll(timeout=timeout_ms))
84 if self.zmq_socket in socks and socks[self.zmq_socket] == zmq.POLLOUT:
85 # The copy option adds latency but reduces the risk of ZMQ overflow
86 self.zmq_socket.send_pyobj(message, copy=True)
87 return
88 else:
89 timeout_ms += 1
90 logger.debug("Not sending due to full zmq pipe, timeout: {} ms".format(timeout_ms))
91
92 def close(self):
93 self.zmq_socket.close()
94 self.context.term()
95
96
97 class ResultsIncoming(object):
98 """ Incoming results queue from the Interchange to the executor
99 """
100
101 def __init__(self, ip_address, port_range):
102 """
103 Parameters
104 ----------
105
106 ip_address: str
107 IP address of the client (where Parsl runs)
108 port_range: tuple(int, int)
109 Port range for the comms between client and interchange
110
111 """
112 self.context = zmq.Context()
113 self.results_receiver = self.context.socket(zmq.DEALER)
114 self.results_receiver.set_hwm(0)
115 self.port = self.results_receiver.bind_to_random_port("tcp://{}".format(ip_address),
116 min_port=port_range[0],
117 max_port=port_range[1])
118
119 def get(self, block=True, timeout=None):
120 return self.results_receiver.recv_multipart()
121
122 def request_close(self):
123 status = self.results_receiver.send(pickle.dumps(None))
124 time.sleep(0.1)
125 return status
126
127 def close(self):
128 self.results_receiver.close()
129 self.context.term()
130
[end of parsl/executors/high_throughput/zmq_pipes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsl/executors/high_throughput/zmq_pipes.py b/parsl/executors/high_throughput/zmq_pipes.py
--- a/parsl/executors/high_throughput/zmq_pipes.py
+++ b/parsl/executors/high_throughput/zmq_pipes.py
@@ -4,6 +4,7 @@
import time
import pickle
import logging
+import threading
logger = logging.getLogger(__name__)
@@ -23,12 +24,27 @@
"""
self.context = zmq.Context()
- self.zmq_socket = self.context.socket(zmq.REQ)
- self.port = self.zmq_socket.bind_to_random_port("tcp://{}".format(ip_address),
- min_port=port_range[0],
- max_port=port_range[1])
+ self.ip_address = ip_address
+ self.port_range = port_range
+ self.port = None
+ self.create_socket_and_bind()
+ self._lock = threading.Lock()
- def run(self, message):
+ def create_socket_and_bind(self):
+ """ Creates socket and binds to a port.
+
+ Upon recreating the socket, we bind to the same port.
+ """
+ self.zmq_socket = self.context.socket(zmq.REQ)
+ self.zmq_socket.setsockopt(zmq.LINGER, 0)
+ if self.port is None:
+ self.port = self.zmq_socket.bind_to_random_port("tcp://{}".format(self.ip_address),
+ min_port=self.port_range[0],
+ max_port=self.port_range[1])
+ else:
+ self.zmq_socket.bind("tcp://{}:{}".format(self.ip_address, self.port))
+
+ def run(self, message, max_retries=3):
""" This function needs to be fast at the same time aware of the possibility of
ZMQ pipes overflowing.
@@ -37,8 +53,24 @@
in ZMQ sockets reaching a broken state once there are ~10k tasks in flight.
This issue can be magnified if each the serialized buffer itself is larger.
"""
- self.zmq_socket.send_pyobj(message, copy=True)
- reply = self.zmq_socket.recv_pyobj()
+ reply = '__PARSL_ZMQ_PIPES_MAGIC__'
+ with self._lock:
+ for i in range(max_retries):
+ try:
+ self.zmq_socket.send_pyobj(message, copy=True)
+ reply = self.zmq_socket.recv_pyobj()
+ except zmq.ZMQError:
+ logger.exception("Potential ZMQ REQ-REP deadlock caught")
+ logger.info("Trying to reestablish context")
+ self.zmq_socket.close()
+ self.context.destroy()
+ self.context = zmq.Context()
+ self.create_socket_and_bind()
+
+ if reply == '__PARSL_ZMQ_PIPES_MAGIC__':
+ logger.error("Command channel run retries exhausted. Unable to run command")
+ raise Exception("Command Channel retries exhausted")
+
return reply
def close(self):
| {"golden_diff": "diff --git a/parsl/executors/high_throughput/zmq_pipes.py b/parsl/executors/high_throughput/zmq_pipes.py\n--- a/parsl/executors/high_throughput/zmq_pipes.py\n+++ b/parsl/executors/high_throughput/zmq_pipes.py\n@@ -4,6 +4,7 @@\n import time\n import pickle\n import logging\n+import threading\n \n logger = logging.getLogger(__name__)\n \n@@ -23,12 +24,27 @@\n \n \"\"\"\n self.context = zmq.Context()\n- self.zmq_socket = self.context.socket(zmq.REQ)\n- self.port = self.zmq_socket.bind_to_random_port(\"tcp://{}\".format(ip_address),\n- min_port=port_range[0],\n- max_port=port_range[1])\n+ self.ip_address = ip_address\n+ self.port_range = port_range\n+ self.port = None\n+ self.create_socket_and_bind()\n+ self._lock = threading.Lock()\n \n- def run(self, message):\n+ def create_socket_and_bind(self):\n+ \"\"\" Creates socket and binds to a port.\n+\n+ Upon recreating the socket, we bind to the same port.\n+ \"\"\"\n+ self.zmq_socket = self.context.socket(zmq.REQ)\n+ self.zmq_socket.setsockopt(zmq.LINGER, 0)\n+ if self.port is None:\n+ self.port = self.zmq_socket.bind_to_random_port(\"tcp://{}\".format(self.ip_address),\n+ min_port=self.port_range[0],\n+ max_port=self.port_range[1])\n+ else:\n+ self.zmq_socket.bind(\"tcp://{}:{}\".format(self.ip_address, self.port))\n+\n+ def run(self, message, max_retries=3):\n \"\"\" This function needs to be fast at the same time aware of the possibility of\n ZMQ pipes overflowing.\n \n@@ -37,8 +53,24 @@\n in ZMQ sockets reaching a broken state once there are ~10k tasks in flight.\n This issue can be magnified if each the serialized buffer itself is larger.\n \"\"\"\n- self.zmq_socket.send_pyobj(message, copy=True)\n- reply = self.zmq_socket.recv_pyobj()\n+ reply = '__PARSL_ZMQ_PIPES_MAGIC__'\n+ with self._lock:\n+ for i in range(max_retries):\n+ try:\n+ self.zmq_socket.send_pyobj(message, copy=True)\n+ reply = self.zmq_socket.recv_pyobj()\n+ except zmq.ZMQError:\n+ logger.exception(\"Potential ZMQ REQ-REP deadlock caught\")\n+ logger.info(\"Trying to reestablish context\")\n+ self.zmq_socket.close()\n+ self.context.destroy()\n+ self.context = zmq.Context()\n+ self.create_socket_and_bind()\n+\n+ if reply == '__PARSL_ZMQ_PIPES_MAGIC__':\n+ logger.error(\"Command channel run retries exhausted. Unable to run command\")\n+ raise Exception(\"Command Channel retries exhausted\")\n+\n return reply\n \n def close(self):\n", "issue": "\"ZMQError: Operation cannot be accomplished in current state\" triggers shutdown\n## Brief description and request\r\nI've been seeing an error crop up in longer running Parsl DFK's which trigger a graceful, but unwanted shutdown. I can't find a reliable/minimal example to cause this trigger other than \"run a while.\" So, I'm going to post as much debug info I can about our setup and am seeking advice on things to test or other information which would be helpful in debugging this error.\r\n\r\nI'm happy to test whatever is needed, I can also make direct code modifications \r\n\r\n## Backtrace of error thrown\r\n The final error is in the title but here is the backtrace, it reliably fails on this line:\r\n\r\n```\r\n... 
(there are higher functions in this stack from our program)\r\n File \"/home/lnaden/github/qcfractal/qcfractal/queue/parsl_adapter.py\", line 94, in count_running_workers\r\n if hasattr(executor, 'connected_workers'):\r\n File \"/home/lnaden/miniconda3/envs/qca/lib/python3.7/site-packages/parsl/executors/high_throughput/executor.py\", line 463, in connected_workers\r\n workers = self.command_client.run(\"WORKERS\")\r\n File \"/home/lnaden/miniconda3/envs/qca/lib/python3.7/site-packages/parsl/executors/high_throughput/zmq_pipes.py\", line 40, in run\r\n self.zmq_socket.send_pyobj(message, copy=True)\r\n File \"/home/lnaden/miniconda3/envs/qca/lib/python3.7/site-packages/zmq/sugar/socket.py\", line 606, in send_pyobj\r\n return self.send(msg, flags=flags, **kwargs)\r\n File \"/home/lnaden/miniconda3/envs/qca/lib/python3.7/site-packages/zmq/sugar/socket.py\", line 395, in send\r\n return super(Socket, self).send(data, flags=flags, copy=copy, track=track)\r\n File \"zmq/backend/cython/socket.pyx\", line 725, in zmq.backend.cython.socket.Socket.send\r\n File \"zmq/backend/cython/socket.pyx\", line 772, in zmq.backend.cython.socket.Socket.send\r\n File \"zmq/backend/cython/socket.pyx\", line 247, in zmq.backend.cython.socket._send_copy\r\n File \"zmq/backend/cython/socket.pyx\", line 242, in zmq.backend.cython.socket._send_copy\r\n File \"zmq/backend/cython/checkrc.pxd\", line 25, in zmq.backend.cython.checkrc._check_rc\r\nzmq.error.ZMQError: Operation cannot be accomplished in current state\r\n```\r\n\r\nBelow this it behaves as though a standard shutdown signal and the next log item is `[I 190716 15:38:08 dflow:821] DFK cleanup initiated`\r\n\r\n## Additional info\r\n\r\n`if hasattr(executor, 'connected_workers'):` is inside a loop over the values: `parsl.dataflow.dflow.DataFlowKernel.executors.values()` \r\n\r\nNear as I can tell, this error is only thrown and logged at the higher level programs which are running Parsl and this does not appear in any of the logs or files from the `runinfo` folder. \r\n\r\nLeading up to the crash, there are multiple \"loss of Manager\" errors. The first set of lost managers happened ~6 hours after the processes were started, which is expected given the wall time of the cluster would have stopped them. However, after that initial set, we started loosing managers every 5-30 minutes seemingly randomly, all due to \"too many missed heartbeats.\" This behavior is the same on 2 separate clusters (similar hardware, same SLURM scheduler). 
Not sure if this is related, but it's an observation.\r\n\r\nConfig file:\r\n```python\r\nConfig(\r\n app_cache=True,\r\n checkpoint_files=None,\r\n checkpoint_mode=None,\r\n checkpoint_period=None,\r\n data_management_max_threads=10,\r\n executors=[HighThroughputExecutor(\r\n address='calogin1',\r\n cores_per_worker=16,\r\n heartbeat_period=30,\r\n heartbeat_threshold=120,\r\n interchange_port_range=(55000, 56000),\r\n label='QCFractal_Parsl_Slurm_Executor',\r\n launch_cmd='process_worker_pool.py {debug} {max_workers} -p {prefetch_capacity} -c {cores_per_worker} -m {mem_per_worker} --poll {poll_period} --task_url={task_url} --result_url={result_url} --logdir={logdir} --block_id={{block_id}} --hb_period={heartbeat_period} --hb_threshold={heartbeat_threshold} ',\r\n managed=True,\r\n max_workers=48,\r\n mem_per_worker=None,\r\n poll_period=10,\r\n prefetch_capacity=0,\r\n provider=SlurmProvider(\r\n 'normal_q',\r\n channel=LocalChannel(envs={}, script_dir=None, userhome='/home/lnaden/qcarchive/run_cc'),\r\n cmd_timeout=10,\r\n exclusive=True,\r\n init_blocks=0,\r\n launcher=SingleNodeLauncher(),\r\n max_blocks=24,\r\n min_blocks=0,\r\n move_files=True,\r\n nodes_per_block=1,\r\n parallelism=1,\r\n scheduler_options='#SBATCH --exclusive\\n#SBATCH -A themolssi\\n',\r\n walltime='06:00:00',\r\n worker_init='source /home/lnaden/qcarchive/boot_qcarchive_env.sh'\r\n ),\r\n storage_access=[],\r\n suppress_failure=False,\r\n worker_debug=False,\r\n worker_logdir_root=None,\r\n worker_port_range=(54000, 55000),\r\n worker_ports=None,\r\n working_dir=None\r\n )],\r\n lazy_errors=True,\r\n monitoring=None,\r\n retries=0,\r\n run_dir='runinfo',\r\n strategy='simple',\r\n usage_tracking=False\r\n)\r\n\r\n```\r\n\r\nSetup info: SLURM Cluster, executing locally on the head node in a background process. 
Directly calling and manipulating the `DataFlowKernel` after providing it a `Config` \n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport zmq\nimport time\nimport pickle\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass CommandClient(object):\n \"\"\" CommandClient\n \"\"\"\n def __init__(self, ip_address, port_range):\n \"\"\"\n Parameters\n ----------\n\n ip_address: str\n IP address of the client (where Parsl runs)\n port_range: tuple(int, int)\n Port range for the comms between client and interchange\n\n \"\"\"\n self.context = zmq.Context()\n self.zmq_socket = self.context.socket(zmq.REQ)\n self.port = self.zmq_socket.bind_to_random_port(\"tcp://{}\".format(ip_address),\n min_port=port_range[0],\n max_port=port_range[1])\n\n def run(self, message):\n \"\"\" This function needs to be fast at the same time aware of the possibility of\n ZMQ pipes overflowing.\n\n The timeout increases slowly if contention is detected on ZMQ pipes.\n We could set copy=False and get slightly better latency but this results\n in ZMQ sockets reaching a broken state once there are ~10k tasks in flight.\n This issue can be magnified if each the serialized buffer itself is larger.\n \"\"\"\n self.zmq_socket.send_pyobj(message, copy=True)\n reply = self.zmq_socket.recv_pyobj()\n return reply\n\n def close(self):\n self.zmq_socket.close()\n self.context.term()\n\n\nclass TasksOutgoing(object):\n \"\"\" Outgoing task queue from the executor to the Interchange\n \"\"\"\n def __init__(self, ip_address, port_range):\n \"\"\"\n Parameters\n ----------\n\n ip_address: str\n IP address of the client (where Parsl runs)\n port_range: tuple(int, int)\n Port range for the comms between client and interchange\n\n \"\"\"\n self.context = zmq.Context()\n self.zmq_socket = self.context.socket(zmq.DEALER)\n self.zmq_socket.set_hwm(0)\n self.port = self.zmq_socket.bind_to_random_port(\"tcp://{}\".format(ip_address),\n min_port=port_range[0],\n max_port=port_range[1])\n self.poller = zmq.Poller()\n self.poller.register(self.zmq_socket, zmq.POLLOUT)\n\n def put(self, message):\n \"\"\" This function needs to be fast at the same time aware of the possibility of\n ZMQ pipes overflowing.\n\n The timeout increases slowly if contention is detected on ZMQ pipes.\n We could set copy=False and get slightly better latency but this results\n in ZMQ sockets reaching a broken state once there are ~10k tasks in flight.\n This issue can be magnified if each the serialized buffer itself is larger.\n \"\"\"\n timeout_ms = 0\n while True:\n socks = dict(self.poller.poll(timeout=timeout_ms))\n if self.zmq_socket in socks and socks[self.zmq_socket] == zmq.POLLOUT:\n # The copy option adds latency but reduces the risk of ZMQ overflow\n self.zmq_socket.send_pyobj(message, copy=True)\n return\n else:\n timeout_ms += 1\n logger.debug(\"Not sending due to full zmq pipe, timeout: {} ms\".format(timeout_ms))\n\n def close(self):\n self.zmq_socket.close()\n self.context.term()\n\n\nclass ResultsIncoming(object):\n \"\"\" Incoming results queue from the Interchange to the executor\n \"\"\"\n\n def __init__(self, ip_address, port_range):\n \"\"\"\n Parameters\n ----------\n\n ip_address: str\n IP address of the client (where Parsl runs)\n port_range: tuple(int, int)\n Port range for the comms between client and interchange\n\n \"\"\"\n self.context = zmq.Context()\n self.results_receiver = self.context.socket(zmq.DEALER)\n self.results_receiver.set_hwm(0)\n self.port = 
self.results_receiver.bind_to_random_port(\"tcp://{}\".format(ip_address),\n min_port=port_range[0],\n max_port=port_range[1])\n\n def get(self, block=True, timeout=None):\n return self.results_receiver.recv_multipart()\n\n def request_close(self):\n status = self.results_receiver.send(pickle.dumps(None))\n time.sleep(0.1)\n return status\n\n def close(self):\n self.results_receiver.close()\n self.context.term()\n", "path": "parsl/executors/high_throughput/zmq_pipes.py"}]} | 3,097 | 665 |
gh_patches_debug_16790 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-5633 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PubSub: Message publish_time return type unexpected
I am working on a PubSub project and was a bit confused about the Message `publish_time`. I expected to get `datetime` per [the docstring](https://github.com/GoogleCloudPlatform/google-cloud-python/blob/1acc8c22664229b6681ff91654932998e611e1c2/pubsub/google/cloud/pubsub_v1/subscriber/message.py#L152) for the method. Instead I got a `Timestamp` that apparently comes from protobuf types. I found there is even a [unit test](https://github.com/GoogleCloudPlatform/google-cloud-python/blob/1acc8c22664229b6681ff91654932998e611e1c2/pubsub/tests/unit/pubsub_v1/subscriber/test_message.py#L49) that verifies that `publish_time` returns a `Timestamp`.
Can we return `datetime`? Or should the docs just be updated to explain what is actually being returned? I'd prefer the former but wanted to ask before submitting a PR, given the inconsistency.
</issue>
<code>
[start of pubsub/google/cloud/pubsub_v1/subscriber/message.py]
1 # Copyright 2017, Google LLC All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16
17 import json
18 import math
19 import time
20
21 from google.cloud.pubsub_v1.subscriber._protocol import requests
22
23
24 _MESSAGE_REPR = """\
25 Message {{
26 data: {!r}
27 attributes: {}
28 }}"""
29
30
31 def _indent(lines, prefix=' '):
32 """Indent some text.
33
34 Note that this is present as ``textwrap.indent``, but not in Python 2.
35
36 Args:
37 lines (str): The newline delimited string to be indented.
38 prefix (Optional[str]): The prefix to indent each line with. Default
39 to two spaces.
40
41 Returns:
42 str: The newly indented content.
43 """
44 indented = []
45 for line in lines.split('\n'):
46 indented.append(prefix + line)
47 return '\n'.join(indented)
48
49
50 class Message(object):
51 """A representation of a single Pub/Sub message.
52
53 The common way to interact with
54 :class:`~.pubsub_v1.subscriber.message.Message` objects is to receive
55 them in callbacks on subscriptions; most users should never have a need
56 to instantiate them by hand. (The exception to this is if you are
57 implementing a custom subclass to
58 :class:`~.pubsub_v1.subscriber._consumer.Consumer`.)
59
60 Attributes:
61 message_id (str): The message ID. In general, you should not need
62 to use this directly.
63 data (bytes): The data in the message. Note that this will be a
64 :class:`bytes`, not a text string.
65 attributes (.ScalarMapContainer): The attributes sent along with the
66 message. See :attr:`attributes` for more information on this type.
67 publish_time (datetime): The time that this message was originally
68 published.
69 """
70
71 def __init__(self, message, ack_id, request_queue):
72 """Construct the Message.
73
74 .. note::
75
76 This class should not be constructed directly; it is the
77 responsibility of :class:`BasePolicy` subclasses to do so.
78
79 Args:
80 message (~.pubsub_v1.types.PubsubMessage): The message received
81 from Pub/Sub.
82 ack_id (str): The ack_id received from Pub/Sub.
83 request_queue (queue.Queue): A queue provided by the policy that
84 can accept requests; the policy is responsible for handling
85 those requests.
86 """
87 self._message = message
88 self._ack_id = ack_id
89 self._request_queue = request_queue
90 self.message_id = message.message_id
91
92 # The instantiation time is the time that this message
93 # was received. Tracking this provides us a way to be smart about
94 # the default lease deadline.
95 self._received_timestamp = time.time()
96
97 # The policy should lease this message, telling PubSub that it has
98 # it until it is acked or otherwise dropped.
99 self.lease()
100
101 def __repr__(self):
102 # Get an abbreviated version of the data.
103 abbv_data = self._message.data
104 if len(abbv_data) > 50:
105 abbv_data = abbv_data[:50] + b'...'
106
107 pretty_attrs = json.dumps(
108 dict(self.attributes),
109 indent=2,
110 separators=(',', ': '),
111 sort_keys=True,
112 )
113 pretty_attrs = _indent(pretty_attrs)
114 # We don't actually want the first line indented.
115 pretty_attrs = pretty_attrs.lstrip()
116 return _MESSAGE_REPR.format(abbv_data, pretty_attrs)
117
118 @property
119 def attributes(self):
120 """Return the attributes of the underlying Pub/Sub Message.
121
122 .. warning::
123
124 A ``ScalarMapContainer`` behaves slightly differently than a
125 ``dict``. For a Pub / Sub message this is a ``string->string`` map.
126 When trying to access a value via ``map['key']``, if the key is
127 not in the map, then the default value for the string type will
128 be returned, which is an empty string. It may be more intuitive
129 to just cast the map to a ``dict`` or to one use ``map.get``.
130
131 Returns:
132 .ScalarMapContainer: The message's attributes. This is a
133 ``dict``-like object provided by ``google.protobuf``.
134 """
135 return self._message.attributes
136
137 @property
138 def data(self):
139 """Return the data for the underlying Pub/Sub Message.
140
141 Returns:
142 bytes: The message data. This is always a bytestring; if you
143 want a text string, call :meth:`bytes.decode`.
144 """
145 return self._message.data
146
147 @property
148 def publish_time(self):
149 """Return the time that the message was originally published.
150
151 Returns:
152 datetime: The date and time that the message was published.
153 """
154 return self._message.publish_time
155
156 @property
157 def size(self):
158 """Return the size of the underlying message, in bytes."""
159 return self._message.ByteSize()
160
161 def ack(self):
162 """Acknowledge the given message.
163
164 Acknowledging a message in Pub/Sub means that you are done
165 with it, and it will not be delivered to this subscription again.
166 You should avoid acknowledging messages until you have
167 *finished* processing them, so that in the event of a failure,
168 you receive the message again.
169
170 .. warning::
171 Acks in Pub/Sub are best effort. You should always
172 ensure that your processing code is idempotent, as you may
173 receive any given message more than once.
174 """
175 time_to_ack = math.ceil(time.time() - self._received_timestamp)
176 self._request_queue.put(
177 requests.AckRequest(
178 ack_id=self._ack_id,
179 byte_size=self.size,
180 time_to_ack=time_to_ack
181 )
182 )
183
184 def drop(self):
185 """Release the message from lease management.
186
187 This informs the policy to no longer hold on to the lease for this
188 message. Pub/Sub will re-deliver the message if it is not acknowledged
189 before the existing lease expires.
190
191 .. warning::
192 For most use cases, the only reason to drop a message from
193 lease management is on :meth:`ack` or :meth:`nack`; these methods
194 both call this one. You probably do not want to call this method
195 directly.
196 """
197 self._request_queue.put(
198 requests.DropRequest(
199 ack_id=self._ack_id,
200 byte_size=self.size
201 )
202 )
203
204 def lease(self):
205 """Inform the policy to lease this message continually.
206
207 .. note::
208 This method is called by the constructor, and you should never
209 need to call it manually.
210 """
211 self._request_queue.put(
212 requests.LeaseRequest(
213 ack_id=self._ack_id,
214 byte_size=self.size
215 )
216 )
217
218 def modify_ack_deadline(self, seconds):
219 """Resets the deadline for acknowledgement.
220
221 New deadline will be the given value of seconds from now.
222
223 The default implementation handles this for you; you should not need
224 to manually deal with setting ack deadlines. The exception case is
225 if you are implementing your own custom subclass of
226 :class:`~.pubsub_v1.subcriber._consumer.Consumer`.
227
228 Args:
229 seconds (int): The number of seconds to set the lease deadline
230 to. This should be between 0 and 600. Due to network latency,
231 values below 10 are advised against.
232 """
233 self._request_queue.put(
234 requests.ModAckRequest(
235 ack_id=self._ack_id,
236 seconds=seconds
237 )
238 )
239
240 def nack(self):
241 """Decline to acknowldge the given message.
242
243 This will cause the message to be re-delivered to the subscription.
244 """
245 self._request_queue.put(
246 requests.NackRequest(
247 ack_id=self._ack_id,
248 byte_size=self.size
249 )
250 )
251
[end of pubsub/google/cloud/pubsub_v1/subscriber/message.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/message.py b/pubsub/google/cloud/pubsub_v1/subscriber/message.py
--- a/pubsub/google/cloud/pubsub_v1/subscriber/message.py
+++ b/pubsub/google/cloud/pubsub_v1/subscriber/message.py
@@ -14,10 +14,12 @@
from __future__ import absolute_import
+import datetime
import json
import math
import time
+from google.api_core import datetime_helpers
from google.cloud.pubsub_v1.subscriber._protocol import requests
@@ -151,7 +153,11 @@
Returns:
datetime: The date and time that the message was published.
"""
- return self._message.publish_time
+ timestamp = self._message.publish_time
+ delta = datetime.timedelta(
+ seconds=timestamp.seconds,
+ microseconds=timestamp.nanos // 1000)
+ return datetime_helpers._UTC_EPOCH + delta
@property
def size(self):
| {"golden_diff": "diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/message.py b/pubsub/google/cloud/pubsub_v1/subscriber/message.py\n--- a/pubsub/google/cloud/pubsub_v1/subscriber/message.py\n+++ b/pubsub/google/cloud/pubsub_v1/subscriber/message.py\n@@ -14,10 +14,12 @@\n \n from __future__ import absolute_import\n \n+import datetime\n import json\n import math\n import time\n \n+from google.api_core import datetime_helpers\n from google.cloud.pubsub_v1.subscriber._protocol import requests\n \n \n@@ -151,7 +153,11 @@\n Returns:\n datetime: The date and time that the message was published.\n \"\"\"\n- return self._message.publish_time\n+ timestamp = self._message.publish_time\n+ delta = datetime.timedelta(\n+ seconds=timestamp.seconds,\n+ microseconds=timestamp.nanos // 1000)\n+ return datetime_helpers._UTC_EPOCH + delta\n \n @property\n def size(self):\n", "issue": "PubSub: Message publish_time return type unexpected\nI am working on a PubSub project and was a bit confused about the Message `publish_time`. I expected to get `datetime` per [the docstring](https://github.com/GoogleCloudPlatform/google-cloud-python/blob/1acc8c22664229b6681ff91654932998e611e1c2/pubsub/google/cloud/pubsub_v1/subscriber/message.py#L152) for the method. Instead I got a `Timestamp` that apparently comes from protobuf types. I found there is even a [unit test](https://github.com/GoogleCloudPlatform/google-cloud-python/blob/1acc8c22664229b6681ff91654932998e611e1c2/pubsub/tests/unit/pubsub_v1/subscriber/test_message.py#L49) that verifies that `publish_time` returns a `Timestamp`.\r\n\r\nCan we return `datetime`? Or should the docs just be updated to explain what is actually being returned? I'd prefer the former but wanted to ask before submitting a PR, given the inconsistency.\n", "before_files": [{"content": "# Copyright 2017, Google LLC All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport json\nimport math\nimport time\n\nfrom google.cloud.pubsub_v1.subscriber._protocol import requests\n\n\n_MESSAGE_REPR = \"\"\"\\\nMessage {{\n data: {!r}\n attributes: {}\n}}\"\"\"\n\n\ndef _indent(lines, prefix=' '):\n \"\"\"Indent some text.\n\n Note that this is present as ``textwrap.indent``, but not in Python 2.\n\n Args:\n lines (str): The newline delimited string to be indented.\n prefix (Optional[str]): The prefix to indent each line with. Default\n to two spaces.\n\n Returns:\n str: The newly indented content.\n \"\"\"\n indented = []\n for line in lines.split('\\n'):\n indented.append(prefix + line)\n return '\\n'.join(indented)\n\n\nclass Message(object):\n \"\"\"A representation of a single Pub/Sub message.\n\n The common way to interact with\n :class:`~.pubsub_v1.subscriber.message.Message` objects is to receive\n them in callbacks on subscriptions; most users should never have a need\n to instantiate them by hand. 
(The exception to this is if you are\n implementing a custom subclass to\n :class:`~.pubsub_v1.subscriber._consumer.Consumer`.)\n\n Attributes:\n message_id (str): The message ID. In general, you should not need\n to use this directly.\n data (bytes): The data in the message. Note that this will be a\n :class:`bytes`, not a text string.\n attributes (.ScalarMapContainer): The attributes sent along with the\n message. See :attr:`attributes` for more information on this type.\n publish_time (datetime): The time that this message was originally\n published.\n \"\"\"\n\n def __init__(self, message, ack_id, request_queue):\n \"\"\"Construct the Message.\n\n .. note::\n\n This class should not be constructed directly; it is the\n responsibility of :class:`BasePolicy` subclasses to do so.\n\n Args:\n message (~.pubsub_v1.types.PubsubMessage): The message received\n from Pub/Sub.\n ack_id (str): The ack_id received from Pub/Sub.\n request_queue (queue.Queue): A queue provided by the policy that\n can accept requests; the policy is responsible for handling\n those requests.\n \"\"\"\n self._message = message\n self._ack_id = ack_id\n self._request_queue = request_queue\n self.message_id = message.message_id\n\n # The instantiation time is the time that this message\n # was received. Tracking this provides us a way to be smart about\n # the default lease deadline.\n self._received_timestamp = time.time()\n\n # The policy should lease this message, telling PubSub that it has\n # it until it is acked or otherwise dropped.\n self.lease()\n\n def __repr__(self):\n # Get an abbreviated version of the data.\n abbv_data = self._message.data\n if len(abbv_data) > 50:\n abbv_data = abbv_data[:50] + b'...'\n\n pretty_attrs = json.dumps(\n dict(self.attributes),\n indent=2,\n separators=(',', ': '),\n sort_keys=True,\n )\n pretty_attrs = _indent(pretty_attrs)\n # We don't actually want the first line indented.\n pretty_attrs = pretty_attrs.lstrip()\n return _MESSAGE_REPR.format(abbv_data, pretty_attrs)\n\n @property\n def attributes(self):\n \"\"\"Return the attributes of the underlying Pub/Sub Message.\n\n .. warning::\n\n A ``ScalarMapContainer`` behaves slightly differently than a\n ``dict``. For a Pub / Sub message this is a ``string->string`` map.\n When trying to access a value via ``map['key']``, if the key is\n not in the map, then the default value for the string type will\n be returned, which is an empty string. It may be more intuitive\n to just cast the map to a ``dict`` or to one use ``map.get``.\n\n Returns:\n .ScalarMapContainer: The message's attributes. This is a\n ``dict``-like object provided by ``google.protobuf``.\n \"\"\"\n return self._message.attributes\n\n @property\n def data(self):\n \"\"\"Return the data for the underlying Pub/Sub Message.\n\n Returns:\n bytes: The message data. 
This is always a bytestring; if you\n want a text string, call :meth:`bytes.decode`.\n \"\"\"\n return self._message.data\n\n @property\n def publish_time(self):\n \"\"\"Return the time that the message was originally published.\n\n Returns:\n datetime: The date and time that the message was published.\n \"\"\"\n return self._message.publish_time\n\n @property\n def size(self):\n \"\"\"Return the size of the underlying message, in bytes.\"\"\"\n return self._message.ByteSize()\n\n def ack(self):\n \"\"\"Acknowledge the given message.\n\n Acknowledging a message in Pub/Sub means that you are done\n with it, and it will not be delivered to this subscription again.\n You should avoid acknowledging messages until you have\n *finished* processing them, so that in the event of a failure,\n you receive the message again.\n\n .. warning::\n Acks in Pub/Sub are best effort. You should always\n ensure that your processing code is idempotent, as you may\n receive any given message more than once.\n \"\"\"\n time_to_ack = math.ceil(time.time() - self._received_timestamp)\n self._request_queue.put(\n requests.AckRequest(\n ack_id=self._ack_id,\n byte_size=self.size,\n time_to_ack=time_to_ack\n )\n )\n\n def drop(self):\n \"\"\"Release the message from lease management.\n\n This informs the policy to no longer hold on to the lease for this\n message. Pub/Sub will re-deliver the message if it is not acknowledged\n before the existing lease expires.\n\n .. warning::\n For most use cases, the only reason to drop a message from\n lease management is on :meth:`ack` or :meth:`nack`; these methods\n both call this one. You probably do not want to call this method\n directly.\n \"\"\"\n self._request_queue.put(\n requests.DropRequest(\n ack_id=self._ack_id,\n byte_size=self.size\n )\n )\n\n def lease(self):\n \"\"\"Inform the policy to lease this message continually.\n\n .. note::\n This method is called by the constructor, and you should never\n need to call it manually.\n \"\"\"\n self._request_queue.put(\n requests.LeaseRequest(\n ack_id=self._ack_id,\n byte_size=self.size\n )\n )\n\n def modify_ack_deadline(self, seconds):\n \"\"\"Resets the deadline for acknowledgement.\n\n New deadline will be the given value of seconds from now.\n\n The default implementation handles this for you; you should not need\n to manually deal with setting ack deadlines. The exception case is\n if you are implementing your own custom subclass of\n :class:`~.pubsub_v1.subcriber._consumer.Consumer`.\n\n Args:\n seconds (int): The number of seconds to set the lease deadline\n to. This should be between 0 and 600. Due to network latency,\n values below 10 are advised against.\n \"\"\"\n self._request_queue.put(\n requests.ModAckRequest(\n ack_id=self._ack_id,\n seconds=seconds\n )\n )\n\n def nack(self):\n \"\"\"Decline to acknowldge the given message.\n\n This will cause the message to be re-delivered to the subscription.\n \"\"\"\n self._request_queue.put(\n requests.NackRequest(\n ack_id=self._ack_id,\n byte_size=self.size\n )\n )\n", "path": "pubsub/google/cloud/pubsub_v1/subscriber/message.py"}]} | 3,313 | 221 |
gh_patches_debug_22308 | rasdani/github-patches | git_diff | StackStorm__st2-3656 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Don't publish messages to exporter exchange if exporter service is not enabled and running
While working on #3648, I noticed the `st2.exporter.work` queue grows indefinitely.
The reason for that is that the exporter service is optional and doesn't run by default.
We should modify the code to only publish messages to that exchange if the exporter service is enabled and running (otherwise this queue will keep growing indefinitely and this could eventually cause issues).
</issue>
<code>
[start of st2common/st2common/transport/bootstrap_utils.py]
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import socket
17
18 import retrying
19 from oslo_config import cfg
20 from kombu import Connection
21
22 from st2common import log as logging
23 from st2common.transport import utils as transport_utils
24 from st2common.transport.actionexecutionstate import ACTIONEXECUTIONSTATE_XCHG
25 from st2common.transport.announcement import ANNOUNCEMENT_XCHG
26 from st2common.transport.connection_retry_wrapper import ConnectionRetryWrapper
27 from st2common.transport.execution import EXECUTION_XCHG
28 from st2common.transport.liveaction import LIVEACTION_XCHG, LIVEACTION_STATUS_MGMT_XCHG
29 from st2common.transport.reactor import SENSOR_CUD_XCHG
30 from st2common.transport.reactor import TRIGGER_CUD_XCHG, TRIGGER_INSTANCE_XCHG
31 from st2common.transport import reactor
32 from st2common.transport.queues import ACTIONSCHEDULER_REQUEST_QUEUE
33 from st2common.transport.queues import ACTIONRUNNER_WORK_QUEUE
34 from st2common.transport.queues import ACTIONRUNNER_CANCEL_QUEUE
35 from st2common.transport.queues import EXPORTER_WORK_QUEUE
36 from st2common.transport.queues import NOTIFIER_ACTIONUPDATE_WORK_QUEUE
37 from st2common.transport.queues import RESULTSTRACKER_ACTIONSTATE_WORK_QUEUE
38 from st2common.transport.queues import RULESENGINE_WORK_QUEUE
39 from st2common.transport.queues import STREAM_ANNOUNCEMENT_WORK_QUEUE
40 from st2common.transport.queues import STREAM_EXECUTION_WORK_QUEUE
41 from st2common.transport.queues import STREAM_LIVEACTION_WORK_QUEUE
42
43 LOG = logging.getLogger('st2common.transport.bootstrap')
44
45 __all__ = [
46 'register_exchanges',
47
48 'EXCHANGES',
49 'QUEUES'
50 ]
51
52 # List of exchanges which are pre-declared on service set up.
53 EXCHANGES = [
54 ACTIONEXECUTIONSTATE_XCHG,
55 ANNOUNCEMENT_XCHG,
56 EXECUTION_XCHG,
57 LIVEACTION_XCHG,
58 LIVEACTION_STATUS_MGMT_XCHG,
59 TRIGGER_CUD_XCHG,
60 TRIGGER_INSTANCE_XCHG,
61 SENSOR_CUD_XCHG
62 ]
63
64 # List of queues which are pre-declared on service startup.
65 # All the queues need to be declared and bound up front so we can guarantee messages get routed
66 # and don't get lost even if there are no consumers online
67 QUEUES = [
68 ACTIONSCHEDULER_REQUEST_QUEUE,
69 ACTIONRUNNER_WORK_QUEUE,
70 ACTIONRUNNER_CANCEL_QUEUE,
71 EXPORTER_WORK_QUEUE,
72 NOTIFIER_ACTIONUPDATE_WORK_QUEUE,
73 RESULTSTRACKER_ACTIONSTATE_WORK_QUEUE,
74 RULESENGINE_WORK_QUEUE,
75
76 STREAM_ANNOUNCEMENT_WORK_QUEUE,
77 STREAM_EXECUTION_WORK_QUEUE,
78 STREAM_LIVEACTION_WORK_QUEUE,
79
80 # Those queues are dynamically / late created on some class init but we still need to
81 # pre-declare them for redis Kombu backend to work.
82 reactor.get_trigger_cud_queue(name='st2.preinit', routing_key='init'),
83 reactor.get_sensor_cud_queue(name='st2.preinit', routing_key='init')
84 ]
85
86
87 def _do_register_exchange(exchange, connection, channel, retry_wrapper):
88 try:
89 kwargs = {
90 'exchange': exchange.name,
91 'type': exchange.type,
92 'durable': exchange.durable,
93 'auto_delete': exchange.auto_delete,
94 'arguments': exchange.arguments,
95 'nowait': False,
96 'passive': False
97 }
98 # Use the retry wrapper to increase resiliency in recoverable errors.
99 retry_wrapper.ensured(connection=connection,
100 obj=channel,
101 to_ensure_func=channel.exchange_declare,
102 **kwargs)
103 LOG.debug('Registered exchange %s (%s).' % (exchange.name, str(kwargs)))
104 except Exception:
105 LOG.exception('Failed to register exchange: %s.', exchange.name)
106
107
108 def _do_predeclare_queue(channel, queue):
109 LOG.debug('Predeclaring queue for exchange "%s"' % (queue.exchange.name))
110
111 bound_queue = None
112
113 try:
114 bound_queue = queue(channel)
115 bound_queue.declare(nowait=False)
116 LOG.debug('Predeclared queue for exchange "%s"' % (queue.exchange.name))
117 except Exception:
118 LOG.exception('Failed to predeclare queue for exchange "%s"' % (queue.exchange.name))
119
120 return bound_queue
121
122
123 def register_exchanges():
124 LOG.debug('Registering exchanges...')
125 connection_urls = transport_utils.get_messaging_urls()
126 with Connection(connection_urls) as conn:
127 # Use ConnectionRetryWrapper to deal with rmq clustering etc.
128 retry_wrapper = ConnectionRetryWrapper(cluster_size=len(connection_urls), logger=LOG)
129
130 def wrapped_register_exchanges(connection, channel):
131 for exchange in EXCHANGES:
132 _do_register_exchange(exchange=exchange, connection=connection, channel=channel,
133 retry_wrapper=retry_wrapper)
134
135 retry_wrapper.run(connection=conn, wrapped_callback=wrapped_register_exchanges)
136
137 def wrapped_predeclare_queues(connection, channel):
138 for queue in QUEUES:
139 _do_predeclare_queue(channel=channel, queue=queue)
140
141 retry_wrapper.run(connection=conn, wrapped_callback=wrapped_predeclare_queues)
142
143
144 def register_exchanges_with_retry():
145 def retry_if_io_error(exception):
146 return isinstance(exception, socket.error)
147
148 retrying_obj = retrying.Retrying(
149 retry_on_exception=retry_if_io_error,
150 wait_fixed=cfg.CONF.messaging.connection_retry_wait,
151 stop_max_attempt_number=cfg.CONF.messaging.connection_retries
152 )
153 return retrying_obj.call(register_exchanges)
154
[end of st2common/st2common/transport/bootstrap_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/st2common/st2common/transport/bootstrap_utils.py b/st2common/st2common/transport/bootstrap_utils.py
--- a/st2common/st2common/transport/bootstrap_utils.py
+++ b/st2common/st2common/transport/bootstrap_utils.py
@@ -32,7 +32,6 @@
from st2common.transport.queues import ACTIONSCHEDULER_REQUEST_QUEUE
from st2common.transport.queues import ACTIONRUNNER_WORK_QUEUE
from st2common.transport.queues import ACTIONRUNNER_CANCEL_QUEUE
-from st2common.transport.queues import EXPORTER_WORK_QUEUE
from st2common.transport.queues import NOTIFIER_ACTIONUPDATE_WORK_QUEUE
from st2common.transport.queues import RESULTSTRACKER_ACTIONSTATE_WORK_QUEUE
from st2common.transport.queues import RULESENGINE_WORK_QUEUE
@@ -68,7 +67,6 @@
ACTIONSCHEDULER_REQUEST_QUEUE,
ACTIONRUNNER_WORK_QUEUE,
ACTIONRUNNER_CANCEL_QUEUE,
- EXPORTER_WORK_QUEUE,
NOTIFIER_ACTIONUPDATE_WORK_QUEUE,
RESULTSTRACKER_ACTIONSTATE_WORK_QUEUE,
RULESENGINE_WORK_QUEUE,
| {"golden_diff": "diff --git a/st2common/st2common/transport/bootstrap_utils.py b/st2common/st2common/transport/bootstrap_utils.py\n--- a/st2common/st2common/transport/bootstrap_utils.py\n+++ b/st2common/st2common/transport/bootstrap_utils.py\n@@ -32,7 +32,6 @@\n from st2common.transport.queues import ACTIONSCHEDULER_REQUEST_QUEUE\n from st2common.transport.queues import ACTIONRUNNER_WORK_QUEUE\n from st2common.transport.queues import ACTIONRUNNER_CANCEL_QUEUE\n-from st2common.transport.queues import EXPORTER_WORK_QUEUE\n from st2common.transport.queues import NOTIFIER_ACTIONUPDATE_WORK_QUEUE\n from st2common.transport.queues import RESULTSTRACKER_ACTIONSTATE_WORK_QUEUE\n from st2common.transport.queues import RULESENGINE_WORK_QUEUE\n@@ -68,7 +67,6 @@\n ACTIONSCHEDULER_REQUEST_QUEUE,\n ACTIONRUNNER_WORK_QUEUE,\n ACTIONRUNNER_CANCEL_QUEUE,\n- EXPORTER_WORK_QUEUE,\n NOTIFIER_ACTIONUPDATE_WORK_QUEUE,\n RESULTSTRACKER_ACTIONSTATE_WORK_QUEUE,\n RULESENGINE_WORK_QUEUE,\n", "issue": "Don't publish messages to exporter exchange if exporter service is not enabled and running\nWhile working on #3648 and I noticed `st2.exporter.work` queue grows indefinitely.\r\n\r\nThe reason for that is that exporter service is optional and doesn't run by default.\r\n\r\nWe should modify the code to only publish messages to that exchange if exporter service is enabled and running (otherwise this queue will keep growing indefinitely and this could eventually cause issues).\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport socket\n\nimport retrying\nfrom oslo_config import cfg\nfrom kombu import Connection\n\nfrom st2common import log as logging\nfrom st2common.transport import utils as transport_utils\nfrom st2common.transport.actionexecutionstate import ACTIONEXECUTIONSTATE_XCHG\nfrom st2common.transport.announcement import ANNOUNCEMENT_XCHG\nfrom st2common.transport.connection_retry_wrapper import ConnectionRetryWrapper\nfrom st2common.transport.execution import EXECUTION_XCHG\nfrom st2common.transport.liveaction import LIVEACTION_XCHG, LIVEACTION_STATUS_MGMT_XCHG\nfrom st2common.transport.reactor import SENSOR_CUD_XCHG\nfrom st2common.transport.reactor import TRIGGER_CUD_XCHG, TRIGGER_INSTANCE_XCHG\nfrom st2common.transport import reactor\nfrom st2common.transport.queues import ACTIONSCHEDULER_REQUEST_QUEUE\nfrom st2common.transport.queues import ACTIONRUNNER_WORK_QUEUE\nfrom st2common.transport.queues import ACTIONRUNNER_CANCEL_QUEUE\nfrom st2common.transport.queues import EXPORTER_WORK_QUEUE\nfrom st2common.transport.queues import NOTIFIER_ACTIONUPDATE_WORK_QUEUE\nfrom st2common.transport.queues import RESULTSTRACKER_ACTIONSTATE_WORK_QUEUE\nfrom st2common.transport.queues import RULESENGINE_WORK_QUEUE\nfrom st2common.transport.queues import STREAM_ANNOUNCEMENT_WORK_QUEUE\nfrom st2common.transport.queues import STREAM_EXECUTION_WORK_QUEUE\nfrom st2common.transport.queues import STREAM_LIVEACTION_WORK_QUEUE\n\nLOG = logging.getLogger('st2common.transport.bootstrap')\n\n__all__ = [\n 'register_exchanges',\n\n 'EXCHANGES',\n 'QUEUES'\n]\n\n# List of exchanges which are pre-declared on service set up.\nEXCHANGES = [\n ACTIONEXECUTIONSTATE_XCHG,\n ANNOUNCEMENT_XCHG,\n EXECUTION_XCHG,\n LIVEACTION_XCHG,\n LIVEACTION_STATUS_MGMT_XCHG,\n TRIGGER_CUD_XCHG,\n TRIGGER_INSTANCE_XCHG,\n SENSOR_CUD_XCHG\n]\n\n# List of queues which are pre-declared on service startup.\n# All the queues need to be declared and bound up front so we can guarantee messages get routed\n# and don't get lost even if there are no consumers online\nQUEUES = [\n ACTIONSCHEDULER_REQUEST_QUEUE,\n ACTIONRUNNER_WORK_QUEUE,\n ACTIONRUNNER_CANCEL_QUEUE,\n EXPORTER_WORK_QUEUE,\n NOTIFIER_ACTIONUPDATE_WORK_QUEUE,\n RESULTSTRACKER_ACTIONSTATE_WORK_QUEUE,\n RULESENGINE_WORK_QUEUE,\n\n STREAM_ANNOUNCEMENT_WORK_QUEUE,\n STREAM_EXECUTION_WORK_QUEUE,\n STREAM_LIVEACTION_WORK_QUEUE,\n\n # Those queues are dynamically / late created on some class init but we still need to\n # pre-declare them for redis Kombu backend to work.\n reactor.get_trigger_cud_queue(name='st2.preinit', routing_key='init'),\n reactor.get_sensor_cud_queue(name='st2.preinit', routing_key='init')\n]\n\n\ndef _do_register_exchange(exchange, connection, channel, retry_wrapper):\n try:\n kwargs = {\n 'exchange': exchange.name,\n 'type': exchange.type,\n 'durable': exchange.durable,\n 'auto_delete': exchange.auto_delete,\n 'arguments': exchange.arguments,\n 'nowait': False,\n 'passive': False\n }\n # Use the retry wrapper to increase resiliency in recoverable errors.\n retry_wrapper.ensured(connection=connection,\n obj=channel,\n to_ensure_func=channel.exchange_declare,\n 
**kwargs)\n LOG.debug('Registered exchange %s (%s).' % (exchange.name, str(kwargs)))\n except Exception:\n LOG.exception('Failed to register exchange: %s.', exchange.name)\n\n\ndef _do_predeclare_queue(channel, queue):\n LOG.debug('Predeclaring queue for exchange \"%s\"' % (queue.exchange.name))\n\n bound_queue = None\n\n try:\n bound_queue = queue(channel)\n bound_queue.declare(nowait=False)\n LOG.debug('Predeclared queue for exchange \"%s\"' % (queue.exchange.name))\n except Exception:\n LOG.exception('Failed to predeclare queue for exchange \"%s\"' % (queue.exchange.name))\n\n return bound_queue\n\n\ndef register_exchanges():\n LOG.debug('Registering exchanges...')\n connection_urls = transport_utils.get_messaging_urls()\n with Connection(connection_urls) as conn:\n # Use ConnectionRetryWrapper to deal with rmq clustering etc.\n retry_wrapper = ConnectionRetryWrapper(cluster_size=len(connection_urls), logger=LOG)\n\n def wrapped_register_exchanges(connection, channel):\n for exchange in EXCHANGES:\n _do_register_exchange(exchange=exchange, connection=connection, channel=channel,\n retry_wrapper=retry_wrapper)\n\n retry_wrapper.run(connection=conn, wrapped_callback=wrapped_register_exchanges)\n\n def wrapped_predeclare_queues(connection, channel):\n for queue in QUEUES:\n _do_predeclare_queue(channel=channel, queue=queue)\n\n retry_wrapper.run(connection=conn, wrapped_callback=wrapped_predeclare_queues)\n\n\ndef register_exchanges_with_retry():\n def retry_if_io_error(exception):\n return isinstance(exception, socket.error)\n\n retrying_obj = retrying.Retrying(\n retry_on_exception=retry_if_io_error,\n wait_fixed=cfg.CONF.messaging.connection_retry_wait,\n stop_max_attempt_number=cfg.CONF.messaging.connection_retries\n )\n return retrying_obj.call(register_exchanges)\n", "path": "st2common/st2common/transport/bootstrap_utils.py"}]} | 2,332 | 236 |
gh_patches_debug_64029 | rasdani/github-patches | git_diff | holoviz__panel-1044 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support src urls and alt texts on Images
#### My Pain
I would like to use images in Panel via `pn.pane.Markdown` and/or `pn.pane.PNG`.
Currently the Bokeh layout engine does not lay out markdown with images well. See https://github.com/holoviz/panel/issues/835.
So I need to stick to `pn.pane.PNG` for images.
But the `ImageBase` class does not support parameters like the `src` url and `alt` text from the HTML `img` tag. So I cannot provide image links or alt texts.
#### Solution
Add `src` and `alt` parameters to the `ImageBase` class.
</issue>
<code>
[start of panel/pane/image.py]
1 """
2 Contains Image panes including renderers for PNG, SVG, GIF and JPG
3 file types.
4 """
5 from __future__ import absolute_import, division, unicode_literals
6
7 import base64
8
9 from io import BytesIO
10 from six import string_types
11
12 import param
13
14 from .markup import escape, DivPaneBase
15 from ..util import isfile, isurl
16
17
18 class ImageBase(DivPaneBase):
19 """
20 Encodes an image as base64 and wraps it in a Bokeh Div model.
21 This is an abstract base class that needs the image type
22 to be specified and specific code for determining the image shape.
23
24 The imgtype determines the filetype, extension, and MIME type for
25 this image. Each image type (png,jpg,gif) has a base class that
26 supports anything with a `_repr_X_` method (where X is `png`,
27 `gif`, etc.), a local file with the given file extension, or a
28 HTTP(S) url with the given extension. Subclasses of each type can
29 provide their own way of obtaining or generating a PNG.
30 """
31
32 alt_text = param.String(default=None, doc="""
33 alt text to add to the image tag. The alt text is shown when a
34 user cannot load or display the image.""")
35
36 link_url = param.String(default=None, doc="""
37 A link URL to make the image clickable and link to some other
38 website.""")
39
40 embed = param.Boolean(default=True, doc="""
41 Whether to embed the image as base64.""")
42
43 imgtype = 'None'
44
45 _rerender_params = ['alt_text', 'link_url', 'embed', 'object', 'style']
46
47 _target_transforms = {'object': """'<img src="' + value + '"></img>'"""}
48
49 __abstract = True
50
51 @classmethod
52 def applies(cls, obj):
53 imgtype = cls.imgtype
54 if hasattr(obj, '_repr_{}_'.format(imgtype)):
55 return True
56 if isinstance(obj, string_types):
57 if isfile(obj) and obj.endswith('.'+imgtype):
58 return True
59 if isurl(obj, [cls.imgtype]):
60 return True
61 if hasattr(obj, 'read'): # Check for file like object
62 return True
63 return False
64
65 def _type_error(self, object):
66 if isinstance(object, string_types):
67 raise ValueError("%s pane cannot parse string that is not a filename "
68 "or URL." % type(self).__name__)
69 super(ImageBase, self)._type_error(object)
70
71 def _img(self):
72 if hasattr(self.object, '_repr_{}_'.format(self.imgtype)):
73 return getattr(self.object, '_repr_' + self.imgtype + '_')()
74 if isinstance(self.object, string_types):
75 if isfile(self.object):
76 with open(self.object, 'rb') as f:
77 return f.read()
78 if hasattr(self.object, 'read'):
79 return self.object.read()
80 if isurl(self.object, [self.imgtype]):
81 import requests
82 r = requests.request(url=self.object, method='GET')
83 return r.content
84
85 def _imgshape(self, data):
86 """Calculate and return image width,height"""
87 raise NotImplementedError
88
89 def _get_properties(self):
90 p = super(ImageBase, self)._get_properties()
91 if self.object is None:
92 return dict(p, text='<img></img>')
93 data = self._img()
94 if not isinstance(data, bytes):
95 data = base64.b64decode(data)
96 width, height = self._imgshape(data)
97 if self.width is not None:
98 if self.height is None:
99 height = int((self.width/width)*height)
100 else:
101 height = self.height
102 width = self.width
103 elif self.height is not None:
104 width = int((self.height/height)*width)
105 height = self.height
106 if not self.embed:
107 src = self.object
108 else:
109 b64 = base64.b64encode(data).decode("utf-8")
110 src = "data:image/"+self.imgtype+";base64,{b64}".format(b64=b64)
111
112 smode = self.sizing_mode
113 if smode in ['fixed', None]:
114 w, h = '%spx' % width, '%spx' % height
115 elif smode == 'stretch_both':
116 w, h = '100%', '100%'
117 elif smode == 'stretch_height':
118 w, h = '%spx' % width, '100%'
119 elif smode == 'stretch_height':
120 w, h = '100%', '%spx' % height
121 elif smode == 'scale_height':
122 w, h = 'auto', '100%'
123 else:
124 w, h = '100%', 'auto'
125
126 html = '<img src="{src}" width="{width}" height="{height}" alt="{alt}"></img>'.format(
127 src=src, width=w, height=h, alt=self.alt_text or '')
128
129 if self.link_url:
130 html = '<a href="{url}" target="_blank">{html}</a>'.format(
131 url=self.link_url, html=html)
132
133 return dict(p, width=width, height=height, text=escape(html))
134
135
136 class PNG(ImageBase):
137
138 imgtype = 'png'
139
140 @classmethod
141 def _imgshape(cls, data):
142 import struct
143 w, h = struct.unpack('>LL', data[16:24])
144 return int(w), int(h)
145
146
147 class GIF(ImageBase):
148
149 imgtype = 'gif'
150
151 @classmethod
152 def _imgshape(cls, data):
153 import struct
154 w, h = struct.unpack("<HH", data[6:10])
155 return int(w), int(h)
156
157
158 class JPG(ImageBase):
159
160 imgtype = 'jpg'
161
162 @classmethod
163 def _imgshape(cls, data):
164 import struct
165 b = BytesIO(data)
166 b.read(2)
167 c = b.read(1)
168 while (c and ord(c) != 0xDA):
169 while (ord(c) != 0xFF): c = b.read(1)
170 while (ord(c) == 0xFF): c = b.read(1)
171 if (ord(c) >= 0xC0 and ord(c) <= 0xC3):
172 b.read(3)
173 h, w = struct.unpack(">HH", b.read(4))
174 break
175 else:
176 b.read(int(struct.unpack(">H", b.read(2))[0])-2)
177 c = b.read(1)
178 return int(w), int(h)
179
180
181 class SVG(ImageBase):
182
183 encode = param.Boolean(default=False, doc="""
184 Whether to enable base64 encoding of the SVG, base64 encoded
185 SVGs do not support links.""")
186
187 imgtype = 'svg'
188
189 _rerender_params = ImageBase._rerender_params + ['encode']
190
191 @classmethod
192 def applies(cls, obj):
193 return (super(SVG, cls).applies(obj) or
194 (isinstance(obj, string_types) and obj.lstrip().startswith('<svg')))
195
196 def _type_error(self, object):
197 if isinstance(object, string_types):
198 raise ValueError("%s pane cannot parse string that is not a filename, "
199 "URL or a SVG XML contents." % type(self).__name__)
200 super(SVG, self)._type_error(object)
201
202 def _img(self):
203 if (isinstance(self.object, string_types) and
204 self.object.lstrip().startswith('<svg')):
205 return self.object
206 return super(SVG, self)._img()
207
208 def _imgshape(self, data):
209 return (self.width, self.height)
210
211 def _get_properties(self):
212 p = super(ImageBase, self)._get_properties()
213 if self.object is None:
214 return dict(p, text='<img></img>')
215 data = self._img()
216 width, height = self._imgshape(data)
217 if not isinstance(data, bytes):
218 data = data.encode('utf-8')
219
220 if self.encode:
221 b64 = base64.b64encode(data).decode("utf-8")
222 src = "data:image/svg+xml;base64,{b64}".format(b64=b64)
223 html = "<img src='{src}' width={width} height={height}></img>".format(
224 src=src, width=width, height=height
225 )
226 else:
227 html = data.decode("utf-8")
228 return dict(p, width=width, height=height, text=escape(html))
229
[end of panel/pane/image.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/panel/pane/image.py b/panel/pane/image.py
--- a/panel/pane/image.py
+++ b/panel/pane/image.py
@@ -114,7 +114,7 @@
w, h = '%spx' % width, '%spx' % height
elif smode == 'stretch_both':
w, h = '100%', '100%'
- elif smode == 'stretch_height':
+ elif smode == 'stretch_width':
w, h = '%spx' % width, '100%'
elif smode == 'stretch_height':
w, h = '100%', '%spx' % height
| {"golden_diff": "diff --git a/panel/pane/image.py b/panel/pane/image.py\n--- a/panel/pane/image.py\n+++ b/panel/pane/image.py\n@@ -114,7 +114,7 @@\n w, h = '%spx' % width, '%spx' % height\n elif smode == 'stretch_both':\n w, h = '100%', '100%'\n- elif smode == 'stretch_height':\n+ elif smode == 'stretch_width':\n w, h = '%spx' % width, '100%'\n elif smode == 'stretch_height':\n w, h = '100%', '%spx' % height\n", "issue": "Support src urls and alt texts on Images\n#### My Pain\r\n\r\nI would like to use images in Panel via `pn.pane.Markdown` and/ or `pn.pane.PNG`.\r\n\r\nCurrently the Bokeh layout engine does not layout markdown with images well. See https://github.com/holoviz/panel/issues/835.\r\n\r\nSo I need to stick to `pn.pane.PNG` for images.\r\n\r\nBut the `ImageBase` class does not support parameters like the `src` url and `alt` text from the the HTML `img` tag. So I cannot provide image links or alt texts.\r\n\r\n#### Solution\r\n\r\nAdd `src` and `alt` parameters to the `BaseImage` class.\n", "before_files": [{"content": "\"\"\"\nContains Image panes including renderers for PNG, SVG, GIF and JPG\nfile types.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport base64\n\nfrom io import BytesIO\nfrom six import string_types\n\nimport param\n\nfrom .markup import escape, DivPaneBase\nfrom ..util import isfile, isurl\n\n\nclass ImageBase(DivPaneBase):\n \"\"\"\n Encodes an image as base64 and wraps it in a Bokeh Div model.\n This is an abstract base class that needs the image type\n to be specified and specific code for determining the image shape.\n\n The imgtype determines the filetype, extension, and MIME type for\n this image. Each image type (png,jpg,gif) has a base class that\n supports anything with a `_repr_X_` method (where X is `png`,\n `gif`, etc.), a local file with the given file extension, or a\n HTTP(S) url with the given extension. Subclasses of each type can\n provide their own way of obtaining or generating a PNG.\n \"\"\"\n\n alt_text = param.String(default=None, doc=\"\"\"\n alt text to add to the image tag. 
The alt text is shown when a\n user cannot load or display the image.\"\"\")\n\n link_url = param.String(default=None, doc=\"\"\"\n A link URL to make the image clickable and link to some other\n website.\"\"\")\n\n embed = param.Boolean(default=True, doc=\"\"\"\n Whether to embed the image as base64.\"\"\")\n\n imgtype = 'None'\n\n _rerender_params = ['alt_text', 'link_url', 'embed', 'object', 'style']\n\n _target_transforms = {'object': \"\"\"'<img src=\"' + value + '\"></img>'\"\"\"}\n\n __abstract = True\n\n @classmethod\n def applies(cls, obj):\n imgtype = cls.imgtype\n if hasattr(obj, '_repr_{}_'.format(imgtype)):\n return True\n if isinstance(obj, string_types):\n if isfile(obj) and obj.endswith('.'+imgtype):\n return True\n if isurl(obj, [cls.imgtype]):\n return True\n if hasattr(obj, 'read'): # Check for file like object\n return True\n return False\n\n def _type_error(self, object):\n if isinstance(object, string_types):\n raise ValueError(\"%s pane cannot parse string that is not a filename \"\n \"or URL.\" % type(self).__name__)\n super(ImageBase, self)._type_error(object)\n\n def _img(self):\n if hasattr(self.object, '_repr_{}_'.format(self.imgtype)):\n return getattr(self.object, '_repr_' + self.imgtype + '_')()\n if isinstance(self.object, string_types):\n if isfile(self.object):\n with open(self.object, 'rb') as f:\n return f.read()\n if hasattr(self.object, 'read'):\n return self.object.read()\n if isurl(self.object, [self.imgtype]):\n import requests\n r = requests.request(url=self.object, method='GET')\n return r.content\n\n def _imgshape(self, data):\n \"\"\"Calculate and return image width,height\"\"\"\n raise NotImplementedError\n\n def _get_properties(self):\n p = super(ImageBase, self)._get_properties()\n if self.object is None:\n return dict(p, text='<img></img>')\n data = self._img()\n if not isinstance(data, bytes):\n data = base64.b64decode(data)\n width, height = self._imgshape(data)\n if self.width is not None:\n if self.height is None:\n height = int((self.width/width)*height)\n else:\n height = self.height\n width = self.width\n elif self.height is not None:\n width = int((self.height/height)*width)\n height = self.height\n if not self.embed:\n src = self.object\n else:\n b64 = base64.b64encode(data).decode(\"utf-8\")\n src = \"data:image/\"+self.imgtype+\";base64,{b64}\".format(b64=b64)\n\n smode = self.sizing_mode\n if smode in ['fixed', None]:\n w, h = '%spx' % width, '%spx' % height\n elif smode == 'stretch_both':\n w, h = '100%', '100%'\n elif smode == 'stretch_height':\n w, h = '%spx' % width, '100%'\n elif smode == 'stretch_height':\n w, h = '100%', '%spx' % height\n elif smode == 'scale_height':\n w, h = 'auto', '100%'\n else:\n w, h = '100%', 'auto'\n\n html = '<img src=\"{src}\" width=\"{width}\" height=\"{height}\" alt=\"{alt}\"></img>'.format(\n src=src, width=w, height=h, alt=self.alt_text or '')\n\n if self.link_url:\n html = '<a href=\"{url}\" target=\"_blank\">{html}</a>'.format(\n url=self.link_url, html=html)\n\n return dict(p, width=width, height=height, text=escape(html))\n\n\nclass PNG(ImageBase):\n\n imgtype = 'png'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n w, h = struct.unpack('>LL', data[16:24])\n return int(w), int(h)\n\n\nclass GIF(ImageBase):\n\n imgtype = 'gif'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n w, h = struct.unpack(\"<HH\", data[6:10])\n return int(w), int(h)\n\n\nclass JPG(ImageBase):\n\n imgtype = 'jpg'\n\n @classmethod\n def _imgshape(cls, data):\n import struct\n b = 
BytesIO(data)\n b.read(2)\n c = b.read(1)\n while (c and ord(c) != 0xDA):\n while (ord(c) != 0xFF): c = b.read(1)\n while (ord(c) == 0xFF): c = b.read(1)\n if (ord(c) >= 0xC0 and ord(c) <= 0xC3):\n b.read(3)\n h, w = struct.unpack(\">HH\", b.read(4))\n break\n else:\n b.read(int(struct.unpack(\">H\", b.read(2))[0])-2)\n c = b.read(1)\n return int(w), int(h)\n\n\nclass SVG(ImageBase):\n\n encode = param.Boolean(default=False, doc=\"\"\"\n Whether to enable base64 encoding of the SVG, base64 encoded\n SVGs do not support links.\"\"\")\n\n imgtype = 'svg'\n\n _rerender_params = ImageBase._rerender_params + ['encode']\n\n @classmethod\n def applies(cls, obj):\n return (super(SVG, cls).applies(obj) or\n (isinstance(obj, string_types) and obj.lstrip().startswith('<svg')))\n\n def _type_error(self, object):\n if isinstance(object, string_types):\n raise ValueError(\"%s pane cannot parse string that is not a filename, \"\n \"URL or a SVG XML contents.\" % type(self).__name__)\n super(SVG, self)._type_error(object)\n\n def _img(self):\n if (isinstance(self.object, string_types) and\n self.object.lstrip().startswith('<svg')):\n return self.object\n return super(SVG, self)._img()\n\n def _imgshape(self, data):\n return (self.width, self.height)\n\n def _get_properties(self):\n p = super(ImageBase, self)._get_properties()\n if self.object is None:\n return dict(p, text='<img></img>')\n data = self._img()\n width, height = self._imgshape(data)\n if not isinstance(data, bytes):\n data = data.encode('utf-8')\n\n if self.encode:\n b64 = base64.b64encode(data).decode(\"utf-8\")\n src = \"data:image/svg+xml;base64,{b64}\".format(b64=b64)\n html = \"<img src='{src}' width={width} height={height}></img>\".format(\n src=src, width=width, height=height\n )\n else:\n html = data.decode(\"utf-8\")\n return dict(p, width=width, height=height, text=escape(html))\n", "path": "panel/pane/image.py"}]} | 3,153 | 156 |
gh_patches_debug_26552 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-5002 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Simplify vcs_support backend git by using GitPython
We already started using GitPython for some submodules pieces. We should continue porting some of our hand rolled logic to use GitPython instead.
Pieces of `readthedocs.vcs_support.backend.git` we should definitely port:
* `parse_branches()` -- this supersedes a branch I started at #2997 to replace parsing this with a csvreader
* `parse_tags()` -- same silly code using a csvreader
* `find_ref()`
* `ref_exists()`
* `repo_exists()`
Maybe:
* `tags()`
* `branches()`
These would be good first targets to port over, as we are executing these commands to get data out of the repository. It's not important that these messages are surfaced to users in build command output.
I'm going to block this on getting a feature out that executes these commands inside Docker first, though, since we need to clone and check out submodules inside the Docker container to isolate these calls. I haven't quite worked out how relying on GitPython for some of these calls, but not all of them, interacts with Docker VCS checkouts.
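
For a rough, untested sketch of what the GitPython-based equivalents could look like (only the git-facing part of each helper is shown; in the backend these would be methods using `self.working_dir`, and the exception handling may need tuning):

```python
import git
from git.exc import InvalidGitRepositoryError


def repo_exists(working_dir):
    # Opening the directory as a Repo replaces shelling out to `git status`.
    try:
        git.Repo(working_dir)
    except InvalidGitRepositoryError:
        return False
    return True


def has_submodules(working_dir):
    # GitPython parses .gitmodules itself, so no `git submodule status` call is needed.
    return bool(git.Repo(working_dir).submodules)
```

The `tags`/`branches` properties already construct a `git.Repo`, so the remaining helpers would mostly be a matter of reusing that object instead of parsing command output.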
</issue>
<code>
[start of readthedocs/vcs_support/backends/git.py]
1 # -*- coding: utf-8 -*-
2 """Git-related utilities."""
3
4 from __future__ import (
5 absolute_import,
6 division,
7 print_function,
8 unicode_literals,
9 )
10
11 import logging
12 import os
13 import re
14
15 import git
16 from builtins import str
17 from django.core.exceptions import ValidationError
18 from django.conf import settings
19 from git.exc import BadName
20
21 from readthedocs.config import ALL
22 from readthedocs.projects.exceptions import RepositoryError
23 from readthedocs.projects.validators import validate_submodule_url
24 from readthedocs.vcs_support.base import BaseVCS, VCSVersion
25
26 log = logging.getLogger(__name__)
27
28
29 class Backend(BaseVCS):
30
31 """Git VCS backend."""
32
33 supports_tags = True
34 supports_branches = True
35 supports_submodules = True
36 fallback_branch = 'master' # default branch
37 repo_depth = 50
38
39 def __init__(self, *args, **kwargs):
40 super(Backend, self).__init__(*args, **kwargs)
41 self.token = kwargs.get('token', None)
42 self.repo_url = self._get_clone_url()
43
44 def _get_clone_url(self):
45 if '://' in self.repo_url:
46 hacked_url = self.repo_url.split('://')[1]
47 hacked_url = re.sub('.git$', '', hacked_url)
48 clone_url = 'https://%s' % hacked_url
49 if self.token:
50 clone_url = 'https://%s@%s' % (self.token, hacked_url)
51 return clone_url
52 # Don't edit URL because all hosts aren't the same
53 # else:
54 # clone_url = 'git://%s' % (hacked_url)
55 return self.repo_url
56
57 def set_remote_url(self, url):
58 return self.run('git', 'remote', 'set-url', 'origin', url)
59
60 def update(self):
61 """Clone or update the repository."""
62 super(Backend, self).update()
63 if self.repo_exists():
64 self.set_remote_url(self.repo_url)
65 return self.fetch()
66 self.make_clean_working_dir()
67 return self.clone()
68
69 def repo_exists(self):
70 code, _, _ = self.run('git', 'status', record=False)
71 return code == 0
72
73 def are_submodules_available(self, config):
74 """Test whether git submodule checkout step should be performed."""
75 # TODO remove this after users migrate to a config file
76 from readthedocs.projects.models import Feature
77 submodules_in_config = (
78 config.submodules.exclude != ALL or
79 config.submodules.include
80 )
81 if (self.project.has_feature(Feature.SKIP_SUBMODULES) or
82 not submodules_in_config):
83 return False
84
85 # Keep compatibility with previous projects
86 code, out, _ = self.run('git', 'submodule', 'status', record=False)
87 return code == 0 and bool(out)
88
89 def validate_submodules(self, config):
90 """
91 Returns the submodules and check that its URLs are valid.
92
93 .. note::
94
95             Always call after `self.are_submodules_available`.
96
97 :returns: tuple(bool, list)
98
99 Returns true if all required submodules URLs are valid.
100 Returns a list of all required submodules:
101             - Include is `ALL`, returns all submodules available.
102 - Include is a list, returns just those.
103 - Exclude is `ALL` - this should never happen.
104             - Exclude is a list, returns all available submodules
105 but those from the list.
106 """
107 repo = git.Repo(self.working_dir)
108 submodules = {
109 sub.path: sub
110 for sub in repo.submodules
111 }
112
113 for sub_path in config.submodules.exclude:
114 path = sub_path.rstrip('/')
115 if path in submodules:
116 del submodules[path]
117
118 if config.submodules.include != ALL and config.submodules.include:
119 submodules_include = {}
120 for sub_path in config.submodules.include:
121 path = sub_path.rstrip('/')
122 submodules_include[path] = submodules[path]
123 submodules = submodules_include
124
125 for path, submodule in submodules.items():
126 try:
127 validate_submodule_url(submodule.url)
128 except ValidationError:
129 return False, []
130 return True, submodules.keys()
131
132 def fetch(self):
133 code, stdout, stderr = self.run(
134 'git', 'fetch', '--depth', str(self.repo_depth),
135 '--tags', '--prune', '--prune-tags',
136 )
137 if code != 0:
138 raise RepositoryError
139 return code, stdout, stderr
140
141 def checkout_revision(self, revision=None):
142 if not revision:
143 branch = self.default_branch or self.fallback_branch
144 revision = 'origin/%s' % branch
145
146 code, out, err = self.run('git', 'checkout', '--force', revision)
147 if code != 0:
148 log.warning("Failed to checkout revision '%s': %s", revision, code)
149 return [code, out, err]
150
151 def clone(self):
152 """Clones the repository."""
153 code, stdout, stderr = self.run(
154 'git', 'clone', '--depth', str(self.repo_depth),
155 '--no-single-branch', self.repo_url, '.'
156 )
157 if code != 0:
158 raise RepositoryError
159 return code, stdout, stderr
160
161 @property
162 def tags(self):
163 versions = []
164 repo = git.Repo(self.working_dir)
165 for tag in repo.tags:
166 try:
167 versions.append(VCSVersion(self, str(tag.commit), str(tag)))
168 except ValueError as e:
169 # ValueError: Cannot resolve commit as tag TAGNAME points to a
170 # blob object - use the `.object` property instead to access it
171 # This is not a real tag for us, so we skip it
172 # https://github.com/rtfd/readthedocs.org/issues/4440
173 log.warning('Git tag skipped: %s', tag, exc_info=True)
174 continue
175 return versions
176
177 @property
178 def branches(self):
179 repo = git.Repo(self.working_dir)
180 versions = []
181 branches = []
182
183 # ``repo.remotes.origin.refs`` returns remote branches
184 if repo.remotes:
185 branches += repo.remotes.origin.refs
186
187 for branch in branches:
188 verbose_name = branch.name
189 if verbose_name.startswith('origin/'):
190 verbose_name = verbose_name.replace('origin/', '')
191 if verbose_name == 'HEAD':
192 continue
193 versions.append(VCSVersion(self, str(branch), verbose_name))
194 return versions
195
196 @property
197 def commit(self):
198 _, stdout, _ = self.run('git', 'rev-parse', 'HEAD')
199 return stdout.strip()
200
201 def checkout(self, identifier=None):
202 """Checkout to identifier or latest."""
203 super(Backend, self).checkout()
204 # Find proper identifier
205 if not identifier:
206 identifier = self.default_branch or self.fallback_branch
207
208 identifier = self.find_ref(identifier)
209
210 # Checkout the correct identifier for this branch.
211 code, out, err = self.checkout_revision(identifier)
212 if code != 0:
213 return code, out, err
214
215 # Clean any remains of previous checkouts
216 self.run('git', 'clean', '-d', '-f', '-f')
217 return code, out, err
218
219 def update_submodules(self, config):
220 if self.are_submodules_available(config):
221 valid, submodules = self.validate_submodules(config)
222 if valid:
223 self.checkout_submodules(submodules, config)
224 else:
225 raise RepositoryError(RepositoryError.INVALID_SUBMODULES)
226
227 def checkout_submodules(self, submodules, config):
228 """Checkout all repository submodules."""
229 self.run('git', 'submodule', 'sync')
230 cmd = [
231 'git',
232 'submodule',
233 'update',
234 '--init',
235 '--force',
236 ]
237 if config.submodules.recursive:
238 cmd.append('--recursive')
239 cmd += submodules
240 self.run(*cmd)
241
242 def find_ref(self, ref):
243 # Check if ref starts with 'origin/'
244 if ref.startswith('origin/'):
245 return ref
246
247 # Check if ref is a branch of the origin remote
248 if self.ref_exists('remotes/origin/' + ref):
249 return 'origin/' + ref
250
251 return ref
252
253 def ref_exists(self, ref):
254 try:
255 r = git.Repo(self.working_dir)
256 if r.commit(ref):
257 return True
258 except (BadName, ValueError):
259 return False
260 return False
261
262 @property
263 def env(self):
264 env = super(Backend, self).env
265 env['GIT_DIR'] = os.path.join(self.working_dir, '.git')
266 # Don't prompt for username, this requires Git 2.3+
267 env['GIT_TERMINAL_PROMPT'] = '0'
268 return env
269
[end of readthedocs/vcs_support/backends/git.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/readthedocs/vcs_support/backends/git.py b/readthedocs/vcs_support/backends/git.py
--- a/readthedocs/vcs_support/backends/git.py
+++ b/readthedocs/vcs_support/backends/git.py
@@ -15,8 +15,7 @@
import git
from builtins import str
from django.core.exceptions import ValidationError
-from django.conf import settings
-from git.exc import BadName
+from git.exc import BadName, InvalidGitRepositoryError
from readthedocs.config import ALL
from readthedocs.projects.exceptions import RepositoryError
@@ -67,8 +66,11 @@
return self.clone()
def repo_exists(self):
- code, _, _ = self.run('git', 'status', record=False)
- return code == 0
+ try:
+ git.Repo(self.working_dir)
+ except InvalidGitRepositoryError:
+ return False
+ return True
def are_submodules_available(self, config):
"""Test whether git submodule checkout step should be performed."""
@@ -83,8 +85,8 @@
return False
# Keep compatibility with previous projects
- code, out, _ = self.run('git', 'submodule', 'status', record=False)
- return code == 0 and bool(out)
+ repo = git.Repo(self.working_dir)
+ return bool(repo.submodules)
def validate_submodules(self, config):
"""
| {"golden_diff": "diff --git a/readthedocs/vcs_support/backends/git.py b/readthedocs/vcs_support/backends/git.py\n--- a/readthedocs/vcs_support/backends/git.py\n+++ b/readthedocs/vcs_support/backends/git.py\n@@ -15,8 +15,7 @@\n import git\n from builtins import str\n from django.core.exceptions import ValidationError\n-from django.conf import settings\n-from git.exc import BadName\n+from git.exc import BadName, InvalidGitRepositoryError\n \n from readthedocs.config import ALL\n from readthedocs.projects.exceptions import RepositoryError\n@@ -67,8 +66,11 @@\n return self.clone()\n \n def repo_exists(self):\n- code, _, _ = self.run('git', 'status', record=False)\n- return code == 0\n+ try:\n+ git.Repo(self.working_dir)\n+ except InvalidGitRepositoryError:\n+ return False\n+ return True\n \n def are_submodules_available(self, config):\n \"\"\"Test whether git submodule checkout step should be performed.\"\"\"\n@@ -83,8 +85,8 @@\n return False\n \n # Keep compatibility with previous projects\n- code, out, _ = self.run('git', 'submodule', 'status', record=False)\n- return code == 0 and bool(out)\n+ repo = git.Repo(self.working_dir)\n+ return bool(repo.submodules)\n \n def validate_submodules(self, config):\n \"\"\"\n", "issue": "Simplify vcs_support backend git by using GitPython\nWe already started using GitPython for some submodules pieces. We should continue porting some of our hand rolled logic to use GitPython instead.\r\n\r\nPieces of `readthedocs.vcs_support.backend.git` we should definitely port:\r\n\r\n* `parse_branches()` -- this supersedes a branch I started at #2997 to replace parsing this with a csvreader\r\n* `parse_tags()` -- same silly code using a csvreader\r\n* `find_ref()`\r\n* `ref_exists()`\r\n* `repo_exists()`\r\n\r\nMaybe:\r\n\r\n* `tags()`\r\n* `branches()`\r\n\r\nThese would be good first targets to port over, as we are executing these commands to get data out of the repository. It's not important that these messages are surfaced to users in build command output.\r\n\r\nI'm going to block on getting a feature out that executes these commands in docker first though, as we need to clone and submodule checkout inside the docker container to isolate these calls. 
I can't quite consider how relying on gitpython for some of these calls, but not all, works with regard to docker vcs checkouts.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Git-related utilities.\"\"\"\n\nfrom __future__ import (\n absolute_import,\n division,\n print_function,\n unicode_literals,\n)\n\nimport logging\nimport os\nimport re\n\nimport git\nfrom builtins import str\nfrom django.core.exceptions import ValidationError\nfrom django.conf import settings\nfrom git.exc import BadName\n\nfrom readthedocs.config import ALL\nfrom readthedocs.projects.exceptions import RepositoryError\nfrom readthedocs.projects.validators import validate_submodule_url\nfrom readthedocs.vcs_support.base import BaseVCS, VCSVersion\n\nlog = logging.getLogger(__name__)\n\n\nclass Backend(BaseVCS):\n\n \"\"\"Git VCS backend.\"\"\"\n\n supports_tags = True\n supports_branches = True\n supports_submodules = True\n fallback_branch = 'master' # default branch\n repo_depth = 50\n\n def __init__(self, *args, **kwargs):\n super(Backend, self).__init__(*args, **kwargs)\n self.token = kwargs.get('token', None)\n self.repo_url = self._get_clone_url()\n\n def _get_clone_url(self):\n if '://' in self.repo_url:\n hacked_url = self.repo_url.split('://')[1]\n hacked_url = re.sub('.git$', '', hacked_url)\n clone_url = 'https://%s' % hacked_url\n if self.token:\n clone_url = 'https://%s@%s' % (self.token, hacked_url)\n return clone_url\n # Don't edit URL because all hosts aren't the same\n # else:\n # clone_url = 'git://%s' % (hacked_url)\n return self.repo_url\n\n def set_remote_url(self, url):\n return self.run('git', 'remote', 'set-url', 'origin', url)\n\n def update(self):\n \"\"\"Clone or update the repository.\"\"\"\n super(Backend, self).update()\n if self.repo_exists():\n self.set_remote_url(self.repo_url)\n return self.fetch()\n self.make_clean_working_dir()\n return self.clone()\n\n def repo_exists(self):\n code, _, _ = self.run('git', 'status', record=False)\n return code == 0\n\n def are_submodules_available(self, config):\n \"\"\"Test whether git submodule checkout step should be performed.\"\"\"\n # TODO remove this after users migrate to a config file\n from readthedocs.projects.models import Feature\n submodules_in_config = (\n config.submodules.exclude != ALL or\n config.submodules.include\n )\n if (self.project.has_feature(Feature.SKIP_SUBMODULES) or\n not submodules_in_config):\n return False\n\n # Keep compatibility with previous projects\n code, out, _ = self.run('git', 'submodule', 'status', record=False)\n return code == 0 and bool(out)\n\n def validate_submodules(self, config):\n \"\"\"\n Returns the submodules and check that its URLs are valid.\n\n .. 
note::\n\n Allways call after `self.are_submodules_available`.\n\n :returns: tuple(bool, list)\n\n Returns true if all required submodules URLs are valid.\n Returns a list of all required submodules:\n - Include is `ALL`, returns all submodules avaliable.\n - Include is a list, returns just those.\n - Exclude is `ALL` - this should never happen.\n - Exlude is a list, returns all avaliable submodules\n but those from the list.\n \"\"\"\n repo = git.Repo(self.working_dir)\n submodules = {\n sub.path: sub\n for sub in repo.submodules\n }\n\n for sub_path in config.submodules.exclude:\n path = sub_path.rstrip('/')\n if path in submodules:\n del submodules[path]\n\n if config.submodules.include != ALL and config.submodules.include:\n submodules_include = {}\n for sub_path in config.submodules.include:\n path = sub_path.rstrip('/')\n submodules_include[path] = submodules[path]\n submodules = submodules_include\n\n for path, submodule in submodules.items():\n try:\n validate_submodule_url(submodule.url)\n except ValidationError:\n return False, []\n return True, submodules.keys()\n\n def fetch(self):\n code, stdout, stderr = self.run(\n 'git', 'fetch', '--depth', str(self.repo_depth),\n '--tags', '--prune', '--prune-tags',\n )\n if code != 0:\n raise RepositoryError\n return code, stdout, stderr\n\n def checkout_revision(self, revision=None):\n if not revision:\n branch = self.default_branch or self.fallback_branch\n revision = 'origin/%s' % branch\n\n code, out, err = self.run('git', 'checkout', '--force', revision)\n if code != 0:\n log.warning(\"Failed to checkout revision '%s': %s\", revision, code)\n return [code, out, err]\n\n def clone(self):\n \"\"\"Clones the repository.\"\"\"\n code, stdout, stderr = self.run(\n 'git', 'clone', '--depth', str(self.repo_depth),\n '--no-single-branch', self.repo_url, '.'\n )\n if code != 0:\n raise RepositoryError\n return code, stdout, stderr\n\n @property\n def tags(self):\n versions = []\n repo = git.Repo(self.working_dir)\n for tag in repo.tags:\n try:\n versions.append(VCSVersion(self, str(tag.commit), str(tag)))\n except ValueError as e:\n # ValueError: Cannot resolve commit as tag TAGNAME points to a\n # blob object - use the `.object` property instead to access it\n # This is not a real tag for us, so we skip it\n # https://github.com/rtfd/readthedocs.org/issues/4440\n log.warning('Git tag skipped: %s', tag, exc_info=True)\n continue\n return versions\n\n @property\n def branches(self):\n repo = git.Repo(self.working_dir)\n versions = []\n branches = []\n\n # ``repo.remotes.origin.refs`` returns remote branches\n if repo.remotes:\n branches += repo.remotes.origin.refs\n\n for branch in branches:\n verbose_name = branch.name\n if verbose_name.startswith('origin/'):\n verbose_name = verbose_name.replace('origin/', '')\n if verbose_name == 'HEAD':\n continue\n versions.append(VCSVersion(self, str(branch), verbose_name))\n return versions\n\n @property\n def commit(self):\n _, stdout, _ = self.run('git', 'rev-parse', 'HEAD')\n return stdout.strip()\n\n def checkout(self, identifier=None):\n \"\"\"Checkout to identifier or latest.\"\"\"\n super(Backend, self).checkout()\n # Find proper identifier\n if not identifier:\n identifier = self.default_branch or self.fallback_branch\n\n identifier = self.find_ref(identifier)\n\n # Checkout the correct identifier for this branch.\n code, out, err = self.checkout_revision(identifier)\n if code != 0:\n return code, out, err\n\n # Clean any remains of previous checkouts\n self.run('git', 'clean', '-d', '-f', 
'-f')\n return code, out, err\n\n def update_submodules(self, config):\n if self.are_submodules_available(config):\n valid, submodules = self.validate_submodules(config)\n if valid:\n self.checkout_submodules(submodules, config)\n else:\n raise RepositoryError(RepositoryError.INVALID_SUBMODULES)\n\n def checkout_submodules(self, submodules, config):\n \"\"\"Checkout all repository submodules.\"\"\"\n self.run('git', 'submodule', 'sync')\n cmd = [\n 'git',\n 'submodule',\n 'update',\n '--init',\n '--force',\n ]\n if config.submodules.recursive:\n cmd.append('--recursive')\n cmd += submodules\n self.run(*cmd)\n\n def find_ref(self, ref):\n # Check if ref starts with 'origin/'\n if ref.startswith('origin/'):\n return ref\n\n # Check if ref is a branch of the origin remote\n if self.ref_exists('remotes/origin/' + ref):\n return 'origin/' + ref\n\n return ref\n\n def ref_exists(self, ref):\n try:\n r = git.Repo(self.working_dir)\n if r.commit(ref):\n return True\n except (BadName, ValueError):\n return False\n return False\n\n @property\n def env(self):\n env = super(Backend, self).env\n env['GIT_DIR'] = os.path.join(self.working_dir, '.git')\n # Don't prompt for username, this requires Git 2.3+\n env['GIT_TERMINAL_PROMPT'] = '0'\n return env\n", "path": "readthedocs/vcs_support/backends/git.py"}]} | 3,409 | 319 |
gh_patches_debug_36231 | rasdani/github-patches | git_diff | interactions-py__interactions.py-611 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[REQUEST] Add a basic `Color` object.
### Describe the feature.
People may want an easy way to refer to colors. Since this should at least be an accepted concept, I want to add a basic `Color` object that includes the official branding colors. Please note that other, custom-defined colors are deliberately discouraged for this idea, since the core library is about keeping implementations strict to what Discord offers; admittedly, even the branding colors are debatable on that front.
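
A minimal sketch of what this could look like (the hex values below are Discord's published branding colors; whether these end up as class attributes, properties, or an enum is an open design question):

```python
class Color:
    """Discord branding colors only; custom-defined colors stay out of scope."""

    BLURPLE = 0x5865F2
    GREEN = 0x57F287
    YELLOW = 0xFEE75C
    FUCHSIA = 0xEB459E
    RED = 0xED4245
    WHITE = 0xFFFFFF
    BLACK = 0x000000
```

A user could then pass e.g. `Color.BLURPLE` anywhere an integer color value is accepted.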
### Code of Conduct
- [X] I agree to follow the contribution requirements.
</issue>
<code>
[start of interactions/api/models/misc.py]
1 # TODO: This is post-v4.
2 # TODO: Reorganise these models based on which big obj uses little obj
3 # TODO: Potentially rename some model references to enums, if applicable
4 # TODO: Reorganise mixins to its own thing, currently placed here because circular import sucks.
5 # also, it should be serialiser* but idk, fl0w'd say something if I left it like that. /shrug
6 import datetime
7 from logging import Logger
8 from math import floor
9 from typing import Union
10
11 from interactions.base import get_logger
12
13 log: Logger = get_logger("mixin")
14
15
16 class DictSerializerMixin(object):
17 """
18 The purpose of this mixin is to be subclassed.
19
20 .. note::
21 On subclass, it:
22 -- From kwargs (received from the Discord API response), add it to the `_json` attribute
23 such that it can be reused by other libraries/extensions
24 -- Aids in attributing the kwargs to actual model attributes, i.e. `User.id`
25 -- Dynamically sets attributes not given to kwargs but slotted to None, signifying that it doesn't exist.
26
27 .. warning::
28 This does NOT convert them to its own data types, i.e. timestamps, or User within Member. This is left by
29 the object that's using the mixin.
30 """
31
32 __slots__ = "_json"
33
34 def __init__(self, **kwargs):
35 self._json = kwargs
36 # for key in kwargs:
37 # setattr(self, key, kwargs[key])
38
39 for key in kwargs:
40 if key in self.__slots__ if hasattr(self, "__slots__") else True:
41 # else case if the mixin is used outside of this library and/or SDK.
42 setattr(self, key, kwargs[key])
43 else:
44 log.warning(
45 f"Attribute {key} is missing from the {self.__class__.__name__} data model, skipping."
46 )
47 # work on message printout? Effective, but I think it should be a little bit more friendly
48 # towards end users
49
50 # if self.__slots__ is not None: # safeguard, runtime check
51 if hasattr(self, "__slots__"):
52 for _attr in self.__slots__:
53 if not hasattr(self, _attr):
54 setattr(self, _attr, None)
55
56
57 class Overwrite(DictSerializerMixin):
58 """
59 This is used for the PermissionOverride object.
60
61 :ivar int id: Role or User ID
62     :ivar int type: Type that corresponds to the ID; 0 for role and 1 for member.
63 :ivar str allow: Permission bit set.
64 :ivar str deny: Permission bit set.
65 """
66
67 __slots__ = ("_json", "id", "type", "allow", "deny")
68
69 def __init__(self, **kwargs):
70 super().__init__(**kwargs)
71
72
73 class ClientStatus(DictSerializerMixin):
74 """
75 An object that symbolizes the status per client device per session.
76
77 :ivar Optional[str] desktop?: User's status set for an active desktop application session
78 :ivar Optional[str] mobile?: User's status set for an active mobile application session
79 :ivar Optional[str] web?: User's status set for an active web application session
80 """
81
82 __slots__ = ("_json", "desktop", "mobile", "web")
83
84 def __init__(self, **kwargs):
85 super().__init__(**kwargs)
86
87
88 class Snowflake(object):
89 """
90 The Snowflake object.
91
92 This snowflake object will have features closely related to the
93 API schema. In turn, compared to regular d.py's treated snowflakes,
94 these will be treated as strings.
95
96
97 (Basically, snowflakes will be treated as if they were from d.py 0.16.12)
98
99 .. note::
100 You can still provide integers to them, to ensure ease of use of transition and/or
101 if discord API for some odd reason will switch to integer.
102 """
103
104 __slots__ = "_snowflake"
105
106 # Slotting properties are pointless, they are not in-memory
107 # and are instead computed in-model.
108
109 def __init__(self, snowflake: Union[int, str, "Snowflake"]) -> None:
110 self._snowflake = str(snowflake)
111
112 def __str__(self):
113 # This is overridden for model comparison between IDs.
114 return self._snowflake
115
116 def __int__(self):
117 # Easier to use for HTTP calling instead of int(str(obj)).
118 return int(self._snowflake)
119
120 @property
121 def increment(self) -> int:
122 """
123 This is the 'Increment' portion of the snowflake.
124 This is incremented for every ID generated on that process.
125
126 :return: An integer denoting the increment.
127 """
128 return int(self._snowflake) & 0xFFF
129
130 @property
131 def worker_id(self) -> int:
132 """
133 This is the Internal Worker ID of the snowflake.
134 :return: An integer denoting the internal worker ID.
135 """
136 return (int(self._snowflake) & 0x3E0000) >> 17
137
138 @property
139 def process_id(self) -> int:
140 """
141 This is the Internal Process ID of the snowflake.
142 :return: An integer denoting the internal process ID.
143 """
144 return (int(self._snowflake) & 0x1F000) >> 12
145
146 @property
147 def epoch(self) -> float:
148 """
149 This is the Timestamp field of the snowflake.
150
151 :return: A float containing the seconds since Discord Epoch.
152 """
153 return floor(((int(self._snowflake) >> 22) + 1420070400000) / 1000)
154
155 @property
156 def timestamp(self) -> datetime.datetime:
157 """
158 The Datetime object variation of the Timestamp field of the snowflake.
159
160 :return: The converted Datetime object from the Epoch. This respects UTC.
161 """
162 return datetime.datetime.utcfromtimestamp(self.epoch)
163
164 # ---- Extra stuff that might be helpful.
165
166 def __hash__(self):
167 return hash(self._snowflake)
168
169 # Do we need not equals, equals, gt/lt/ge/le?
170 # If so, list them under. By Discord API this may not be needed
171 # but end users might.
172
173
174 class Format:
175 """
176 This object is used to respectively format markdown strings
177 provided by the WYSIWYG text editor for ease-of-accessibility
178 and simple implementations into bots.
179
180 .. note::
181 All base strings are given brackets before being f-string
182 parsable to make conversion simplified.
183
184 .. warning::
185 the ``stylize()`` method must be used if you're actually
186 looking to give a **str** specific result.
187 """
188
189 USER = "<@%s>"
190 USER_NICK = "<@!%s>"
191 CHANNEL = "<#%s>"
192 ROLE = "<@&%s>"
193 EMOJI = "<:%s:%d>"
194 EMOJI_ANIMATED = "<a:%s:%d>"
195 TIMESTAMP = "<t:%s>"
196 TIMESTAMP_SHORT_T = "<t:%s:t>"
197 TIMESTAMP_LONG_T = "<t:%s:T>"
198 TIMESTAMP_SHORT_D = "<t:%s:d>"
199 TIMESTAMP_LONG_D = "<t:%s:D>"
200 TIMESTAMP_SHORT_DT = TIMESTAMP
201 TIMESTAMP_LONG_DT = "<t:%s:F>"
202 TIMESTAMP_RELATIVE = "<t:%s:R>"
203
204 @classmethod
205 def stylize(cls, format: str, **kwargs) -> str:
206 r"""
207 This takes a format style from the object and
208 converts it into a usable string for ease.
209
210 :param format: The format string to use.
211 :type format: str
212 :param \**kwargs: Multiple key-word arguments to use, where key=value is format=value.
213 :type \**kwargs: dict
214 :return: The formatted string.
215 :rtype: str
216 """
217 new: str = f"" # noqa: F541
218 for kwarg in kwargs:
219 if format == kwarg:
220 new %= format
221 return new
222
223
224 class MISSING:
225 """A pseudosentinel based from an empty object. This does violate PEP, but, I don't care."""
226
227 ...
228
[end of interactions/api/models/misc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/interactions/api/models/misc.py b/interactions/api/models/misc.py
--- a/interactions/api/models/misc.py
+++ b/interactions/api/models/misc.py
@@ -171,54 +171,55 @@
# but end users might.
-class Format:
+class Color(object):
"""
- This object is used to respectively format markdown strings
- provided by the WYSIWYG text editor for ease-of-accessibility
- and simple implementations into bots.
+ An object representing Discord branding colors.
.. note::
- All base strings are given brackets before being f-string
- parsable to make conversion simplified.
-
- .. warning::
- the ``stylize()`` method must be used if you're actually
- looking to give a **str** specific result.
+ This object only intends to cover the branding colors
+ and no others. The main reason behind this is due to
+ the current accepted standard of using hex codes or other
+ custom-defined colors.
"""
- USER = "<@%s>"
- USER_NICK = "<@!%s>"
- CHANNEL = "<#%s>"
- ROLE = "<@&%s>"
- EMOJI = "<:%s:%d>"
- EMOJI_ANIMATED = "<a:%s:%d>"
- TIMESTAMP = "<t:%s>"
- TIMESTAMP_SHORT_T = "<t:%s:t>"
- TIMESTAMP_LONG_T = "<t:%s:T>"
- TIMESTAMP_SHORT_D = "<t:%s:d>"
- TIMESTAMP_LONG_D = "<t:%s:D>"
- TIMESTAMP_SHORT_DT = TIMESTAMP
- TIMESTAMP_LONG_DT = "<t:%s:F>"
- TIMESTAMP_RELATIVE = "<t:%s:R>"
-
- @classmethod
- def stylize(cls, format: str, **kwargs) -> str:
- r"""
- This takes a format style from the object and
- converts it into a usable string for ease.
-
- :param format: The format string to use.
- :type format: str
- :param \**kwargs: Multiple key-word arguments to use, where key=value is format=value.
- :type \**kwargs: dict
- :return: The formatted string.
- :rtype: str
- """
- new: str = f"" # noqa: F541
- for kwarg in kwargs:
- if format == kwarg:
- new %= format
- return new
+ @property
+ def blurple(self) -> hex:
+ """Returns a hexadecimal value of the blurple color."""
+ return 0x5865F2
+
+ @property
+ def green(self) -> hex:
+ """Returns a hexadecimal value of the green color."""
+ return 0x57F287
+
+ @property
+ def yellow(self) -> hex:
+ """Returns a hexadecimal value of the yellow color."""
+ return 0xFEE75C
+
+ @property
+ def fuchsia(self) -> hex:
+ """Returns a hexadecimal value of the fuchsia color."""
+ return 0xEB459E
+
+ @property
+ def red(self) -> hex:
+ """Returns a hexadecimal value of the red color."""
+ return 0xED4245
+
+ # I can't imagine any bot developers actually using these.
+ # If they don't know white is ff and black is 00, something's seriously
+ # wrong.
+
+ @property
+ def white(self) -> hex:
+ """Returns a hexadecimal value of the white color."""
+ return 0xFFFFFF
+
+ @property
+ def black(self) -> hex:
+ """Returns a hexadecimal value of the black color."""
+ return 0x000000
class MISSING:
| {"golden_diff": "diff --git a/interactions/api/models/misc.py b/interactions/api/models/misc.py\n--- a/interactions/api/models/misc.py\n+++ b/interactions/api/models/misc.py\n@@ -171,54 +171,55 @@\n # but end users might.\n \n \n-class Format:\n+class Color(object):\n \"\"\"\n- This object is used to respectively format markdown strings\n- provided by the WYSIWYG text editor for ease-of-accessibility\n- and simple implementations into bots.\n+ An object representing Discord branding colors.\n \n .. note::\n- All base strings are given brackets before being f-string\n- parsable to make conversion simplified.\n-\n- .. warning::\n- the ``stylize()`` method must be used if you're actually\n- looking to give a **str** specific result.\n+ This object only intends to cover the branding colors\n+ and no others. The main reason behind this is due to\n+ the current accepted standard of using hex codes or other\n+ custom-defined colors.\n \"\"\"\n \n- USER = \"<@%s>\"\n- USER_NICK = \"<@!%s>\"\n- CHANNEL = \"<#%s>\"\n- ROLE = \"<@&%s>\"\n- EMOJI = \"<:%s:%d>\"\n- EMOJI_ANIMATED = \"<a:%s:%d>\"\n- TIMESTAMP = \"<t:%s>\"\n- TIMESTAMP_SHORT_T = \"<t:%s:t>\"\n- TIMESTAMP_LONG_T = \"<t:%s:T>\"\n- TIMESTAMP_SHORT_D = \"<t:%s:d>\"\n- TIMESTAMP_LONG_D = \"<t:%s:D>\"\n- TIMESTAMP_SHORT_DT = TIMESTAMP\n- TIMESTAMP_LONG_DT = \"<t:%s:F>\"\n- TIMESTAMP_RELATIVE = \"<t:%s:R>\"\n-\n- @classmethod\n- def stylize(cls, format: str, **kwargs) -> str:\n- r\"\"\"\n- This takes a format style from the object and\n- converts it into a usable string for ease.\n-\n- :param format: The format string to use.\n- :type format: str\n- :param \\**kwargs: Multiple key-word arguments to use, where key=value is format=value.\n- :type \\**kwargs: dict\n- :return: The formatted string.\n- :rtype: str\n- \"\"\"\n- new: str = f\"\" # noqa: F541\n- for kwarg in kwargs:\n- if format == kwarg:\n- new %= format\n- return new\n+ @property\n+ def blurple(self) -> hex:\n+ \"\"\"Returns a hexadecimal value of the blurple color.\"\"\"\n+ return 0x5865F2\n+\n+ @property\n+ def green(self) -> hex:\n+ \"\"\"Returns a hexadecimal value of the green color.\"\"\"\n+ return 0x57F287\n+\n+ @property\n+ def yellow(self) -> hex:\n+ \"\"\"Returns a hexadecimal value of the yellow color.\"\"\"\n+ return 0xFEE75C\n+\n+ @property\n+ def fuchsia(self) -> hex:\n+ \"\"\"Returns a hexadecimal value of the fuchsia color.\"\"\"\n+ return 0xEB459E\n+\n+ @property\n+ def red(self) -> hex:\n+ \"\"\"Returns a hexadecimal value of the red color.\"\"\"\n+ return 0xED4245\n+\n+ # I can't imagine any bot developers actually using these.\n+ # If they don't know white is ff and black is 00, something's seriously\n+ # wrong.\n+\n+ @property\n+ def white(self) -> hex:\n+ \"\"\"Returns a hexadecimal value of the white color.\"\"\"\n+ return 0xFFFFFF\n+\n+ @property\n+ def black(self) -> hex:\n+ \"\"\"Returns a hexadecimal value of the black color.\"\"\"\n+ return 0x000000\n \n \n class MISSING:\n", "issue": "[REQUEST] Add a basic `Color` object.\n### Describe the feature.\n\nPeople may want to be able to easily call upon colors. Since it should be at least an accepted concept, I am wanting to add a basic `Color` object that includes the official branding colors. 
Please note that other colors that are custom-defined are ideally frowned on for this idea since the core library is about making implementations strict to what Discord offer, which we can argue debatably with the branding colors.\n\n### Code of Conduct\n\n- [X] I agree to follow the contribution requirements.\n", "before_files": [{"content": "# TODO: This is post-v4.\n# TODO: Reorganise these models based on which big obj uses little obj\n# TODO: Potentially rename some model references to enums, if applicable\n# TODO: Reorganise mixins to its own thing, currently placed here because circular import sucks.\n# also, it should be serialiser* but idk, fl0w'd say something if I left it like that. /shrug\nimport datetime\nfrom logging import Logger\nfrom math import floor\nfrom typing import Union\n\nfrom interactions.base import get_logger\n\nlog: Logger = get_logger(\"mixin\")\n\n\nclass DictSerializerMixin(object):\n \"\"\"\n The purpose of this mixin is to be subclassed.\n\n .. note::\n On subclass, it:\n -- From kwargs (received from the Discord API response), add it to the `_json` attribute\n such that it can be reused by other libraries/extensions\n -- Aids in attributing the kwargs to actual model attributes, i.e. `User.id`\n -- Dynamically sets attributes not given to kwargs but slotted to None, signifying that it doesn't exist.\n\n .. warning::\n This does NOT convert them to its own data types, i.e. timestamps, or User within Member. This is left by\n the object that's using the mixin.\n \"\"\"\n\n __slots__ = \"_json\"\n\n def __init__(self, **kwargs):\n self._json = kwargs\n # for key in kwargs:\n # setattr(self, key, kwargs[key])\n\n for key in kwargs:\n if key in self.__slots__ if hasattr(self, \"__slots__\") else True:\n # else case if the mixin is used outside of this library and/or SDK.\n setattr(self, key, kwargs[key])\n else:\n log.warning(\n f\"Attribute {key} is missing from the {self.__class__.__name__} data model, skipping.\"\n )\n # work on message printout? Effective, but I think it should be a little bit more friendly\n # towards end users\n\n # if self.__slots__ is not None: # safeguard, runtime check\n if hasattr(self, \"__slots__\"):\n for _attr in self.__slots__:\n if not hasattr(self, _attr):\n setattr(self, _attr, None)\n\n\nclass Overwrite(DictSerializerMixin):\n \"\"\"\n This is used for the PermissionOverride object.\n\n :ivar int id: Role or User ID\n :ivar int type: Type that corresponds ot the ID; 0 for role and 1 for member.\n :ivar str allow: Permission bit set.\n :ivar str deny: Permission bit set.\n \"\"\"\n\n __slots__ = (\"_json\", \"id\", \"type\", \"allow\", \"deny\")\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n\nclass ClientStatus(DictSerializerMixin):\n \"\"\"\n An object that symbolizes the status per client device per session.\n\n :ivar Optional[str] desktop?: User's status set for an active desktop application session\n :ivar Optional[str] mobile?: User's status set for an active mobile application session\n :ivar Optional[str] web?: User's status set for an active web application session\n \"\"\"\n\n __slots__ = (\"_json\", \"desktop\", \"mobile\", \"web\")\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n\nclass Snowflake(object):\n \"\"\"\n The Snowflake object.\n\n This snowflake object will have features closely related to the\n API schema. 
In turn, compared to regular d.py's treated snowflakes,\n these will be treated as strings.\n\n\n (Basically, snowflakes will be treated as if they were from d.py 0.16.12)\n\n .. note::\n You can still provide integers to them, to ensure ease of use of transition and/or\n if discord API for some odd reason will switch to integer.\n \"\"\"\n\n __slots__ = \"_snowflake\"\n\n # Slotting properties are pointless, they are not in-memory\n # and are instead computed in-model.\n\n def __init__(self, snowflake: Union[int, str, \"Snowflake\"]) -> None:\n self._snowflake = str(snowflake)\n\n def __str__(self):\n # This is overridden for model comparison between IDs.\n return self._snowflake\n\n def __int__(self):\n # Easier to use for HTTP calling instead of int(str(obj)).\n return int(self._snowflake)\n\n @property\n def increment(self) -> int:\n \"\"\"\n This is the 'Increment' portion of the snowflake.\n This is incremented for every ID generated on that process.\n\n :return: An integer denoting the increment.\n \"\"\"\n return int(self._snowflake) & 0xFFF\n\n @property\n def worker_id(self) -> int:\n \"\"\"\n This is the Internal Worker ID of the snowflake.\n :return: An integer denoting the internal worker ID.\n \"\"\"\n return (int(self._snowflake) & 0x3E0000) >> 17\n\n @property\n def process_id(self) -> int:\n \"\"\"\n This is the Internal Process ID of the snowflake.\n :return: An integer denoting the internal process ID.\n \"\"\"\n return (int(self._snowflake) & 0x1F000) >> 12\n\n @property\n def epoch(self) -> float:\n \"\"\"\n This is the Timestamp field of the snowflake.\n\n :return: A float containing the seconds since Discord Epoch.\n \"\"\"\n return floor(((int(self._snowflake) >> 22) + 1420070400000) / 1000)\n\n @property\n def timestamp(self) -> datetime.datetime:\n \"\"\"\n The Datetime object variation of the Timestamp field of the snowflake.\n\n :return: The converted Datetime object from the Epoch. This respects UTC.\n \"\"\"\n return datetime.datetime.utcfromtimestamp(self.epoch)\n\n # ---- Extra stuff that might be helpful.\n\n def __hash__(self):\n return hash(self._snowflake)\n\n # Do we need not equals, equals, gt/lt/ge/le?\n # If so, list them under. By Discord API this may not be needed\n # but end users might.\n\n\nclass Format:\n \"\"\"\n This object is used to respectively format markdown strings\n provided by the WYSIWYG text editor for ease-of-accessibility\n and simple implementations into bots.\n\n .. note::\n All base strings are given brackets before being f-string\n parsable to make conversion simplified.\n\n .. 
warning::\n the ``stylize()`` method must be used if you're actually\n looking to give a **str** specific result.\n \"\"\"\n\n USER = \"<@%s>\"\n USER_NICK = \"<@!%s>\"\n CHANNEL = \"<#%s>\"\n ROLE = \"<@&%s>\"\n EMOJI = \"<:%s:%d>\"\n EMOJI_ANIMATED = \"<a:%s:%d>\"\n TIMESTAMP = \"<t:%s>\"\n TIMESTAMP_SHORT_T = \"<t:%s:t>\"\n TIMESTAMP_LONG_T = \"<t:%s:T>\"\n TIMESTAMP_SHORT_D = \"<t:%s:d>\"\n TIMESTAMP_LONG_D = \"<t:%s:D>\"\n TIMESTAMP_SHORT_DT = TIMESTAMP\n TIMESTAMP_LONG_DT = \"<t:%s:F>\"\n TIMESTAMP_RELATIVE = \"<t:%s:R>\"\n\n @classmethod\n def stylize(cls, format: str, **kwargs) -> str:\n r\"\"\"\n This takes a format style from the object and\n converts it into a usable string for ease.\n\n :param format: The format string to use.\n :type format: str\n :param \\**kwargs: Multiple key-word arguments to use, where key=value is format=value.\n :type \\**kwargs: dict\n :return: The formatted string.\n :rtype: str\n \"\"\"\n new: str = f\"\" # noqa: F541\n for kwarg in kwargs:\n if format == kwarg:\n new %= format\n return new\n\n\nclass MISSING:\n \"\"\"A pseudosentinel based from an empty object. This does violate PEP, but, I don't care.\"\"\"\n\n ...\n", "path": "interactions/api/models/misc.py"}]} | 3,054 | 872 |
gh_patches_debug_75 | rasdani/github-patches | git_diff | kedro-org__kedro-2092 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release Kedro `0.18.4`
### Depends on:
- Dataset issues
- Spaceflights tutorial documentation
- Open PRs related to datasets:
- [x] https://github.com/kedro-org/kedro/pull/2082
- [x] https://github.com/kedro-org/kedro/pull/1746
- [x] https://github.com/kedro-org/kedro/pull/1992
- [x] https://github.com/kedro-org/kedro/pull/1865
- [x] https://github.com/kedro-org/kedro/pull/1312
- [x] https://github.com/kedro-org/kedro/pull/1844
- [x] https://github.com/kedro-org/kedro/pull/1962
- [x] https://github.com/kedro-org/kedro/pull/1964
- [x] https://github.com/kedro-org/kedro/pull/1931
- [x] https://github.com/kedro-org/kedro/pull/1587
For the above PRs: if it's nearly finished, but the author isn't responding, we as a team can take over and finish the PR. If the PR still needs a lot of work and the author isn't responding, I suggest we close it and ask them to re-open in the new `kedro-datasets` repo.
</issue>
<code>
[start of kedro/__init__.py]
1 """Kedro is a framework that makes it easy to build robust and scalable
2 data pipelines by providing uniform project templates, data abstraction,
3 configuration and pipeline assembly.
4 """
5
6 __version__ = "0.18.3"
7
8
9 import logging
10
11 logging.getLogger(__name__).addHandler(logging.NullHandler())
12
[end of kedro/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kedro/__init__.py b/kedro/__init__.py
--- a/kedro/__init__.py
+++ b/kedro/__init__.py
@@ -3,7 +3,7 @@
configuration and pipeline assembly.
"""
-__version__ = "0.18.3"
+__version__ = "0.18.4"
import logging
| {"golden_diff": "diff --git a/kedro/__init__.py b/kedro/__init__.py\n--- a/kedro/__init__.py\n+++ b/kedro/__init__.py\n@@ -3,7 +3,7 @@\n configuration and pipeline assembly.\n \"\"\"\n \n-__version__ = \"0.18.3\"\n+__version__ = \"0.18.4\"\n \n \n import logging\n", "issue": "Release Kedro `0.18.4`\n### Depends on:\n- Dataset issues\n- Spaceflights tutorial documentation\n- Open PRs related to datasets:\n - [x] https://github.com/kedro-org/kedro/pull/2082\n - [x] https://github.com/kedro-org/kedro/pull/1746\n - [x] https://github.com/kedro-org/kedro/pull/1992\n - [x] https://github.com/kedro-org/kedro/pull/1865\n - [x] https://github.com/kedro-org/kedro/pull/1312\n - [x] https://github.com/kedro-org/kedro/pull/1844\n - [x] https://github.com/kedro-org/kedro/pull/1962\n - [x] https://github.com/kedro-org/kedro/pull/1964\n - [x] https://github.com/kedro-org/kedro/pull/1931\n - [x] https://github.com/kedro-org/kedro/pull/1587\n\nFor the above PRs: if it's nearly finished, but the author isn't responding, we as a team can take over and finish the PR. If the PR still needs a lot of work and the author isn't responding, I suggest we close it and ask them to re-open in the new `kedro-datasets` repo. \n\n", "before_files": [{"content": "\"\"\"Kedro is a framework that makes it easy to build robust and scalable\ndata pipelines by providing uniform project templates, data abstraction,\nconfiguration and pipeline assembly.\n\"\"\"\n\n__version__ = \"0.18.3\"\n\n\nimport logging\n\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n", "path": "kedro/__init__.py"}]} | 961 | 87 |
gh_patches_debug_8282 | rasdani/github-patches | git_diff | Pyomo__pyomo-823 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Problems with setup.py when failures arise
I'm seeing the following error on Linux using Python 3.7. FWIW, the underlying error is that the PyUtilib master branch hasn't been installed, which is now required to install the Pyomo master branch:
> Traceback (most recent call last):
> File "/home/wehart/anaconda3/envs/simple/lib/python3.7/distutils/core.py", line 148, in setup
> dist.run_commands()
> File "/home/wehart/anaconda3/envs/simple/lib/python3.7/distutils/dist.py", line 966, in run_commands
> self.run_command(cmd)
> File "/home/wehart/anaconda3/envs/simple/lib/python3.7/distutils/dist.py", line 985, in run_command
> cmd_obj.run()
> File "/home/wehart/anaconda3/envs/simple/lib/python3.7/site-packages/setuptools/command/develop.py", line 38, in run
> self.install_for_development()
> File "/home/wehart/anaconda3/envs/simple/lib/python3.7/site-packages/setuptools/command/develop.py", line 154, in install_for_development
> self.process_distribution(None, self.dist, not self.no_deps)
> File "/home/wehart/anaconda3/envs/simple/lib/python3.7/site-packages/setuptools/command/easy_install.py", line 752, in process_distribution
> [requirement], self.local_index, self.easy_install
> File "/home/wehart/anaconda3/envs/simple/lib/python3.7/site-packages/pkg_resources/__init__.py", line 780, in resolve
> replace_conflicting=replace_conflicting
> File "/home/wehart/anaconda3/envs/simple/lib/python3.7/site-packages/pkg_resources/__init__.py", line 1063, in best_match
> return self.obtain(req, installer)
> File "/home/wehart/anaconda3/envs/simple/lib/python3.7/site-packages/pkg_resources/__init__.py", line 1075, in obtain
> return installer(requirement)
> File "/home/wehart/anaconda3/envs/simple/lib/python3.7/site-packages/setuptools/command/easy_install.py", line 673, in easy_install
> raise DistutilsError(msg)
> distutils.errors.DistutilsError: Could not find suitable distribution for Requirement.parse('PyUtilib>=5.6.6.dev0')
>
> During handling of the above exception, another exception occurred:
>
> Traceback (most recent call last):
> File "setup.py", line 191, in <module>
> run_setup()
> File "setup.py", line 187, in run_setup
> """
> File "/home/wehart/anaconda3/envs/simple/lib/python3.7/site-packages/setuptools/__init__.py", line 143, in setup
> return distutils.core.setup(**attrs)
> File "/home/wehart/anaconda3/envs/simple/lib/python3.7/distutils/core.py", line 163, in setup
> raise SystemExit("error: " + str(msg))
> SystemExit: error: Could not find suitable distribution for Requirement.parse('PyUtilib>=5.6.6.dev0')
>
> During handling of the above exception, another exception occurred:
>
> Traceback (most recent call last):
> File "setup.py", line 197, in <module>
> if 'Microsoft Visual C++' not in e_info.message:
> AttributeError: 'SystemExit' object has no attribute 'message'
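
Two separate problems are visible here. The `Requirement.parse('PyUtilib>=5.6.6.dev0')` failure just means the PyUtilib master branch (not a PyPI release) has to be installed before Pyomo master. The secondary crash happens because Python 3 exceptions have no `.message` attribute, so the fallback handler in `setup.py` dies before it can report anything useful. A hedged sketch of what the handler fix might look like (the actual patch may differ):

```python
try:
    run_setup()
except SystemExit as e_info:
    # Python 3 removed BaseException.message; str(e_info) works on both 2 and 3.
    if 'Microsoft Visual C++' not in str(e_info):
        raise
```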
</issue>
<code>
[start of setup.py]
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
5 # Under the terms of Contract DE-NA0003525 with National Technology and
6 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
7 # rights in this software.
8 # This software is distributed under the 3-clause BSD License.
9 # ___________________________________________________________________________
10
11 """
12 Script to generate the installer for pyomo.
13 """
14
15 import sys
16 import os
17
18
19 def _find_packages(path):
20 """
21 Generate a list of nested packages
22 """
23 pkg_list = []
24 if not os.path.exists(path):
25 return []
26 if not os.path.exists(path+os.sep+"__init__.py"):
27 return []
28 else:
29 pkg_list.append(path)
30 for root, dirs, files in os.walk(path, topdown=True):
31 if root in pkg_list and "__init__.py" in files:
32 for name in dirs:
33 if os.path.exists(root+os.sep+name+os.sep+"__init__.py"):
34 pkg_list.append(root+os.sep+name)
35 return [pkg for pkg in map(lambda x:x.replace(os.sep, "."), pkg_list)]
36
37
38 def read(*rnames):
39 return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
40
41 requires = [
42 'PyUtilib>=5.6.6.dev0',
43 'appdirs',
44 'ply',
45 'six>=1.4',
46 ]
47 if sys.version_info < (2, 7):
48 requires.append('argparse')
49 requires.append('unittest2')
50 requires.append('ordereddict')
51
52 from setuptools import setup
53 import sys
54
55 CYTHON_REQUIRED = "required"
56 if 'develop' in sys.argv:
57 using_cython = False
58 else:
59 using_cython = "automatic"
60 if '--with-cython' in sys.argv:
61 using_cython = CYTHON_REQUIRED
62 sys.argv.remove('--with-cython')
63 if '--without-cython' in sys.argv:
64 using_cython = False
65 sys.argv.remove('--without-cython')
66
67 ext_modules = []
68 if using_cython:
69 try:
70 import platform
71 if platform.python_implementation() != "CPython":
72 # break out of this try-except (disable Cython)
73 raise RuntimeError("Cython is only supported under CPython")
74 from Cython.Build import cythonize
75 #
76         # Note: The Cython developers recommend that you distribute C source
77 # files to users. But this is fine for evaluating the utility of Cython
78 #
79 import shutil
80 files = [
81 "pyomo/core/expr/expr_pyomo5.pyx",
82 "pyomo/core/expr/numvalue.pyx",
83 "pyomo/core/util.pyx",
84 "pyomo/repn/standard_repn.pyx",
85 "pyomo/repn/plugins/cpxlp.pyx",
86 "pyomo/repn/plugins/gams_writer.pyx",
87 "pyomo/repn/plugins/baron_writer.pyx",
88 "pyomo/repn/plugins/ampl/ampl_.pyx",
89 ]
90 for f in files:
91 shutil.copyfile(f[:-1], f)
92 ext_modules = cythonize(files)
93 except:
94 if using_cython == CYTHON_REQUIRED:
95 print("""
96 ERROR: Cython was explicitly requested with --with-cython, but cythonization
97 of core Pyomo modules failed.
98 """)
99 raise
100 using_cython = False
101
102 packages = _find_packages('pyomo')
103
104 def run_setup():
105 setup(name='Pyomo',
106 #
107 # Note: trunk should have *next* major.minor
108 # VOTD and Final releases will have major.minor.revnum
109 #
110 # When cutting a release, ALSO update _major/_minor/_revnum in
111 #
112 # pyomo/pyomo/version/__init__.py
113 # pyomo/RELEASE.txt
114 #
115 version='5.6.2.dev0',
116 maintainer='William E. Hart',
117 maintainer_email='[email protected]',
118 url='http://pyomo.org',
119 license='BSD',
120 platforms=["any"],
121 description='Pyomo: Python Optimization Modeling Objects',
122 long_description=read('README.txt'),
123 classifiers=[
124 'Development Status :: 5 - Production/Stable',
125 'Intended Audience :: End Users/Desktop',
126 'Intended Audience :: Science/Research',
127 'License :: OSI Approved :: BSD License',
128 'Natural Language :: English',
129 'Operating System :: MacOS',
130 'Operating System :: Microsoft :: Windows',
131 'Operating System :: Unix',
132 'Programming Language :: Python',
133 'Programming Language :: Python :: 2',
134 'Programming Language :: Python :: 2.7',
135 'Programming Language :: Python :: 3',
136 'Programming Language :: Python :: 3.4',
137 'Programming Language :: Python :: 3.5',
138 'Programming Language :: Python :: 3.6',
139 'Programming Language :: Python :: 3.7',
140 'Programming Language :: Python :: Implementation :: CPython',
141 'Programming Language :: Python :: Implementation :: Jython',
142 'Programming Language :: Python :: Implementation :: PyPy',
143 'Topic :: Scientific/Engineering :: Mathematics',
144 'Topic :: Software Development :: Libraries :: Python Modules' ],
145 packages=packages,
146 keywords=['optimization'],
147 install_requires=requires,
148 ext_modules = ext_modules,
149 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
150 entry_points="""
151 [console_scripts]
152 runbenders=pyomo.pysp.benders:Benders_main
153 evaluate_xhat=pyomo.pysp.evaluate_xhat:EvaluateXhat_main
154 runph=pyomo.pysp.phinit:PH_main
155 runef=pyomo.pysp.ef_writer_script:main
156 phsolverserver=pyomo.pysp.phsolverserver:main
157 scenariotreeserver=pyomo.pysp.scenariotree.server_pyro:main
158 computeconf=pyomo.pysp.computeconf:main
159
160 results_schema=pyomo.scripting.commands:results_schema
161 pyro_mip_server = pyomo.scripting.pyro_mip_server:main
162 test.pyomo = pyomo.scripting.runtests:runPyomoTests
163 pyomo = pyomo.scripting.pyomo_main:main
164 pyomo_ns = pyomo.scripting.commands:pyomo_ns
165 pyomo_nsc = pyomo.scripting.commands:pyomo_nsc
166 kill_pyro_mip_servers = pyomo.scripting.commands:kill_pyro_mip_servers
167 launch_pyro_mip_servers = pyomo.scripting.commands:launch_pyro_mip_servers
168 readsol = pyomo.scripting.commands:readsol
169 OSSolverService = pyomo.scripting.commands:OSSolverService
170 pyomo_python = pyomo.scripting.commands:pyomo_python
171 pyomo_old=pyomo.scripting.pyomo_command:main
172 get_pyomo_extras = scripts.get_pyomo_extras:main
173
174 [pyomo.command]
175 pyomo.runbenders=pyomo.pysp.benders
176 pyomo.evaluate_xhat=pyomo.pysp.evaluate_xhat
177 pyomo.runph=pyomo.pysp.phinit
178 pyomo.runef=pyomo.pysp.ef_writer_script
179 pyomo.phsolverserver=pyomo.pysp.phsolverserver
180 pyomo.scenariotreeserver=pyomo.pysp.scenariotree.server_pyro
181 pyomo.computeconf=pyomo.pysp.computeconf
182
183 pyomo.help = pyomo.scripting.driver_help
184 pyomo.test.pyomo = pyomo.scripting.runtests
185 pyomo.pyro_mip_server = pyomo.scripting.pyro_mip_server
186 pyomo.results_schema=pyomo.scripting.commands
187 """
188 )
189
190 try:
191 run_setup()
192 except SystemExit as e_info:
193 # Cython can generate a SystemExit exception on Windows if the
194 # environment is missing / has an incorrect Microsoft compiler.
195 # Since Cython is not strictly required, we will disable Cython and
196 # try re-running setup(), but only for this very specific situation.
197 if 'Microsoft Visual C++' not in e_info.message:
198 raise
199 elif using_cython == CYTHON_REQUIRED:
200 print("""
201 ERROR: Cython was explicitly requested with --with-cython, but cythonization
202 of core Pyomo modules failed.
203 """)
204 raise
205 else:
206 print("""
207 ERROR: setup() failed:
208 %s
209 Re-running setup() without the Cython modules
210 """ % (e_info.message,))
211 ext_modules = []
212 run_setup()
213 print("""
214 WARNING: Installation completed successfully, but the attempt to cythonize
215 core Pyomo modules failed. Cython provides performance
216 optimizations and is not required for any Pyomo functionality.
217 Cython returned the following error:
218 "%s"
219 """ % (e_info.message,))
220
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -194,7 +194,7 @@
# environment is missing / has an incorrect Microsoft compiler.
# Since Cython is not strictly required, we will disable Cython and
# try re-running setup(), but only for this very specific situation.
- if 'Microsoft Visual C++' not in e_info.message:
+ if 'Microsoft Visual C++' not in str(e_info):
raise
elif using_cython == CYTHON_REQUIRED:
print("""
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -194,7 +194,7 @@\n # environment is missing / has an incorrect Microsoft compiler.\n # Since Cython is not strictly required, we will disable Cython and\n # try re-running setup(), but only for this very specific situation.\n- if 'Microsoft Visual C++' not in e_info.message:\n+ if 'Microsoft Visual C++' not in str(e_info):\n raise\n elif using_cython == CYTHON_REQUIRED:\n print(\"\"\"\n", "issue": "Problems with setup.py when failures arise\nI'm seeing the following error on Linux using Python 3.7. FWIW, the underlying error is that the PyUtilib master branch hasn't been installed, which is now required to install the Pyomo master branch:\r\n\r\n> Traceback (most recent call last):\r\n> File \"/home/wehart/anaconda3/envs/simple/lib/python3.7/distutils/core.py\", line 148, in setup\r\n> dist.run_commands()\r\n> File \"/home/wehart/anaconda3/envs/simple/lib/python3.7/distutils/dist.py\", line 966, in run_commands\r\n> self.run_command(cmd)\r\n> File \"/home/wehart/anaconda3/envs/simple/lib/python3.7/distutils/dist.py\", line 985, in run_command\r\n> cmd_obj.run()\r\n> File \"/home/wehart/anaconda3/envs/simple/lib/python3.7/site-packages/setuptools/command/develop.py\", line 38, in run\r\n> self.install_for_development()\r\n> File \"/home/wehart/anaconda3/envs/simple/lib/python3.7/site-packages/setuptools/command/develop.py\", line 154, in install_for_development\r\n> self.process_distribution(None, self.dist, not self.no_deps)\r\n> File \"/home/wehart/anaconda3/envs/simple/lib/python3.7/site-packages/setuptools/command/easy_install.py\", line 752, in process_distribution\r\n> [requirement], self.local_index, self.easy_install\r\n> File \"/home/wehart/anaconda3/envs/simple/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 780, in resolve\r\n> replace_conflicting=replace_conflicting\r\n> File \"/home/wehart/anaconda3/envs/simple/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 1063, in best_match\r\n> return self.obtain(req, installer)\r\n> File \"/home/wehart/anaconda3/envs/simple/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 1075, in obtain\r\n> return installer(requirement)\r\n> File \"/home/wehart/anaconda3/envs/simple/lib/python3.7/site-packages/setuptools/command/easy_install.py\", line 673, in easy_install\r\n> raise DistutilsError(msg)\r\n> distutils.errors.DistutilsError: Could not find suitable distribution for Requirement.parse('PyUtilib>=5.6.6.dev0')\r\n> \r\n> During handling of the above exception, another exception occurred:\r\n> \r\n> Traceback (most recent call last):\r\n> File \"setup.py\", line 191, in <module>\r\n> run_setup()\r\n> File \"setup.py\", line 187, in run_setup\r\n> \"\"\"\r\n> File \"/home/wehart/anaconda3/envs/simple/lib/python3.7/site-packages/setuptools/__init__.py\", line 143, in setup\r\n> return distutils.core.setup(**attrs)\r\n> File \"/home/wehart/anaconda3/envs/simple/lib/python3.7/distutils/core.py\", line 163, in setup\r\n> raise SystemExit(\"error: \" + str(msg))\r\n> SystemExit: error: Could not find suitable distribution for Requirement.parse('PyUtilib>=5.6.6.dev0')\r\n> \r\n> During handling of the above exception, another exception occurred:\r\n> \r\n> Traceback (most recent call last):\r\n> File \"setup.py\", line 197, in <module>\r\n> if 'Microsoft Visual C++' not in e_info.message:\r\n> AttributeError: 'SystemExit' object has no attribute 'message'\r\n\n", "before_files": [{"content": "# 
___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\n\"\"\"\nScript to generate the installer for pyomo.\n\"\"\"\n\nimport sys\nimport os\n\n\ndef _find_packages(path):\n \"\"\"\n Generate a list of nested packages\n \"\"\"\n pkg_list = []\n if not os.path.exists(path):\n return []\n if not os.path.exists(path+os.sep+\"__init__.py\"):\n return []\n else:\n pkg_list.append(path)\n for root, dirs, files in os.walk(path, topdown=True):\n if root in pkg_list and \"__init__.py\" in files:\n for name in dirs:\n if os.path.exists(root+os.sep+name+os.sep+\"__init__.py\"):\n pkg_list.append(root+os.sep+name)\n return [pkg for pkg in map(lambda x:x.replace(os.sep, \".\"), pkg_list)]\n\n\ndef read(*rnames):\n return open(os.path.join(os.path.dirname(__file__), *rnames)).read()\n\nrequires = [\n 'PyUtilib>=5.6.6.dev0',\n 'appdirs',\n 'ply',\n 'six>=1.4',\n ]\nif sys.version_info < (2, 7):\n requires.append('argparse')\n requires.append('unittest2')\n requires.append('ordereddict')\n\nfrom setuptools import setup\nimport sys\n\nCYTHON_REQUIRED = \"required\"\nif 'develop' in sys.argv:\n using_cython = False\nelse:\n using_cython = \"automatic\"\nif '--with-cython' in sys.argv:\n using_cython = CYTHON_REQUIRED\n sys.argv.remove('--with-cython')\nif '--without-cython' in sys.argv:\n using_cython = False\n sys.argv.remove('--without-cython')\n\next_modules = []\nif using_cython:\n try:\n import platform\n if platform.python_implementation() != \"CPython\":\n # break out of this try-except (disable Cython)\n raise RuntimeError(\"Cython is only supported under CPython\")\n from Cython.Build import cythonize\n #\n # Note: The Cython developers recommend that you destribute C source\n # files to users. But this is fine for evaluating the utility of Cython\n #\n import shutil\n files = [\n \"pyomo/core/expr/expr_pyomo5.pyx\",\n \"pyomo/core/expr/numvalue.pyx\",\n \"pyomo/core/util.pyx\",\n \"pyomo/repn/standard_repn.pyx\",\n \"pyomo/repn/plugins/cpxlp.pyx\",\n \"pyomo/repn/plugins/gams_writer.pyx\",\n \"pyomo/repn/plugins/baron_writer.pyx\",\n \"pyomo/repn/plugins/ampl/ampl_.pyx\",\n ]\n for f in files:\n shutil.copyfile(f[:-1], f)\n ext_modules = cythonize(files)\n except:\n if using_cython == CYTHON_REQUIRED:\n print(\"\"\"\nERROR: Cython was explicitly requested with --with-cython, but cythonization\n of core Pyomo modules failed.\n\"\"\")\n raise\n using_cython = False\n\npackages = _find_packages('pyomo')\n\ndef run_setup():\n setup(name='Pyomo',\n #\n # Note: trunk should have *next* major.minor\n # VOTD and Final releases will have major.minor.revnum\n #\n # When cutting a release, ALSO update _major/_minor/_revnum in\n #\n # pyomo/pyomo/version/__init__.py\n # pyomo/RELEASE.txt\n #\n version='5.6.2.dev0',\n maintainer='William E. 
Hart',\n maintainer_email='[email protected]',\n url='http://pyomo.org',\n license='BSD',\n platforms=[\"any\"],\n description='Pyomo: Python Optimization Modeling Objects',\n long_description=read('README.txt'),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: MacOS',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: Unix',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: Jython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules' ],\n packages=packages,\n keywords=['optimization'],\n install_requires=requires,\n ext_modules = ext_modules,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n entry_points=\"\"\"\n [console_scripts]\n runbenders=pyomo.pysp.benders:Benders_main\n evaluate_xhat=pyomo.pysp.evaluate_xhat:EvaluateXhat_main\n runph=pyomo.pysp.phinit:PH_main\n runef=pyomo.pysp.ef_writer_script:main\n phsolverserver=pyomo.pysp.phsolverserver:main\n scenariotreeserver=pyomo.pysp.scenariotree.server_pyro:main\n computeconf=pyomo.pysp.computeconf:main\n\n results_schema=pyomo.scripting.commands:results_schema\n pyro_mip_server = pyomo.scripting.pyro_mip_server:main\n test.pyomo = pyomo.scripting.runtests:runPyomoTests\n pyomo = pyomo.scripting.pyomo_main:main\n pyomo_ns = pyomo.scripting.commands:pyomo_ns\n pyomo_nsc = pyomo.scripting.commands:pyomo_nsc\n kill_pyro_mip_servers = pyomo.scripting.commands:kill_pyro_mip_servers\n launch_pyro_mip_servers = pyomo.scripting.commands:launch_pyro_mip_servers\n readsol = pyomo.scripting.commands:readsol\n OSSolverService = pyomo.scripting.commands:OSSolverService\n pyomo_python = pyomo.scripting.commands:pyomo_python\n pyomo_old=pyomo.scripting.pyomo_command:main\n get_pyomo_extras = scripts.get_pyomo_extras:main\n\n [pyomo.command]\n pyomo.runbenders=pyomo.pysp.benders\n pyomo.evaluate_xhat=pyomo.pysp.evaluate_xhat\n pyomo.runph=pyomo.pysp.phinit\n pyomo.runef=pyomo.pysp.ef_writer_script\n pyomo.phsolverserver=pyomo.pysp.phsolverserver\n pyomo.scenariotreeserver=pyomo.pysp.scenariotree.server_pyro\n pyomo.computeconf=pyomo.pysp.computeconf\n\n pyomo.help = pyomo.scripting.driver_help\n pyomo.test.pyomo = pyomo.scripting.runtests\n pyomo.pyro_mip_server = pyomo.scripting.pyro_mip_server\n pyomo.results_schema=pyomo.scripting.commands\n \"\"\"\n )\n\ntry:\n run_setup()\nexcept SystemExit as e_info:\n # Cython can generate a SystemExit exception on Windows if the\n # environment is missing / has an incorrect Microsoft compiler.\n # Since Cython is not strictly required, we will disable Cython and\n # try re-running setup(), but only for this very specific situation.\n if 'Microsoft Visual C++' not in e_info.message:\n raise\n elif using_cython == CYTHON_REQUIRED:\n print(\"\"\"\nERROR: Cython was explicitly requested with --with-cython, but cythonization\n of core Pyomo modules 
failed.\n\"\"\")\n raise\n else:\n print(\"\"\"\nERROR: setup() failed:\n %s\nRe-running setup() without the Cython modules\n\"\"\" % (e_info.message,))\n ext_modules = []\n run_setup()\n print(\"\"\"\nWARNING: Installation completed successfully, but the attempt to cythonize\n core Pyomo modules failed. Cython provides performance\n optimizations and is not required for any Pyomo functionality.\n Cython returned the following error:\n \"%s\"\n\"\"\" % (e_info.message,))\n", "path": "setup.py"}]} | 3,875 | 124 |
gh_patches_debug_39485 | rasdani/github-patches | git_diff | Kinto__kinto-953 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add limit for cache size (especially memory)
original: https://github.com/mozilla-services/cliquet/issues/406
That's an improvement so we can control the cache size. Otherwise the cache grows without bound and eventually crashes the server.
Which makes me think: if I run a load test on stage today, will the kinto process grow indefinitely in memory?
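One possible direction — a rough sketch only, with hypothetical names rather than Kinto's actual cache backend API — is to keep an approximate byte count of what the in-memory backend stores and evict the oldest entries once a configured maximum is exceeded:
```python
class BoundedMemoryCache:
    """Toy illustration: cap an in-memory cache by an approximate byte
    budget, evicting the oldest entries first. All names and the size
    estimate are assumptions, not Kinto's real implementation."""

    def __init__(self, max_size_bytes=524288):
        self.max_size_bytes = max_size_bytes
        self._store = {}       # key -> value
        self._created_at = {}  # key -> insertion counter (oldest = smallest)
        self._counter = 0
        self._quota = 0        # rough byte estimate of stored data

    def set(self, key, value):
        if key in self._store:
            self.delete(key)
        self._evict_if_needed()
        self._store[key] = value
        self._created_at[key] = self._counter
        self._counter += 1
        self._quota += len(key) + len(str(value))

    def get(self, key):
        return self._store.get(key)

    def delete(self, key):
        value = self._store.pop(key, None)
        self._created_at.pop(key, None)
        if value is not None:
            self._quota -= len(key) + len(str(value))

    def _evict_if_needed(self):
        if self._quota < self.max_size_bytes:
            return
        # Drop oldest entries until we are back under ~80% of the budget.
        for key, _ in sorted(self._created_at.items(), key=lambda kv: kv[1]):
            if self._quota < self.max_size_bytes * 0.8:
                break
            self.delete(key)
```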
</issue>
<code>
[start of kinto/core/__init__.py]
1 """Main entry point
2 """
3 import pkg_resources
4
5 from cornice import Service as CorniceService
6 from pyramid.settings import aslist
7
8 from kinto.core import errors
9 from kinto.core import events
10 from kinto.core.initialization import ( # NOQA
11 initialize, install_middlewares,
12 load_default_settings)
13 from kinto.core.utils import (
14 follow_subrequest, current_service, current_resource_name,
15 prefixed_userid, prefixed_principals)
16 from kinto.core.logs import logger
17
18
19 # Module version, as defined in PEP-0396.
20 __version__ = pkg_resources.get_distribution('kinto').version # FIXME?
21
22
23 DEFAULT_SETTINGS = {
24 'backoff': None,
25 'batch_max_requests': 25,
26 'cache_backend': '',
27 'cache_url': '',
28 'cache_pool_size': 25,
29 'cache_prefix': '',
30 'cors_origins': '*',
31 'cors_max_age_seconds': 3600,
32 'eos': None,
33 'eos_message': None,
34 'eos_url': None,
35 'error_info_link': 'https://github.com/Kinto/kinto/issues/',
36 'http_host': None,
37 'http_scheme': None,
38 'id_generator': 'kinto.core.storage.generators.UUID4',
39 'includes': '',
40 'initialization_sequence': (
41 'kinto.core.initialization.setup_request_bound_data',
42 'kinto.core.initialization.setup_json_serializer',
43 'kinto.core.initialization.setup_logging',
44 'kinto.core.initialization.setup_storage',
45 'kinto.core.initialization.setup_permission',
46 'kinto.core.initialization.setup_cache',
47 'kinto.core.initialization.setup_requests_scheme',
48 'kinto.core.initialization.setup_version_redirection',
49 'kinto.core.initialization.setup_deprecation',
50 'kinto.core.initialization.setup_authentication',
51 'kinto.core.initialization.setup_backoff',
52 'kinto.core.initialization.setup_statsd',
53 'kinto.core.initialization.setup_listeners',
54 'kinto.core.events.setup_transaction_hook',
55 ),
56 'event_listeners': '',
57 'heartbeat_timeout_seconds': 10,
58 'logging_renderer': 'kinto.core.logs.ClassicLogRenderer',
59 'newrelic_config': None,
60 'newrelic_env': 'dev',
61 'paginate_by': None,
62 'permission_backend': '',
63 'permission_url': '',
64 'permission_pool_size': 25,
65 'profiler_dir': '/tmp',
66 'profiler_enabled': False,
67 'project_docs': '',
68 'project_name': '',
69 'project_version': '',
70 'readonly': False,
71 'retry_after_seconds': 30,
72 'statsd_backend': 'kinto.core.statsd',
73 'statsd_prefix': 'kinto.core',
74 'statsd_url': None,
75 'storage_backend': '',
76 'storage_url': '',
77 'storage_max_fetch_size': 10000,
78 'storage_pool_size': 25,
79 'tm.annotate_user': False, # Do annotate transactions with the user-id.
80 'transaction_per_request': True,
81 'userid_hmac_secret': '',
82 'version_json_path': 'version.json',
83 'version_prefix_redirect_enabled': True,
84 'trailing_slash_redirect_enabled': True,
85 'multiauth.groupfinder': 'kinto.core.authorization.groupfinder',
86 'multiauth.policies': 'basicauth',
87 'multiauth.policy.basicauth.use': ('kinto.core.authentication.'
88 'BasicAuthAuthenticationPolicy'),
89 'multiauth.authorization_policy': ('kinto.core.authorization.'
90 'AuthorizationPolicy')
91 }
92
93
94 class Service(CorniceService):
95 """Subclass of the default cornice service.
96
97 This is useful in order to attach specific behaviours without monkey
98 patching the default cornice service (which would impact other uses of it)
99 """
100 default_cors_headers = ('Backoff', 'Retry-After', 'Alert',
101 'Content-Length')
102
103 def error_handler(self, request):
104 return errors.json_error_handler(request)
105
106 @classmethod
107 def init_from_settings(cls, settings):
108 cls.cors_origins = tuple(aslist(settings['cors_origins']))
109 cors_max_age = settings['cors_max_age_seconds']
110 cls.cors_max_age = int(cors_max_age) if cors_max_age else None
111
112
113 def includeme(config):
114 settings = config.get_settings()
115
116 # Heartbeat registry.
117 config.registry.heartbeats = {}
118
119 # Public settings registry.
120 config.registry.public_settings = {'batch_max_requests', 'readonly'}
121
122 # Directive to declare arbitrary API capabilities.
123 def add_api_capability(config, identifier, description="", url="", **kw):
124 existing = config.registry.api_capabilities.get(identifier)
125 if existing:
126 error_msg = "The '%s' API capability was already registered (%s)."
127 raise ValueError(error_msg % (identifier, existing))
128
129 capability = dict(description=description, url=url, **kw)
130 config.registry.api_capabilities[identifier] = capability
131
132 config.add_directive('add_api_capability', add_api_capability)
133 config.registry.api_capabilities = {}
134
135 # Resource events helpers.
136 config.add_request_method(events.get_resource_events,
137 name='get_resource_events')
138 config.add_request_method(events.notify_resource_event,
139 name='notify_resource_event')
140
141 # Setup cornice.
142 config.include("cornice")
143
144 # Per-request transaction.
145 config.include("pyramid_tm")
146
147 # Add CORS settings to the base kinto.core Service class.
148 Service.init_from_settings(settings)
149
150 # Setup components.
151 for step in aslist(settings['initialization_sequence']):
152 step_func = config.maybe_dotted(step)
153 step_func(config)
154
155 # Custom helpers.
156 config.add_request_method(follow_subrequest)
157 config.add_request_method(prefixed_userid, property=True)
158 config.add_request_method(prefixed_principals, reify=True)
159 config.add_request_method(lambda r: {
160 'id': r.prefixed_userid,
161 'principals': r.prefixed_principals},
162 name='get_user_info')
163 config.add_request_method(current_resource_name, reify=True)
164 config.add_request_method(current_service, reify=True)
165 config.commit()
166
167 # Include plugins after init, unlike pyramid includes.
168 includes = aslist(settings['includes'])
169 for app in includes:
170 config.include(app)
171
172 # # Show settings to output.
173 # for key, value in settings.items():
174 # logger.info('Using %s = %s' % (key, value))
175
176 # Scan views.
177 config.scan("kinto.core.views")
178
179 # Give sign of life.
180 msg = "Running %(project_name)s %(project_version)s."
181 logger.info(msg % settings)
182
[end of kinto/core/__init__.py]
[start of kinto/core/cache/memory.py]
1 from kinto.core.cache import CacheBase
2 from kinto.core.utils import msec_time, synchronized
3
4
5 class Cache(CacheBase):
6 """Cache backend implementation in local process memory.
7
8 Enable in configuration::
9
10 kinto.cache_backend = kinto.core.cache.memory
11
12 :noindex:
13 """
14
15 def __init__(self, *args, **kwargs):
16 super(Cache, self).__init__(*args, **kwargs)
17 self.flush()
18
19 def initialize_schema(self, dry_run=False):
20 # Nothing to do.
21 pass
22
23 def flush(self):
24 self._ttl = {}
25 self._store = {}
26
27 @synchronized
28 def ttl(self, key):
29 ttl = self._ttl.get(self.prefix + key)
30 if ttl is not None:
31 return (ttl - msec_time()) / 1000.0
32 return -1
33
34 @synchronized
35 def expire(self, key, ttl):
36 self._ttl[self.prefix + key] = msec_time() + int(ttl * 1000.0)
37
38 @synchronized
39 def set(self, key, value, ttl=None):
40 if ttl is not None:
41 self.expire(key, ttl)
42 self._store[self.prefix + key] = value
43
44 @synchronized
45 def get(self, key):
46 current = msec_time()
47 expired = [k for k, v in self._ttl.items() if current >= v]
48 for expired_item_key in expired:
49 self.delete(expired_item_key[len(self.prefix):])
50 return self._store.get(self.prefix + key)
51
52 @synchronized
53 def delete(self, key):
54 key = self.prefix + key
55 self._ttl.pop(key, None)
56 self._store.pop(key, None)
57
58
59 def load_from_config(config):
60 settings = config.get_settings()
61 return Cache(cache_prefix=settings['cache_prefix'])
62
[end of kinto/core/cache/memory.py]
[start of kinto/core/cache/__init__.py]
1 import random
2
3 from kinto.core.logs import logger
4
5
6 _HEARTBEAT_DELETE_RATE = 0.5
7 _HEARTBEAT_KEY = '__heartbeat__'
8 _HEARTBEAT_TTL_SECONDS = 3600
9
10
11 class CacheBase(object):
12
13 def __init__(self, *args, **kwargs):
14 self.prefix = kwargs['cache_prefix']
15 pass
16
17 def initialize_schema(self, dry_run=False):
18 """Create every necessary objects (like tables or indices) in the
19 backend.
20
21 This is executed when the ``kinto migrate`` command is run.
22
23 :param bool dry_run: simulate instead of executing the operations.
24 """
25 raise NotImplementedError
26
27 def flush(self):
28 """Delete every values."""
29 raise NotImplementedError
30
31 def ttl(self, key):
32 """Obtain the expiration value of the specified `key`.
33
34 :param str key: key
35 :returns: number of seconds or negative if no TTL.
36 :rtype: float
37 """
38 raise NotImplementedError
39
40 def expire(self, key, ttl):
41 """Set the expiration value `ttl` for the specified `key`.
42
43 :param str key: key
44 :param float ttl: number of seconds
45 """
46 raise NotImplementedError
47
48 def set(self, key, value, ttl=None):
49 """Store a value with the specified `key`. If `ttl` is provided,
50 set an expiration value.
51
52 :param str key: key
53 :param str value: value to store
54 :param float ttl: expire after number of seconds
55 """
56 raise NotImplementedError
57
58 def get(self, key):
59 """Obtain the value of the specified `key`.
60
61 :param str key: key
62 :returns: the stored value or None if missing.
63 :rtype: str
64 """
65 raise NotImplementedError
66
67 def delete(self, key):
68 """Delete the value of the specified `key`.
69
70 :param str key: key
71 """
72 raise NotImplementedError
73
74
75 def heartbeat(backend):
76 def ping(request):
77         """Test that cache backend is operational.
78
79 :param request: current request object
80 :type request: :class:`~pyramid:pyramid.request.Request`
81 :returns: ``True`` is everything is ok, ``False`` otherwise.
82 :rtype: bool
83 """
84 # No specific case for readonly mode because the cache should
85 # continue to work in that mode.
86 try:
87 if random.random() < _HEARTBEAT_DELETE_RATE:
88 backend.delete(_HEARTBEAT_KEY)
89 else:
90 backend.set(_HEARTBEAT_KEY, 'alive', _HEARTBEAT_TTL_SECONDS)
91 return True
92 except:
93 logger.exception("Heartbeat Failure")
94 return False
95
96 return ping
97
[end of kinto/core/cache/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/core/__init__.py b/kinto/core/__init__.py
--- a/kinto/core/__init__.py
+++ b/kinto/core/__init__.py
@@ -27,6 +27,7 @@
'cache_url': '',
'cache_pool_size': 25,
'cache_prefix': '',
+ 'cache_max_size_bytes': 524288,
'cors_origins': '*',
'cors_max_age_seconds': 3600,
'eos': None,
diff --git a/kinto/core/cache/__init__.py b/kinto/core/cache/__init__.py
--- a/kinto/core/cache/__init__.py
+++ b/kinto/core/cache/__init__.py
@@ -12,7 +12,7 @@
def __init__(self, *args, **kwargs):
self.prefix = kwargs['cache_prefix']
- pass
+ self.max_size_bytes = kwargs.get('cache_max_size_bytes')
def initialize_schema(self, dry_run=False):
"""Create every necessary objects (like tables or indices) in the
diff --git a/kinto/core/cache/memory.py b/kinto/core/cache/memory.py
--- a/kinto/core/cache/memory.py
+++ b/kinto/core/cache/memory.py
@@ -21,8 +21,25 @@
pass
def flush(self):
+ self._created_at = {}
self._ttl = {}
self._store = {}
+ self._quota = 0
+
+ def _clean_expired(self):
+ current = msec_time()
+ expired = [k for k, v in self._ttl.items() if current >= v]
+ for expired_item_key in expired:
+ self.delete(expired_item_key[len(self.prefix):])
+
+ def _clean_oversized(self):
+ if self._quota < self.max_size_bytes:
+ return
+
+ for key, value in sorted(self._created_at.items(), key=lambda k: k[1]):
+ if self._quota < (self.max_size_bytes * 0.8):
+ break
+ self.delete(key[len(self.prefix):])
@synchronized
def ttl(self, key):
@@ -37,25 +54,36 @@
@synchronized
def set(self, key, value, ttl=None):
+ self._clean_expired()
+ self._clean_oversized()
if ttl is not None:
self.expire(key, ttl)
- self._store[self.prefix + key] = value
+ item_key = self.prefix + key
+ self._store[item_key] = value
+ self._created_at[item_key] = msec_time()
+ self._quota += size_of(item_key, value)
@synchronized
def get(self, key):
- current = msec_time()
- expired = [k for k, v in self._ttl.items() if current >= v]
- for expired_item_key in expired:
- self.delete(expired_item_key[len(self.prefix):])
+ self._clean_expired()
return self._store.get(self.prefix + key)
@synchronized
def delete(self, key):
key = self.prefix + key
self._ttl.pop(key, None)
- self._store.pop(key, None)
+ self._created_at.pop(key, None)
+ value = self._store.pop(key, None)
+ self._quota -= size_of(key, value)
def load_from_config(config):
settings = config.get_settings()
- return Cache(cache_prefix=settings['cache_prefix'])
+ return Cache(cache_prefix=settings['cache_prefix'],
+ cache_max_size_bytes=settings['cache_max_size_bytes'])
+
+
+def size_of(key, value):
+ # Key used for ttl, created_at and store.
+ # Int size is 24 bytes one for ttl and one for created_at values
+ return len(key) * 3 + len(str(value)) + 24 * 2
| {"golden_diff": "diff --git a/kinto/core/__init__.py b/kinto/core/__init__.py\n--- a/kinto/core/__init__.py\n+++ b/kinto/core/__init__.py\n@@ -27,6 +27,7 @@\n 'cache_url': '',\n 'cache_pool_size': 25,\n 'cache_prefix': '',\n+ 'cache_max_size_bytes': 524288,\n 'cors_origins': '*',\n 'cors_max_age_seconds': 3600,\n 'eos': None,\ndiff --git a/kinto/core/cache/__init__.py b/kinto/core/cache/__init__.py\n--- a/kinto/core/cache/__init__.py\n+++ b/kinto/core/cache/__init__.py\n@@ -12,7 +12,7 @@\n \n def __init__(self, *args, **kwargs):\n self.prefix = kwargs['cache_prefix']\n- pass\n+ self.max_size_bytes = kwargs.get('cache_max_size_bytes')\n \n def initialize_schema(self, dry_run=False):\n \"\"\"Create every necessary objects (like tables or indices) in the\ndiff --git a/kinto/core/cache/memory.py b/kinto/core/cache/memory.py\n--- a/kinto/core/cache/memory.py\n+++ b/kinto/core/cache/memory.py\n@@ -21,8 +21,25 @@\n pass\n \n def flush(self):\n+ self._created_at = {}\n self._ttl = {}\n self._store = {}\n+ self._quota = 0\n+\n+ def _clean_expired(self):\n+ current = msec_time()\n+ expired = [k for k, v in self._ttl.items() if current >= v]\n+ for expired_item_key in expired:\n+ self.delete(expired_item_key[len(self.prefix):])\n+\n+ def _clean_oversized(self):\n+ if self._quota < self.max_size_bytes:\n+ return\n+\n+ for key, value in sorted(self._created_at.items(), key=lambda k: k[1]):\n+ if self._quota < (self.max_size_bytes * 0.8):\n+ break\n+ self.delete(key[len(self.prefix):])\n \n @synchronized\n def ttl(self, key):\n@@ -37,25 +54,36 @@\n \n @synchronized\n def set(self, key, value, ttl=None):\n+ self._clean_expired()\n+ self._clean_oversized()\n if ttl is not None:\n self.expire(key, ttl)\n- self._store[self.prefix + key] = value\n+ item_key = self.prefix + key\n+ self._store[item_key] = value\n+ self._created_at[item_key] = msec_time()\n+ self._quota += size_of(item_key, value)\n \n @synchronized\n def get(self, key):\n- current = msec_time()\n- expired = [k for k, v in self._ttl.items() if current >= v]\n- for expired_item_key in expired:\n- self.delete(expired_item_key[len(self.prefix):])\n+ self._clean_expired()\n return self._store.get(self.prefix + key)\n \n @synchronized\n def delete(self, key):\n key = self.prefix + key\n self._ttl.pop(key, None)\n- self._store.pop(key, None)\n+ self._created_at.pop(key, None)\n+ value = self._store.pop(key, None)\n+ self._quota -= size_of(key, value)\n \n \n def load_from_config(config):\n settings = config.get_settings()\n- return Cache(cache_prefix=settings['cache_prefix'])\n+ return Cache(cache_prefix=settings['cache_prefix'],\n+ cache_max_size_bytes=settings['cache_max_size_bytes'])\n+\n+\n+def size_of(key, value):\n+ # Key used for ttl, created_at and store.\n+ # Int size is 24 bytes one for ttl and one for created_at values\n+ return len(key) * 3 + len(str(value)) + 24 * 2\n", "issue": "Add limit for cache size (especially memory)\noriginal: https://github.com/mozilla-services/cliquet/issues/406\n\nThat's an improvement so we control the cache size. Otherwise it's a free wheel that eventually crashes the server. 
\n\nWhich makes me think: if I do a load test on stage today, is the kinto process growing indefinitely in memory ?\n\n", "before_files": [{"content": "\"\"\"Main entry point\n\"\"\"\nimport pkg_resources\n\nfrom cornice import Service as CorniceService\nfrom pyramid.settings import aslist\n\nfrom kinto.core import errors\nfrom kinto.core import events\nfrom kinto.core.initialization import ( # NOQA\n initialize, install_middlewares,\n load_default_settings)\nfrom kinto.core.utils import (\n follow_subrequest, current_service, current_resource_name,\n prefixed_userid, prefixed_principals)\nfrom kinto.core.logs import logger\n\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution('kinto').version # FIXME?\n\n\nDEFAULT_SETTINGS = {\n 'backoff': None,\n 'batch_max_requests': 25,\n 'cache_backend': '',\n 'cache_url': '',\n 'cache_pool_size': 25,\n 'cache_prefix': '',\n 'cors_origins': '*',\n 'cors_max_age_seconds': 3600,\n 'eos': None,\n 'eos_message': None,\n 'eos_url': None,\n 'error_info_link': 'https://github.com/Kinto/kinto/issues/',\n 'http_host': None,\n 'http_scheme': None,\n 'id_generator': 'kinto.core.storage.generators.UUID4',\n 'includes': '',\n 'initialization_sequence': (\n 'kinto.core.initialization.setup_request_bound_data',\n 'kinto.core.initialization.setup_json_serializer',\n 'kinto.core.initialization.setup_logging',\n 'kinto.core.initialization.setup_storage',\n 'kinto.core.initialization.setup_permission',\n 'kinto.core.initialization.setup_cache',\n 'kinto.core.initialization.setup_requests_scheme',\n 'kinto.core.initialization.setup_version_redirection',\n 'kinto.core.initialization.setup_deprecation',\n 'kinto.core.initialization.setup_authentication',\n 'kinto.core.initialization.setup_backoff',\n 'kinto.core.initialization.setup_statsd',\n 'kinto.core.initialization.setup_listeners',\n 'kinto.core.events.setup_transaction_hook',\n ),\n 'event_listeners': '',\n 'heartbeat_timeout_seconds': 10,\n 'logging_renderer': 'kinto.core.logs.ClassicLogRenderer',\n 'newrelic_config': None,\n 'newrelic_env': 'dev',\n 'paginate_by': None,\n 'permission_backend': '',\n 'permission_url': '',\n 'permission_pool_size': 25,\n 'profiler_dir': '/tmp',\n 'profiler_enabled': False,\n 'project_docs': '',\n 'project_name': '',\n 'project_version': '',\n 'readonly': False,\n 'retry_after_seconds': 30,\n 'statsd_backend': 'kinto.core.statsd',\n 'statsd_prefix': 'kinto.core',\n 'statsd_url': None,\n 'storage_backend': '',\n 'storage_url': '',\n 'storage_max_fetch_size': 10000,\n 'storage_pool_size': 25,\n 'tm.annotate_user': False, # Do annotate transactions with the user-id.\n 'transaction_per_request': True,\n 'userid_hmac_secret': '',\n 'version_json_path': 'version.json',\n 'version_prefix_redirect_enabled': True,\n 'trailing_slash_redirect_enabled': True,\n 'multiauth.groupfinder': 'kinto.core.authorization.groupfinder',\n 'multiauth.policies': 'basicauth',\n 'multiauth.policy.basicauth.use': ('kinto.core.authentication.'\n 'BasicAuthAuthenticationPolicy'),\n 'multiauth.authorization_policy': ('kinto.core.authorization.'\n 'AuthorizationPolicy')\n}\n\n\nclass Service(CorniceService):\n \"\"\"Subclass of the default cornice service.\n\n This is useful in order to attach specific behaviours without monkey\n patching the default cornice service (which would impact other uses of it)\n \"\"\"\n default_cors_headers = ('Backoff', 'Retry-After', 'Alert',\n 'Content-Length')\n\n def error_handler(self, request):\n return errors.json_error_handler(request)\n\n 
@classmethod\n def init_from_settings(cls, settings):\n cls.cors_origins = tuple(aslist(settings['cors_origins']))\n cors_max_age = settings['cors_max_age_seconds']\n cls.cors_max_age = int(cors_max_age) if cors_max_age else None\n\n\ndef includeme(config):\n settings = config.get_settings()\n\n # Heartbeat registry.\n config.registry.heartbeats = {}\n\n # Public settings registry.\n config.registry.public_settings = {'batch_max_requests', 'readonly'}\n\n # Directive to declare arbitrary API capabilities.\n def add_api_capability(config, identifier, description=\"\", url=\"\", **kw):\n existing = config.registry.api_capabilities.get(identifier)\n if existing:\n error_msg = \"The '%s' API capability was already registered (%s).\"\n raise ValueError(error_msg % (identifier, existing))\n\n capability = dict(description=description, url=url, **kw)\n config.registry.api_capabilities[identifier] = capability\n\n config.add_directive('add_api_capability', add_api_capability)\n config.registry.api_capabilities = {}\n\n # Resource events helpers.\n config.add_request_method(events.get_resource_events,\n name='get_resource_events')\n config.add_request_method(events.notify_resource_event,\n name='notify_resource_event')\n\n # Setup cornice.\n config.include(\"cornice\")\n\n # Per-request transaction.\n config.include(\"pyramid_tm\")\n\n # Add CORS settings to the base kinto.core Service class.\n Service.init_from_settings(settings)\n\n # Setup components.\n for step in aslist(settings['initialization_sequence']):\n step_func = config.maybe_dotted(step)\n step_func(config)\n\n # Custom helpers.\n config.add_request_method(follow_subrequest)\n config.add_request_method(prefixed_userid, property=True)\n config.add_request_method(prefixed_principals, reify=True)\n config.add_request_method(lambda r: {\n 'id': r.prefixed_userid,\n 'principals': r.prefixed_principals},\n name='get_user_info')\n config.add_request_method(current_resource_name, reify=True)\n config.add_request_method(current_service, reify=True)\n config.commit()\n\n # Include plugins after init, unlike pyramid includes.\n includes = aslist(settings['includes'])\n for app in includes:\n config.include(app)\n\n # # Show settings to output.\n # for key, value in settings.items():\n # logger.info('Using %s = %s' % (key, value))\n\n # Scan views.\n config.scan(\"kinto.core.views\")\n\n # Give sign of life.\n msg = \"Running %(project_name)s %(project_version)s.\"\n logger.info(msg % settings)\n", "path": "kinto/core/__init__.py"}, {"content": "from kinto.core.cache import CacheBase\nfrom kinto.core.utils import msec_time, synchronized\n\n\nclass Cache(CacheBase):\n \"\"\"Cache backend implementation in local process memory.\n\n Enable in configuration::\n\n kinto.cache_backend = kinto.core.cache.memory\n\n :noindex:\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Cache, self).__init__(*args, **kwargs)\n self.flush()\n\n def initialize_schema(self, dry_run=False):\n # Nothing to do.\n pass\n\n def flush(self):\n self._ttl = {}\n self._store = {}\n\n @synchronized\n def ttl(self, key):\n ttl = self._ttl.get(self.prefix + key)\n if ttl is not None:\n return (ttl - msec_time()) / 1000.0\n return -1\n\n @synchronized\n def expire(self, key, ttl):\n self._ttl[self.prefix + key] = msec_time() + int(ttl * 1000.0)\n\n @synchronized\n def set(self, key, value, ttl=None):\n if ttl is not None:\n self.expire(key, ttl)\n self._store[self.prefix + key] = value\n\n @synchronized\n def get(self, key):\n current = msec_time()\n expired = [k for k, v 
in self._ttl.items() if current >= v]\n for expired_item_key in expired:\n self.delete(expired_item_key[len(self.prefix):])\n return self._store.get(self.prefix + key)\n\n @synchronized\n def delete(self, key):\n key = self.prefix + key\n self._ttl.pop(key, None)\n self._store.pop(key, None)\n\n\ndef load_from_config(config):\n settings = config.get_settings()\n return Cache(cache_prefix=settings['cache_prefix'])\n", "path": "kinto/core/cache/memory.py"}, {"content": "import random\n\nfrom kinto.core.logs import logger\n\n\n_HEARTBEAT_DELETE_RATE = 0.5\n_HEARTBEAT_KEY = '__heartbeat__'\n_HEARTBEAT_TTL_SECONDS = 3600\n\n\nclass CacheBase(object):\n\n def __init__(self, *args, **kwargs):\n self.prefix = kwargs['cache_prefix']\n pass\n\n def initialize_schema(self, dry_run=False):\n \"\"\"Create every necessary objects (like tables or indices) in the\n backend.\n\n This is executed when the ``kinto migrate`` command is run.\n\n :param bool dry_run: simulate instead of executing the operations.\n \"\"\"\n raise NotImplementedError\n\n def flush(self):\n \"\"\"Delete every values.\"\"\"\n raise NotImplementedError\n\n def ttl(self, key):\n \"\"\"Obtain the expiration value of the specified `key`.\n\n :param str key: key\n :returns: number of seconds or negative if no TTL.\n :rtype: float\n \"\"\"\n raise NotImplementedError\n\n def expire(self, key, ttl):\n \"\"\"Set the expiration value `ttl` for the specified `key`.\n\n :param str key: key\n :param float ttl: number of seconds\n \"\"\"\n raise NotImplementedError\n\n def set(self, key, value, ttl=None):\n \"\"\"Store a value with the specified `key`. If `ttl` is provided,\n set an expiration value.\n\n :param str key: key\n :param str value: value to store\n :param float ttl: expire after number of seconds\n \"\"\"\n raise NotImplementedError\n\n def get(self, key):\n \"\"\"Obtain the value of the specified `key`.\n\n :param str key: key\n :returns: the stored value or None if missing.\n :rtype: str\n \"\"\"\n raise NotImplementedError\n\n def delete(self, key):\n \"\"\"Delete the value of the specified `key`.\n\n :param str key: key\n \"\"\"\n raise NotImplementedError\n\n\ndef heartbeat(backend):\n def ping(request):\n \"\"\"Test that cache backend is operationnal.\n\n :param request: current request object\n :type request: :class:`~pyramid:pyramid.request.Request`\n :returns: ``True`` is everything is ok, ``False`` otherwise.\n :rtype: bool\n \"\"\"\n # No specific case for readonly mode because the cache should\n # continue to work in that mode.\n try:\n if random.random() < _HEARTBEAT_DELETE_RATE:\n backend.delete(_HEARTBEAT_KEY)\n else:\n backend.set(_HEARTBEAT_KEY, 'alive', _HEARTBEAT_TTL_SECONDS)\n return True\n except:\n logger.exception(\"Heartbeat Failure\")\n return False\n\n return ping\n", "path": "kinto/core/cache/__init__.py"}]} | 3,859 | 874 |
gh_patches_debug_16511 | rasdani/github-patches | git_diff | cupy__cupy-7405 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Drop support for Python 3.7, NumPy 1.20, and SciPy 1.6 on document and setup.py
#7405 has some CI issues, so we update the documentation and setup.py first for the next release.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 import glob
4 import os
5 from setuptools import setup, find_packages
6 import sys
7
8 source_root = os.path.abspath(os.path.dirname(__file__))
9 sys.path.append(os.path.join(source_root, 'install'))
10
11 import cupy_builder # NOQA
12 from cupy_builder import cupy_setup_build # NOQA
13
14 ctx = cupy_builder.Context(source_root)
15 cupy_builder.initialize(ctx)
16 if not cupy_builder.preflight_check(ctx):
17 sys.exit(1)
18
19
20 # TODO(kmaehashi): migrate to pyproject.toml (see #4727, #4619)
21 setup_requires = [
22 'Cython>=0.29.22,<3',
23 'fastrlock>=0.5',
24 ]
25 install_requires = [
26 'numpy>=1.20,<1.27', # see #4773
27 'fastrlock>=0.5',
28 ]
29 extras_require = {
30 'all': [
31 'scipy>=1.6,<1.12', # see #4773
32 'Cython>=0.29.22,<3',
33 'optuna>=2.0',
34 ],
35 # TODO(kmaehashi): remove stylecheck and update the contribution guide
36 'stylecheck': [
37 'autopep8==1.5.5',
38 'flake8==3.8.4',
39 'pbr==5.5.1',
40 'pycodestyle==2.6.0',
41
42 'mypy==0.950',
43 'types-setuptools==57.4.14',
44 ],
45 'test': [
46 # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI.
47 # pytest < 7.2 has some different behavior that makes our CI fail
48 'pytest>=7.2',
49 'hypothesis>=6.37.2,<6.55.0',
50 ],
51 }
52 tests_require = extras_require['test']
53
54
55 # List of files that needs to be in the distribution (sdist/wheel).
56 # Notes:
57 # - Files only needed in sdist should be added to `MANIFEST.in`.
58 # - The following glob (`**`) ignores items starting with `.`.
59 cupy_package_data = [
60 'cupy/cuda/cupy_thrust.cu',
61 'cupy/cuda/cupy_cub.cu',
62 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback
63 'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback
64 'cupy/cuda/cupy_cufft.h', # for cuFFT callback
65 'cupy/cuda/cufft.pxd', # for cuFFT callback
66 'cupy/cuda/cufft.pyx', # for cuFFT callback
67 'cupy/random/cupy_distributions.cu',
68 'cupy/random/cupy_distributions.cuh',
69 ] + [
70 x for x in glob.glob('cupy/_core/include/cupy/**', recursive=True)
71 if os.path.isfile(x)
72 ]
73
74 package_data = {
75 'cupy': [
76 os.path.relpath(x, 'cupy') for x in cupy_package_data
77 ],
78 }
79
80 package_data['cupy'] += cupy_setup_build.prepare_wheel_libs(ctx)
81
82
83 if len(sys.argv) < 2 or sys.argv[1] == 'egg_info':
84 # Extensions are unnecessary for egg_info generation as all sources files
85 # can be enumerated via MANIFEST.in.
86 ext_modules = []
87 else:
88 ext_modules = cupy_setup_build.get_ext_modules(True, ctx)
89
90
91 # Get __version__ variable
92 with open(os.path.join(source_root, 'cupy', '_version.py')) as f:
93 exec(f.read())
94
95 long_description = None
96 if ctx.long_description_path is not None:
97 with open(ctx.long_description_path) as f:
98 long_description = f.read()
99
100
101 CLASSIFIERS = """\
102 Development Status :: 5 - Production/Stable
103 Intended Audience :: Science/Research
104 Intended Audience :: Developers
105 License :: OSI Approved :: MIT License
106 Programming Language :: Python
107 Programming Language :: Python :: 3
108 Programming Language :: Python :: 3.7
109 Programming Language :: Python :: 3.8
110 Programming Language :: Python :: 3.9
111 Programming Language :: Python :: 3.10
112 Programming Language :: Python :: 3.11
113 Programming Language :: Python :: 3 :: Only
114 Programming Language :: Cython
115 Topic :: Software Development
116 Topic :: Scientific/Engineering
117 Operating System :: POSIX
118 Operating System :: Microsoft :: Windows
119 """
120
121
122 setup(
123 name=ctx.package_name,
124 version=__version__, # NOQA
125 description='CuPy: NumPy & SciPy for GPU',
126 long_description=long_description,
127 author='Seiya Tokui',
128 author_email='[email protected]',
129 maintainer='CuPy Developers',
130 url='https://cupy.dev/',
131 license='MIT License',
132 project_urls={
133 "Bug Tracker": "https://github.com/cupy/cupy/issues",
134 "Documentation": "https://docs.cupy.dev/",
135 "Source Code": "https://github.com/cupy/cupy",
136 },
137 classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
138 packages=find_packages(exclude=['install', 'tests']),
139 package_data=package_data,
140 zip_safe=False,
141 python_requires='>=3.7',
142 setup_requires=setup_requires,
143 install_requires=install_requires,
144 tests_require=tests_require,
145 extras_require=extras_require,
146 ext_modules=ext_modules,
147 cmdclass={'build_ext': cupy_builder._command.custom_build_ext},
148 )
149
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -105,7 +105,6 @@
License :: OSI Approved :: MIT License
Programming Language :: Python
Programming Language :: Python :: 3
-Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
@@ -138,7 +137,7 @@
packages=find_packages(exclude=['install', 'tests']),
package_data=package_data,
zip_safe=False,
- python_requires='>=3.7',
+ python_requires='>=3.8',
setup_requires=setup_requires,
install_requires=install_requires,
tests_require=tests_require,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -105,7 +105,6 @@\n License :: OSI Approved :: MIT License\n Programming Language :: Python\n Programming Language :: Python :: 3\n-Programming Language :: Python :: 3.7\n Programming Language :: Python :: 3.8\n Programming Language :: Python :: 3.9\n Programming Language :: Python :: 3.10\n@@ -138,7 +137,7 @@\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n- python_requires='>=3.7',\n+ python_requires='>=3.8',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n", "issue": "Drop support for Python 3.7, NumPy 1.20, and SciPy 1.6 on document and setup.py\n#7405 has some CI issues, so we update the documentation and setup.py first for the next release.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport glob\nimport os\nfrom setuptools import setup, find_packages\nimport sys\n\nsource_root = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(os.path.join(source_root, 'install'))\n\nimport cupy_builder # NOQA\nfrom cupy_builder import cupy_setup_build # NOQA\n\nctx = cupy_builder.Context(source_root)\ncupy_builder.initialize(ctx)\nif not cupy_builder.preflight_check(ctx):\n sys.exit(1)\n\n\n# TODO(kmaehashi): migrate to pyproject.toml (see #4727, #4619)\nsetup_requires = [\n 'Cython>=0.29.22,<3',\n 'fastrlock>=0.5',\n]\ninstall_requires = [\n 'numpy>=1.20,<1.27', # see #4773\n 'fastrlock>=0.5',\n]\nextras_require = {\n 'all': [\n 'scipy>=1.6,<1.12', # see #4773\n 'Cython>=0.29.22,<3',\n 'optuna>=2.0',\n ],\n # TODO(kmaehashi): remove stylecheck and update the contribution guide\n 'stylecheck': [\n 'autopep8==1.5.5',\n 'flake8==3.8.4',\n 'pbr==5.5.1',\n 'pycodestyle==2.6.0',\n\n 'mypy==0.950',\n 'types-setuptools==57.4.14',\n ],\n 'test': [\n # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI.\n # pytest < 7.2 has some different behavior that makes our CI fail\n 'pytest>=7.2',\n 'hypothesis>=6.37.2,<6.55.0',\n ],\n}\ntests_require = extras_require['test']\n\n\n# List of files that needs to be in the distribution (sdist/wheel).\n# Notes:\n# - Files only needed in sdist should be added to `MANIFEST.in`.\n# - The following glob (`**`) ignores items starting with `.`.\ncupy_package_data = [\n 'cupy/cuda/cupy_thrust.cu',\n 'cupy/cuda/cupy_cub.cu',\n 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback\n 'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback\n 'cupy/cuda/cupy_cufft.h', # for cuFFT callback\n 'cupy/cuda/cufft.pxd', # for cuFFT callback\n 'cupy/cuda/cufft.pyx', # for cuFFT callback\n 'cupy/random/cupy_distributions.cu',\n 'cupy/random/cupy_distributions.cuh',\n] + [\n x for x in glob.glob('cupy/_core/include/cupy/**', recursive=True)\n if os.path.isfile(x)\n]\n\npackage_data = {\n 'cupy': [\n os.path.relpath(x, 'cupy') for x in cupy_package_data\n ],\n}\n\npackage_data['cupy'] += cupy_setup_build.prepare_wheel_libs(ctx)\n\n\nif len(sys.argv) < 2 or sys.argv[1] == 'egg_info':\n # Extensions are unnecessary for egg_info generation as all sources files\n # can be enumerated via MANIFEST.in.\n ext_modules = []\nelse:\n ext_modules = cupy_setup_build.get_ext_modules(True, ctx)\n\n\n# Get __version__ variable\nwith open(os.path.join(source_root, 'cupy', '_version.py')) as f:\n exec(f.read())\n\nlong_description = None\nif ctx.long_description_path is not None:\n with open(ctx.long_description_path) as f:\n long_description = f.read()\n\n\nCLASSIFIERS = 
\"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3.8\nProgramming Language :: Python :: 3.9\nProgramming Language :: Python :: 3.10\nProgramming Language :: Python :: 3.11\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: POSIX\nOperating System :: Microsoft :: Windows\n\"\"\"\n\n\nsetup(\n name=ctx.package_name,\n version=__version__, # NOQA\n description='CuPy: NumPy & SciPy for GPU',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n maintainer='CuPy Developers',\n url='https://cupy.dev/',\n license='MIT License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs.cupy.dev/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.7',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': cupy_builder._command.custom_build_ext},\n)\n", "path": "setup.py"}]} | 2,176 | 176 |
gh_patches_debug_13939 | rasdani/github-patches | git_diff | mindsdb__mindsdb-1639 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add new method to return the columns for MySQL datasources :electric_plug: :1234:
When MindsDB creates a new MySQL datasource, we get column information by fetching all datasources. The problem here is that if the datasource is big, this takes a lot of time. We need a new `get_columns` method that returns the column names per datasource. The PR should include this method inside the `MySQL` class.
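A minimal sketch of what such a method could look like, reusing the existing `_query` helper shown in the code below — the method name comes from this issue, while the parameters and the `information_schema` query are assumptions, so the final signature may differ:
```python
def get_columns(self, table_name, schema_name):
    # Ask information_schema for the column names instead of fetching
    # the datasource's rows, so large tables stay cheap to inspect.
    q = f'''
        SELECT COLUMN_NAME
        FROM information_schema.columns
        WHERE table_schema = '{schema_name}'
          AND table_name = '{table_name}';
    '''
    result = self._query(q)
    return [row['COLUMN_NAME'] for row in result]
```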
## Steps :male_detective: :female_detective:
- Implement in https://github.com/mindsdb/mindsdb/blob/stable/mindsdb/integrations/mysql/mysql.py#L51
- Push to staging branch
## Additional rewards :1st_place_medal:
Each code PR brings :three: points for entry into the draw for a :computer: Deep Learning Laptop powered by the NVIDIA RTX 3080 Max-Q GPU, or other swag :shirt: :bear:. For more info check out https://mindsdb.com/hacktoberfest/
</issue>
<code>
[start of mindsdb/integrations/mysql/mysql.py]
1 import os
2 import shutil
3 import tempfile
4
5 from contextlib import closing
6 import mysql.connector
7
8 from lightwood.api import dtype
9 from mindsdb.integrations.base import Integration
10 from mindsdb.utilities.log import log
11
12
13 class MySQLConnectionChecker:
14 def __init__(self, **kwargs):
15 self.host = kwargs.get('host')
16 self.port = kwargs.get('port')
17 self.user = kwargs.get('user')
18 self.password = kwargs.get('password')
19 self.ssl = kwargs.get('ssl')
20 self.ssl_ca = kwargs.get('ssl_ca')
21 self.ssl_cert = kwargs.get('ssl_cert')
22 self.ssl_key = kwargs.get('ssl_key')
23
24 def _get_connnection(self):
25 config = {
26 "host": self.host,
27 "port": self.port,
28 "user": self.user,
29 "password": self.password
30 }
31 if self.ssl is True:
32 config['client_flags'] = [mysql.connector.constants.ClientFlag.SSL]
33 if self.ssl_ca is not None:
34 config["ssl_ca"] = self.ssl_ca
35 if self.ssl_cert is not None:
36 config["ssl_cert"] = self.ssl_cert
37 if self.ssl_key is not None:
38 config["ssl_key"] = self.ssl_key
39 return mysql.connector.connect(**config)
40
41 def check_connection(self):
42 try:
43 con = self._get_connnection()
44 with closing(con) as con:
45 connected = con.is_connected()
46 except Exception:
47 connected = False
48 return connected
49
50
51 class MySQL(Integration, MySQLConnectionChecker):
52 def __init__(self, config, name, db_info):
53 super().__init__(config, name)
54 self.user = db_info.get('user')
55 self.password = db_info.get('password')
56 self.host = db_info.get('host')
57 self.port = db_info.get('port')
58 self.ssl = db_info.get('ssl')
59 self.ssl_ca = db_info.get('ssl_ca')
60 self.ssl_cert = db_info.get('ssl_cert')
61 self.ssl_key = db_info.get('ssl_key')
62
63 def _to_mysql_table(self, dtype_dict, predicted_cols, columns):
64 subtype_map = {
65 dtype.integer: 'int',
66 dtype.float: 'double',
67 dtype.binary: 'bool',
68 dtype.date: 'Date',
69 dtype.datetime: 'Datetime',
70 dtype.binary: 'VARCHAR(500)',
71 dtype.categorical: 'VARCHAR(500)',
72 dtype.tags: 'VARCHAR(500)',
73 dtype.image: 'VARCHAR(500)',
74 dtype.video: 'VARCHAR(500)',
75 dtype.audio: 'VARCHAR(500)',
76 dtype.short_text: 'VARCHAR(500)',
77 dtype.rich_text: 'VARCHAR(500)',
78 dtype.array: 'VARCHAR(500)'
79 }
80
81 column_declaration = []
82 for name in columns:
83 try:
84 col_subtype = dtype_dict[name]
85 new_type = subtype_map[col_subtype]
86 column_declaration.append(f' `{name}` {new_type} ')
87 if name in predicted_cols:
88 column_declaration.append(f' `{name}_original` {new_type} ')
89 except Exception as e:
90 log.error(f'Error: can not determine mysql data type for column {name}: {e}')
91
92 return column_declaration
93
94 def _escape_table_name(self, name):
95 return '`' + name.replace('`', '``') + '`'
96
97 def _query(self, query):
98 con = self._get_connnection()
99 with closing(con) as con:
100 cur = con.cursor(dictionary=True, buffered=True)
101 cur.execute(query)
102 res = True
103 try:
104 res = cur.fetchall()
105 except Exception:
106 pass
107 con.commit()
108
109 return res
110
111 def _get_connect_string(self, table):
112 user = f"{self.config['api']['mysql']['user']}_{self.name}"
113 password = self.config['api']['mysql']['password']
114 host = self.config['api']['mysql']['host']
115 port = self.config['api']['mysql']['port']
116
117 if password is None or password == '':
118 connect = f'mysql://{user}@{host}:{port}/mindsdb/{table}'
119 else:
120 connect = f'mysql://{user}:{password}@{host}:{port}/mindsdb/{table}'
121
122 return connect
123
124 def setup(self):
125 self._query(f'DROP DATABASE IF EXISTS {self.mindsdb_database}')
126 self._query(f'CREATE DATABASE IF NOT EXISTS {self.mindsdb_database}')
127
128 connect = self._get_connect_string('predictors')
129
130 q = f"""
131 CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.predictors (
132 name VARCHAR(500),
133 status VARCHAR(500),
134 accuracy VARCHAR(500),
135 predict VARCHAR(500),
136 select_data_query VARCHAR(500),
137 external_datasource VARCHAR(500),
138 training_options VARCHAR(500),
139 key name_key (name)
140 ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';
141 """
142 self._query(q)
143
144 connect = self._get_connect_string('commands')
145
146 q = f"""
147 CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.commands (
148 command VARCHAR(500),
149 key command_key (command)
150 ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';
151 """
152 self._query(q)
153
154 def register_predictors(self, model_data_arr):
155 for model_meta in model_data_arr:
156 name = model_meta['name']
157 predict = model_meta['predict']
158 if not isinstance(predict, list):
159 predict = [predict]
160 columns_sql = ','.join(self._to_mysql_table(
161 model_meta['dtype_dict'],
162 predict,
163 list(model_meta['dtype_dict'].keys())
164 ))
165 columns_sql += ',`when_data` varchar(500)'
166 columns_sql += ',`select_data_query` varchar(500)'
167 columns_sql += ',`external_datasource` varchar(500)'
168 for col in predict:
169 columns_sql += f',`{col}_confidence` double'
170 if model_meta['dtype_dict'][col] in (dtype.integer, dtype.float):
171 columns_sql += f',`{col}_min` double'
172 columns_sql += f',`{col}_max` double'
173 columns_sql += f',`{col}_explain` varchar(500)'
174
175 connect = self._get_connect_string(name)
176
177 self.unregister_predictor(name)
178 q = f"""
179 CREATE TABLE {self.mindsdb_database}.{self._escape_table_name(name)} (
180 {columns_sql},
181 index when_data_index (when_data),
182 index select_data_query_index (select_data_query),
183 index external_datasource_index (external_datasource)
184 ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';
185 """
186 self._query(q)
187
188 def unregister_predictor(self, name):
189 q = f"""
190 drop table if exists {self.mindsdb_database}.{self._escape_table_name(name)};
191 """
192 self._query(q)
193
194 def get_row_count(self, query):
195 q = f"""
196 SELECT COUNT(*) as count
197 FROM ({query}) as query;"""
198 result = self._query(q)
199 return result[0]['count']
200
201 def get_tables_list(self):
202 q= f"""
203 SHOW TABLES;
204 """
205 result = self._query(q)
206 return result
207
[end of mindsdb/integrations/mysql/mysql.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mindsdb/integrations/mysql/mysql.py b/mindsdb/integrations/mysql/mysql.py
--- a/mindsdb/integrations/mysql/mysql.py
+++ b/mindsdb/integrations/mysql/mysql.py
@@ -197,10 +197,19 @@
FROM ({query}) as query;"""
result = self._query(q)
return result[0]['count']
+
+ def get_columns(self):
+ q = f"""SELECT COLUMN_NAME ,TABLE_NAME
+ FROM INFORMATION_SCHEMA.COLUMNS
+ WHERE TABLE_SCHEMA = database()
+ ORDER BY COLUMN_NAME, TABLE_NAME;"""
+ columns_list = self._query(q)
+ columns = [f"{columns[0]}.{columns[1]}" for columns in columns_list]
+ return columns
def get_tables_list(self):
q= f"""
SHOW TABLES;
"""
result = self._query(q)
- return result
+ return result
\ No newline at end of file
| {"golden_diff": "diff --git a/mindsdb/integrations/mysql/mysql.py b/mindsdb/integrations/mysql/mysql.py\n--- a/mindsdb/integrations/mysql/mysql.py\n+++ b/mindsdb/integrations/mysql/mysql.py\n@@ -197,10 +197,19 @@\n FROM ({query}) as query;\"\"\"\n result = self._query(q)\n return result[0]['count']\n+\n+ def get_columns(self):\n+ q = f\"\"\"SELECT COLUMN_NAME ,TABLE_NAME\n+ FROM INFORMATION_SCHEMA.COLUMNS \n+ WHERE TABLE_SCHEMA = database()\n+ ORDER BY COLUMN_NAME, TABLE_NAME;\"\"\"\n+ columns_list = self._query(q)\n+ columns = [f\"{columns[0]}.{columns[1]}\" for columns in columns_list]\n+ return columns\n \n def get_tables_list(self):\n q= f\"\"\"\n SHOW TABLES;\n \"\"\"\n result = self._query(q)\n- return result\n+ return result\n\\ No newline at end of file\n", "issue": "Add new method to return the columns for MySQL datasources :electric_plug: :1234: \nWhen MindsDB creates a new MySQL datasource we get information for columns by fetching all datasources. The problem here is that if datasource is big it takes a lot of time. We need a new get_columns method to return the columns name per datasource. The PR should include this method inside the MySQL class .\r\n\r\n## Steps :male_detective: :female_detective: \r\n\r\n- Implement in https://github.com/mindsdb/mindsdb/blob/stable/mindsdb/integrations/mysql/mysql.py#L51\r\n- Push to staging branch\r\n\r\n## Additional rewards :1st_place_medal: \r\n\r\nEach code PR brings :three: point for entry into the draw for a :computer: Deep Learning Laptop powered by the NVIDIA RTX 3080 Max-Q GPU or other swag :shirt: :bear: . For more info check out https://mindsdb.com/hacktoberfest/\r\n \r\n\r\n\n", "before_files": [{"content": "import os\nimport shutil\nimport tempfile\n\nfrom contextlib import closing\nimport mysql.connector\n\nfrom lightwood.api import dtype\nfrom mindsdb.integrations.base import Integration\nfrom mindsdb.utilities.log import log\n\n\nclass MySQLConnectionChecker:\n def __init__(self, **kwargs):\n self.host = kwargs.get('host')\n self.port = kwargs.get('port')\n self.user = kwargs.get('user')\n self.password = kwargs.get('password')\n self.ssl = kwargs.get('ssl')\n self.ssl_ca = kwargs.get('ssl_ca')\n self.ssl_cert = kwargs.get('ssl_cert')\n self.ssl_key = kwargs.get('ssl_key')\n\n def _get_connnection(self):\n config = {\n \"host\": self.host,\n \"port\": self.port,\n \"user\": self.user,\n \"password\": self.password\n }\n if self.ssl is True:\n config['client_flags'] = [mysql.connector.constants.ClientFlag.SSL]\n if self.ssl_ca is not None:\n config[\"ssl_ca\"] = self.ssl_ca\n if self.ssl_cert is not None:\n config[\"ssl_cert\"] = self.ssl_cert\n if self.ssl_key is not None:\n config[\"ssl_key\"] = self.ssl_key\n return mysql.connector.connect(**config)\n\n def check_connection(self):\n try:\n con = self._get_connnection()\n with closing(con) as con:\n connected = con.is_connected()\n except Exception:\n connected = False\n return connected\n\n\nclass MySQL(Integration, MySQLConnectionChecker):\n def __init__(self, config, name, db_info):\n super().__init__(config, name)\n self.user = db_info.get('user')\n self.password = db_info.get('password')\n self.host = db_info.get('host')\n self.port = db_info.get('port')\n self.ssl = db_info.get('ssl')\n self.ssl_ca = db_info.get('ssl_ca')\n self.ssl_cert = db_info.get('ssl_cert')\n self.ssl_key = db_info.get('ssl_key')\n\n def _to_mysql_table(self, dtype_dict, predicted_cols, columns):\n subtype_map = {\n dtype.integer: 'int',\n dtype.float: 'double',\n dtype.binary: 'bool',\n 
dtype.date: 'Date',\n dtype.datetime: 'Datetime',\n dtype.binary: 'VARCHAR(500)',\n dtype.categorical: 'VARCHAR(500)',\n dtype.tags: 'VARCHAR(500)',\n dtype.image: 'VARCHAR(500)',\n dtype.video: 'VARCHAR(500)',\n dtype.audio: 'VARCHAR(500)',\n dtype.short_text: 'VARCHAR(500)',\n dtype.rich_text: 'VARCHAR(500)',\n dtype.array: 'VARCHAR(500)'\n }\n\n column_declaration = []\n for name in columns:\n try:\n col_subtype = dtype_dict[name]\n new_type = subtype_map[col_subtype]\n column_declaration.append(f' `{name}` {new_type} ')\n if name in predicted_cols:\n column_declaration.append(f' `{name}_original` {new_type} ')\n except Exception as e:\n log.error(f'Error: can not determine mysql data type for column {name}: {e}')\n\n return column_declaration\n\n def _escape_table_name(self, name):\n return '`' + name.replace('`', '``') + '`'\n\n def _query(self, query):\n con = self._get_connnection()\n with closing(con) as con:\n cur = con.cursor(dictionary=True, buffered=True)\n cur.execute(query)\n res = True\n try:\n res = cur.fetchall()\n except Exception:\n pass\n con.commit()\n\n return res\n\n def _get_connect_string(self, table):\n user = f\"{self.config['api']['mysql']['user']}_{self.name}\"\n password = self.config['api']['mysql']['password']\n host = self.config['api']['mysql']['host']\n port = self.config['api']['mysql']['port']\n\n if password is None or password == '':\n connect = f'mysql://{user}@{host}:{port}/mindsdb/{table}'\n else:\n connect = f'mysql://{user}:{password}@{host}:{port}/mindsdb/{table}'\n\n return connect\n\n def setup(self):\n self._query(f'DROP DATABASE IF EXISTS {self.mindsdb_database}')\n self._query(f'CREATE DATABASE IF NOT EXISTS {self.mindsdb_database}')\n\n connect = self._get_connect_string('predictors')\n\n q = f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.predictors (\n name VARCHAR(500),\n status VARCHAR(500),\n accuracy VARCHAR(500),\n predict VARCHAR(500),\n select_data_query VARCHAR(500),\n external_datasource VARCHAR(500),\n training_options VARCHAR(500),\n key name_key (name)\n ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';\n \"\"\"\n self._query(q)\n\n connect = self._get_connect_string('commands')\n\n q = f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.mindsdb_database}.commands (\n command VARCHAR(500),\n key command_key (command)\n ) ENGINE=FEDERATED CHARSET=utf8 CONNECTION='{connect}';\n \"\"\"\n self._query(q)\n\n def register_predictors(self, model_data_arr):\n for model_meta in model_data_arr:\n name = model_meta['name']\n predict = model_meta['predict']\n if not isinstance(predict, list):\n predict = [predict]\n columns_sql = ','.join(self._to_mysql_table(\n model_meta['dtype_dict'],\n predict,\n list(model_meta['dtype_dict'].keys())\n ))\n columns_sql += ',`when_data` varchar(500)'\n columns_sql += ',`select_data_query` varchar(500)'\n columns_sql += ',`external_datasource` varchar(500)'\n for col in predict:\n columns_sql += f',`{col}_confidence` double'\n if model_meta['dtype_dict'][col] in (dtype.integer, dtype.float):\n columns_sql += f',`{col}_min` double'\n columns_sql += f',`{col}_max` double'\n columns_sql += f',`{col}_explain` varchar(500)'\n\n connect = self._get_connect_string(name)\n\n self.unregister_predictor(name)\n q = f\"\"\"\n CREATE TABLE {self.mindsdb_database}.{self._escape_table_name(name)} (\n {columns_sql},\n index when_data_index (when_data),\n index select_data_query_index (select_data_query),\n index external_datasource_index (external_datasource)\n ) ENGINE=FEDERATED CHARSET=utf8 
CONNECTION='{connect}';\n \"\"\"\n self._query(q)\n\n def unregister_predictor(self, name):\n q = f\"\"\"\n drop table if exists {self.mindsdb_database}.{self._escape_table_name(name)};\n \"\"\"\n self._query(q)\n\n def get_row_count(self, query):\n q = f\"\"\" \n SELECT COUNT(*) as count\n FROM ({query}) as query;\"\"\"\n result = self._query(q)\n return result[0]['count']\n \n def get_tables_list(self):\n q= f\"\"\"\n SHOW TABLES;\n \"\"\"\n result = self._query(q)\n return result\n", "path": "mindsdb/integrations/mysql/mysql.py"}]} | 2,872 | 219 |
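The record above patches MindsDB's `MySQL` integration to read column metadata from `INFORMATION_SCHEMA.COLUMNS` instead of fetching whole datasources. A minimal standalone sketch of that query pattern, using the `mysql.connector` package already imported in the record's source file, is shown below; the host, credentials, and database name are hypothetical placeholders, and the "column.table" formatting mirrors the strings built in the golden diff.

```python
# Sketch only: illustrates the INFORMATION_SCHEMA query added by the golden diff.
# Connection details below are hypothetical placeholders.
import mysql.connector

con = mysql.connector.connect(
    host="127.0.0.1", port=3306,
    user="root", password="secret",
    database="example_db",
)
cur = con.cursor()
cur.execute(
    "SELECT COLUMN_NAME, TABLE_NAME "
    "FROM INFORMATION_SCHEMA.COLUMNS "
    "WHERE TABLE_SCHEMA = DATABASE() "
    "ORDER BY COLUMN_NAME, TABLE_NAME;"
)
# Format each row as "column.table", matching the strings built in the patch.
columns = [f"{column_name}.{table_name}" for column_name, table_name in cur.fetchall()]
con.close()
print(columns)  # e.g. ['created_at.orders', 'id.orders', ...]
```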
gh_patches_debug_13201 | rasdani/github-patches | git_diff | mesonbuild__meson-2541 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Linker error while building gtkdoc
I'm trying to make GStreamer modules build with meson inside Cerbero. When it builds gtkdoc I get this error:
```
Error in gtkdoc helper script:
'gtkdoc-scangobj' failed with status 1
ld: unrecognized option '-Wl,-rpath,/home/xclaesse/programmation/cerbero/build/sources/linux_x86_64/gst-transcoder-1.9/cerbero-build-dir/'
ld: use the --help option for usage information
Linking of scanner failed:
```
The exact command line that produce that error is:
```
gtkdoc-scangobj --types=/home/xclaesse/programmation/cerbero/build/sources/linux_x86_64/gst-transcoder-1.9/cerbero-build-dir/docs/libs/gst-transcoder.types --module=gst-transcoder --cflags="-I/home/xclaesse/programmation/cerbero/build/sources/linux_x86_64/gst-transcoder-1.9/cerbero-build-dir/gst-libs -I/home/xclaesse/programmation/cerbero/build/sources/linux_x86_64/gst-transcoder-1.9/gst-libs -I/home/xclaesse/programmation/cerbero/build/dist/linux_x86_64/include/glib-2.0 -I/home/xclaesse/programmation/cerbero/build/dist/linux_x86_64/lib/glib-2.0/include -pthread -I/home/xclaesse/programmation/cerbero/build/dist/linux_x86_64/include/gstreamer-1.0" --ldflags="-L/home/xclaesse/programmation/cerbero/build/sources/linux_x86_64/gst-transcoder-1.9/cerbero-build-dir/ -Wl,-rpath,/home/xclaesse/programmation/cerbero/build/sources/linux_x86_64/gst-transcoder-1.9/cerbero-build-dir/ -lgsttranscoder-1.0 -L/home/xclaesse/programmation/cerbero/build/dist/linux_x86_64/lib -lglib-2.0 -lgobject-2.0 -lgstreamer-1.0 -lgstpbutils-1.0"
```
GStreamer's gtkdoc seems to build fine with meson when using gst-build, so I don't understand what the difference is when it runs inside the cerbero env.
Any clue?
</issue>
<code>
[start of mesonbuild/scripts/gtkdochelper.py]
1 # Copyright 2015-2016 The Meson development team
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import sys, os
16 import subprocess
17 import shutil
18 import argparse
19 from ..mesonlib import MesonException, Popen_safe
20 from . import destdir_join
21
22 parser = argparse.ArgumentParser()
23
24 parser.add_argument('--sourcedir', dest='sourcedir')
25 parser.add_argument('--builddir', dest='builddir')
26 parser.add_argument('--subdir', dest='subdir')
27 parser.add_argument('--headerdirs', dest='headerdirs')
28 parser.add_argument('--mainfile', dest='mainfile')
29 parser.add_argument('--modulename', dest='modulename')
30 parser.add_argument('--htmlargs', dest='htmlargs', default='')
31 parser.add_argument('--scanargs', dest='scanargs', default='')
32 parser.add_argument('--scanobjsargs', dest='scanobjsargs', default='')
33 parser.add_argument('--gobjects-types-file', dest='gobject_typesfile', default='')
34 parser.add_argument('--fixxrefargs', dest='fixxrefargs', default='')
35 parser.add_argument('--mkdbargs', dest='mkdbargs', default='')
36 parser.add_argument('--ld', dest='ld', default='')
37 parser.add_argument('--cc', dest='cc', default='')
38 parser.add_argument('--ldflags', dest='ldflags', default='')
39 parser.add_argument('--cflags', dest='cflags', default='')
40 parser.add_argument('--content-files', dest='content_files', default='')
41 parser.add_argument('--expand-content-files', dest='expand_content_files', default='')
42 parser.add_argument('--html-assets', dest='html_assets', default='')
43 parser.add_argument('--ignore-headers', dest='ignore_headers', default='')
44 parser.add_argument('--namespace', dest='namespace', default='')
45 parser.add_argument('--mode', dest='mode', default='')
46 parser.add_argument('--installdir', dest='install_dir')
47
48 def gtkdoc_run_check(cmd, cwd):
49 # Put stderr into stdout since we want to print it out anyway.
50 # This preserves the order of messages.
51 p, out = Popen_safe(cmd, cwd=cwd, stderr=subprocess.STDOUT)[0:2]
52 if p.returncode != 0:
53 err_msg = ["{!r} failed with status {:d}".format(cmd[0], p.returncode)]
54 if out:
55 err_msg.append(out)
56 raise MesonException('\n'.join(err_msg))
57
58 def build_gtkdoc(source_root, build_root, doc_subdir, src_subdirs,
59 main_file, module,
60 html_args, scan_args, fixxref_args, mkdb_args,
61 gobject_typesfile, scanobjs_args, ld, cc, ldflags, cflags,
62 html_assets, content_files, ignore_headers, namespace,
63 expand_content_files, mode):
64 print("Building documentation for %s" % module)
65
66 src_dir_args = []
67 for src_dir in src_subdirs:
68 if not os.path.isabs(src_dir):
69 dirs = [os.path.join(source_root, src_dir),
70 os.path.join(build_root, src_dir)]
71 else:
72 dirs = [src_dir]
73 src_dir_args += ['--source-dir=' + d for d in dirs]
74
75 doc_src = os.path.join(source_root, doc_subdir)
76 abs_out = os.path.join(build_root, doc_subdir)
77 htmldir = os.path.join(abs_out, 'html')
78
79 content_files += [main_file]
80 sections = os.path.join(doc_src, module + "-sections.txt")
81 if os.path.exists(sections):
82 content_files.append(sections)
83
84 overrides = os.path.join(doc_src, module + "-overrides.txt")
85 if os.path.exists(overrides):
86 content_files.append(overrides)
87
88 # Copy files to build directory
89 for f in content_files:
90 f_abs = os.path.join(doc_src, f)
91 shutil.copyfile(f_abs, os.path.join(
92 abs_out, os.path.basename(f_abs)))
93
94 shutil.rmtree(htmldir, ignore_errors=True)
95 try:
96 os.mkdir(htmldir)
97 except Exception:
98 pass
99
100 for f in html_assets:
101 f_abs = os.path.join(doc_src, f)
102 shutil.copyfile(f_abs, os.path.join(htmldir, os.path.basename(f_abs)))
103
104 scan_cmd = ['gtkdoc-scan', '--module=' + module] + src_dir_args
105 if ignore_headers:
106 scan_cmd.append('--ignore-headers=' + ' '.join(ignore_headers))
107 # Add user-specified arguments
108 scan_cmd += scan_args
109 gtkdoc_run_check(scan_cmd, abs_out)
110
111 if gobject_typesfile:
112 scanobjs_cmd = ['gtkdoc-scangobj'] + scanobjs_args + ['--types=' + gobject_typesfile,
113 '--module=' + module,
114 '--cflags=' + cflags,
115 '--ldflags=' + ldflags]
116
117 gtkdoc_run_check(scanobjs_cmd, abs_out)
118
119 # Make docbook files
120 if mode == 'auto':
121 # Guessing is probably a poor idea but these keeps compat
122 # with previous behavior
123 if main_file.endswith('sgml'):
124 modeflag = '--sgml-mode'
125 else:
126 modeflag = '--xml-mode'
127 elif mode == 'xml':
128 modeflag = '--xml-mode'
129 elif mode == 'sgml':
130 modeflag = '--sgml-mode'
131 else: # none
132 modeflag = None
133
134 mkdb_cmd = ['gtkdoc-mkdb',
135 '--module=' + module,
136 '--output-format=xml',
137 '--expand-content-files=' + ' '.join(expand_content_files),
138 ] + src_dir_args
139 if namespace:
140 mkdb_cmd.append('--name-space=' + namespace)
141 if modeflag:
142 mkdb_cmd.append(modeflag)
143 if len(main_file) > 0:
144 # Yes, this is the flag even if the file is in xml.
145 mkdb_cmd.append('--main-sgml-file=' + main_file)
146 # Add user-specified arguments
147 mkdb_cmd += mkdb_args
148 gtkdoc_run_check(mkdb_cmd, abs_out)
149
150 # Make HTML documentation
151 mkhtml_cmd = ['gtkdoc-mkhtml',
152 '--path=' + ':'.join((doc_src, abs_out)),
153 module,
154 ] + html_args
155 if len(main_file) > 0:
156 mkhtml_cmd.append('../' + main_file)
157 else:
158 mkhtml_cmd.append('%s-docs.xml' % module)
159 # html gen must be run in the HTML dir
160 gtkdoc_run_check(mkhtml_cmd, os.path.join(abs_out, 'html'))
161
162 # Fix cross-references in HTML files
163 fixref_cmd = ['gtkdoc-fixxref',
164 '--module=' + module,
165 '--module-dir=html'] + fixxref_args
166 gtkdoc_run_check(fixref_cmd, abs_out)
167
168 def install_gtkdoc(build_root, doc_subdir, install_prefix, datadir, module):
169 source = os.path.join(build_root, doc_subdir, 'html')
170 final_destination = os.path.join(install_prefix, datadir, module)
171 shutil.rmtree(final_destination, ignore_errors=True)
172 shutil.copytree(source, final_destination)
173
174 def run(args):
175 options = parser.parse_args(args)
176 if len(options.htmlargs) > 0:
177 htmlargs = options.htmlargs.split('@@')
178 else:
179 htmlargs = []
180 if len(options.scanargs) > 0:
181 scanargs = options.scanargs.split('@@')
182 else:
183 scanargs = []
184 if len(options.scanobjsargs) > 0:
185 scanobjsargs = options.scanobjsargs.split('@@')
186 else:
187 scanobjsargs = []
188 if len(options.fixxrefargs) > 0:
189 fixxrefargs = options.fixxrefargs.split('@@')
190 else:
191 fixxrefargs = []
192 if len(options.mkdbargs) > 0:
193 mkdbargs = options.mkdbargs.split('@@')
194 else:
195 mkdbargs = []
196 build_gtkdoc(
197 options.sourcedir,
198 options.builddir,
199 options.subdir,
200 options.headerdirs.split('@@'),
201 options.mainfile,
202 options.modulename,
203 htmlargs,
204 scanargs,
205 fixxrefargs,
206 mkdbargs,
207 options.gobject_typesfile,
208 scanobjsargs,
209 options.ld,
210 options.cc,
211 options.ldflags,
212 options.cflags,
213 options.html_assets.split('@@') if options.html_assets else [],
214 options.content_files.split('@@') if options.content_files else [],
215 options.ignore_headers.split('@@') if options.ignore_headers else [],
216 options.namespace,
217 options.expand_content_files.split('@@') if options.expand_content_files else [],
218 options.mode)
219
220 if 'MESON_INSTALL_PREFIX' in os.environ:
221 destdir = os.environ.get('DESTDIR', '')
222 install_prefix = destdir_join(destdir, os.environ['MESON_INSTALL_PREFIX'])
223 install_dir = options.install_dir if options.install_dir else options.modulename
224 if os.path.isabs(install_dir):
225 install_dir = destdir_join(destdir, install_dir)
226 install_gtkdoc(options.builddir,
227 options.subdir,
228 install_prefix,
229 'share/gtk-doc/html',
230 install_dir)
231 return 0
232
233 if __name__ == '__main__':
234 sys.exit(run(sys.argv[1:]))
235
[end of mesonbuild/scripts/gtkdochelper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mesonbuild/scripts/gtkdochelper.py b/mesonbuild/scripts/gtkdochelper.py
--- a/mesonbuild/scripts/gtkdochelper.py
+++ b/mesonbuild/scripts/gtkdochelper.py
@@ -112,7 +112,8 @@
scanobjs_cmd = ['gtkdoc-scangobj'] + scanobjs_args + ['--types=' + gobject_typesfile,
'--module=' + module,
'--cflags=' + cflags,
- '--ldflags=' + ldflags]
+ '--ldflags=' + ldflags,
+ '--ld=' + ld]
gtkdoc_run_check(scanobjs_cmd, abs_out)
| {"golden_diff": "diff --git a/mesonbuild/scripts/gtkdochelper.py b/mesonbuild/scripts/gtkdochelper.py\n--- a/mesonbuild/scripts/gtkdochelper.py\n+++ b/mesonbuild/scripts/gtkdochelper.py\n@@ -112,7 +112,8 @@\n scanobjs_cmd = ['gtkdoc-scangobj'] + scanobjs_args + ['--types=' + gobject_typesfile,\n '--module=' + module,\n '--cflags=' + cflags,\n- '--ldflags=' + ldflags]\n+ '--ldflags=' + ldflags,\n+ '--ld=' + ld]\n \n gtkdoc_run_check(scanobjs_cmd, abs_out)\n", "issue": "Linker error while building gtkdoc\nI'm trying to make GStreamer modules build with meson inside Cerbero. When it builds gtkdoc I get this error:\r\n\r\n```\r\nError in gtkdoc helper script:\r\n'gtkdoc-scangobj' failed with status 1\r\nld: unrecognized option '-Wl,-rpath,/home/xclaesse/programmation/cerbero/build/sources/linux_x86_64/gst-transcoder-1.9/cerbero-build-dir/'\r\nld: use the --help option for usage information\r\nLinking of scanner failed: \r\n```\r\n\r\nThe exact command line that produce that error is:\r\n\r\n```\r\ngtkdoc-scangobj --types=/home/xclaesse/programmation/cerbero/build/sources/linux_x86_64/gst-transcoder-1.9/cerbero-build-dir/docs/libs/gst-transcoder.types --module=gst-transcoder --cflags=\"-I/home/xclaesse/programmation/cerbero/build/sources/linux_x86_64/gst-transcoder-1.9/cerbero-build-dir/gst-libs -I/home/xclaesse/programmation/cerbero/build/sources/linux_x86_64/gst-transcoder-1.9/gst-libs -I/home/xclaesse/programmation/cerbero/build/dist/linux_x86_64/include/glib-2.0 -I/home/xclaesse/programmation/cerbero/build/dist/linux_x86_64/lib/glib-2.0/include -pthread -I/home/xclaesse/programmation/cerbero/build/dist/linux_x86_64/include/gstreamer-1.0\" --ldflags=\"-L/home/xclaesse/programmation/cerbero/build/sources/linux_x86_64/gst-transcoder-1.9/cerbero-build-dir/ -Wl,-rpath,/home/xclaesse/programmation/cerbero/build/sources/linux_x86_64/gst-transcoder-1.9/cerbero-build-dir/ -lgsttranscoder-1.0 -L/home/xclaesse/programmation/cerbero/build/dist/linux_x86_64/lib -lglib-2.0 -lgobject-2.0 -lgstreamer-1.0 -lgstpbutils-1.0\"\r\n```\r\n\r\nGStreamer's gtkdoc seems to build fine with meson when using gst-build, so I don't understand what's the difference when ran inside cerbero env.\r\n\r\nAny clue?\n", "before_files": [{"content": "# Copyright 2015-2016 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys, os\nimport subprocess\nimport shutil\nimport argparse\nfrom ..mesonlib import MesonException, Popen_safe\nfrom . 
import destdir_join\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--sourcedir', dest='sourcedir')\nparser.add_argument('--builddir', dest='builddir')\nparser.add_argument('--subdir', dest='subdir')\nparser.add_argument('--headerdirs', dest='headerdirs')\nparser.add_argument('--mainfile', dest='mainfile')\nparser.add_argument('--modulename', dest='modulename')\nparser.add_argument('--htmlargs', dest='htmlargs', default='')\nparser.add_argument('--scanargs', dest='scanargs', default='')\nparser.add_argument('--scanobjsargs', dest='scanobjsargs', default='')\nparser.add_argument('--gobjects-types-file', dest='gobject_typesfile', default='')\nparser.add_argument('--fixxrefargs', dest='fixxrefargs', default='')\nparser.add_argument('--mkdbargs', dest='mkdbargs', default='')\nparser.add_argument('--ld', dest='ld', default='')\nparser.add_argument('--cc', dest='cc', default='')\nparser.add_argument('--ldflags', dest='ldflags', default='')\nparser.add_argument('--cflags', dest='cflags', default='')\nparser.add_argument('--content-files', dest='content_files', default='')\nparser.add_argument('--expand-content-files', dest='expand_content_files', default='')\nparser.add_argument('--html-assets', dest='html_assets', default='')\nparser.add_argument('--ignore-headers', dest='ignore_headers', default='')\nparser.add_argument('--namespace', dest='namespace', default='')\nparser.add_argument('--mode', dest='mode', default='')\nparser.add_argument('--installdir', dest='install_dir')\n\ndef gtkdoc_run_check(cmd, cwd):\n # Put stderr into stdout since we want to print it out anyway.\n # This preserves the order of messages.\n p, out = Popen_safe(cmd, cwd=cwd, stderr=subprocess.STDOUT)[0:2]\n if p.returncode != 0:\n err_msg = [\"{!r} failed with status {:d}\".format(cmd[0], p.returncode)]\n if out:\n err_msg.append(out)\n raise MesonException('\\n'.join(err_msg))\n\ndef build_gtkdoc(source_root, build_root, doc_subdir, src_subdirs,\n main_file, module,\n html_args, scan_args, fixxref_args, mkdb_args,\n gobject_typesfile, scanobjs_args, ld, cc, ldflags, cflags,\n html_assets, content_files, ignore_headers, namespace,\n expand_content_files, mode):\n print(\"Building documentation for %s\" % module)\n\n src_dir_args = []\n for src_dir in src_subdirs:\n if not os.path.isabs(src_dir):\n dirs = [os.path.join(source_root, src_dir),\n os.path.join(build_root, src_dir)]\n else:\n dirs = [src_dir]\n src_dir_args += ['--source-dir=' + d for d in dirs]\n\n doc_src = os.path.join(source_root, doc_subdir)\n abs_out = os.path.join(build_root, doc_subdir)\n htmldir = os.path.join(abs_out, 'html')\n\n content_files += [main_file]\n sections = os.path.join(doc_src, module + \"-sections.txt\")\n if os.path.exists(sections):\n content_files.append(sections)\n\n overrides = os.path.join(doc_src, module + \"-overrides.txt\")\n if os.path.exists(overrides):\n content_files.append(overrides)\n\n # Copy files to build directory\n for f in content_files:\n f_abs = os.path.join(doc_src, f)\n shutil.copyfile(f_abs, os.path.join(\n abs_out, os.path.basename(f_abs)))\n\n shutil.rmtree(htmldir, ignore_errors=True)\n try:\n os.mkdir(htmldir)\n except Exception:\n pass\n\n for f in html_assets:\n f_abs = os.path.join(doc_src, f)\n shutil.copyfile(f_abs, os.path.join(htmldir, os.path.basename(f_abs)))\n\n scan_cmd = ['gtkdoc-scan', '--module=' + module] + src_dir_args\n if ignore_headers:\n scan_cmd.append('--ignore-headers=' + ' '.join(ignore_headers))\n # Add user-specified arguments\n scan_cmd += scan_args\n 
gtkdoc_run_check(scan_cmd, abs_out)\n\n if gobject_typesfile:\n scanobjs_cmd = ['gtkdoc-scangobj'] + scanobjs_args + ['--types=' + gobject_typesfile,\n '--module=' + module,\n '--cflags=' + cflags,\n '--ldflags=' + ldflags]\n\n gtkdoc_run_check(scanobjs_cmd, abs_out)\n\n # Make docbook files\n if mode == 'auto':\n # Guessing is probably a poor idea but these keeps compat\n # with previous behavior\n if main_file.endswith('sgml'):\n modeflag = '--sgml-mode'\n else:\n modeflag = '--xml-mode'\n elif mode == 'xml':\n modeflag = '--xml-mode'\n elif mode == 'sgml':\n modeflag = '--sgml-mode'\n else: # none\n modeflag = None\n\n mkdb_cmd = ['gtkdoc-mkdb',\n '--module=' + module,\n '--output-format=xml',\n '--expand-content-files=' + ' '.join(expand_content_files),\n ] + src_dir_args\n if namespace:\n mkdb_cmd.append('--name-space=' + namespace)\n if modeflag:\n mkdb_cmd.append(modeflag)\n if len(main_file) > 0:\n # Yes, this is the flag even if the file is in xml.\n mkdb_cmd.append('--main-sgml-file=' + main_file)\n # Add user-specified arguments\n mkdb_cmd += mkdb_args\n gtkdoc_run_check(mkdb_cmd, abs_out)\n\n # Make HTML documentation\n mkhtml_cmd = ['gtkdoc-mkhtml',\n '--path=' + ':'.join((doc_src, abs_out)),\n module,\n ] + html_args\n if len(main_file) > 0:\n mkhtml_cmd.append('../' + main_file)\n else:\n mkhtml_cmd.append('%s-docs.xml' % module)\n # html gen must be run in the HTML dir\n gtkdoc_run_check(mkhtml_cmd, os.path.join(abs_out, 'html'))\n\n # Fix cross-references in HTML files\n fixref_cmd = ['gtkdoc-fixxref',\n '--module=' + module,\n '--module-dir=html'] + fixxref_args\n gtkdoc_run_check(fixref_cmd, abs_out)\n\ndef install_gtkdoc(build_root, doc_subdir, install_prefix, datadir, module):\n source = os.path.join(build_root, doc_subdir, 'html')\n final_destination = os.path.join(install_prefix, datadir, module)\n shutil.rmtree(final_destination, ignore_errors=True)\n shutil.copytree(source, final_destination)\n\ndef run(args):\n options = parser.parse_args(args)\n if len(options.htmlargs) > 0:\n htmlargs = options.htmlargs.split('@@')\n else:\n htmlargs = []\n if len(options.scanargs) > 0:\n scanargs = options.scanargs.split('@@')\n else:\n scanargs = []\n if len(options.scanobjsargs) > 0:\n scanobjsargs = options.scanobjsargs.split('@@')\n else:\n scanobjsargs = []\n if len(options.fixxrefargs) > 0:\n fixxrefargs = options.fixxrefargs.split('@@')\n else:\n fixxrefargs = []\n if len(options.mkdbargs) > 0:\n mkdbargs = options.mkdbargs.split('@@')\n else:\n mkdbargs = []\n build_gtkdoc(\n options.sourcedir,\n options.builddir,\n options.subdir,\n options.headerdirs.split('@@'),\n options.mainfile,\n options.modulename,\n htmlargs,\n scanargs,\n fixxrefargs,\n mkdbargs,\n options.gobject_typesfile,\n scanobjsargs,\n options.ld,\n options.cc,\n options.ldflags,\n options.cflags,\n options.html_assets.split('@@') if options.html_assets else [],\n options.content_files.split('@@') if options.content_files else [],\n options.ignore_headers.split('@@') if options.ignore_headers else [],\n options.namespace,\n options.expand_content_files.split('@@') if options.expand_content_files else [],\n options.mode)\n\n if 'MESON_INSTALL_PREFIX' in os.environ:\n destdir = os.environ.get('DESTDIR', '')\n install_prefix = destdir_join(destdir, os.environ['MESON_INSTALL_PREFIX'])\n install_dir = options.install_dir if options.install_dir else options.modulename\n if os.path.isabs(install_dir):\n install_dir = destdir_join(destdir, install_dir)\n install_gtkdoc(options.builddir,\n options.subdir,\n 
install_prefix,\n 'share/gtk-doc/html',\n install_dir)\n return 0\n\nif __name__ == '__main__':\n sys.exit(run(sys.argv[1:]))\n", "path": "mesonbuild/scripts/gtkdochelper.py"}]} | 3,823 | 146 |
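In the record above, the golden diff addresses the linker failure by forwarding a linker driver to `gtkdoc-scangobj` through an extra `--ld=` argument, so the `-Wl,-rpath,...` flags passed in `--ldflags` are no longer handed to plain `ld` (the failure shown in the issue). A small sketch of how the patched helper assembles that command is below; the module name, paths, and flag values are placeholders loosely based on the issue, and `cc` stands in for whatever driver Meson supplies via `--ld`.

```python
# Sketch of the patched gtkdoc-scangobj invocation (placeholder values only).
module = "gst-transcoder"
gobject_typesfile = "docs/libs/gst-transcoder.types"
cflags = "-I/path/to/includes"
ldflags = "-L/path/to/libs -Wl,-rpath,/path/to/libs -lgsttranscoder-1.0"
ld = "cc"            # a compiler driver that understands -Wl,... options
scanobjs_args = []

scanobjs_cmd = ["gtkdoc-scangobj"] + scanobjs_args + [
    "--types=" + gobject_typesfile,
    "--module=" + module,
    "--cflags=" + cflags,
    "--ldflags=" + ldflags,
    "--ld=" + ld,    # the argument added by the golden diff
]
print(" ".join(scanobjs_cmd))
```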
gh_patches_debug_18390 | rasdani/github-patches | git_diff | tornadoweb__tornado-2783 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Should the Blog example use bcrypt's checkpw instead of ==
bcrypt 3.1.0 added `checkpw`, which (I believe) is now the recommended way to compare a password against the stored hash.
- `checkpw` code [here](https://github.com/pyca/bcrypt/blob/master/src/bcrypt/__init__.py#L97)
- tornado's blog example using `==` [here](https://github.com/tornadoweb/tornado/blob/stable/demos/blog/blog.py#L211)
The current example looks like this:
``` python
@gen.coroutine
def post(self):
# [...]
hashed_password = yield executor.submit(
bcrypt.hashpw,
tornado.escape.utf8(self.get_argument("password")),
tornado.escape.utf8(author.hashed_password))
if hashed_password == author.hashed_password:
self.set_secure_cookie("blogdemo_user", str(author.id))
self.redirect(self.get_argument("next", "/"))
else:
self.render("login.html", error="incorrect password")
```
But maybe it should look like this?
``` python
@gen.coroutine
def post(self):
# [...]
password_equal = yield executor.submit(
bcrypt.checkpw,
tornado.escape.utf8(self.get_argument("password")),
tornado.escape.utf8(author.hashed_password))
if password_equal:
self.set_secure_cookie("blogdemo_user", str(author.id))
self.redirect(self.get_argument("next", "/"))
else:
self.render("login.html", error="incorrect password")
```
</issue>
<code>
[start of demos/blog/blog.py]
1 #!/usr/bin/env python3
2 #
3 # Copyright 2009 Facebook
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License"); you may
6 # not use this file except in compliance with the License. You may obtain
7 # a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14 # License for the specific language governing permissions and limitations
15 # under the License.
16
17 import aiopg
18 import bcrypt
19 import markdown
20 import os.path
21 import psycopg2
22 import re
23 import tornado.escape
24 import tornado.httpserver
25 import tornado.ioloop
26 import tornado.locks
27 import tornado.options
28 import tornado.web
29 import unicodedata
30
31 from tornado.options import define, options
32
33 define("port", default=8888, help="run on the given port", type=int)
34 define("db_host", default="127.0.0.1", help="blog database host")
35 define("db_port", default=5432, help="blog database port")
36 define("db_database", default="blog", help="blog database name")
37 define("db_user", default="blog", help="blog database user")
38 define("db_password", default="blog", help="blog database password")
39
40
41 class NoResultError(Exception):
42 pass
43
44
45 async def maybe_create_tables(db):
46 try:
47 with (await db.cursor()) as cur:
48 await cur.execute("SELECT COUNT(*) FROM entries LIMIT 1")
49 await cur.fetchone()
50 except psycopg2.ProgrammingError:
51 with open("schema.sql") as f:
52 schema = f.read()
53 with (await db.cursor()) as cur:
54 await cur.execute(schema)
55
56
57 class Application(tornado.web.Application):
58 def __init__(self, db):
59 self.db = db
60 handlers = [
61 (r"/", HomeHandler),
62 (r"/archive", ArchiveHandler),
63 (r"/feed", FeedHandler),
64 (r"/entry/([^/]+)", EntryHandler),
65 (r"/compose", ComposeHandler),
66 (r"/auth/create", AuthCreateHandler),
67 (r"/auth/login", AuthLoginHandler),
68 (r"/auth/logout", AuthLogoutHandler),
69 ]
70 settings = dict(
71 blog_title=u"Tornado Blog",
72 template_path=os.path.join(os.path.dirname(__file__), "templates"),
73 static_path=os.path.join(os.path.dirname(__file__), "static"),
74 ui_modules={"Entry": EntryModule},
75 xsrf_cookies=True,
76 cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
77 login_url="/auth/login",
78 debug=True,
79 )
80 super(Application, self).__init__(handlers, **settings)
81
82
83 class BaseHandler(tornado.web.RequestHandler):
84 def row_to_obj(self, row, cur):
85 """Convert a SQL row to an object supporting dict and attribute access."""
86 obj = tornado.util.ObjectDict()
87 for val, desc in zip(row, cur.description):
88 obj[desc.name] = val
89 return obj
90
91 async def execute(self, stmt, *args):
92 """Execute a SQL statement.
93
94 Must be called with ``await self.execute(...)``
95 """
96 with (await self.application.db.cursor()) as cur:
97 await cur.execute(stmt, args)
98
99 async def query(self, stmt, *args):
100 """Query for a list of results.
101
102 Typical usage::
103
104 results = await self.query(...)
105
106 Or::
107
108 for row in await self.query(...)
109 """
110 with (await self.application.db.cursor()) as cur:
111 await cur.execute(stmt, args)
112 return [self.row_to_obj(row, cur) for row in await cur.fetchall()]
113
114 async def queryone(self, stmt, *args):
115 """Query for exactly one result.
116
117 Raises NoResultError if there are no results, or ValueError if
118 there are more than one.
119 """
120 results = await self.query(stmt, *args)
121 if len(results) == 0:
122 raise NoResultError()
123 elif len(results) > 1:
124 raise ValueError("Expected 1 result, got %d" % len(results))
125 return results[0]
126
127 async def prepare(self):
128 # get_current_user cannot be a coroutine, so set
129 # self.current_user in prepare instead.
130 user_id = self.get_secure_cookie("blogdemo_user")
131 if user_id:
132 self.current_user = await self.queryone(
133 "SELECT * FROM authors WHERE id = %s", int(user_id)
134 )
135
136 async def any_author_exists(self):
137 return bool(await self.query("SELECT * FROM authors LIMIT 1"))
138
139
140 class HomeHandler(BaseHandler):
141 async def get(self):
142 entries = await self.query(
143 "SELECT * FROM entries ORDER BY published DESC LIMIT 5"
144 )
145 if not entries:
146 self.redirect("/compose")
147 return
148 self.render("home.html", entries=entries)
149
150
151 class EntryHandler(BaseHandler):
152 async def get(self, slug):
153 entry = await self.queryone("SELECT * FROM entries WHERE slug = %s", slug)
154 if not entry:
155 raise tornado.web.HTTPError(404)
156 self.render("entry.html", entry=entry)
157
158
159 class ArchiveHandler(BaseHandler):
160 async def get(self):
161 entries = await self.query("SELECT * FROM entries ORDER BY published DESC")
162 self.render("archive.html", entries=entries)
163
164
165 class FeedHandler(BaseHandler):
166 async def get(self):
167 entries = await self.query(
168 "SELECT * FROM entries ORDER BY published DESC LIMIT 10"
169 )
170 self.set_header("Content-Type", "application/atom+xml")
171 self.render("feed.xml", entries=entries)
172
173
174 class ComposeHandler(BaseHandler):
175 @tornado.web.authenticated
176 async def get(self):
177 id = self.get_argument("id", None)
178 entry = None
179 if id:
180 entry = await self.queryone("SELECT * FROM entries WHERE id = %s", int(id))
181 self.render("compose.html", entry=entry)
182
183 @tornado.web.authenticated
184 async def post(self):
185 id = self.get_argument("id", None)
186 title = self.get_argument("title")
187 text = self.get_argument("markdown")
188 html = markdown.markdown(text)
189 if id:
190 try:
191 entry = await self.queryone(
192 "SELECT * FROM entries WHERE id = %s", int(id)
193 )
194 except NoResultError:
195 raise tornado.web.HTTPError(404)
196 slug = entry.slug
197 await self.execute(
198 "UPDATE entries SET title = %s, markdown = %s, html = %s "
199 "WHERE id = %s",
200 title,
201 text,
202 html,
203 int(id),
204 )
205 else:
206 slug = unicodedata.normalize("NFKD", title)
207 slug = re.sub(r"[^\w]+", " ", slug)
208 slug = "-".join(slug.lower().strip().split())
209 slug = slug.encode("ascii", "ignore").decode("ascii")
210 if not slug:
211 slug = "entry"
212 while True:
213 e = await self.query("SELECT * FROM entries WHERE slug = %s", slug)
214 if not e:
215 break
216 slug += "-2"
217 await self.execute(
218 "INSERT INTO entries (author_id,title,slug,markdown,html,published,updated)"
219 "VALUES (%s,%s,%s,%s,%s,CURRENT_TIMESTAMP,CURRENT_TIMESTAMP)",
220 self.current_user.id,
221 title,
222 slug,
223 text,
224 html,
225 )
226 self.redirect("/entry/" + slug)
227
228
229 class AuthCreateHandler(BaseHandler):
230 def get(self):
231 self.render("create_author.html")
232
233 async def post(self):
234 if await self.any_author_exists():
235 raise tornado.web.HTTPError(400, "author already created")
236 hashed_password = await tornado.ioloop.IOLoop.current().run_in_executor(
237 None,
238 bcrypt.hashpw,
239 tornado.escape.utf8(self.get_argument("password")),
240 bcrypt.gensalt(),
241 )
242 author = await self.queryone(
243 "INSERT INTO authors (email, name, hashed_password) "
244 "VALUES (%s, %s, %s) RETURNING id",
245 self.get_argument("email"),
246 self.get_argument("name"),
247 tornado.escape.to_unicode(hashed_password),
248 )
249 self.set_secure_cookie("blogdemo_user", str(author.id))
250 self.redirect(self.get_argument("next", "/"))
251
252
253 class AuthLoginHandler(BaseHandler):
254 async def get(self):
255 # If there are no authors, redirect to the account creation page.
256 if not await self.any_author_exists():
257 self.redirect("/auth/create")
258 else:
259 self.render("login.html", error=None)
260
261 async def post(self):
262 try:
263 author = await self.queryone(
264 "SELECT * FROM authors WHERE email = %s", self.get_argument("email")
265 )
266 except NoResultError:
267 self.render("login.html", error="email not found")
268 return
269 hashed_password = await tornado.ioloop.IOLoop.current().run_in_executor(
270 None,
271 bcrypt.hashpw,
272 tornado.escape.utf8(self.get_argument("password")),
273 tornado.escape.utf8(author.hashed_password),
274 )
275 hashed_password = tornado.escape.to_unicode(hashed_password)
276 if hashed_password == author.hashed_password:
277 self.set_secure_cookie("blogdemo_user", str(author.id))
278 self.redirect(self.get_argument("next", "/"))
279 else:
280 self.render("login.html", error="incorrect password")
281
282
283 class AuthLogoutHandler(BaseHandler):
284 def get(self):
285 self.clear_cookie("blogdemo_user")
286 self.redirect(self.get_argument("next", "/"))
287
288
289 class EntryModule(tornado.web.UIModule):
290 def render(self, entry):
291 return self.render_string("modules/entry.html", entry=entry)
292
293
294 async def main():
295 tornado.options.parse_command_line()
296
297 # Create the global connection pool.
298 async with aiopg.create_pool(
299 host=options.db_host,
300 port=options.db_port,
301 user=options.db_user,
302 password=options.db_password,
303 dbname=options.db_database,
304 ) as db:
305 await maybe_create_tables(db)
306 app = Application(db)
307 app.listen(options.port)
308
309 # In this demo the server will simply run until interrupted
310 # with Ctrl-C, but if you want to shut down more gracefully,
311 # call shutdown_event.set().
312 shutdown_event = tornado.locks.Event()
313 await shutdown_event.wait()
314
315
316 if __name__ == "__main__":
317 tornado.ioloop.IOLoop.current().run_sync(main)
318
[end of demos/blog/blog.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/demos/blog/blog.py b/demos/blog/blog.py
--- a/demos/blog/blog.py
+++ b/demos/blog/blog.py
@@ -266,14 +266,13 @@
except NoResultError:
self.render("login.html", error="email not found")
return
- hashed_password = await tornado.ioloop.IOLoop.current().run_in_executor(
+ password_equal = await tornado.ioloop.IOLoop.current().run_in_executor(
None,
- bcrypt.hashpw,
+ bcrypt.checkpw,
tornado.escape.utf8(self.get_argument("password")),
tornado.escape.utf8(author.hashed_password),
)
- hashed_password = tornado.escape.to_unicode(hashed_password)
- if hashed_password == author.hashed_password:
+ if password_equal:
self.set_secure_cookie("blogdemo_user", str(author.id))
self.redirect(self.get_argument("next", "/"))
else:
| {"golden_diff": "diff --git a/demos/blog/blog.py b/demos/blog/blog.py\n--- a/demos/blog/blog.py\n+++ b/demos/blog/blog.py\n@@ -266,14 +266,13 @@\n except NoResultError:\n self.render(\"login.html\", error=\"email not found\")\n return\n- hashed_password = await tornado.ioloop.IOLoop.current().run_in_executor(\n+ password_equal = await tornado.ioloop.IOLoop.current().run_in_executor(\n None,\n- bcrypt.hashpw,\n+ bcrypt.checkpw,\n tornado.escape.utf8(self.get_argument(\"password\")),\n tornado.escape.utf8(author.hashed_password),\n )\n- hashed_password = tornado.escape.to_unicode(hashed_password)\n- if hashed_password == author.hashed_password:\n+ if password_equal:\n self.set_secure_cookie(\"blogdemo_user\", str(author.id))\n self.redirect(self.get_argument(\"next\", \"/\"))\n else:\n", "issue": "Should the Blog example use bcrypt's checkpw instead of ==\nbcrypt 3.1.0 added `checkpw` which (I believe) is the recommended way to compare passwords to the hashed password now. \n- `checkpw` code [here](https://github.com/pyca/bcrypt/blob/master/src/bcrypt/__init__.py#L97)\n- tornado's blog example using `==` [here](https://github.com/tornadoweb/tornado/blob/stable/demos/blog/blog.py#L211)\n\nThe current example looks like this:\n\n``` python\n @gen.coroutine\n def post(self):\n # [...]\n hashed_password = yield executor.submit(\n bcrypt.hashpw, \n tornado.escape.utf8(self.get_argument(\"password\")),\n tornado.escape.utf8(author.hashed_password))\n if hashed_password == author.hashed_password:\n self.set_secure_cookie(\"blogdemo_user\", str(author.id))\n self.redirect(self.get_argument(\"next\", \"/\"))\n else:\n self.render(\"login.html\", error=\"incorrect password\")\n```\n\nBut maybe it should look like this?\n\n``` python\n @gen.coroutine\n def post(self):\n # [...]\n password_equal = yield executor.submit(\n bcrypt.checkpw, \n tornado.escape.utf8(self.get_argument(\"password\")),\n tornado.escape.utf8(author.hashed_password))\n if password_equal:\n self.set_secure_cookie(\"blogdemo_user\", str(author.id))\n self.redirect(self.get_argument(\"next\", \"/\"))\n else:\n self.render(\"login.html\", error=\"incorrect password\")\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n#\n# Copyright 2009 Facebook\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport aiopg\nimport bcrypt\nimport markdown\nimport os.path\nimport psycopg2\nimport re\nimport tornado.escape\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.locks\nimport tornado.options\nimport tornado.web\nimport unicodedata\n\nfrom tornado.options import define, options\n\ndefine(\"port\", default=8888, help=\"run on the given port\", type=int)\ndefine(\"db_host\", default=\"127.0.0.1\", help=\"blog database host\")\ndefine(\"db_port\", default=5432, help=\"blog database port\")\ndefine(\"db_database\", default=\"blog\", help=\"blog database name\")\ndefine(\"db_user\", default=\"blog\", help=\"blog database user\")\ndefine(\"db_password\", default=\"blog\", help=\"blog database password\")\n\n\nclass NoResultError(Exception):\n pass\n\n\nasync def maybe_create_tables(db):\n try:\n with (await db.cursor()) as cur:\n await cur.execute(\"SELECT COUNT(*) FROM entries LIMIT 1\")\n await cur.fetchone()\n except psycopg2.ProgrammingError:\n with open(\"schema.sql\") as f:\n schema = f.read()\n with (await db.cursor()) as cur:\n await cur.execute(schema)\n\n\nclass Application(tornado.web.Application):\n def __init__(self, db):\n self.db = db\n handlers = [\n (r\"/\", HomeHandler),\n (r\"/archive\", ArchiveHandler),\n (r\"/feed\", FeedHandler),\n (r\"/entry/([^/]+)\", EntryHandler),\n (r\"/compose\", ComposeHandler),\n (r\"/auth/create\", AuthCreateHandler),\n (r\"/auth/login\", AuthLoginHandler),\n (r\"/auth/logout\", AuthLogoutHandler),\n ]\n settings = dict(\n blog_title=u\"Tornado Blog\",\n template_path=os.path.join(os.path.dirname(__file__), \"templates\"),\n static_path=os.path.join(os.path.dirname(__file__), \"static\"),\n ui_modules={\"Entry\": EntryModule},\n xsrf_cookies=True,\n cookie_secret=\"__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__\",\n login_url=\"/auth/login\",\n debug=True,\n )\n super(Application, self).__init__(handlers, **settings)\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n def row_to_obj(self, row, cur):\n \"\"\"Convert a SQL row to an object supporting dict and attribute access.\"\"\"\n obj = tornado.util.ObjectDict()\n for val, desc in zip(row, cur.description):\n obj[desc.name] = val\n return obj\n\n async def execute(self, stmt, *args):\n \"\"\"Execute a SQL statement.\n\n Must be called with ``await self.execute(...)``\n \"\"\"\n with (await self.application.db.cursor()) as cur:\n await cur.execute(stmt, args)\n\n async def query(self, stmt, *args):\n \"\"\"Query for a list of results.\n\n Typical usage::\n\n results = await self.query(...)\n\n Or::\n\n for row in await self.query(...)\n \"\"\"\n with (await self.application.db.cursor()) as cur:\n await cur.execute(stmt, args)\n return [self.row_to_obj(row, cur) for row in await cur.fetchall()]\n\n async def queryone(self, stmt, *args):\n \"\"\"Query for exactly one result.\n\n Raises NoResultError if there are no results, or ValueError if\n there are more than one.\n \"\"\"\n results = await self.query(stmt, *args)\n if len(results) == 0:\n raise NoResultError()\n elif len(results) > 1:\n raise ValueError(\"Expected 1 result, got %d\" % len(results))\n return results[0]\n\n async def prepare(self):\n # get_current_user cannot be a coroutine, so set\n # self.current_user in prepare instead.\n user_id = self.get_secure_cookie(\"blogdemo_user\")\n if user_id:\n self.current_user = await self.queryone(\n \"SELECT * FROM authors WHERE id = %s\", int(user_id)\n )\n\n async def 
any_author_exists(self):\n return bool(await self.query(\"SELECT * FROM authors LIMIT 1\"))\n\n\nclass HomeHandler(BaseHandler):\n async def get(self):\n entries = await self.query(\n \"SELECT * FROM entries ORDER BY published DESC LIMIT 5\"\n )\n if not entries:\n self.redirect(\"/compose\")\n return\n self.render(\"home.html\", entries=entries)\n\n\nclass EntryHandler(BaseHandler):\n async def get(self, slug):\n entry = await self.queryone(\"SELECT * FROM entries WHERE slug = %s\", slug)\n if not entry:\n raise tornado.web.HTTPError(404)\n self.render(\"entry.html\", entry=entry)\n\n\nclass ArchiveHandler(BaseHandler):\n async def get(self):\n entries = await self.query(\"SELECT * FROM entries ORDER BY published DESC\")\n self.render(\"archive.html\", entries=entries)\n\n\nclass FeedHandler(BaseHandler):\n async def get(self):\n entries = await self.query(\n \"SELECT * FROM entries ORDER BY published DESC LIMIT 10\"\n )\n self.set_header(\"Content-Type\", \"application/atom+xml\")\n self.render(\"feed.xml\", entries=entries)\n\n\nclass ComposeHandler(BaseHandler):\n @tornado.web.authenticated\n async def get(self):\n id = self.get_argument(\"id\", None)\n entry = None\n if id:\n entry = await self.queryone(\"SELECT * FROM entries WHERE id = %s\", int(id))\n self.render(\"compose.html\", entry=entry)\n\n @tornado.web.authenticated\n async def post(self):\n id = self.get_argument(\"id\", None)\n title = self.get_argument(\"title\")\n text = self.get_argument(\"markdown\")\n html = markdown.markdown(text)\n if id:\n try:\n entry = await self.queryone(\n \"SELECT * FROM entries WHERE id = %s\", int(id)\n )\n except NoResultError:\n raise tornado.web.HTTPError(404)\n slug = entry.slug\n await self.execute(\n \"UPDATE entries SET title = %s, markdown = %s, html = %s \"\n \"WHERE id = %s\",\n title,\n text,\n html,\n int(id),\n )\n else:\n slug = unicodedata.normalize(\"NFKD\", title)\n slug = re.sub(r\"[^\\w]+\", \" \", slug)\n slug = \"-\".join(slug.lower().strip().split())\n slug = slug.encode(\"ascii\", \"ignore\").decode(\"ascii\")\n if not slug:\n slug = \"entry\"\n while True:\n e = await self.query(\"SELECT * FROM entries WHERE slug = %s\", slug)\n if not e:\n break\n slug += \"-2\"\n await self.execute(\n \"INSERT INTO entries (author_id,title,slug,markdown,html,published,updated)\"\n \"VALUES (%s,%s,%s,%s,%s,CURRENT_TIMESTAMP,CURRENT_TIMESTAMP)\",\n self.current_user.id,\n title,\n slug,\n text,\n html,\n )\n self.redirect(\"/entry/\" + slug)\n\n\nclass AuthCreateHandler(BaseHandler):\n def get(self):\n self.render(\"create_author.html\")\n\n async def post(self):\n if await self.any_author_exists():\n raise tornado.web.HTTPError(400, \"author already created\")\n hashed_password = await tornado.ioloop.IOLoop.current().run_in_executor(\n None,\n bcrypt.hashpw,\n tornado.escape.utf8(self.get_argument(\"password\")),\n bcrypt.gensalt(),\n )\n author = await self.queryone(\n \"INSERT INTO authors (email, name, hashed_password) \"\n \"VALUES (%s, %s, %s) RETURNING id\",\n self.get_argument(\"email\"),\n self.get_argument(\"name\"),\n tornado.escape.to_unicode(hashed_password),\n )\n self.set_secure_cookie(\"blogdemo_user\", str(author.id))\n self.redirect(self.get_argument(\"next\", \"/\"))\n\n\nclass AuthLoginHandler(BaseHandler):\n async def get(self):\n # If there are no authors, redirect to the account creation page.\n if not await self.any_author_exists():\n self.redirect(\"/auth/create\")\n else:\n self.render(\"login.html\", error=None)\n\n async def post(self):\n try:\n author = 
await self.queryone(\n \"SELECT * FROM authors WHERE email = %s\", self.get_argument(\"email\")\n )\n except NoResultError:\n self.render(\"login.html\", error=\"email not found\")\n return\n hashed_password = await tornado.ioloop.IOLoop.current().run_in_executor(\n None,\n bcrypt.hashpw,\n tornado.escape.utf8(self.get_argument(\"password\")),\n tornado.escape.utf8(author.hashed_password),\n )\n hashed_password = tornado.escape.to_unicode(hashed_password)\n if hashed_password == author.hashed_password:\n self.set_secure_cookie(\"blogdemo_user\", str(author.id))\n self.redirect(self.get_argument(\"next\", \"/\"))\n else:\n self.render(\"login.html\", error=\"incorrect password\")\n\n\nclass AuthLogoutHandler(BaseHandler):\n def get(self):\n self.clear_cookie(\"blogdemo_user\")\n self.redirect(self.get_argument(\"next\", \"/\"))\n\n\nclass EntryModule(tornado.web.UIModule):\n def render(self, entry):\n return self.render_string(\"modules/entry.html\", entry=entry)\n\n\nasync def main():\n tornado.options.parse_command_line()\n\n # Create the global connection pool.\n async with aiopg.create_pool(\n host=options.db_host,\n port=options.db_port,\n user=options.db_user,\n password=options.db_password,\n dbname=options.db_database,\n ) as db:\n await maybe_create_tables(db)\n app = Application(db)\n app.listen(options.port)\n\n # In this demo the server will simply run until interrupted\n # with Ctrl-C, but if you want to shut down more gracefully,\n # call shutdown_event.set().\n shutdown_event = tornado.locks.Event()\n await shutdown_event.wait()\n\n\nif __name__ == \"__main__\":\n tornado.ioloop.IOLoop.current().run_sync(main)\n", "path": "demos/blog/blog.py"}]} | 4,030 | 204 |
gh_patches_debug_8373 | rasdani/github-patches | git_diff | PyGithub__PyGithub-2443 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add cryptography as dependency (for app-based auth)
Using [the new app-based auth](https://github.com/PyGithub/PyGithub/pull/1986) results in the error below when `cryptography` is not installed, so `cryptography` should be declared as a dependency of PyGithub.
```
Traceback (most recent call last):
File "***", line 60, in ***
github = Github(
File "/usr/local/lib/python3.9/site-packages/github/MainClass.py", line 122, in __init__
self.__requester = Requester(
File "/usr/local/lib/python3.9/site-packages/github/Requester.py", line 332, in __init__
self._refresh_token()
File "/usr/local/lib/python3.9/site-packages/github/Requester.py", line 396, in _refresh_token
self.__installation_authorization = self._get_installation_authorization()
File "/usr/local/lib/python3.9/site-packages/github/Requester.py", line 377, in _get_installation_authorization
integration = GithubIntegration.GithubIntegration(
File "/usr/local/lib/python3.9/site-packages/github/GithubIntegration.py", line 52, in __init__
jwt=self.create_jwt(),
File "/usr/local/lib/python3.9/site-packages/github/GithubIntegration.py", line 106, in create_jwt
encrypted = jwt.encode(payload, key=self.private_key, algorithm="RS256")
File "/usr/local/lib/python3.9/site-packages/jwt/api_jwt.py", line 67, in encode
return api_jws.encode(json_payload, key, algorithm, headers, json_encoder)
File "/usr/local/lib/python3.9/site-packages/jwt/api_jws.py", line 152, in encode
alg_obj = self.get_algorithm_by_name(algorithm_)
File "/usr/local/lib/python3.9/site-packages/jwt/api_jws.py", line 91, in get_algorithm_by_name
raise NotImplementedError(
NotImplementedError: Algorithm 'RS256' could not be found. Do you have cryptography installed?
```
</issue>
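For reference, the `NotImplementedError` in the traceback is raised by PyJWT, which registers the RSA-based algorithms such as RS256 only when the optional `cryptography` backend can be imported. The sketch below is illustrative only (assuming PyJWT 2.x; `can_sign_rs256` is a made-up helper, not PyGithub code): it shows how the missing backend can be detected, and notes the usual packaging-level remedy via PyJWT's `crypto` extra.

```python
# Illustrative sketch, not PyGithub code: check whether PyJWT can sign RS256,
# which requires the optional 'cryptography' backend.
from jwt.api_jws import PyJWS


def can_sign_rs256() -> bool:
    try:
        # The same lookup that fails in the traceback above.
        PyJWS().get_algorithm_by_name("RS256")
        return True
    except NotImplementedError:
        # PyJWT registers the RSA/EC algorithms only when 'cryptography' imports.
        return False


# Packaging side: the dependency is commonly expressed through PyJWT's extra,
# e.g. install_requires=["pyjwt[crypto]>=2.4.0"], so pip pulls in cryptography
# automatically instead of requiring users to install it by hand.
print(can_sign_rs256())
```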
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 ############################ Copyrights and license ############################
4 # #
5 # Copyright 2012 Vincent Jacques <[email protected]> #
6 # Copyright 2012 Zearin <[email protected]> #
7 # Copyright 2013 Vincent Jacques <[email protected]> #
8 # Copyright 2014 Tomas Radej <[email protected]> #
9 # Copyright 2014 Vincent Jacques <[email protected]> #
10 # Copyright 2015 Jimmy Zelinskie <[email protected]> #
11 # Copyright 2016 Felix Yan <[email protected]> #
12 # Copyright 2016 Jakub Wilk <[email protected]> #
13 # Copyright 2016 Jannis Gebauer <[email protected]> #
14 # Copyright 2016 Peter Buckley <[email protected]> #
15 # Copyright 2017 Hugo <[email protected]> #
16 # Copyright 2017 Jannis Gebauer <[email protected]> #
17 # Copyright 2017 Jannis Gebauer <[email protected]> #
18 # Copyright 2017 Nhomar Hernandez <[email protected]> #
19 # Copyright 2017 Paul Ortman <[email protected]> #
20 # Copyright 2018 Jason White <[email protected]> #
21 # Copyright 2018 Mike Miller <[email protected]> #
22 # Copyright 2018 Wan Liuyang <[email protected]> #
23 # Copyright 2018 sfdye <[email protected]> #
24 # #
25 # This file is part of PyGithub. #
26 # http://pygithub.readthedocs.io/ #
27 # #
28 # PyGithub is free software: you can redistribute it and/or modify it under #
29 # the terms of the GNU Lesser General Public License as published by the Free #
30 # Software Foundation, either version 3 of the License, or (at your option) #
31 # any later version. #
32 # #
33 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
34 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
35 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
36 # details. #
37 # #
38 # You should have received a copy of the GNU Lesser General Public License #
39 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
40 # #
41 ################################################################################
42
43 import textwrap
44
45 import setuptools
46
47 if __name__ == "__main__":
48 setuptools.setup(
49 name="PyGithub",
50 use_scm_version=True,
51 setup_requires=["setuptools_scm"],
52 description="Use the full Github API v3",
53 author="Vincent Jacques",
54 author_email="[email protected]",
55 url="https://github.com/pygithub/pygithub",
56 project_urls={
57 "Documentation": "http://pygithub.readthedocs.io/en/latest/",
58 "Source": "https://github.com/pygithub/pygithub",
59 "Tracker": "https://github.com/pygithub/pygithub/issues",
60 },
61 long_description=textwrap.dedent(
62 """\
63 (Very short) Tutorial
64 =====================
65
66 First create a Github instance::
67
68 from github import Github
69
70 # using username and password
71 g = Github("user", "password")
72
73 # or using an access token
74 g = Github("access_token")
75
76 Then play with your Github objects::
77
78 for repo in g.get_user().get_repos():
79 print(repo.name)
80 repo.edit(has_wiki=False)
81
82 Reference documentation
83 =======================
84
85 See http://pygithub.readthedocs.io/en/latest/"""
86 ),
87 packages=["github"],
88 package_data={"github": ["py.typed", "*.pyi"]},
89 classifiers=[
90 "Development Status :: 5 - Production/Stable",
91 "Environment :: Web Environment",
92 "Intended Audience :: Developers",
93 "License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
94 "Operating System :: OS Independent",
95 "Programming Language :: Python",
96 "Programming Language :: Python :: 3",
97 "Programming Language :: Python :: 3.7",
98 "Programming Language :: Python :: 3.8",
99 "Programming Language :: Python :: 3.9",
100 "Programming Language :: Python :: 3.10",
101 "Programming Language :: Python :: 3.11",
102 "Topic :: Software Development",
103 ],
104 python_requires=">=3.7",
105 install_requires=[
106 "deprecated",
107 "pyjwt>=2.4.0",
108 "pynacl>=1.4.0",
109 "requests>=2.14.0",
110 ],
111 extras_require={"integrations": ["cryptography"]},
112 tests_require=["cryptography", "httpretty>=1.0.3"],
113 )
114
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -104,10 +104,11 @@
python_requires=">=3.7",
install_requires=[
"deprecated",
- "pyjwt>=2.4.0",
+ "pyjwt[crypto]>=2.4.0",
"pynacl>=1.4.0",
"requests>=2.14.0",
],
- extras_require={"integrations": ["cryptography"]},
- tests_require=["cryptography", "httpretty>=1.0.3"],
+ # can be removed, still here to avoid breaking user code
+ extras_require={"integrations": []},
+ tests_require=["httpretty>=1.0.3"],
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -104,10 +104,11 @@\n python_requires=\">=3.7\",\n install_requires=[\n \"deprecated\",\n- \"pyjwt>=2.4.0\",\n+ \"pyjwt[crypto]>=2.4.0\",\n \"pynacl>=1.4.0\",\n \"requests>=2.14.0\",\n ],\n- extras_require={\"integrations\": [\"cryptography\"]},\n- tests_require=[\"cryptography\", \"httpretty>=1.0.3\"],\n+ # can be removed, still here to avoid breaking user code\n+ extras_require={\"integrations\": []},\n+ tests_require=[\"httpretty>=1.0.3\"],\n )\n", "issue": "Add cryptography as dependency (for app-based auth)\nUsing [the new app-based auth ](https://github.com/PyGithub/PyGithub/pull/1986) results to the error below when `cryptography` is not installed. It should be added as a dependency of this.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"***\", line 60, in ***\r\n github = Github(\r\n File \"/usr/local/lib/python3.9/site-packages/github/MainClass.py\", line 122, in __init__\r\n self.__requester = Requester(\r\n File \"/usr/local/lib/python3.9/site-packages/github/Requester.py\", line 332, in __init__\r\n self._refresh_token()\r\n File \"/usr/local/lib/python3.9/site-packages/github/Requester.py\", line 396, in _refresh_token\r\n self.__installation_authorization = self._get_installation_authorization()\r\n File \"/usr/local/lib/python3.9/site-packages/github/Requester.py\", line 377, in _get_installation_authorization\r\n integration = GithubIntegration.GithubIntegration(\r\n File \"/usr/local/lib/python3.9/site-packages/github/GithubIntegration.py\", line 52, in __init__\r\n jwt=self.create_jwt(),\r\n File \"/usr/local/lib/python3.9/site-packages/github/GithubIntegration.py\", line 106, in create_jwt\r\n encrypted = jwt.encode(payload, key=self.private_key, algorithm=\"RS256\")\r\n File \"/usr/local/lib/python3.9/site-packages/jwt/api_jwt.py\", line 67, in encode\r\n return api_jws.encode(json_payload, key, algorithm, headers, json_encoder)\r\n File \"/usr/local/lib/python3.9/site-packages/jwt/api_jws.py\", line 152, in encode\r\n alg_obj = self.get_algorithm_by_name(algorithm_)\r\n File \"/usr/local/lib/python3.9/site-packages/jwt/api_jws.py\", line 91, in get_algorithm_by_name\r\n raise NotImplementedError(\r\nNotImplementedError: Algorithm 'RS256' could not be found. Do you have cryptography installed?\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2014 Tomas Radej <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2015 Jimmy Zelinskie <[email protected]> #\n# Copyright 2016 Felix Yan <[email protected]> #\n# Copyright 2016 Jakub Wilk <[email protected]> #\n# Copyright 2016 Jannis Gebauer <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2017 Hugo <[email protected]> #\n# Copyright 2017 Jannis Gebauer <[email protected]> #\n# Copyright 2017 Jannis Gebauer <[email protected]> #\n# Copyright 2017 Nhomar Hernandez <[email protected]> #\n# Copyright 2017 Paul Ortman <[email protected]> #\n# Copyright 2018 Jason White <[email protected]> #\n# Copyright 2018 Mike Miller <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. 
#\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n################################################################################\n\nimport textwrap\n\nimport setuptools\n\nif __name__ == \"__main__\":\n setuptools.setup(\n name=\"PyGithub\",\n use_scm_version=True,\n setup_requires=[\"setuptools_scm\"],\n description=\"Use the full Github API v3\",\n author=\"Vincent Jacques\",\n author_email=\"[email protected]\",\n url=\"https://github.com/pygithub/pygithub\",\n project_urls={\n \"Documentation\": \"http://pygithub.readthedocs.io/en/latest/\",\n \"Source\": \"https://github.com/pygithub/pygithub\",\n \"Tracker\": \"https://github.com/pygithub/pygithub/issues\",\n },\n long_description=textwrap.dedent(\n \"\"\"\\\n (Very short) Tutorial\n =====================\n\n First create a Github instance::\n\n from github import Github\n\n # using username and password\n g = Github(\"user\", \"password\")\n\n # or using an access token\n g = Github(\"access_token\")\n\n Then play with your Github objects::\n\n for repo in g.get_user().get_repos():\n print(repo.name)\n repo.edit(has_wiki=False)\n\n Reference documentation\n =======================\n\n See http://pygithub.readthedocs.io/en/latest/\"\"\"\n ),\n packages=[\"github\"],\n package_data={\"github\": [\"py.typed\", \"*.pyi\"]},\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Software Development\",\n ],\n python_requires=\">=3.7\",\n install_requires=[\n \"deprecated\",\n \"pyjwt>=2.4.0\",\n \"pynacl>=1.4.0\",\n \"requests>=2.14.0\",\n ],\n extras_require={\"integrations\": [\"cryptography\"]},\n tests_require=[\"cryptography\", \"httpretty>=1.0.3\"],\n )\n", "path": "setup.py"}]} | 2,369 | 176 |
gh_patches_debug_13872 | rasdani/github-patches | git_diff | LMFDB__lmfdb-4241 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bad links on HMF browse pages
A report from the bug report system, from Paul Gunnells:
"On
https://www.lmfdb.org/ModularForm/GL2/TotallyReal/browse/2/
the links in the middle column (the Number of newforms column) are all the same and point to https://www.lmfdb.org/ModularForm/GL2/TotallyReal/?field_label=2.2.497.1 instead of the URLs for the modular form data they're supposed to point to."
</issue>
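For context, the symptom (every link pointing at the last field label, `2.2.497.1`) matches Python's late-binding closure behaviour: a `lambda` created inside a comprehension that refers to the loop variable, as the `forms` entry in `hmf_stats.py` below does with `F`, sees only the variable's final value when it is eventually called. The snippet is a stand-alone illustration with made-up labels and a simplified URL string, not LMFDB code.

```python
# Stand-alone illustration of the late-binding pitfall (labels are made up).
fields = ["2.2.5.1", "2.2.8.1", "2.2.497.1"]

# Every lambda closes over the same variable F, so once the comprehension has
# finished they all see its final value.
broken = {F: (lambda: "?field_label=" + F) for F in fields}
print(broken["2.2.5.1"]())   # '?field_label=2.2.497.1'  <- same link everywhere

# Typical fixes: freeze the value as a default argument, or make the lambda
# accept the label as a parameter supplied by the caller (the template).
fixed = {F: (lambda F=F: "?field_label=" + F) for F in fields}
print(fixed["2.2.5.1"]())    # '?field_label=2.2.5.1'
```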
<code>
[start of lmfdb/hilbert_modular_forms/hmf_stats.py]
1 # -*- coding: utf-8 -*-
2 from flask import url_for
3 from lmfdb import db
4 from lmfdb.utils import comma
5 from lmfdb.utils.display_stats import StatsDisplay, proportioners, totaler
6 from lmfdb.logger import make_logger
7 from lmfdb.number_fields.web_number_field import nf_display_knowl
8 from sage.misc.cachefunc import cached_method
9
10 logger = make_logger("hmf")
11
12 class HMFstats(StatsDisplay):
13 """
14 Class for creating and displaying statistics for Hilbert modular forms
15 """
16 def __init__(self):
17 self.nforms = db.hmf_forms.count()
18
19 table = db.hmf_forms
20 baseurl_func = ".hilbert_modular_form_render_webpage"
21
22 stat_list = [
23 {'cols': ['level_norm', 'deg'],
24 'totaler': totaler(),
25 'proportioner': proportioners.per_col_total},
26 {'cols': ['level_norm', 'dimension'],
27 'totaler': totaler(),
28 'proportioner': proportioners.per_col_total},
29 ]
30 buckets = {'level_norm': ['1', '2-10', '11-100', '101-1000', '1001-10000'],
31 'dimension': ['1', '2', '3', '4', '5-10', '11-20', '21-100', '101-1000']}
32 knowls = {'level_norm': 'mf.hilbert.level_norm',
33 'dimension': 'mf.hilbert.dimension',
34 'deg': 'nf.degree'}
35 short_display = {'deg': 'degree'}
36
37 @property
38 def short_summary(self):
39 return self.summary + " Here are some <a href='%s'>further statistics</a>." % (url_for(".statistics"),)
40
41 @property
42 def summary(self):
43 hmf_knowl = '<a knowl="mf.hilbert">Hilbert modular forms</a>'
44 nf_knowl = '<a knowl="nf.totally_real">totally real number fields</a>'
45 deg_knowl = '<a knowl="nf.degree">degree</a>'
46 return "The database currently contains %s %s over %s %s of %s 2 to %s." % (comma(self.nforms), hmf_knowl, self.counts()["nfields"], nf_knowl, deg_knowl, self.counts()["maxdeg"])
47
48 def degree_summary(self, d):
49 stats = self.statistics(d)
50 hmf_knowl = '<a knowl="mf.hilbert">Hilbert modular forms</a>'
51 nf_knowl = '<a knowl="nf.totally_real">totally real number fields</a>'
52 deg_knowl = '<a knowl="nf.degree">degree</a>'
53 level_knowl = '<a knowl="mf.hilbert.level_norm">level norm</a>'
54 return ''.join([r'The database currently contains %s ' % stats['nforms'],
55 hmf_knowl,
56 r' defined over %s ' % stats['nfields'],
57 nf_knowl,
58 r' of %s %s, with ' % (deg_knowl, d),
59 level_knowl,
60 r' up to %s.' % stats['maxnorm']])
61
62 @cached_method
63 def counts(self):
64 counts = {}
65
66
67 counts['nforms'] = self.nforms
68 counts['nforms_c'] = comma(self.nforms)
69
70 attrs = ["degree", "discriminant", "label"]
71 fields = list(db.hmf_fields.search({}, attrs, sort=attrs))
72 degrees = sorted(set(F["degree"] for F in fields))
73 by_deg = {d: [F for F in fields if F["degree"] == d] for d in degrees}
74 counts["degrees"] = degrees
75 counts["nfields"] = len(fields)
76 counts["nfields_c"] = comma(len(fields))
77 counts["maxdeg"] = max(degrees)
78 counts["max_deg_c"] = comma(max(degrees))
79 counts["fields_by_degree"] = {d : [F["label"] for F in by_deg[d]] for d in degrees}
80 counts["nfields_by_degree"] = {d : len(by_deg[d]) for d in degrees}
81 counts["max_disc_by_degree"] = {d : max(F["discriminant"] for F in by_deg[d]) for d in degrees}
82 return counts
83
84 @cached_method
85 def statistics(self, d=None):
86 if d is not None:
87 return self.statistics()[int(d)]
88 nstats = db.hmf_forms.stats.numstats("level_norm", "field_label")
89 counts = db.hmf_forms.stats.column_counts("field_label")
90 nstats_by_deg = db.hmf_forms.stats.numstats("level_norm", "deg")
91 counts_by_deg = db.hmf_forms.stats.column_counts("deg")
92 C = self.counts()
93 stats = {d: {"fields": C["fields_by_degree"][d],
94 "nfields": C["nfields_by_degree"][d],
95 "nforms": counts_by_deg[d],
96 "maxnorm": nstats_by_deg[d]["max"],
97 "counts": {F: {"nforms": counts[F],
98 "maxnorm": nstats[F]["max"],
99 "field_knowl": nf_display_knowl(F, F),
100 "forms": lambda : url_for('hmf.hilbert_modular_form_render_webpage', field_label=F)}
101 for F in C["fields_by_degree"][d]}}
102 for d in C["degrees"]}
103 return stats
104
105 def setup(self, attributes=None, delete=False):
106 if attributes is None:
107 # Per-degree statistics aren't updated by the normal setup function
108 # The assert is for pyflakes
109 assert self.statistics()
110 super().setup(attributes, delete)
111
[end of lmfdb/hilbert_modular_forms/hmf_stats.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lmfdb/hilbert_modular_forms/hmf_stats.py b/lmfdb/hilbert_modular_forms/hmf_stats.py
--- a/lmfdb/hilbert_modular_forms/hmf_stats.py
+++ b/lmfdb/hilbert_modular_forms/hmf_stats.py
@@ -97,7 +97,7 @@
"counts": {F: {"nforms": counts[F],
"maxnorm": nstats[F]["max"],
"field_knowl": nf_display_knowl(F, F),
- "forms": lambda : url_for('hmf.hilbert_modular_form_render_webpage', field_label=F)}
+ "forms": lambda label: url_for('hmf.hilbert_modular_form_render_webpage', field_label=label)}
for F in C["fields_by_degree"][d]}}
for d in C["degrees"]}
return stats
| {"golden_diff": "diff --git a/lmfdb/hilbert_modular_forms/hmf_stats.py b/lmfdb/hilbert_modular_forms/hmf_stats.py\n--- a/lmfdb/hilbert_modular_forms/hmf_stats.py\n+++ b/lmfdb/hilbert_modular_forms/hmf_stats.py\n@@ -97,7 +97,7 @@\n \"counts\": {F: {\"nforms\": counts[F],\n \"maxnorm\": nstats[F][\"max\"],\n \"field_knowl\": nf_display_knowl(F, F),\n- \"forms\": lambda : url_for('hmf.hilbert_modular_form_render_webpage', field_label=F)}\n+ \"forms\": lambda label: url_for('hmf.hilbert_modular_form_render_webpage', field_label=label)}\n for F in C[\"fields_by_degree\"][d]}}\n for d in C[\"degrees\"]}\n return stats\n", "issue": "Bad links on HMF browse pages\nA report from the bug report system, from Paul Gunnells: \r\n\r\n\"On \r\n\r\nhttps://www.lmfdb.org/ModularForm/GL2/TotallyReal/browse/2/ \r\n\r\nthe links in the middle column (the Number of newforms column) are all the same and point to https://www.lmfdb.org/ModularForm/GL2/TotallyReal/?field_label=2.2.497.1 instead of urls the modular form data they're supposed to.\"\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom flask import url_for\nfrom lmfdb import db\nfrom lmfdb.utils import comma\nfrom lmfdb.utils.display_stats import StatsDisplay, proportioners, totaler\nfrom lmfdb.logger import make_logger\nfrom lmfdb.number_fields.web_number_field import nf_display_knowl\nfrom sage.misc.cachefunc import cached_method\n\nlogger = make_logger(\"hmf\")\n\nclass HMFstats(StatsDisplay):\n \"\"\"\n Class for creating and displaying statistics for Hilbert modular forms\n \"\"\"\n def __init__(self):\n self.nforms = db.hmf_forms.count()\n\n table = db.hmf_forms\n baseurl_func = \".hilbert_modular_form_render_webpage\"\n\n stat_list = [\n {'cols': ['level_norm', 'deg'],\n 'totaler': totaler(),\n 'proportioner': proportioners.per_col_total},\n {'cols': ['level_norm', 'dimension'],\n 'totaler': totaler(),\n 'proportioner': proportioners.per_col_total},\n ]\n buckets = {'level_norm': ['1', '2-10', '11-100', '101-1000', '1001-10000'],\n 'dimension': ['1', '2', '3', '4', '5-10', '11-20', '21-100', '101-1000']}\n knowls = {'level_norm': 'mf.hilbert.level_norm',\n 'dimension': 'mf.hilbert.dimension',\n 'deg': 'nf.degree'}\n short_display = {'deg': 'degree'}\n\n @property\n def short_summary(self):\n return self.summary + \" Here are some <a href='%s'>further statistics</a>.\" % (url_for(\".statistics\"),)\n\n @property\n def summary(self):\n hmf_knowl = '<a knowl=\"mf.hilbert\">Hilbert modular forms</a>'\n nf_knowl = '<a knowl=\"nf.totally_real\">totally real number fields</a>'\n deg_knowl = '<a knowl=\"nf.degree\">degree</a>'\n return \"The database currently contains %s %s over %s %s of %s 2 to %s.\" % (comma(self.nforms), hmf_knowl, self.counts()[\"nfields\"], nf_knowl, deg_knowl, self.counts()[\"maxdeg\"])\n\n def degree_summary(self, d):\n stats = self.statistics(d)\n hmf_knowl = '<a knowl=\"mf.hilbert\">Hilbert modular forms</a>'\n nf_knowl = '<a knowl=\"nf.totally_real\">totally real number fields</a>'\n deg_knowl = '<a knowl=\"nf.degree\">degree</a>'\n level_knowl = '<a knowl=\"mf.hilbert.level_norm\">level norm</a>'\n return ''.join([r'The database currently contains %s ' % stats['nforms'],\n hmf_knowl,\n r' defined over %s ' % stats['nfields'],\n nf_knowl,\n r' of %s %s, with ' % (deg_knowl, d),\n level_knowl,\n r' up to %s.' 
% stats['maxnorm']])\n\n @cached_method\n def counts(self):\n counts = {}\n\n\n counts['nforms'] = self.nforms\n counts['nforms_c'] = comma(self.nforms)\n\n attrs = [\"degree\", \"discriminant\", \"label\"]\n fields = list(db.hmf_fields.search({}, attrs, sort=attrs))\n degrees = sorted(set(F[\"degree\"] for F in fields))\n by_deg = {d: [F for F in fields if F[\"degree\"] == d] for d in degrees}\n counts[\"degrees\"] = degrees\n counts[\"nfields\"] = len(fields)\n counts[\"nfields_c\"] = comma(len(fields))\n counts[\"maxdeg\"] = max(degrees)\n counts[\"max_deg_c\"] = comma(max(degrees))\n counts[\"fields_by_degree\"] = {d : [F[\"label\"] for F in by_deg[d]] for d in degrees}\n counts[\"nfields_by_degree\"] = {d : len(by_deg[d]) for d in degrees}\n counts[\"max_disc_by_degree\"] = {d : max(F[\"discriminant\"] for F in by_deg[d]) for d in degrees}\n return counts\n\n @cached_method\n def statistics(self, d=None):\n if d is not None:\n return self.statistics()[int(d)]\n nstats = db.hmf_forms.stats.numstats(\"level_norm\", \"field_label\")\n counts = db.hmf_forms.stats.column_counts(\"field_label\")\n nstats_by_deg = db.hmf_forms.stats.numstats(\"level_norm\", \"deg\")\n counts_by_deg = db.hmf_forms.stats.column_counts(\"deg\")\n C = self.counts()\n stats = {d: {\"fields\": C[\"fields_by_degree\"][d],\n \"nfields\": C[\"nfields_by_degree\"][d],\n \"nforms\": counts_by_deg[d],\n \"maxnorm\": nstats_by_deg[d][\"max\"],\n \"counts\": {F: {\"nforms\": counts[F],\n \"maxnorm\": nstats[F][\"max\"],\n \"field_knowl\": nf_display_knowl(F, F),\n \"forms\": lambda : url_for('hmf.hilbert_modular_form_render_webpage', field_label=F)}\n for F in C[\"fields_by_degree\"][d]}}\n for d in C[\"degrees\"]}\n return stats\n\n def setup(self, attributes=None, delete=False):\n if attributes is None:\n # Per-degree statistics aren't updated by the normal setup function\n # The assert is for pyflakes\n assert self.statistics()\n super().setup(attributes, delete)\n", "path": "lmfdb/hilbert_modular_forms/hmf_stats.py"}]} | 2,185 | 195 |
gh_patches_debug_62231 | rasdani/github-patches | git_diff | obspy__obspy-1673 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Parsing SEED: 'Date is required.' Warning
Hi,
Each time I read a dataless SEED file with different periods of time, I get this annoying warning message:
```
from obspy.io.xseed import Parser
from obspy import UTCDateTime
Parser('http://geoscope.ipgp.fr/metadata/G/dataless.G.CAN.seed')
/Users/bonaime/git/obspy/obspy/io/xseed/fields.py:374: UserWarning: Date is required. warnings.warn('Date is required.', UserWarning)
```
Is there a nice way to avoid this warning? I tried the following, but it is not working:
``` code
from obspy.io.xseed import Parser
from obspy import UTCDateTime
Parser('http://geoscope.ipgp.fr/metadata/G/dataless.G.CAN.seed').get_paz('G.CAN.00.BHZ', datetime=UTCDateTime())
```
and the result is
```
/Users/bonaime/git/obspy/obspy/io/xseed/fields.py:374: UserWarning: Date is required.
warnings.warn('Date is required.', UserWarning)
Out[1]:
{u'digitizer_gain': 1677720.0,
u'gain': 1.24658e+17,
u'poles': [(-0.0120768+0.011706j),
(-0.0120768-0.011706j),
(-36.4684+66.8452j),
(-36.4684-66.8452j),
(-29.8656+380.54j),
(-29.8656-380.54j),
(-12145.6+0j),
(-12145.6+0j)],
u'seismometer_gain': 3450.0,
u'sensitivity': 5788280000.0,
u'zeros': [0j, 0j]}
```
</issue>
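If the immediate goal is just to hide the message rather than change obspy's parsing, Python's standard `warnings` filtering works; the sketch below silences only this `UserWarning` around the parse and does not address whatever makes the optional end-date field warn in the first place.

```python
# Workaround sketch: suppress just this UserWarning while building the Parser.
import warnings

from obspy.io.xseed import Parser

with warnings.catch_warnings():
    warnings.filterwarnings(
        "ignore", message="Date is required", category=UserWarning
    )
    p = Parser("http://geoscope.ipgp.fr/metadata/G/dataless.G.CAN.seed")
```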
<code>
[start of obspy/io/xseed/blockette/blockette051.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import (absolute_import, division, print_function,
3 unicode_literals)
4 from future.builtins import * # NOQA
5
6 from .blockette import Blockette
7 from ..fields import Integer, VariableString
8
9
10 class Blockette051(Blockette):
11 """
12 Blockette 051: Station Comment Blockette.
13
14 Sample:
15 05100351992,001~1992,002~0740000000
16 """
17
18 id = 51
19 name = "Station Comment"
20 fields = [
21 VariableString(3, "Beginning effective time", 1, 22, 'T'),
22 VariableString(4, "End effective time", 1, 22, 'T', optional=True),
23 Integer(5, "Comment code key", 4, xpath=31),
24 Integer(6, "Comment level", 6, ignore=True)
25 ]
26
[end of obspy/io/xseed/blockette/blockette051.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/obspy/io/xseed/blockette/blockette051.py b/obspy/io/xseed/blockette/blockette051.py
--- a/obspy/io/xseed/blockette/blockette051.py
+++ b/obspy/io/xseed/blockette/blockette051.py
@@ -19,7 +19,7 @@
name = "Station Comment"
fields = [
VariableString(3, "Beginning effective time", 1, 22, 'T'),
- VariableString(4, "End effective time", 1, 22, 'T', optional=True),
+ VariableString(4, "End effective time", 0, 22, 'T', optional=True),
Integer(5, "Comment code key", 4, xpath=31),
Integer(6, "Comment level", 6, ignore=True)
]
| {"golden_diff": "diff --git a/obspy/io/xseed/blockette/blockette051.py b/obspy/io/xseed/blockette/blockette051.py\n--- a/obspy/io/xseed/blockette/blockette051.py\n+++ b/obspy/io/xseed/blockette/blockette051.py\n@@ -19,7 +19,7 @@\n name = \"Station Comment\"\n fields = [\n VariableString(3, \"Beginning effective time\", 1, 22, 'T'),\n- VariableString(4, \"End effective time\", 1, 22, 'T', optional=True),\n+ VariableString(4, \"End effective time\", 0, 22, 'T', optional=True),\n Integer(5, \"Comment code key\", 4, xpath=31),\n Integer(6, \"Comment level\", 6, ignore=True)\n ]\n", "issue": "Parsing SEED: 'Date is required.' Warning\nHi,\n\nEach time I want to read a dataless with different periods of time, I have this annoying warning message:\n\n```\nfrom obspy.io.xseed import Parser\nfrom obspy import UTCDateTime\nParser('http://geoscope.ipgp.fr/metadata/G/dataless.G.CAN.seed')\n/Users/bonaime/git/obspy/obspy/io/xseed/fields.py:374: UserWarning: Date is required. warnings.warn('Date is required.', UserWarning)\n```\n\nIs there a nice way to avoid this warning ? I try that but it is not working\n\n``` code\nfrom obspy.io.xseed import Parser\nfrom obspy import UTCDateTime\nParser('http://geoscope.ipgp.fr/metadata/G/dataless.G.CAN.seed').get_paz('G.CAN.00.BHZ', datetime=UTCDateTime())\n\n```\n\nand the result is\n\n```\n/Users/bonaime/git/obspy/obspy/io/xseed/fields.py:374: UserWarning: Date is required.\n warnings.warn('Date is required.', UserWarning)\nOut[1]:\n{u'digitizer_gain': 1677720.0,\n u'gain': 1.24658e+17,\n u'poles': [(-0.0120768+0.011706j),\n (-0.0120768-0.011706j),\n (-36.4684+66.8452j),\n (-36.4684-66.8452j),\n (-29.8656+380.54j),\n (-29.8656-380.54j),\n (-12145.6+0j),\n (-12145.6+0j)],\n u'seismometer_gain': 3450.0,\n u'sensitivity': 5788280000.0,\n u'zeros': [0j, 0j]}\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom future.builtins import * # NOQA\n\nfrom .blockette import Blockette\nfrom ..fields import Integer, VariableString\n\n\nclass Blockette051(Blockette):\n \"\"\"\n Blockette 051: Station Comment Blockette.\n\n Sample:\n 05100351992,001~1992,002~0740000000\n \"\"\"\n\n id = 51\n name = \"Station Comment\"\n fields = [\n VariableString(3, \"Beginning effective time\", 1, 22, 'T'),\n VariableString(4, \"End effective time\", 1, 22, 'T', optional=True),\n Integer(5, \"Comment code key\", 4, xpath=31),\n Integer(6, \"Comment level\", 6, ignore=True)\n ]\n", "path": "obspy/io/xseed/blockette/blockette051.py"}]} | 1,291 | 197 |
gh_patches_debug_12780 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-2017 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
E7003 Errors when using Fn::Transform inside a Mapping
*cfn-lint version: 0.49.2*
*Description of issue.*
#2006 tightened what is considered valid for use in a Mapping. This causes it to reject what otherwise appears to be a valid use of `Fn::Transform` as the body of a Mapping.
For example, this snippet is valid CFN:
```yaml
Mappings:
AwsAgentPlatformMap:
Fn::Transform:
Name: AWS::Include
Parameters:
Location: s3://my-bucket-name/version/3.0.1/amazonlinux2/a-json-file.json
```
This usage trips the newly enhanced regex:
```
E7003 Mapping key (Fn::Transform) has invalid name. Name has to be alphanumeric, '-' or '.'
```
</issue>
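The rejection is purely a character-class effect: the colons in `Fn::Transform` fall outside `[a-zA-Z0-9.-]`, so `check_key` flags the key even though CloudFormation accepts the intrinsic there. A quick stand-alone check (same pattern as the rule uses):

```python
# Stand-alone check of the pattern behind E7003.
import re

MAPPING_KEY_RE = re.compile(r"^[a-zA-Z0-9.-]{1,255}$")

for key in ("AwsAgentPlatformMap", "Fn::Transform"):
    print(key, bool(MAPPING_KEY_RE.match(key)))
# AwsAgentPlatformMap True
# Fn::Transform False   <- tripped as an invalid name despite being valid CFN
```

Any fix presumably needs to special-case the `Fn::Transform` intrinsic rather than loosen the character class for ordinary mapping keys.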
<code>
[start of src/cfnlint/rules/mappings/KeyName.py]
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import re
6 import six
7 from cfnlint.rules import CloudFormationLintRule
8 from cfnlint.rules import RuleMatch
9 from cfnlint.helpers import REGEX_ALPHANUMERIC
10
11
12 class KeyName(CloudFormationLintRule):
13 """Check if Mapping Keys are type string"""
14 id = 'E7003'
15 shortdesc = 'Mapping keys are strings and alphanumeric'
16 description = 'Check if Mappings keys are properly typed as strings and alphanumeric'
17 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html'
18 tags = ['mappings']
19
20 def check_attribute(self, key, path):
21 """ Check the key name for string and alphanumeric"""
22 matches = []
23 if not isinstance(key, six.string_types):
24 message = 'Mapping attribute ({0}) has to be a string.'
25 matches.append(RuleMatch(path[:], message.format(key)))
26 elif not re.match(REGEX_ALPHANUMERIC, key):
27 message = 'Mapping attribute ({0}) has invalid name. Name has to be alphanumeric.'
28 matches.append(RuleMatch(path[:], message.format(key)))
29
30 return matches
31
32 def check_key(self, key, path):
33 """ Check the key name for string and alphanumeric"""
34 matches = []
35 if not isinstance(key, six.string_types):
36 message = 'Mapping key ({0}) has to be a string.'
37 matches.append(RuleMatch(path[:], message.format(key)))
38 elif not re.match('^[a-zA-Z0-9.-]{1,255}$', key):
39 message = 'Mapping key ({0}) has invalid name. Name has to be alphanumeric, \'-\' or \'.\''
40 matches.append(RuleMatch(path[:], message.format(key)))
41
42 return matches
43
44 def match(self, cfn):
45 matches = []
46
47 mappings = cfn.template.get('Mappings', {})
48 for mapping_name, mapping_value in mappings.items():
49 if isinstance(mapping_value, dict):
50 for key_name, key_value in mapping_value.items():
51 matches.extend(self.check_key(
52 key_name, ['Mappings', mapping_name, key_name]))
53 if isinstance(key_value, dict):
54 for sub_key_name, _ in key_value.items():
55 matches.extend(
56 self.check_attribute(
57 sub_key_name, ['Mappings', mapping_name, key_name, sub_key_name]))
58
59 return matches
60
[end of src/cfnlint/rules/mappings/KeyName.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/mappings/KeyName.py b/src/cfnlint/rules/mappings/KeyName.py
--- a/src/cfnlint/rules/mappings/KeyName.py
+++ b/src/cfnlint/rules/mappings/KeyName.py
@@ -35,7 +35,7 @@
if not isinstance(key, six.string_types):
message = 'Mapping key ({0}) has to be a string.'
matches.append(RuleMatch(path[:], message.format(key)))
- elif not re.match('^[a-zA-Z0-9.-]{1,255}$', key):
+ elif not re.match('^[a-zA-Z0-9.-]{1,255}$', key) and key != 'Fn::Transform':
message = 'Mapping key ({0}) has invalid name. Name has to be alphanumeric, \'-\' or \'.\''
matches.append(RuleMatch(path[:], message.format(key)))
| {"golden_diff": "diff --git a/src/cfnlint/rules/mappings/KeyName.py b/src/cfnlint/rules/mappings/KeyName.py\n--- a/src/cfnlint/rules/mappings/KeyName.py\n+++ b/src/cfnlint/rules/mappings/KeyName.py\n@@ -35,7 +35,7 @@\n if not isinstance(key, six.string_types):\n message = 'Mapping key ({0}) has to be a string.'\n matches.append(RuleMatch(path[:], message.format(key)))\n- elif not re.match('^[a-zA-Z0-9.-]{1,255}$', key):\n+ elif not re.match('^[a-zA-Z0-9.-]{1,255}$', key) and key != 'Fn::Transform':\n message = 'Mapping key ({0}) has invalid name. Name has to be alphanumeric, \\'-\\' or \\'.\\''\n matches.append(RuleMatch(path[:], message.format(key)))\n", "issue": "E7003 Errors when using Fn::Transform inside a Mapping\n*cfn-lint version: 0.49.2*\r\n\r\n*Description of issue.*\r\n#2006 tightened what is considered valid for use in a Mapping. This causes it to reject what otherwise appears to be a valid use of `Fn::Transform` as the body of a Mapping.\r\n\r\nFor example, this snippet is valid CFN:\r\n\r\n```yaml\r\nMappings:\r\n AwsAgentPlatformMap:\r\n Fn::Transform:\r\n Name: AWS::Include\r\n Parameters:\r\n Location: s3://my-bucket-name/version/3.0.1/amazonlinux2/a-json-file.json\r\n```\r\n\r\nThis usage trips the newly enhanced regex:\r\n\r\n```\r\nE7003 Mapping key (Fn::Transform) has invalid name. Name has to be alphanumeric, '-' or '.'\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport re\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\nfrom cfnlint.helpers import REGEX_ALPHANUMERIC\n\n\nclass KeyName(CloudFormationLintRule):\n \"\"\"Check if Mapping Keys are type string\"\"\"\n id = 'E7003'\n shortdesc = 'Mapping keys are strings and alphanumeric'\n description = 'Check if Mappings keys are properly typed as strings and alphanumeric'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html'\n tags = ['mappings']\n\n def check_attribute(self, key, path):\n \"\"\" Check the key name for string and alphanumeric\"\"\"\n matches = []\n if not isinstance(key, six.string_types):\n message = 'Mapping attribute ({0}) has to be a string.'\n matches.append(RuleMatch(path[:], message.format(key)))\n elif not re.match(REGEX_ALPHANUMERIC, key):\n message = 'Mapping attribute ({0}) has invalid name. Name has to be alphanumeric.'\n matches.append(RuleMatch(path[:], message.format(key)))\n\n return matches\n\n def check_key(self, key, path):\n \"\"\" Check the key name for string and alphanumeric\"\"\"\n matches = []\n if not isinstance(key, six.string_types):\n message = 'Mapping key ({0}) has to be a string.'\n matches.append(RuleMatch(path[:], message.format(key)))\n elif not re.match('^[a-zA-Z0-9.-]{1,255}$', key):\n message = 'Mapping key ({0}) has invalid name. 
Name has to be alphanumeric, \\'-\\' or \\'.\\''\n matches.append(RuleMatch(path[:], message.format(key)))\n\n return matches\n\n def match(self, cfn):\n matches = []\n\n mappings = cfn.template.get('Mappings', {})\n for mapping_name, mapping_value in mappings.items():\n if isinstance(mapping_value, dict):\n for key_name, key_value in mapping_value.items():\n matches.extend(self.check_key(\n key_name, ['Mappings', mapping_name, key_name]))\n if isinstance(key_value, dict):\n for sub_key_name, _ in key_value.items():\n matches.extend(\n self.check_attribute(\n sub_key_name, ['Mappings', mapping_name, key_name, sub_key_name]))\n\n return matches\n", "path": "src/cfnlint/rules/mappings/KeyName.py"}]} | 1,370 | 200 |
gh_patches_debug_26751 | rasdani/github-patches | git_diff | blaze__blaze-1196 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Blaze server yaml file error
Testing the Blaze server from a file and getting the following error:
``` python
$ blaze-server server.yaml
Traceback (most recent call last):
File "/anaconda/envs/ep-blaze/bin/blaze-server", line 6, in <module>
sys.exit(_main())
File "/anaconda/envs/ep-blaze/lib/python2.7/site-packages/blaze/server/spider.py", line 130, in _main
ignore = tuple(getattr(__builtins__, e) for e in args.ignored_exception)
File "/anaconda/envs/ep-blaze/lib/python2.7/site-packages/blaze/server/spider.py", line 130, in <genexpr>
ignore = tuple(getattr(__builtins__, e) for e in args.ignored_exception)
AttributeError: 'dict' object has no attribute 'Exception'
```
I believe the problem is in this line:
https://github.com/ContinuumIO/blaze/blob/06991f6d368f23700019e36b337ea2800f37ab14/blaze/server/spider.py#L130
when no `--ignored-exception` option is passed and the default `['Exception']` is used.
</issue>
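The `'dict' object has no attribute 'Exception'` message reflects a CPython detail: `__builtins__` is the builtins module inside `__main__`, but inside imported modules such as `spider.py` it is usually that module's plain dict, so attribute access on it is unreliable. A stand-alone illustration of the portable alternative (Python 3 spelling shown; Python 2 names the module `__builtin__`):

```python
# Resolve exception names without relying on the dict-vs-module ambiguity of
# __builtins__.
import builtins  # on Python 2: import __builtin__ as builtins

names = ["Exception", "ValueError"]
ignore = tuple(getattr(builtins, n) for n in names)
print(ignore)  # (<class 'Exception'>, <class 'ValueError'>)

# The fragile variant from the traceback only works when __builtins__ happens
# to be the module object (as in __main__), not the dict seen in spider.py:
#   ignore = tuple(getattr(__builtins__, n) for n in names)
```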
<code>
[start of blaze/server/spider.py]
1 #!/usr/bin/env python
2
3 from __future__ import absolute_import
4
5 import os
6 import sys
7 import argparse
8
9 import yaml
10
11 from odo import resource
12 from odo.utils import ignoring
13
14 from .server import Server, DEFAULT_PORT
15
16
17 __all__ = 'spider', 'from_yaml'
18
19
20 def _spider(resource_path, ignore, followlinks, hidden):
21 resources = {}
22 for filename in (os.path.join(resource_path, x)
23 for x in os.listdir(resource_path)):
24 basename = os.path.basename(filename)
25 if (basename.startswith(os.curdir) and not hidden or
26 os.path.islink(filename) and not followlinks):
27 continue
28 if os.path.isdir(filename):
29 new_resources = _spider(filename, ignore=ignore,
30 followlinks=followlinks, hidden=hidden)
31 if new_resources:
32 resources[basename] = new_resources
33 else:
34 with ignoring(*ignore):
35 resources[basename] = resource(filename)
36 return resources
37
38
39 def spider(path, ignore=(ValueError, NotImplementedError), followlinks=True,
40 hidden=False):
41 """Traverse a directory and call ``odo.resource`` on its contentso
42
43 Parameters
44 ----------
45 path : str
46 Path to a directory of resources to load
47 ignore : tuple of Exception, optional
48 Ignore these exceptions when calling resource
49 followlinks : bool, optional
50 Follow symbolic links
51 hidden : bool, optional
52 Load hidden files
53
54 Returns
55 -------
56 dict
57 Possibly nested dictionary of containing basenames mapping to resources
58 """
59 return {
60 os.path.basename(path): _spider(path, ignore=ignore,
61 followlinks=followlinks,
62 hidden=hidden)
63 }
64
65
66 def from_yaml(path, ignore=(ValueError, NotImplementedError), followlinks=True,
67 hidden=False):
68 """Construct a dictionary of resources from a YAML specification.
69
70 Parameters
71 ----------
72 path : str
73 Path to a YAML specification of resources to load
74 ignore : tuple of Exception, optional
75 Ignore these exceptions when calling resource
76 followlinks : bool, optional
77 Follow symbolic links
78 hidden : bool, optional
79 Load hidden files
80
81 Returns
82 -------
83 dict
84 A dictionary mapping top level keys in a YAML file to resources.
85
86 See Also
87 --------
88 spider : Traverse a directory tree for resources
89 """
90 resources = {}
91 for name, info in yaml.load(path.read()).items():
92 if 'source' not in info:
93 raise ValueError('source key not found for data source named %r' %
94 name)
95 source = info['source']
96 if os.path.isdir(source):
97 resources[name] = spider(os.path.expanduser(source),
98 ignore=ignore,
99 followlinks=followlinks,
100 hidden=hidden)
101 else:
102 resources[name] = resource(source, dshape=info.get('dshape'))
103 return resources
104
105
106 def _parse_args():
107 p = argparse.ArgumentParser(
108 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
109 p.add_argument('path', type=argparse.FileType('r'), nargs='?',
110 default=sys.stdin,
111 help='A YAML file specifying the resources to load')
112 p.add_argument('-p', '--port', type=int, default=DEFAULT_PORT,
113 help='Port number')
114 p.add_argument('-H', '--host', type=str, default='127.0.0.1',
115 help='Host name. Use 0.0.0.0 to listen on all public IPs')
116 p.add_argument('-l', '--follow-links', action='store_true',
117 help='Follow links when listing files')
118 p.add_argument('-e', '--ignored-exception', nargs='*',
119 default=['Exception'],
120 help='Exceptions to ignore when calling resource on a file')
121 p.add_argument('-d', '--hidden', action='store_true',
122 help='Call resource on hidden files')
123 p.add_argument('-D', '--debug', action='store_true',
124 help='Start the Flask server in debug mode')
125 return p.parse_args()
126
127
128 def _main():
129 args = _parse_args()
130 ignore = tuple(getattr(__builtins__, e) for e in args.ignored_exception)
131 resources = from_yaml(args.path,
132 ignore=ignore,
133 followlinks=args.follow_links,
134 hidden=args.hidden)
135 Server(resources).run(host=args.host, port=args.port, debug=args.debug)
136
137
138 if __name__ == '__main__':
139 _main()
140
[end of blaze/server/spider.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/blaze/server/spider.py b/blaze/server/spider.py
--- a/blaze/server/spider.py
+++ b/blaze/server/spider.py
@@ -13,6 +13,11 @@
from .server import Server, DEFAULT_PORT
+try:
+ import __builtin__ as builtins
+except ImportError:
+ import builtins
+
__all__ = 'spider', 'from_yaml'
@@ -115,7 +120,7 @@
help='Host name. Use 0.0.0.0 to listen on all public IPs')
p.add_argument('-l', '--follow-links', action='store_true',
help='Follow links when listing files')
- p.add_argument('-e', '--ignored-exception', nargs='*',
+ p.add_argument('-e', '--ignored-exception', nargs='+',
default=['Exception'],
help='Exceptions to ignore when calling resource on a file')
p.add_argument('-d', '--hidden', action='store_true',
@@ -127,7 +132,7 @@
def _main():
args = _parse_args()
- ignore = tuple(getattr(__builtins__, e) for e in args.ignored_exception)
+ ignore = tuple(getattr(builtins, e) for e in args.ignored_exception)
resources = from_yaml(args.path,
ignore=ignore,
followlinks=args.follow_links,
| {"golden_diff": "diff --git a/blaze/server/spider.py b/blaze/server/spider.py\n--- a/blaze/server/spider.py\n+++ b/blaze/server/spider.py\n@@ -13,6 +13,11 @@\n \n from .server import Server, DEFAULT_PORT\n \n+try:\n+ import __builtin__ as builtins\n+except ImportError:\n+ import builtins\n+\n \n __all__ = 'spider', 'from_yaml'\n \n@@ -115,7 +120,7 @@\n help='Host name. Use 0.0.0.0 to listen on all public IPs')\n p.add_argument('-l', '--follow-links', action='store_true',\n help='Follow links when listing files')\n- p.add_argument('-e', '--ignored-exception', nargs='*',\n+ p.add_argument('-e', '--ignored-exception', nargs='+',\n default=['Exception'],\n help='Exceptions to ignore when calling resource on a file')\n p.add_argument('-d', '--hidden', action='store_true',\n@@ -127,7 +132,7 @@\n \n def _main():\n args = _parse_args()\n- ignore = tuple(getattr(__builtins__, e) for e in args.ignored_exception)\n+ ignore = tuple(getattr(builtins, e) for e in args.ignored_exception)\n resources = from_yaml(args.path,\n ignore=ignore,\n followlinks=args.follow_links,\n", "issue": "Blaze server yaml file error\nTesting the Blaze server from a file and getting the following error:\n\n``` python\n$ blaze-server server.yaml\nTraceback (most recent call last):\n File \"/anaconda/envs/ep-blaze/bin/blaze-server\", line 6, in <module>\n sys.exit(_main())\n File \"/anaconda/envs/ep-blaze/lib/python2.7/site-packages/blaze/server/spider.py\", line 130, in _main\n ignore = tuple(getattr(__builtins__, e) for e in args.ignored_exception)\n File \"/anaconda/envs/ep-blaze/lib/python2.7/site-packages/blaze/server/spider.py\", line 130, in <genexpr>\n ignore = tuple(getattr(__builtins__, e) for e in args.ignored_exception)\nAttributeError: 'dict' object has no attribute 'Exception'\n```\n\nI believe problem is in this line:\nhttps://github.com/ContinuumIO/blaze/blob/06991f6d368f23700019e36b337ea2800f37ab14/blaze/server/spider.py#L130\nwhen no ignored_exception in the args is passed.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom __future__ import absolute_import\n\nimport os\nimport sys\nimport argparse\n\nimport yaml\n\nfrom odo import resource\nfrom odo.utils import ignoring\n\nfrom .server import Server, DEFAULT_PORT\n\n\n__all__ = 'spider', 'from_yaml'\n\n\ndef _spider(resource_path, ignore, followlinks, hidden):\n resources = {}\n for filename in (os.path.join(resource_path, x)\n for x in os.listdir(resource_path)):\n basename = os.path.basename(filename)\n if (basename.startswith(os.curdir) and not hidden or\n os.path.islink(filename) and not followlinks):\n continue\n if os.path.isdir(filename):\n new_resources = _spider(filename, ignore=ignore,\n followlinks=followlinks, hidden=hidden)\n if new_resources:\n resources[basename] = new_resources\n else:\n with ignoring(*ignore):\n resources[basename] = resource(filename)\n return resources\n\n\ndef spider(path, ignore=(ValueError, NotImplementedError), followlinks=True,\n hidden=False):\n \"\"\"Traverse a directory and call ``odo.resource`` on its contentso\n\n Parameters\n ----------\n path : str\n Path to a directory of resources to load\n ignore : tuple of Exception, optional\n Ignore these exceptions when calling resource\n followlinks : bool, optional\n Follow symbolic links\n hidden : bool, optional\n Load hidden files\n\n Returns\n -------\n dict\n Possibly nested dictionary of containing basenames mapping to resources\n \"\"\"\n return {\n os.path.basename(path): _spider(path, ignore=ignore,\n followlinks=followlinks,\n 
hidden=hidden)\n }\n\n\ndef from_yaml(path, ignore=(ValueError, NotImplementedError), followlinks=True,\n hidden=False):\n \"\"\"Construct a dictionary of resources from a YAML specification.\n\n Parameters\n ----------\n path : str\n Path to a YAML specification of resources to load\n ignore : tuple of Exception, optional\n Ignore these exceptions when calling resource\n followlinks : bool, optional\n Follow symbolic links\n hidden : bool, optional\n Load hidden files\n\n Returns\n -------\n dict\n A dictionary mapping top level keys in a YAML file to resources.\n\n See Also\n --------\n spider : Traverse a directory tree for resources\n \"\"\"\n resources = {}\n for name, info in yaml.load(path.read()).items():\n if 'source' not in info:\n raise ValueError('source key not found for data source named %r' %\n name)\n source = info['source']\n if os.path.isdir(source):\n resources[name] = spider(os.path.expanduser(source),\n ignore=ignore,\n followlinks=followlinks,\n hidden=hidden)\n else:\n resources[name] = resource(source, dshape=info.get('dshape'))\n return resources\n\n\ndef _parse_args():\n p = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n p.add_argument('path', type=argparse.FileType('r'), nargs='?',\n default=sys.stdin,\n help='A YAML file specifying the resources to load')\n p.add_argument('-p', '--port', type=int, default=DEFAULT_PORT,\n help='Port number')\n p.add_argument('-H', '--host', type=str, default='127.0.0.1',\n help='Host name. Use 0.0.0.0 to listen on all public IPs')\n p.add_argument('-l', '--follow-links', action='store_true',\n help='Follow links when listing files')\n p.add_argument('-e', '--ignored-exception', nargs='*',\n default=['Exception'],\n help='Exceptions to ignore when calling resource on a file')\n p.add_argument('-d', '--hidden', action='store_true',\n help='Call resource on hidden files')\n p.add_argument('-D', '--debug', action='store_true',\n help='Start the Flask server in debug mode')\n return p.parse_args()\n\n\ndef _main():\n args = _parse_args()\n ignore = tuple(getattr(__builtins__, e) for e in args.ignored_exception)\n resources = from_yaml(args.path,\n ignore=ignore,\n followlinks=args.follow_links,\n hidden=args.hidden)\n Server(resources).run(host=args.host, port=args.port, debug=args.debug)\n\n\nif __name__ == '__main__':\n _main()\n", "path": "blaze/server/spider.py"}]} | 2,077 | 312 |
gh_patches_debug_28855 | rasdani/github-patches | git_diff | ultrabug__py3status-2101 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
external_script modifies numeric output
The external_script module converts numeric values to a numeric type. This removes the original formatting of the input and is undesired.
To reproduce, create an external script that simply echoes "0.123000"; the output in the bar will be "0.123".
</issue>
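The behaviour follows from the int/float fallback in `external_script()` shown in the code below; replaying that conversion on the reported input makes the formatting loss visible. This is only a stand-alone replay of that logic, not a proposed change.

```python
# Replay of the module's numeric fallback on the reported input.
output = "0.123000"
try:
    output = int(output)        # fails: not an integer literal
except ValueError:
    try:
        output = float(output)  # succeeds: the value becomes the float 0.123
    except ValueError:
        pass

print(output)  # 0.123  <- the trailing zeros the script emitted are gone
```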
<code>
[start of py3status/modules/external_script.py]
1 """
2 Display output of a given script.
3
4 Display output of any executable script set by `script_path`. Only the first
5 two lines of output will be used. The first line is used as the displayed
6 text. If the output has two or more lines, the second line is set as the text
7 color (and should hence be a valid hex color code such as #FF0000 for red).
8 The script should not have any parameters, but it could work.
9
10 Configuration parameters:
11 button_show_notification: button to show notification with full output
12 (default None)
13 cache_timeout: how often we refresh this module in seconds
14 (default 15)
15 format: see placeholders below (default '{output}')
16 localize: should script output be localized (if available)
17 (default True)
18 script_path: script you want to show output of (compulsory)
19 (default None)
20 strip_output: shall we strip leading and trailing spaces from output
21 (default False)
22
23 Format placeholders:
24 {lines} number of lines in the output
25 {output} output of script given by "script_path"
26
27 Examples:
28 ```
29 external_script {
30 format = "my name is {output}"
31 script_path = "/usr/bin/whoami"
32 }
33 ```
34
35 @author frimdo [email protected]
36
37 SAMPLE OUTPUT
38 {'full_text': 'script output'}
39
40 example
41 {'full_text': 'It is now: Wed Feb 22 22:24:13'}
42 """
43
44 import re
45
46 STRING_ERROR = "missing script_path"
47
48
49 class Py3status:
50 """
51 """
52
53 # available configuration parameters
54 button_show_notification = None
55 cache_timeout = 15
56 format = "{output}"
57 localize = True
58 script_path = None
59 strip_output = False
60
61 def post_config_hook(self):
62 if not self.script_path:
63 raise Exception(STRING_ERROR)
64
65 def external_script(self):
66 output_lines = None
67 response = {}
68 response["cached_until"] = self.py3.time_in(self.cache_timeout)
69 try:
70 self.output = self.py3.command_output(
71 self.script_path, shell=True, localized=self.localize
72 )
73 output_lines = self.output.splitlines()
74 if len(output_lines) > 1:
75 output_color = output_lines[1]
76 if re.search(r"^#[0-9a-fA-F]{6}$", output_color):
77 response["color"] = output_color
78 except self.py3.CommandError as e:
79 # something went wrong show error to user
80 output = e.output or e.error
81 self.py3.error(output)
82
83 if output_lines:
84 output = output_lines[0]
85 if self.strip_output:
86 output = output.strip()
87 # If we get something that looks numeric then we convert it
88 # to a numeric type because this can be helpful. for example:
89 #
90 # external_script {
91 # format = "file is [\?if=output>10 big|small]"
92 # script_path = "cat /tmp/my_file | wc -l"
93 # }
94 try:
95 output = int(output)
96 except ValueError:
97 try:
98 output = float(output)
99 except ValueError:
100 pass
101 else:
102 output = ""
103
104 response["full_text"] = self.py3.safe_format(
105 self.format, {"output": output, "lines": len(output_lines)}
106 )
107 return response
108
109 def on_click(self, event):
110 button = event["button"]
111 if button == self.button_show_notification:
112 self.py3.notify_user(self.output)
113 self.py3.prevent_refresh()
114
115
116 if __name__ == "__main__":
117 """
118 Run module in test mode.
119 """
120 from py3status.module_test import module_test
121
122 module_test(Py3status)
123
[end of py3status/modules/external_script.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/py3status/modules/external_script.py b/py3status/modules/external_script.py
--- a/py3status/modules/external_script.py
+++ b/py3status/modules/external_script.py
@@ -12,6 +12,8 @@
(default None)
cache_timeout: how often we refresh this module in seconds
(default 15)
+ convert_numbers: convert decimal numbers to a numeric type
+ (default True)
format: see placeholders below (default '{output}')
localize: should script output be localized (if available)
(default True)
@@ -53,6 +55,7 @@
# available configuration parameters
button_show_notification = None
cache_timeout = 15
+ convert_numbers = True
format = "{output}"
localize = True
script_path = None
@@ -91,13 +94,14 @@
# format = "file is [\?if=output>10 big|small]"
# script_path = "cat /tmp/my_file | wc -l"
# }
- try:
- output = int(output)
- except ValueError:
+ if self.convert_numbers is True:
try:
- output = float(output)
+ output = int(output)
except ValueError:
- pass
+ try:
+ output = float(output)
+ except ValueError:
+ pass
else:
output = ""
| {"golden_diff": "diff --git a/py3status/modules/external_script.py b/py3status/modules/external_script.py\n--- a/py3status/modules/external_script.py\n+++ b/py3status/modules/external_script.py\n@@ -12,6 +12,8 @@\n (default None)\n cache_timeout: how often we refresh this module in seconds\n (default 15)\n+ convert_numbers: convert decimal numbers to a numeric type\n+ (default True)\n format: see placeholders below (default '{output}')\n localize: should script output be localized (if available)\n (default True)\n@@ -53,6 +55,7 @@\n # available configuration parameters\n button_show_notification = None\n cache_timeout = 15\n+ convert_numbers = True\n format = \"{output}\"\n localize = True\n script_path = None\n@@ -91,13 +94,14 @@\n # format = \"file is [\\?if=output>10 big|small]\"\n # script_path = \"cat /tmp/my_file | wc -l\"\n # }\n- try:\n- output = int(output)\n- except ValueError:\n+ if self.convert_numbers is True:\n try:\n- output = float(output)\n+ output = int(output)\n except ValueError:\n- pass\n+ try:\n+ output = float(output)\n+ except ValueError:\n+ pass\n else:\n output = \"\"\n", "issue": "external_script modifies numeric output\nThe external_script module converts numeric values to a numeric type. This removes the original formatting of the input and is undesired.\r\n\r\nTo reproduce create an external script and simply echo \"0.123000\", the output in the bar will be \"0.123\".\n", "before_files": [{"content": "\"\"\"\nDisplay output of a given script.\n\nDisplay output of any executable script set by `script_path`. Only the first\ntwo lines of output will be used. The first line is used as the displayed\ntext. If the output has two or more lines, the second line is set as the text\ncolor (and should hence be a valid hex color code such as #FF0000 for red).\nThe script should not have any parameters, but it could work.\n\nConfiguration parameters:\n button_show_notification: button to show notification with full output\n (default None)\n cache_timeout: how often we refresh this module in seconds\n (default 15)\n format: see placeholders below (default '{output}')\n localize: should script output be localized (if available)\n (default True)\n script_path: script you want to show output of (compulsory)\n (default None)\n strip_output: shall we strip leading and trailing spaces from output\n (default False)\n\nFormat placeholders:\n {lines} number of lines in the output\n {output} output of script given by \"script_path\"\n\nExamples:\n```\nexternal_script {\n format = \"my name is {output}\"\n script_path = \"/usr/bin/whoami\"\n}\n```\n\n@author frimdo [email protected]\n\nSAMPLE OUTPUT\n{'full_text': 'script output'}\n\nexample\n{'full_text': 'It is now: Wed Feb 22 22:24:13'}\n\"\"\"\n\nimport re\n\nSTRING_ERROR = \"missing script_path\"\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n\n # available configuration parameters\n button_show_notification = None\n cache_timeout = 15\n format = \"{output}\"\n localize = True\n script_path = None\n strip_output = False\n\n def post_config_hook(self):\n if not self.script_path:\n raise Exception(STRING_ERROR)\n\n def external_script(self):\n output_lines = None\n response = {}\n response[\"cached_until\"] = self.py3.time_in(self.cache_timeout)\n try:\n self.output = self.py3.command_output(\n self.script_path, shell=True, localized=self.localize\n )\n output_lines = self.output.splitlines()\n if len(output_lines) > 1:\n output_color = output_lines[1]\n if re.search(r\"^#[0-9a-fA-F]{6}$\", output_color):\n response[\"color\"] = 
output_color\n except self.py3.CommandError as e:\n # something went wrong show error to user\n output = e.output or e.error\n self.py3.error(output)\n\n if output_lines:\n output = output_lines[0]\n if self.strip_output:\n output = output.strip()\n # If we get something that looks numeric then we convert it\n # to a numeric type because this can be helpful. for example:\n #\n # external_script {\n # format = \"file is [\\?if=output>10 big|small]\"\n # script_path = \"cat /tmp/my_file | wc -l\"\n # }\n try:\n output = int(output)\n except ValueError:\n try:\n output = float(output)\n except ValueError:\n pass\n else:\n output = \"\"\n\n response[\"full_text\"] = self.py3.safe_format(\n self.format, {\"output\": output, \"lines\": len(output_lines)}\n )\n return response\n\n def on_click(self, event):\n button = event[\"button\"]\n if button == self.button_show_notification:\n self.py3.notify_user(self.output)\n self.py3.prevent_refresh()\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Run module in test mode.\n \"\"\"\n from py3status.module_test import module_test\n\n module_test(Py3status)\n", "path": "py3status/modules/external_script.py"}]} | 1,687 | 315 |
gh_patches_debug_6294 | rasdani/github-patches | git_diff | e-valuation__EvaP-1353 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Importing a backup made by update_production.sh does not work flawlessly.
Last week we wanted to do a production update. The json dump file created during that update could not be imported without issues:
- The dump does not contain the cronjob user, but it does contain foreign key references to it. This cannot be imported.
- The dump contains data included by django by default (auth, permission, ...). These need to be excluded when importing.
There should be some kind of documentation on what needs to be executed to import this dump back into the database. We should also add some test (could probably just run on travis) that ensures this always works (dump, flush database, migrate, load dump).
</issue>
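One plausible way to bring such a dump back into an empty database, sketched with Django's standard management commands (the fixture filename matches the dump command below; treat the exact sequence as an assumption, not a verified procedure):

```python
# Hypothetical restore script mirroring "flush database, migrate, load dump".
from django.core.management import call_command

call_command("flush", interactive=False)    # empty all tables
call_command("migrate")                     # ensure the schema is up to date
call_command("loaddata", "test_data.json")  # import the dump created by dumpdata
```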
<code>
[start of evap/evaluation/management/commands/dump_testdata.py]
1 import os
2
3 from django.conf import settings
4 from django.core.management.base import BaseCommand
5 from django.core.management import call_command
6
7
8 class Command(BaseCommand):
9 args = ''
10 help = 'Dumps all relevant contents of the database into test_data.json.'
11 requires_migrations_checks = True
12
13 def handle(self, *args, **options):
14 outfile_name = os.path.join(settings.BASE_DIR, "evaluation", "fixtures", "test_data.json")
15 call_command("dumpdata", "auth.group", "evaluation", "rewards", "grades", indent=2, output=outfile_name)
16
[end of evap/evaluation/management/commands/dump_testdata.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evap/evaluation/management/commands/dump_testdata.py b/evap/evaluation/management/commands/dump_testdata.py
--- a/evap/evaluation/management/commands/dump_testdata.py
+++ b/evap/evaluation/management/commands/dump_testdata.py
@@ -12,4 +12,6 @@
def handle(self, *args, **options):
outfile_name = os.path.join(settings.BASE_DIR, "evaluation", "fixtures", "test_data.json")
- call_command("dumpdata", "auth.group", "evaluation", "rewards", "grades", indent=2, output=outfile_name)
+ call_command(
+ "dumpdata", "auth.group", "evaluation", "rewards", "grades", indent=2,
+ output=outfile_name, natural_foreign=True, natural_primary=True)
| {"golden_diff": "diff --git a/evap/evaluation/management/commands/dump_testdata.py b/evap/evaluation/management/commands/dump_testdata.py\n--- a/evap/evaluation/management/commands/dump_testdata.py\n+++ b/evap/evaluation/management/commands/dump_testdata.py\n@@ -12,4 +12,6 @@\n \n def handle(self, *args, **options):\n outfile_name = os.path.join(settings.BASE_DIR, \"evaluation\", \"fixtures\", \"test_data.json\")\n- call_command(\"dumpdata\", \"auth.group\", \"evaluation\", \"rewards\", \"grades\", indent=2, output=outfile_name)\n+ call_command(\n+ \"dumpdata\", \"auth.group\", \"evaluation\", \"rewards\", \"grades\", indent=2,\n+ output=outfile_name, natural_foreign=True, natural_primary=True)\n", "issue": "Importing a backup made by update_production.sh does not work flawlessly.\nLast week we wanted to do a production update. The json dump file created during that update could not be imported without issues:\r\n- The dump does not contain the cronjob user, but foreign key references to it. This can not be imported\r\n- The dump contains data included by django by default (auth, permission, ...). These need to be excluded when importing.\r\n\r\nThere should be some kind of documentation on what needs to be executed to import this dump back into the database. We should also add some test (could probably just run on travis) that ensures this always works (dump, flush database, migrate, load dump).\n", "before_files": [{"content": "import os\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.core.management import call_command\n\n\nclass Command(BaseCommand):\n args = ''\n help = 'Dumps all relevant contents of the database into test_data.json.'\n requires_migrations_checks = True\n\n def handle(self, *args, **options):\n outfile_name = os.path.join(settings.BASE_DIR, \"evaluation\", \"fixtures\", \"test_data.json\")\n call_command(\"dumpdata\", \"auth.group\", \"evaluation\", \"rewards\", \"grades\", indent=2, output=outfile_name)\n", "path": "evap/evaluation/management/commands/dump_testdata.py"}]} | 843 | 189 |
gh_patches_debug_48905 | rasdani/github-patches | git_diff | hylang__hy-1322 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The lexer hits the maximum recursion depth given a file with too many comment lines
$ yes ';' | head -n 500 >/tmp/foo.hy
$ hy /tmp/foo.hy
Traceback (most recent call last):
File "/home/hippo/Desktop/hyenv/bin/hy", line 11, in <module>
load_entry_point('hy', 'console_scripts', 'hy')()
File "/home/hippo/Desktop/hyenv/hy/hy/cmdline.py", line 344, in hy_main
sys.exit(cmdline_handler("hy", sys.argv))
File "/home/hippo/Desktop/hyenv/hy/hy/cmdline.py", line 332, in cmdline_handler
return run_file(options.args[0])
File "/home/hippo/Desktop/hyenv/hy/hy/cmdline.py", line 211, in run_file
pretty_error(import_file_to_module, "__main__", filename)
File "/home/hippo/Desktop/hyenv/hy/hy/cmdline.py", line 184, in pretty_error
return func(*args, **kw)
File "/home/hippo/Desktop/hyenv/hy/hy/importer.py", line 95, in import_file_to_module
_ast = import_file_to_ast(fpath, module_name)
File "/home/hippo/Desktop/hyenv/hy/hy/importer.py", line 53, in import_file_to_ast
return hy_compile(import_file_to_hst(fpath), module_name)
File "/home/hippo/Desktop/hyenv/hy/hy/importer.py", line 41, in import_file_to_hst
return import_buffer_to_hst(f.read())
File "/home/hippo/Desktop/hyenv/hy/hy/importer.py", line 34, in import_buffer_to_hst
return tokenize(buf + "\n")
File "/home/hippo/Desktop/hyenv/hy/hy/lex/__init__.py", line 17, in tokenize
return parser.parse(lexer.lex(buf))
File "/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/parser.py", line 32, in parse
lookahead = next(tokenizer)
File "/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexer.py", line 56, in __next__
return self.next()
File "/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexer.py", line 41, in next
return self.next()
File "/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexer.py", line 41, in next
return self.next()
File "/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexer.py", line 41, in next
return self.next()
[Previous line repeated 976 more times]
File "/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexer.py", line 38, in next
match = rule.matches(self.s, self.idx)
File "/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexergenerator.py", line 33, in matches
return Match(*m.span(0)) if m is not None else None
RecursionError: maximum recursion depth exceeded
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # Copyright 2017 the authors.
3 # This file is part of Hy, which is free software licensed under the Expat
4 # license. See the LICENSE.
5
6 import sys, os
7
8 from setuptools import find_packages, setup
9 from setuptools.command.install import install
10
11 from get_version import __version__
12
13 os.chdir(os.path.split(os.path.abspath(__file__))[0])
14
15 PKG = "hy"
16
17 long_description = """Hy is a Python <--> Lisp layer. It helps
18 make things work nicer, and lets Python and the Hy lisp variant play
19 nice together. """
20
21 class Install(install):
22 def run(self):
23 # Import each Hy module to ensure it's compiled.
24 import os, importlib
25 for dirpath, _, filenames in sorted(os.walk("hy")):
26 for filename in sorted(filenames):
27 if filename.endswith(".hy"):
28 importlib.import_module(
29 dirpath.replace("/", ".").replace("\\", ".") +
30 "." + filename[:-len(".hy")])
31 install.run(self)
32
33 install_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']
34 if os.name == 'nt':
35 install_requires.append('pyreadline>=2.1')
36
37 ver = sys.version_info[0]
38
39 setup(
40 name=PKG,
41 version=__version__,
42 install_requires=install_requires,
43 cmdclass=dict(install=Install),
44 entry_points={
45 'console_scripts': [
46 'hy = hy.cmdline:hy_main',
47 'hy%d = hy.cmdline:hy_main' % ver,
48 'hyc = hy.cmdline:hyc_main',
49 'hyc%d = hy.cmdline:hyc_main' % ver,
50 'hy2py = hy.cmdline:hy2py_main',
51 'hy2py%d = hy.cmdline:hy2py_main' % ver,
52 ]
53 },
54 packages=find_packages(exclude=['tests*']),
55 package_data={
56 'hy.contrib': ['*.hy', '__pycache__/*'],
57 'hy.core': ['*.hy', '__pycache__/*'],
58 'hy.extra': ['*.hy', '__pycache__/*'],
59 },
60 data_files=[
61 ('get_version', ['get_version.py'])
62 ],
63 author="Paul Tagliamonte",
64 author_email="[email protected]",
65 long_description=long_description,
66 description='Lisp and Python love each other.',
67 license="Expat",
68 url="http://hylang.org/",
69 platforms=['any'],
70 classifiers=[
71 "Development Status :: 4 - Beta",
72 "Intended Audience :: Developers",
73 "License :: DFSG approved",
74 "License :: OSI Approved :: MIT License", # Really "Expat". Ugh.
75 "Operating System :: OS Independent",
76 "Programming Language :: Lisp",
77 "Programming Language :: Python",
78 "Programming Language :: Python :: 2",
79 "Programming Language :: Python :: 2.7",
80 "Programming Language :: Python :: 3",
81 "Programming Language :: Python :: 3.3",
82 "Programming Language :: Python :: 3.4",
83 "Programming Language :: Python :: 3.5",
84 "Programming Language :: Python :: 3.6",
85 "Topic :: Software Development :: Code Generators",
86 "Topic :: Software Development :: Compilers",
87 "Topic :: Software Development :: Libraries",
88 ]
89 )
90
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,7 +30,7 @@
"." + filename[:-len(".hy")])
install.run(self)
-install_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']
+install_requires = ['rply>=0.7.5', 'astor>=0.5', 'clint>=0.4']
if os.name == 'nt':
install_requires.append('pyreadline>=2.1')
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,7 +30,7 @@\n \".\" + filename[:-len(\".hy\")])\n install.run(self)\n \n-install_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']\n+install_requires = ['rply>=0.7.5', 'astor>=0.5', 'clint>=0.4']\n if os.name == 'nt':\n install_requires.append('pyreadline>=2.1')\n", "issue": "The lexer hits the maximum recursion depth given a file with too many comment lines\n $ yes ';' | head -n 500 >/tmp/foo.hy\r\n $ hy /tmp/foo.hy\r\n Traceback (most recent call last):\r\n File \"/home/hippo/Desktop/hyenv/bin/hy\", line 11, in <module>\r\n load_entry_point('hy', 'console_scripts', 'hy')()\r\n File \"/home/hippo/Desktop/hyenv/hy/hy/cmdline.py\", line 344, in hy_main\r\n sys.exit(cmdline_handler(\"hy\", sys.argv))\r\n File \"/home/hippo/Desktop/hyenv/hy/hy/cmdline.py\", line 332, in cmdline_handler\r\n return run_file(options.args[0])\r\n File \"/home/hippo/Desktop/hyenv/hy/hy/cmdline.py\", line 211, in run_file\r\n pretty_error(import_file_to_module, \"__main__\", filename)\r\n File \"/home/hippo/Desktop/hyenv/hy/hy/cmdline.py\", line 184, in pretty_error\r\n return func(*args, **kw)\r\n File \"/home/hippo/Desktop/hyenv/hy/hy/importer.py\", line 95, in import_file_to_module\r\n _ast = import_file_to_ast(fpath, module_name)\r\n File \"/home/hippo/Desktop/hyenv/hy/hy/importer.py\", line 53, in import_file_to_ast\r\n return hy_compile(import_file_to_hst(fpath), module_name)\r\n File \"/home/hippo/Desktop/hyenv/hy/hy/importer.py\", line 41, in import_file_to_hst\r\n return import_buffer_to_hst(f.read())\r\n File \"/home/hippo/Desktop/hyenv/hy/hy/importer.py\", line 34, in import_buffer_to_hst\r\n return tokenize(buf + \"\\n\")\r\n File \"/home/hippo/Desktop/hyenv/hy/hy/lex/__init__.py\", line 17, in tokenize\r\n return parser.parse(lexer.lex(buf))\r\n File \"/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/parser.py\", line 32, in parse\r\n lookahead = next(tokenizer)\r\n File \"/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexer.py\", line 56, in __next__\r\n return self.next()\r\n File \"/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexer.py\", line 41, in next\r\n return self.next()\r\n File \"/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexer.py\", line 41, in next\r\n return self.next()\r\n File \"/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexer.py\", line 41, in next\r\n return self.next()\r\n [Previous line repeated 976 more times]\r\n File \"/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexer.py\", line 38, in next\r\n match = rule.matches(self.s, self.idx)\r\n File \"/home/hippo/Desktop/hyenv/lib/python3.6/site-packages/rply/lexergenerator.py\", line 33, in matches\r\n return Match(*m.span(0)) if m is not None else None\r\n RecursionError: maximum recursion depth exceeded\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright 2017 the authors.\n# This file is part of Hy, which is free software licensed under the Expat\n# license. See the LICENSE.\n\nimport sys, os\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\n\nfrom get_version import __version__\n\nos.chdir(os.path.split(os.path.abspath(__file__))[0])\n\nPKG = \"hy\"\n\nlong_description = \"\"\"Hy is a Python <--> Lisp layer. It helps\nmake things work nicer, and lets Python and the Hy lisp variant play\nnice together. 
\"\"\"\n\nclass Install(install):\n def run(self):\n # Import each Hy module to ensure it's compiled.\n import os, importlib\n for dirpath, _, filenames in sorted(os.walk(\"hy\")):\n for filename in sorted(filenames):\n if filename.endswith(\".hy\"):\n importlib.import_module(\n dirpath.replace(\"/\", \".\").replace(\"\\\\\", \".\") +\n \".\" + filename[:-len(\".hy\")])\n install.run(self)\n\ninstall_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']\nif os.name == 'nt':\n install_requires.append('pyreadline>=2.1')\n\nver = sys.version_info[0]\n\nsetup(\n name=PKG,\n version=__version__,\n install_requires=install_requires,\n cmdclass=dict(install=Install),\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n 'hy%d = hy.cmdline:hy_main' % ver,\n 'hyc = hy.cmdline:hyc_main',\n 'hyc%d = hy.cmdline:hyc_main' % ver,\n 'hy2py = hy.cmdline:hy2py_main',\n 'hy2py%d = hy.cmdline:hy2py_main' % ver,\n ]\n },\n packages=find_packages(exclude=['tests*']),\n package_data={\n 'hy.contrib': ['*.hy', '__pycache__/*'],\n 'hy.core': ['*.hy', '__pycache__/*'],\n 'hy.extra': ['*.hy', '__pycache__/*'],\n },\n data_files=[\n ('get_version', ['get_version.py'])\n ],\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description='Lisp and Python love each other.',\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=['any'],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ]\n)\n", "path": "setup.py"}]} | 2,195 | 127 |
gh_patches_debug_1061 | rasdani/github-patches | git_diff | kymatio__kymatio-352 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ENH+TST find a way of testing GPU code
With not too much investment in 💲 💰 it should be possible to set up a `jenkins` testing suite on amazon aws: The idea is to have a micro machine that costs 1c/h run the jenkins server. When tests should be run, this should somehow spawn a couple of GPU machines with different GPUs, ideally as spot instances, run the tests and then shut them down again.
I looked into this at the very beginning of `kymatio`, but I don't really know how to set this up yet. If anybody has experience with this, feel free to try! :)
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import csv
5 import importlib
6 import os
7 import shutil
8 import sys
9 from setuptools import setup, find_packages
10
11 # Constants
12 DISTNAME = 'kymatio'
13 DESCRIPTION = 'Wavelet scattering transforms in Python with GPU acceleration'
14 URL = 'https://www.kymat.io'
15 LICENSE = 'BSD-3-Clause'
16
17
18 # Parse description
19 with open('README.md') as f:
20 README = f.read().split('\n')
21 LONG_DESCRIPTION = '\n'.join([x for x in README if not x[:3]=='[!['])
22
23
24 # Parse version.py
25 kymatio_version_spec = importlib.util.spec_from_file_location(
26 'kymatio_version', 'kymatio/version.py')
27 kymatio_version_module = importlib.util.module_from_spec(kymatio_version_spec)
28 kymatio_version_spec.loader.exec_module(kymatio_version_module)
29 VERSION = kymatio_version_module.version
30
31
32 # Parse requirements.txt
33 with open('requirements.txt', 'r') as f:
34 REQUIREMENTS = f.read().split('\n')
35
36
37 setup_info = dict(
38 # Metadata
39 name=DISTNAME,
40 version=VERSION,
41 author=('Edouard Oyallon, Eugene Belilovsky, Sergey Zagoruyko, '
42 'Michael Eickenberg, Mathieu Andreux, Georgios Exarchakis, '
43 'Louis Thiry, Vincent Lostanlen, Joakim Andén, '
44 'Tomás Angles, Gabriel Huang, Roberto Leonarduzzi'),
45 author_email=('[email protected], [email protected], '
46 '[email protected], [email protected], '
47 '[email protected], [email protected], '
48 '[email protected], [email protected], [email protected], '
49 '[email protected], [email protected], [email protected]'),
50 url=URL,
51 download_url='https://github.com/kymatio/kymatio/releases',
52 project_urls={
53 'Documentation': 'https://www.kymat.io/codereference.html',
54 'Source': 'https://github.com/kymatio/kymatio/',
55 'Tracker': 'https://github.com/kymatio/kymatio/issues',
56 'Authors': 'https://github.com/kymatio/kymatio/blob/master/AUTHORS.md'
57 },
58 classifiers=['Intended Audience :: Education',
59 'Intended Audience :: Science/Research',
60 'License :: OSI Approved :: BSD License',
61 'Natural Language :: English',
62 'Operating System :: MacOS',
63 'Operating System :: POSIX :: Linux',
64 'Programming Language :: Python :: 3.5',
65 'Programming Language :: Python :: 3.6',
66 'Programming Language :: Python :: 3.7',
67 'Programming Language :: Python :: 3.8',
68 'Topic :: Multimedia :: Graphics :: 3D Modeling',
69 'Topic :: Multimedia :: Sound/Audio :: Analysis',
70 'Topic :: Scientific/Engineering :: Artificial Intelligence',
71 'Topic :: Scientific/Engineering :: Chemistry',
72 'Topic :: Scientific/Engineering :: Image Recognition',
73 'Topic :: Scientific/Engineering :: Information Analysis',
74 'Topic :: Scientific/Engineering :: Mathematics',
75 'Topic :: Scientific/Engineering :: Physics',
76 'Topic :: Software Development :: Libraries :: Python Modules',
77 ],
78 description=DESCRIPTION,
79 long_description=LONG_DESCRIPTION,
80 long_description_content_type='text/markdown',
81 python_requires='>=3.5',
82 license=LICENSE,
83 packages=find_packages(exclude=('test',)),
84 install_requires=REQUIREMENTS,
85 zip_safe=True,
86 )
87
88 setup(**setup_info)
89
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@
# Parse description
-with open('README.md') as f:
+with open('README.md', encoding='utf8') as f:
README = f.read().split('\n')
LONG_DESCRIPTION = '\n'.join([x for x in README if not x[:3]=='[!['])
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -16,7 +16,7 @@\n \n \n # Parse description\n-with open('README.md') as f:\n+with open('README.md', encoding='utf8') as f:\n README = f.read().split('\\n')\n LONG_DESCRIPTION = '\\n'.join([x for x in README if not x[:3]=='[!['])\n", "issue": "ENH+TST find a way of testing GPU code\nWith not too much investment in \ud83d\udcb2 \ud83d\udcb0 it should be possible to set up a `jenkins` testing suite on amazon aws: The idea is to have a micro machine that costs 1c/h run the jenkins server. When tests should be run, this should somehow spawn a couple of GPU machines with different GPUs, ideally as spot instances, run the tests and then shut them down again.\r\nI looked into this at the very beginning of `kymatio`, but I don't really know how to set this up yet. If anybody has experience with this, feel free to try! :)\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport csv\nimport importlib\nimport os\nimport shutil\nimport sys\nfrom setuptools import setup, find_packages\n\n# Constants\nDISTNAME = 'kymatio'\nDESCRIPTION = 'Wavelet scattering transforms in Python with GPU acceleration'\nURL = 'https://www.kymat.io'\nLICENSE = 'BSD-3-Clause'\n\n\n# Parse description\nwith open('README.md') as f:\n README = f.read().split('\\n')\n LONG_DESCRIPTION = '\\n'.join([x for x in README if not x[:3]=='[!['])\n\n\n# Parse version.py\nkymatio_version_spec = importlib.util.spec_from_file_location(\n 'kymatio_version', 'kymatio/version.py')\nkymatio_version_module = importlib.util.module_from_spec(kymatio_version_spec)\nkymatio_version_spec.loader.exec_module(kymatio_version_module)\nVERSION = kymatio_version_module.version\n\n\n# Parse requirements.txt\nwith open('requirements.txt', 'r') as f:\n REQUIREMENTS = f.read().split('\\n')\n\n\nsetup_info = dict(\n # Metadata\n name=DISTNAME,\n version=VERSION,\n author=('Edouard Oyallon, Eugene Belilovsky, Sergey Zagoruyko, '\n 'Michael Eickenberg, Mathieu Andreux, Georgios Exarchakis, '\n 'Louis Thiry, Vincent Lostanlen, Joakim And\u00e9n, '\n 'Tom\u00e1s Angles, Gabriel Huang, Roberto Leonarduzzi'),\n author_email=('[email protected], [email protected], '\n '[email protected], [email protected], '\n '[email protected], [email protected], '\n '[email protected], [email protected], [email protected], '\n '[email protected], [email protected], [email protected]'),\n url=URL,\n download_url='https://github.com/kymatio/kymatio/releases',\n project_urls={\n 'Documentation': 'https://www.kymat.io/codereference.html',\n 'Source': 'https://github.com/kymatio/kymatio/',\n 'Tracker': 'https://github.com/kymatio/kymatio/issues',\n 'Authors': 'https://github.com/kymatio/kymatio/blob/master/AUTHORS.md'\n },\n classifiers=['Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: MacOS',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Multimedia :: Graphics :: 3D Modeling',\n 'Topic :: Multimedia :: Sound/Audio :: Analysis',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Chemistry',\n 'Topic :: Scientific/Engineering :: Image Recognition',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: 
Scientific/Engineering :: Mathematics',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n python_requires='>=3.5',\n license=LICENSE,\n packages=find_packages(exclude=('test',)),\n install_requires=REQUIREMENTS,\n zip_safe=True,\n)\n\nsetup(**setup_info)\n", "path": "setup.py"}]} | 1,670 | 94 |
gh_patches_debug_6301 | rasdani/github-patches | git_diff | azavea__raster-vision-1235 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Predictor does not reset the scene's aoi_geometries and the raster source's extent_crop
Currently, the `Predictor` re-uses a `SceneConfig` from the pipeline config in the bundle (instead of creating a new one) and resets its `label_source` and `aoi_uris`.
https://github.com/azavea/raster-vision/blob/master/rastervision_core/rastervision/core/predictor.py#L70-L71
However, it should also do this for `raster_source.extent_crop` (#1030) and `aoi_geometries` (#1033). In general, it should be done for every field that cannot be safely assumed to be the same for the input scene.
Instead of having to add to this every time something new is added to the `SceneConfig` or any of its member classes, it might be better to create a new scene in the predictor with options from the command line.
</issue>
<code>
[start of rastervision_core/rastervision/core/predictor.py]
1 from os.path import join
2 import zipfile
3 import logging
4
5 from rastervision.pipeline import rv_config
6 from rastervision.pipeline.config import (build_config, upgrade_config)
7 from rastervision.pipeline.file_system.utils import (download_if_needed,
8 make_dir, file_to_json)
9 from rastervision.core.data.raster_source import ChannelOrderError
10 from rastervision.core.analyzer import StatsAnalyzerConfig
11
12 log = logging.getLogger(__name__)
13
14
15 class Predictor():
16 """Class for making predictions based off of a model bundle."""
17
18 def __init__(self,
19 model_bundle_uri,
20 tmp_dir,
21 update_stats=False,
22 channel_order=None):
23 """Creates a new Predictor.
24
25 Args:
26 model_bundle_uri: URI of the model bundle to use. Can be any
27 type of URI that Raster Vision can read.
28 tmp_dir: Temporary directory in which to store files that are used
29 by the Predictor. This directory is not cleaned up by this
30 class.
31 channel_order: Option for a new channel order to use for the
32 imagery being predicted against. If not present, the
33 channel_order from the original configuration in the predict
34 package will be used.
35 """
36 self.tmp_dir = tmp_dir
37 self.update_stats = update_stats
38 self.model_loaded = False
39
40 bundle_path = download_if_needed(model_bundle_uri, tmp_dir)
41 bundle_dir = join(tmp_dir, 'bundle')
42 make_dir(bundle_dir)
43 with zipfile.ZipFile(bundle_path, 'r') as bundle_zip:
44 bundle_zip.extractall(path=bundle_dir)
45
46 config_path = join(bundle_dir, 'pipeline-config.json')
47 config_dict = file_to_json(config_path)
48 rv_config.set_everett_config(
49 config_overrides=config_dict.get('rv_config'))
50 config_dict = upgrade_config(config_dict)
51 self.config = build_config(config_dict)
52 self.scene = self.config.dataset.validation_scenes[0]
53
54 if not hasattr(self.scene.raster_source, 'uris'):
55 raise Exception(
56 'raster_source in model bundle must have uris as field')
57
58 if not hasattr(self.scene.label_store, 'uri'):
59 raise Exception(
60 'label_store in model bundle must have uri as field')
61
62 for t in self.scene.raster_source.transformers:
63 t.update_root(bundle_dir)
64
65 if self.update_stats:
66 stats_analyzer = StatsAnalyzerConfig(
67 output_uri=join(bundle_dir, 'stats.json'))
68 self.config.analyzers = [stats_analyzer]
69
70 self.scene.label_source = None
71 self.scene.aoi_uris = None
72 self.config.dataset.train_scenes = [self.scene]
73 self.config.dataset.validation_scenes = [self.scene]
74 self.config.dataset.test_scenes = []
75 self.config.train_uri = bundle_dir
76
77 if channel_order is not None:
78 self.scene.raster_source.channel_order = channel_order
79
80 self.pipeline = None
81
82 def predict(self, image_uris, label_uri, vector_label_uri=None):
83 """Generate predictions for the given image.
84
85 Args:
86 image_uris: URIs of the images to make predictions against.
87 This can be any type of URI readable by Raster Vision
88 FileSystems.
89 label_uri: URI to save labels off into
90 vector_label_uri: URI to save vectorized labels for semantic segmentation
91 model bundles that support it
92 """
93 if self.pipeline is None:
94 self.scene.raster_source.uris = image_uris
95 self.pipeline = self.config.build(self.tmp_dir)
96 if not hasattr(self.pipeline, 'predict'):
97 raise Exception(
98 'pipeline in model bundle must have predict method')
99
100 try:
101 self.scene.raster_source.uris = image_uris
102 self.scene.label_store.uri = label_uri
103 if (hasattr(self.scene.label_store, 'vector_output')
104 and self.scene.label_store.vector_output):
105 if vector_label_uri:
106 for vo in self.scene.label_store.vector_output:
107 vo.uri = join(
108 vector_label_uri, '{}-{}.json'.format(
109 vo.class_id, vo.get_mode()))
110 else:
111 self.scene.label_store.vector_output = []
112 elif vector_label_uri:
113 log.warn(
114 'vector_label_uri was supplied but this model bundle does not '
115 'generate vector labels.')
116
117 if self.update_stats:
118 self.pipeline.analyze()
119 self.pipeline.predict()
120 except ChannelOrderError:
121 raise ValueError(
122 'The predict package is using a channel_order '
123 'with channels unavailable in the imagery.\nTo set a new '
124 'channel_order that only uses channels available in the '
125 'imagery, use the --channel-order option.')
126
[end of rastervision_core/rastervision/core/predictor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rastervision_core/rastervision/core/predictor.py b/rastervision_core/rastervision/core/predictor.py
--- a/rastervision_core/rastervision/core/predictor.py
+++ b/rastervision_core/rastervision/core/predictor.py
@@ -69,6 +69,9 @@
self.scene.label_source = None
self.scene.aoi_uris = None
+ self.scene.aoi_geometries = None
+ self.scene.raster_source.extent_crop = None
+
self.config.dataset.train_scenes = [self.scene]
self.config.dataset.validation_scenes = [self.scene]
self.config.dataset.test_scenes = []
| {"golden_diff": "diff --git a/rastervision_core/rastervision/core/predictor.py b/rastervision_core/rastervision/core/predictor.py\n--- a/rastervision_core/rastervision/core/predictor.py\n+++ b/rastervision_core/rastervision/core/predictor.py\n@@ -69,6 +69,9 @@\n \n self.scene.label_source = None\n self.scene.aoi_uris = None\n+ self.scene.aoi_geometries = None\n+ self.scene.raster_source.extent_crop = None\n+\n self.config.dataset.train_scenes = [self.scene]\n self.config.dataset.validation_scenes = [self.scene]\n self.config.dataset.test_scenes = []\n", "issue": "Predictor does not reset the scene's aoi_geometries and the raster source's extent_crop\nCurrently, the `Predictor` re-uses a `SceneConfig` from the pipeline config in the bundle (instead of creating a new one) and resets its `label_source` and `aoi_uris`.\r\nhttps://github.com/azavea/raster-vision/blob/master/rastervision_core/rastervision/core/predictor.py#L70-L71\r\n\r\nHowever, it should also do this for `raster_source.extent_crop` (#1030) and `aoi_geometries` (#1033). In general, it should be done for every field that cannot be safely assumed to be the same for the input scene.\r\n\r\nInstead of having to add to this every time something new is added to the `SceneConfig` or any of its member classes, it might be better to create a new scene in the predictor with options from the command line.\n", "before_files": [{"content": "from os.path import join\nimport zipfile\nimport logging\n\nfrom rastervision.pipeline import rv_config\nfrom rastervision.pipeline.config import (build_config, upgrade_config)\nfrom rastervision.pipeline.file_system.utils import (download_if_needed,\n make_dir, file_to_json)\nfrom rastervision.core.data.raster_source import ChannelOrderError\nfrom rastervision.core.analyzer import StatsAnalyzerConfig\n\nlog = logging.getLogger(__name__)\n\n\nclass Predictor():\n \"\"\"Class for making predictions based off of a model bundle.\"\"\"\n\n def __init__(self,\n model_bundle_uri,\n tmp_dir,\n update_stats=False,\n channel_order=None):\n \"\"\"Creates a new Predictor.\n\n Args:\n model_bundle_uri: URI of the model bundle to use. Can be any\n type of URI that Raster Vision can read.\n tmp_dir: Temporary directory in which to store files that are used\n by the Predictor. This directory is not cleaned up by this\n class.\n channel_order: Option for a new channel order to use for the\n imagery being predicted against. 
If not present, the\n channel_order from the original configuration in the predict\n package will be used.\n \"\"\"\n self.tmp_dir = tmp_dir\n self.update_stats = update_stats\n self.model_loaded = False\n\n bundle_path = download_if_needed(model_bundle_uri, tmp_dir)\n bundle_dir = join(tmp_dir, 'bundle')\n make_dir(bundle_dir)\n with zipfile.ZipFile(bundle_path, 'r') as bundle_zip:\n bundle_zip.extractall(path=bundle_dir)\n\n config_path = join(bundle_dir, 'pipeline-config.json')\n config_dict = file_to_json(config_path)\n rv_config.set_everett_config(\n config_overrides=config_dict.get('rv_config'))\n config_dict = upgrade_config(config_dict)\n self.config = build_config(config_dict)\n self.scene = self.config.dataset.validation_scenes[0]\n\n if not hasattr(self.scene.raster_source, 'uris'):\n raise Exception(\n 'raster_source in model bundle must have uris as field')\n\n if not hasattr(self.scene.label_store, 'uri'):\n raise Exception(\n 'label_store in model bundle must have uri as field')\n\n for t in self.scene.raster_source.transformers:\n t.update_root(bundle_dir)\n\n if self.update_stats:\n stats_analyzer = StatsAnalyzerConfig(\n output_uri=join(bundle_dir, 'stats.json'))\n self.config.analyzers = [stats_analyzer]\n\n self.scene.label_source = None\n self.scene.aoi_uris = None\n self.config.dataset.train_scenes = [self.scene]\n self.config.dataset.validation_scenes = [self.scene]\n self.config.dataset.test_scenes = []\n self.config.train_uri = bundle_dir\n\n if channel_order is not None:\n self.scene.raster_source.channel_order = channel_order\n\n self.pipeline = None\n\n def predict(self, image_uris, label_uri, vector_label_uri=None):\n \"\"\"Generate predictions for the given image.\n\n Args:\n image_uris: URIs of the images to make predictions against.\n This can be any type of URI readable by Raster Vision\n FileSystems.\n label_uri: URI to save labels off into\n vector_label_uri: URI to save vectorized labels for semantic segmentation\n model bundles that support it\n \"\"\"\n if self.pipeline is None:\n self.scene.raster_source.uris = image_uris\n self.pipeline = self.config.build(self.tmp_dir)\n if not hasattr(self.pipeline, 'predict'):\n raise Exception(\n 'pipeline in model bundle must have predict method')\n\n try:\n self.scene.raster_source.uris = image_uris\n self.scene.label_store.uri = label_uri\n if (hasattr(self.scene.label_store, 'vector_output')\n and self.scene.label_store.vector_output):\n if vector_label_uri:\n for vo in self.scene.label_store.vector_output:\n vo.uri = join(\n vector_label_uri, '{}-{}.json'.format(\n vo.class_id, vo.get_mode()))\n else:\n self.scene.label_store.vector_output = []\n elif vector_label_uri:\n log.warn(\n 'vector_label_uri was supplied but this model bundle does not '\n 'generate vector labels.')\n\n if self.update_stats:\n self.pipeline.analyze()\n self.pipeline.predict()\n except ChannelOrderError:\n raise ValueError(\n 'The predict package is using a channel_order '\n 'with channels unavailable in the imagery.\\nTo set a new '\n 'channel_order that only uses channels available in the '\n 'imagery, use the --channel-order option.')\n", "path": "rastervision_core/rastervision/core/predictor.py"}]} | 2,026 | 159 |
gh_patches_debug_17813 | rasdani/github-patches | git_diff | translate__pootle-4679 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Snippet caching is not cleared between tests
Currently, if you run a test that saves data in the exports cache, the data is still there in the next test.
</issue>
<code>
[start of pytest_pootle/fixtures/site.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import tempfile
10
11 import pytest
12
13 from pytest_pootle.env import PootleTestEnv
14
15
16 @pytest.fixture(autouse=True, scope='session')
17 def setup_db_if_needed(request):
18 """Sets up the site DB only if tests requested to use the DB (autouse)."""
19 is_db_marker_set = [
20 item for item in request.node.items
21 if item.get_marker('django_db')
22 ]
23 if is_db_marker_set:
24 return request.getfuncargvalue('post_db_setup')
25
26 return None
27
28
29 @pytest.fixture(scope='session')
30 def post_db_setup(translations_directory, _django_db_setup,
31 _django_cursor_wrapper, request):
32 """Sets up the site DB for the test session."""
33 with _django_cursor_wrapper:
34 PootleTestEnv(request).setup()
35
36
37 @pytest.fixture
38 def no_projects():
39 from pootle_project.models import Project
40
41 Project.objects.all().delete()
42
43
44 @pytest.fixture
45 def no_permissions():
46 from django.contrib.auth.models import Permission
47
48 Permission.objects.all().delete()
49
50
51 @pytest.fixture
52 def no_permission_sets():
53 from pootle_app.models import PermissionSet
54
55 PermissionSet.objects.all().delete()
56
57
58 @pytest.fixture
59 def no_submissions():
60 from pootle_statistics.models import Submission
61
62 Submission.objects.all().delete()
63
64
65 @pytest.fixture
66 def no_users():
67 from django.contrib.auth import get_user_model
68
69 User = get_user_model()
70 User.objects.all().delete()
71
72
73 @pytest.fixture
74 def no_extra_users():
75 from django.contrib.auth import get_user_model
76
77 User = get_user_model()
78 User.objects.exclude(
79 username__in=["system", "default", "nobody"]).delete()
80
81
82 @pytest.fixture(autouse=True, scope="session")
83 def translations_directory(request):
84 """used by PootleEnv"""
85 from django.conf import settings
86 settings.POOTLE_TRANSLATION_DIRECTORY = tempfile.mkdtemp()
87
[end of pytest_pootle/fixtures/site.py]
[start of pytest_pootle/fixtures/revision.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 import pytest
11
12
13 @pytest.fixture(autouse=True)
14 def revision():
15 """Sets up the revision counter for each test call."""
16 from pootle.core.models import Revision
17
18 Revision.initialize()
19
[end of pytest_pootle/fixtures/revision.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytest_pootle/fixtures/revision.py b/pytest_pootle/fixtures/revision.py
--- a/pytest_pootle/fixtures/revision.py
+++ b/pytest_pootle/fixtures/revision.py
@@ -11,8 +11,12 @@
@pytest.fixture(autouse=True)
-def revision():
- """Sets up the revision counter for each test call."""
+def revision(request, clear_cache):
+ """Sets up the cached revision counter for each test call."""
from pootle.core.models import Revision
+ from pootle_store.models import Unit
- Revision.initialize()
+ if request.node.get_marker("django_db"):
+ Revision.set(Unit.max_revision())
+ else:
+ Revision.initialize()
diff --git a/pytest_pootle/fixtures/site.py b/pytest_pootle/fixtures/site.py
--- a/pytest_pootle/fixtures/site.py
+++ b/pytest_pootle/fixtures/site.py
@@ -84,3 +84,13 @@
"""used by PootleEnv"""
from django.conf import settings
settings.POOTLE_TRANSLATION_DIRECTORY = tempfile.mkdtemp()
+
+
[email protected](autouse=True)
+def clear_cache(request):
+ """Currently tests only use one cache so this clears all"""
+
+ from django_redis import get_redis_connection
+
+ r_con = get_redis_connection('default')
+ r_con.flushdb()
| {"golden_diff": "diff --git a/pytest_pootle/fixtures/revision.py b/pytest_pootle/fixtures/revision.py\n--- a/pytest_pootle/fixtures/revision.py\n+++ b/pytest_pootle/fixtures/revision.py\n@@ -11,8 +11,12 @@\n \n \n @pytest.fixture(autouse=True)\n-def revision():\n- \"\"\"Sets up the revision counter for each test call.\"\"\"\n+def revision(request, clear_cache):\n+ \"\"\"Sets up the cached revision counter for each test call.\"\"\"\n from pootle.core.models import Revision\n+ from pootle_store.models import Unit\n \n- Revision.initialize()\n+ if request.node.get_marker(\"django_db\"):\n+ Revision.set(Unit.max_revision())\n+ else:\n+ Revision.initialize()\ndiff --git a/pytest_pootle/fixtures/site.py b/pytest_pootle/fixtures/site.py\n--- a/pytest_pootle/fixtures/site.py\n+++ b/pytest_pootle/fixtures/site.py\n@@ -84,3 +84,13 @@\n \"\"\"used by PootleEnv\"\"\"\n from django.conf import settings\n settings.POOTLE_TRANSLATION_DIRECTORY = tempfile.mkdtemp()\n+\n+\[email protected](autouse=True)\n+def clear_cache(request):\n+ \"\"\"Currently tests only use one cache so this clears all\"\"\"\n+\n+ from django_redis import get_redis_connection\n+\n+ r_con = get_redis_connection('default')\n+ r_con.flushdb()\n", "issue": "Snippet caching is not cleared between tests\nCurrently if you run a test that saves data in the exports cache, the data is still there in the next test\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport tempfile\n\nimport pytest\n\nfrom pytest_pootle.env import PootleTestEnv\n\n\[email protected](autouse=True, scope='session')\ndef setup_db_if_needed(request):\n \"\"\"Sets up the site DB only if tests requested to use the DB (autouse).\"\"\"\n is_db_marker_set = [\n item for item in request.node.items\n if item.get_marker('django_db')\n ]\n if is_db_marker_set:\n return request.getfuncargvalue('post_db_setup')\n\n return None\n\n\[email protected](scope='session')\ndef post_db_setup(translations_directory, _django_db_setup,\n _django_cursor_wrapper, request):\n \"\"\"Sets up the site DB for the test session.\"\"\"\n with _django_cursor_wrapper:\n PootleTestEnv(request).setup()\n\n\[email protected]\ndef no_projects():\n from pootle_project.models import Project\n\n Project.objects.all().delete()\n\n\[email protected]\ndef no_permissions():\n from django.contrib.auth.models import Permission\n\n Permission.objects.all().delete()\n\n\[email protected]\ndef no_permission_sets():\n from pootle_app.models import PermissionSet\n\n PermissionSet.objects.all().delete()\n\n\[email protected]\ndef no_submissions():\n from pootle_statistics.models import Submission\n\n Submission.objects.all().delete()\n\n\[email protected]\ndef no_users():\n from django.contrib.auth import get_user_model\n\n User = get_user_model()\n User.objects.all().delete()\n\n\[email protected]\ndef no_extra_users():\n from django.contrib.auth import get_user_model\n\n User = get_user_model()\n User.objects.exclude(\n username__in=[\"system\", \"default\", \"nobody\"]).delete()\n\n\[email protected](autouse=True, scope=\"session\")\ndef translations_directory(request):\n \"\"\"used by PootleEnv\"\"\"\n from django.conf import settings\n settings.POOTLE_TRANSLATION_DIRECTORY = tempfile.mkdtemp()\n", "path": "pytest_pootle/fixtures/site.py"}, 
{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport pytest\n\n\[email protected](autouse=True)\ndef revision():\n \"\"\"Sets up the revision counter for each test call.\"\"\"\n from pootle.core.models import Revision\n\n Revision.initialize()\n", "path": "pytest_pootle/fixtures/revision.py"}]} | 1,399 | 310 |
gh_patches_debug_18334 | rasdani/github-patches | git_diff | googleapis__google-api-python-client-1295 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Modify noxfile to build and test the package
Versions `2.0.0` and `2.0.1` were yanked from PyPI last week due to an issue where discovery documents were not included in the published package causing `discovery.build()` to fail(#1214). A basic check could be added to verify the package works correctly using the steps in #1214. Ideally it should be done on every PR and push to master so the issue can be caught before the package is published.
Use these steps from #1214 to re-produce the issue with version `2.0.0` and `2.0.1`:
1. Start with a clean clone of `google-api-python-client`
2. Checkout version `2.0.0` or `2.0.1`, using `git checkout 2.0.0`
3. Run `python setup.py sdist`
4. Run `pip install dist/google-api-python-client-<version>.tar.gz`
5. Run
```
$ python3
Python 3.8.7 (default, Jan 27 2021, 18:44:05)
[GCC 10.2.1 20201224] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> from googleapiclient import discovery
>>> client = discovery.build("cloudprofiler", "v2")
...
```
Before closing this issue, we should ensure that we have checks in place so that a PR will fail if `package_data` [here](https://github.com/googleapis/google-api-python-client/blob/master/setup.py#L78) is empty.
</issue>
<code>
[start of noxfile.py]
1
2 # Copyright 2020 Google LLC
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import sys
17
18 import nox
19
20 test_dependencies = [
21 "django>=2.0.0",
22 "google-auth",
23 "google-auth-httplib2",
24 "mox",
25 "parameterized",
26 "pyopenssl",
27 "pytest",
28 "pytest-cov",
29 "webtest",
30 "coverage",
31 "unittest2",
32 "mock",
33 ]
34
35
36 @nox.session(python=["3.7"])
37 def lint(session):
38 session.install("flake8")
39 session.run(
40 "flake8",
41 "googleapiclient",
42 "tests",
43 "--count",
44 "--select=E9,F63,F7,F82",
45 "--show-source",
46 "--statistics",
47 )
48
49
50 @nox.session(python=["3.6", "3.7", "3.8", "3.9"])
51 @nox.parametrize(
52 "oauth2client",
53 [
54 "oauth2client<2dev",
55 "oauth2client>=2,<=3dev",
56 "oauth2client>=3,<=4dev",
57 "oauth2client>=4,<=5dev",
58 ],
59 )
60 def unit(session, oauth2client):
61 session.install(*test_dependencies)
62 session.install(oauth2client)
63 session.install('.')
64
65 # Run py.test against the unit tests.
66 session.run(
67 "py.test",
68 "--quiet",
69 "--cov=googleapiclient",
70 "--cov=tests",
71 "--cov-append",
72 "--cov-config=.coveragerc",
73 "--cov-report=",
74 "--cov-fail-under=85",
75 "tests",
76 *session.posargs,
77 )
78
[end of noxfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -16,6 +16,8 @@
import sys
import nox
+import os
+import shutil
test_dependencies = [
"django>=2.0.0",
@@ -58,9 +60,22 @@
],
)
def unit(session, oauth2client):
+ # Clean up dist and build folders
+ shutil.rmtree('dist', ignore_errors=True)
+ shutil.rmtree('build', ignore_errors=True)
+
session.install(*test_dependencies)
session.install(oauth2client)
- session.install('.')
+
+ # Create and install wheels
+ session.run('python3', 'setup.py', 'bdist_wheel')
+ session.install(os.path.join('dist', os.listdir('dist').pop()))
+
+ # Run tests from a different directory to test the package artifacts
+ root_dir = os.path.dirname(os.path.realpath(__file__))
+ temp_dir = session.create_tmp()
+ session.chdir(temp_dir)
+ shutil.copytree(os.path.join(root_dir, 'tests'), 'tests')
# Run py.test against the unit tests.
session.run(
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -16,6 +16,8 @@\n import sys\n \n import nox\n+import os\n+import shutil\n \n test_dependencies = [\n \"django>=2.0.0\",\n@@ -58,9 +60,22 @@\n ],\n )\n def unit(session, oauth2client):\n+ # Clean up dist and build folders\n+ shutil.rmtree('dist', ignore_errors=True)\n+ shutil.rmtree('build', ignore_errors=True)\n+\n session.install(*test_dependencies)\n session.install(oauth2client)\n- session.install('.')\n+\n+ # Create and install wheels\n+ session.run('python3', 'setup.py', 'bdist_wheel')\n+ session.install(os.path.join('dist', os.listdir('dist').pop()))\n+\n+ # Run tests from a different directory to test the package artifacts\n+ root_dir = os.path.dirname(os.path.realpath(__file__))\n+ temp_dir = session.create_tmp()\n+ session.chdir(temp_dir)\n+ shutil.copytree(os.path.join(root_dir, 'tests'), 'tests')\n \n # Run py.test against the unit tests.\n session.run(\n", "issue": "Modify noxfile to build and test the package\nVersions `2.0.0` and `2.0.1` were yanked from PyPI last week due to an issue where discovery documents were not included in the published package causing `discovery.build()` to fail(#1214). A basic check could be added to verify the package works correctly using the steps in #1214. Ideally it should be done on every PR and push to master so the issue can be caught before the package is published. \r\n\r\nUse these steps from #1214 to re-produce the issue with version `2.0.0` and `2.0.1`:\r\n1. Start with a clean clone of `google-api-python-client`\r\n2. Checkout version `2.0.0` or `2.0.1`, using `git checkout 2.0.0`\r\n3. Run `python setup.py sdist`\r\n4. Run `pip install dist/google-api-python-client-<version>.tar.gz`\r\n5. Run \r\n```\r\n$ python3\r\nPython 3.8.7 (default, Jan 27 2021, 18:44:05) \r\n[GCC 10.2.1 20201224] on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> from googleapiclient import discovery\r\n>>> client = discovery.build(\"cloudprofiler\", \"v2\")\r\n...\r\n```\r\n\r\nBefore closing this issue, we should ensure that we have checks in place so that a PR will fail if `package_data` [here](https://github.com/googleapis/google-api-python-client/blob/master/setup.py#L78) is empty.\r\n\n", "before_files": [{"content": "\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\nimport nox\n\ntest_dependencies = [\n \"django>=2.0.0\",\n \"google-auth\",\n \"google-auth-httplib2\",\n \"mox\",\n \"parameterized\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"webtest\",\n \"coverage\",\n \"unittest2\",\n \"mock\",\n]\n\n\[email protected](python=[\"3.7\"])\ndef lint(session):\n session.install(\"flake8\")\n session.run(\n \"flake8\",\n \"googleapiclient\",\n \"tests\",\n \"--count\",\n \"--select=E9,F63,F7,F82\",\n \"--show-source\",\n \"--statistics\",\n )\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\", \"3.9\"])\[email protected](\n \"oauth2client\",\n [\n 
\"oauth2client<2dev\",\n \"oauth2client>=2,<=3dev\",\n \"oauth2client>=3,<=4dev\",\n \"oauth2client>=4,<=5dev\",\n ],\n)\ndef unit(session, oauth2client):\n session.install(*test_dependencies)\n session.install(oauth2client)\n session.install('.')\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=googleapiclient\",\n \"--cov=tests\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=85\",\n \"tests\",\n *session.posargs,\n )\n", "path": "noxfile.py"}]} | 1,535 | 267 |
gh_patches_debug_8112 | rasdani/github-patches | git_diff | opendatacube__datacube-core-941 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError for LocalConfig during replication
### Expected behaviour
SSHTunnel for replication doesn't open due to AttributeError. I have tried to understand the replication process and ran also a separate script that failed with same error.
### Actual behaviour
```
AttributeError: 'LocalConfig' object has no attribute 'db_hostname'
```
### Steps to reproduce the behaviour
```python
from paramiko import SSHClient, WarningPolicy
from sshtunnel import SSHTunnelForwarder
from datacube.config import LocalConfig, _DEFAULT_CONF
from configparser import ConfigParser
client = SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(WarningPolicy())
client.connect(hostname="host", username="user")
sftp = client.open_sftp()
remote_config = ConfigParser()
remote_config.read_string(_DEFAULT_CONF)
with sftp.open('.datacube.conf') as fin:
remote_config.read_file(fin)
config = LocalConfig(remote_config)
print(config.db_hostname)
client.close()
```
Same with
```
datacube-simple-replica
```
### Environment information
* Which ``datacube --version`` are you using? 1.7
* What datacube deployment/enviornment are you running against? Conda environment + postgres docker db
</issue>
<code>
[start of datacube_apps/simple_replica.py]
1 #!/usr/bin/env python
2 """
3 A Simple Data Cube Replication Tool
4
5 Connects to a remote Data Cube via SSH, and downloads database records and files to a local file system and database.
6
7 Provide a configuration file in ~/.datacube.replication.conf in YAML format, or specify an alternate location
8 on the command line.
9
10 For example, the following config will download 3 PQ products for the specified time and space range. Queries
11 are specified the same as when using the API to search for datasets.
12
13 .. code-block:: yaml
14
15 remote_host: raijin.nci.org.auo
16 remote_user: dra547
17 db_password: xxxxxxxxxxxx
18 remote_dir: /g/data/
19 local_dir: C:/datacube/
20
21 replicated_data:
22 - product: ls5_pq_albers
23 crs: EPSG:3577
24 x: [1200000, 1300000]
25 y: [-4200000, -4300000]
26 time: [2008-01-01, 2010-01-01]
27
28 - product: ls7_pq_albers
29 crs: EPSG:3577
30 x: [1200000, 1300000]
31 y: [-4200000, -4300000]
32 time: [2008-01-01, 2010-01-01]
33
34 - product: ls8_pq_albers
35 crs: EPSG:3577
36 x: [1200000, 1300000]
37 y: [-4200000, -4300000]
38 time: [2008-01-01, 2010-01-01]
39
40 """
41
42 import logging
43 import os.path
44 from configparser import ConfigParser
45 from pathlib import Path
46
47 import click
48 import yaml
49 from paramiko import SSHClient, WarningPolicy
50 from sshtunnel import SSHTunnelForwarder
51 from tqdm import tqdm
52
53 from datacube import Datacube
54 from datacube.config import LocalConfig, _DEFAULT_CONF
55 from datacube.index import index_connect
56 from datacube.ui.click import global_cli_options
57
58 LOG = logging.getLogger('simple_replicator')
59
60 DEFAULT_REPLICATION_CONFIG = os.path.expanduser('~/.datacube.replication.conf')
61
62
63 def uri_to_path(uri):
64 return uri.replace('file://', '')
65
66
67 class DatacubeReplicator(object):
68 def __init__(self, config):
69 self.remote_host = config['remote_host']
70 self.remote_user = config['remote_user']
71 self.db_password = config['db_password']
72 self.remote_dir = config['remote_dir']
73 self.local_dir = config['local_dir']
74 self.replication_defns = config['replicated_data']
75
76 self.client = None
77 self.sftp = None
78 self.tunnel = None
79 self.remote_dc_config = None
80 self.remote_dc = None
81 self.local_index = index_connect()
82
83 def run(self):
84 self.connect()
85 self.read_remote_config()
86 self.connect_to_db()
87 self.replicate_all()
88 self.disconnect()
89
90 def connect(self):
91 client = SSHClient()
92 client.load_system_host_keys()
93 client.set_missing_host_key_policy(WarningPolicy())
94 client.connect(hostname=self.remote_host, username=self.remote_user)
95
96 LOG.debug(client)
97 self.client = client
98 self.sftp = client.open_sftp()
99
100 def disconnect(self):
101 self.client.close()
102 self.tunnel.stop()
103
104 def read_remote_config(self):
105 remote_config = ConfigParser()
106 remote_config.read_string(_DEFAULT_CONF)
107 with self.sftp.open('.datacube.conf') as fin:
108 remote_config.read_file(fin)
109 self.remote_dc_config = LocalConfig(remote_config)
110
111 def connect_to_db(self):
112 self.tunnel = SSHTunnelForwarder(
113 self.remote_host,
114 ssh_username=self.remote_user,
115 remote_bind_address=(self.remote_dc_config.db_hostname, int(self.remote_dc_config.db_port)))
116 self.tunnel.start()
117
118 # pylint: disable=protected-access
119 self.remote_dc_config._config['datacube']['db_hostname'] = '127.0.0.1'
120 self.remote_dc_config._config['datacube']['db_port'] = str(self.tunnel.local_bind_port)
121 self.remote_dc_config._config['datacube']['db_username'] = self.remote_user
122 self.remote_dc_config._config['datacube']['db_password'] = self.db_password
123
124 # This requires the password from somewhere
125 # Parsing it out of .pgpass sounds error prone and fragile
126 # Lets put it in the configuration for now
127 LOG.debug('Remote configuration loaded %s', self.remote_dc_config)
128
129 self.remote_dc = Datacube(config=self.remote_dc_config)
130
131 def replicate_all(self):
132
133 for defn in tqdm(self.replication_defns, 'Replicating products'):
134 self.replicate(defn)
135
136 def replicate_all_products(self):
137 products = self.remote_dc.index.products.get_all()
138 for product in products:
139 self.local_index.products.add(product)
140
141 def replicate(self, defn):
142 datasets = list(self.remote_dc.find_datasets(**defn))
143
144 if not datasets:
145 LOG.info('No remote datasets found matching %s', defn)
146 return
147
148 # TODO: use generator not list
149 product = datasets[0].type
150 LOG.info('Ensuring remote product is in local index. %s', product)
151
152 self.local_index.products.add(product)
153
154 for dataset in tqdm(datasets, 'Datasets'):
155 # dataset = remote_dc.index.datasets.get(dataset.id, include_sources=True)
156 # We would need to pull the parent products down too
157 # TODO: Include parent source datasets + product definitions
158 dataset.sources = {}
159
160 LOG.debug('Replicating dataset %s', dataset)
161 remote_path = uri_to_path(dataset.local_uri)
162 local_path = self.remote_to_local(uri_to_path(dataset.local_uri))
163
164 # Ensure local path exists
165 Path(local_path).parent.mkdir(parents=True, exist_ok=True)
166
167 # Download file
168 self.sftp.get(remote_path, local_path)
169
170 # Add to local index
171 dataset.local_uri = 'file://' + local_path
172 self.local_index.datasets.add(dataset)
173 LOG.debug('Downloaded to %s', local_path)
174
175 def remote_to_local(self, remote):
176 return remote.replace(self.remote_dir, self.local_dir)
177
178
179 def replicate_data(config):
180 replicator = DatacubeReplicator(config)
181 replicator.run()
182
183
184 @click.command(help=__doc__)
185 @click.argument('config_path', required=False)
186 @global_cli_options
187 def replicate(config_path):
188 """
189 Connect to a remote Datacube, and replicate data locally.
190 """
191 if config_path is None:
192 config_path = DEFAULT_REPLICATION_CONFIG
193 LOG.debug('Config path: %s', config_path)
194 with open(config_path) as fin:
195 config = yaml.load(fin)
196
197 replicate_data(config)
198
199
200 if __name__ == '__main__':
201 replicate()
202
[end of datacube_apps/simple_replica.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/datacube_apps/simple_replica.py b/datacube_apps/simple_replica.py
--- a/datacube_apps/simple_replica.py
+++ b/datacube_apps/simple_replica.py
@@ -112,7 +112,8 @@
self.tunnel = SSHTunnelForwarder(
self.remote_host,
ssh_username=self.remote_user,
- remote_bind_address=(self.remote_dc_config.db_hostname, int(self.remote_dc_config.db_port)))
+ remote_bind_address=(self.remote_dc_config.get('db_hostname', '127.0.0.1'),
+ int(self.remote_dc_config.get('db_port', 5432))))
self.tunnel.start()
# pylint: disable=protected-access
| {"golden_diff": "diff --git a/datacube_apps/simple_replica.py b/datacube_apps/simple_replica.py\n--- a/datacube_apps/simple_replica.py\n+++ b/datacube_apps/simple_replica.py\n@@ -112,7 +112,8 @@\n self.tunnel = SSHTunnelForwarder(\n self.remote_host,\n ssh_username=self.remote_user,\n- remote_bind_address=(self.remote_dc_config.db_hostname, int(self.remote_dc_config.db_port)))\n+ remote_bind_address=(self.remote_dc_config.get('db_hostname', '127.0.0.1'),\n+ int(self.remote_dc_config.get('db_port', 5432))))\n self.tunnel.start()\n \n # pylint: disable=protected-access\n", "issue": "AttributeError for LocalConfig during replication\n### Expected behaviour\r\nSSHTunnel for replication doesn't open due to AttributeError. I have tried to understand the replication process and ran also a separate script that failed with same error. \r\n\r\n### Actual behaviour\r\n```\r\nAttributeError: 'LocalConfig' object has no attribute 'db_hostname'\r\n```\r\n\r\n### Steps to reproduce the behaviour\r\n\r\n```python\r\nfrom paramiko import SSHClient, WarningPolicy\r\nfrom sshtunnel import SSHTunnelForwarder\r\nfrom datacube.config import LocalConfig, _DEFAULT_CONF\r\nfrom configparser import ConfigParser\r\n\r\nclient = SSHClient()\r\nclient.load_system_host_keys()\r\nclient.set_missing_host_key_policy(WarningPolicy())\r\nclient.connect(hostname=\"host\", username=\"user\")\r\nsftp = client.open_sftp()\r\n\r\n\r\nremote_config = ConfigParser()\r\nremote_config.read_string(_DEFAULT_CONF)\r\nwith sftp.open('.datacube.conf') as fin:\r\n remote_config.read_file(fin)\r\n\r\nconfig = LocalConfig(remote_config)\r\nprint(config.db_hostname)\r\n\r\nclient.close()\r\n```\r\nSame with\r\n```\r\ndatacube-simple-replica\r\n```\r\n\r\n### Environment information\r\n\r\n* Which ``datacube --version`` are you using? 1.7\r\n* What datacube deployment/enviornment are you running against? Conda environment + postgres docker db\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nA Simple Data Cube Replication Tool\n\nConnects to a remote Data Cube via SSH, and downloads database records and files to a local file system and database.\n\nProvide a configuration file in ~/.datacube.replication.conf in YAML format, or specify an alternate location\non the command line.\n\nFor example, the following config will download 3 PQ products for the specified time and space range. Queries\nare specified the same as when using the API to search for datasets.\n\n.. 
code-block:: yaml\n\n remote_host: raijin.nci.org.auo\n remote_user: dra547\n db_password: xxxxxxxxxxxx\n remote_dir: /g/data/\n local_dir: C:/datacube/\n\n replicated_data:\n - product: ls5_pq_albers\n crs: EPSG:3577\n x: [1200000, 1300000]\n y: [-4200000, -4300000]\n time: [2008-01-01, 2010-01-01]\n\n - product: ls7_pq_albers\n crs: EPSG:3577\n x: [1200000, 1300000]\n y: [-4200000, -4300000]\n time: [2008-01-01, 2010-01-01]\n\n - product: ls8_pq_albers\n crs: EPSG:3577\n x: [1200000, 1300000]\n y: [-4200000, -4300000]\n time: [2008-01-01, 2010-01-01]\n\n\"\"\"\n\nimport logging\nimport os.path\nfrom configparser import ConfigParser\nfrom pathlib import Path\n\nimport click\nimport yaml\nfrom paramiko import SSHClient, WarningPolicy\nfrom sshtunnel import SSHTunnelForwarder\nfrom tqdm import tqdm\n\nfrom datacube import Datacube\nfrom datacube.config import LocalConfig, _DEFAULT_CONF\nfrom datacube.index import index_connect\nfrom datacube.ui.click import global_cli_options\n\nLOG = logging.getLogger('simple_replicator')\n\nDEFAULT_REPLICATION_CONFIG = os.path.expanduser('~/.datacube.replication.conf')\n\n\ndef uri_to_path(uri):\n return uri.replace('file://', '')\n\n\nclass DatacubeReplicator(object):\n def __init__(self, config):\n self.remote_host = config['remote_host']\n self.remote_user = config['remote_user']\n self.db_password = config['db_password']\n self.remote_dir = config['remote_dir']\n self.local_dir = config['local_dir']\n self.replication_defns = config['replicated_data']\n\n self.client = None\n self.sftp = None\n self.tunnel = None\n self.remote_dc_config = None\n self.remote_dc = None\n self.local_index = index_connect()\n\n def run(self):\n self.connect()\n self.read_remote_config()\n self.connect_to_db()\n self.replicate_all()\n self.disconnect()\n\n def connect(self):\n client = SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(WarningPolicy())\n client.connect(hostname=self.remote_host, username=self.remote_user)\n\n LOG.debug(client)\n self.client = client\n self.sftp = client.open_sftp()\n\n def disconnect(self):\n self.client.close()\n self.tunnel.stop()\n\n def read_remote_config(self):\n remote_config = ConfigParser()\n remote_config.read_string(_DEFAULT_CONF)\n with self.sftp.open('.datacube.conf') as fin:\n remote_config.read_file(fin)\n self.remote_dc_config = LocalConfig(remote_config)\n\n def connect_to_db(self):\n self.tunnel = SSHTunnelForwarder(\n self.remote_host,\n ssh_username=self.remote_user,\n remote_bind_address=(self.remote_dc_config.db_hostname, int(self.remote_dc_config.db_port)))\n self.tunnel.start()\n\n # pylint: disable=protected-access\n self.remote_dc_config._config['datacube']['db_hostname'] = '127.0.0.1'\n self.remote_dc_config._config['datacube']['db_port'] = str(self.tunnel.local_bind_port)\n self.remote_dc_config._config['datacube']['db_username'] = self.remote_user\n self.remote_dc_config._config['datacube']['db_password'] = self.db_password\n\n # This requires the password from somewhere\n # Parsing it out of .pgpass sounds error prone and fragile\n # Lets put it in the configuration for now\n LOG.debug('Remote configuration loaded %s', self.remote_dc_config)\n\n self.remote_dc = Datacube(config=self.remote_dc_config)\n\n def replicate_all(self):\n\n for defn in tqdm(self.replication_defns, 'Replicating products'):\n self.replicate(defn)\n\n def replicate_all_products(self):\n products = self.remote_dc.index.products.get_all()\n for product in products:\n self.local_index.products.add(product)\n\n 
def replicate(self, defn):\n datasets = list(self.remote_dc.find_datasets(**defn))\n\n if not datasets:\n LOG.info('No remote datasets found matching %s', defn)\n return\n\n # TODO: use generator not list\n product = datasets[0].type\n LOG.info('Ensuring remote product is in local index. %s', product)\n\n self.local_index.products.add(product)\n\n for dataset in tqdm(datasets, 'Datasets'):\n # dataset = remote_dc.index.datasets.get(dataset.id, include_sources=True)\n # We would need to pull the parent products down too\n # TODO: Include parent source datasets + product definitions\n dataset.sources = {}\n\n LOG.debug('Replicating dataset %s', dataset)\n remote_path = uri_to_path(dataset.local_uri)\n local_path = self.remote_to_local(uri_to_path(dataset.local_uri))\n\n # Ensure local path exists\n Path(local_path).parent.mkdir(parents=True, exist_ok=True)\n\n # Download file\n self.sftp.get(remote_path, local_path)\n\n # Add to local index\n dataset.local_uri = 'file://' + local_path\n self.local_index.datasets.add(dataset)\n LOG.debug('Downloaded to %s', local_path)\n\n def remote_to_local(self, remote):\n return remote.replace(self.remote_dir, self.local_dir)\n\n\ndef replicate_data(config):\n replicator = DatacubeReplicator(config)\n replicator.run()\n\n\[email protected](help=__doc__)\[email protected]('config_path', required=False)\n@global_cli_options\ndef replicate(config_path):\n \"\"\"\n Connect to a remote Datacube, and replicate data locally.\n \"\"\"\n if config_path is None:\n config_path = DEFAULT_REPLICATION_CONFIG\n LOG.debug('Config path: %s', config_path)\n with open(config_path) as fin:\n config = yaml.load(fin)\n\n replicate_data(config)\n\n\nif __name__ == '__main__':\n replicate()\n", "path": "datacube_apps/simple_replica.py"}]} | 2,889 | 158 |
gh_patches_debug_24814 | rasdani/github-patches | git_diff | coala__coala-bears-1276 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The bear HaskellLintBear raised an exception
I've used HaskellLintBear to linting https://github.com/wisn/elm-reactor/
Here is the log
https://travis-ci.org/wisn/elm-reactor/builds/180417562
The build result is green, but the bear HaskellLintBear raised an exception.
It seems HaskellLintBear have a problem
```
[WARNING][14:56:00] Bear HaskellLintBear failed to run. Take a look at debug messages (`-V`) for further information.
```
I've collected the traceback information:
```
Traceback (most recent call last):
File "/coala-bears/bears/haskell/HaskellLintBear.py", line 41, in process_output
assert issue['startLine'] == issue['endLine']
AssertionError
File "/coala-bears/bears/haskell/HaskellLintBear.py", line 45, in process_output
newline = line_to_change.replace(issue['from'], issue['to'])
TypeError: Can't convert 'NoneType' object to str implicitly
```
I think `TypeError: Can't convert 'NoneType' object to str implicitly` is the main problem.
Then, followed by `AssertionError`.
Unfortunately, I can't trace manually with `hlint` because my PC freezes when compiling (in installing) it. Hope this information will be helpful. Thanks and sorry for my bad English...
</issue>
<code>
[start of bears/haskell/HaskellLintBear.py]
1 import json
2
3 from coalib.bearlib.abstractions.Linter import linter
4 from dependency_management.requirements.DistributionRequirement import (
5 DistributionRequirement)
6 from coalib.results.Diff import Diff
7 from coalib.results.Result import Result
8 from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
9
10
11 @linter(executable='hlint')
12 class HaskellLintBear:
13 """
14 Check Haskell code for possible problems. This bear can propose patches for
15 using alternative functions, simplifying code and removing redundancies.
16
17 See <http://community.haskell.org/~ndm/darcs/hlint/hlint.htm> for more
18 information.
19 """
20
21 LANGUAGES = {'Haskell'}
22 REQUIREMENTS = {DistributionRequirement(apt_get='hlint')}
23 AUTHORS = {'The coala developers'}
24 AUTHORS_EMAILS = {'[email protected]'}
25 LICENSE = 'AGPL-3.0'
26 CAN_DETECT = {'Duplication'}
27 CAN_FIX = {'Unused Code', 'Code Simplification'}
28
29 severity_map = {'Error': RESULT_SEVERITY.MAJOR,
30 'Warning': RESULT_SEVERITY.NORMAL,
31 'Suggestion': RESULT_SEVERITY.INFO}
32
33 @staticmethod
34 def create_arguments(filename, file, config_file):
35 return '--json', filename
36
37 def process_output(self, output, filename, file):
38 output = json.loads(output)
39
40 for issue in output:
41 assert issue['startLine'] == issue['endLine']
42 diff = Diff(file)
43 line_nr = issue['startLine']
44 line_to_change = file[line_nr-1]
45 newline = line_to_change.replace(issue['from'], issue['to'])
46 diff.change_line(line_nr, line_to_change, newline)
47
48 yield Result.from_values(
49 origin=self,
50 message=issue['hint'],
51 file=filename,
52 severity=self.severity_map[issue['severity']],
53 line=issue['startLine'],
54 diffs={filename: diff})
55
[end of bears/haskell/HaskellLintBear.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bears/haskell/HaskellLintBear.py b/bears/haskell/HaskellLintBear.py
--- a/bears/haskell/HaskellLintBear.py
+++ b/bears/haskell/HaskellLintBear.py
@@ -38,11 +38,15 @@
output = json.loads(output)
for issue in output:
- assert issue['startLine'] == issue['endLine']
diff = Diff(file)
+ from_lines = issue['from'].splitlines()
+ to_lines = issue['to'].splitlines()
+ assert len(from_lines) == len(to_lines)
+ for other_lines in range(1, len(from_lines)):
+ assert from_lines[other_lines] == to_lines[other_lines]
line_nr = issue['startLine']
line_to_change = file[line_nr-1]
- newline = line_to_change.replace(issue['from'], issue['to'])
+ newline = line_to_change.replace(from_lines[0], to_lines[0])
diff.change_line(line_nr, line_to_change, newline)
yield Result.from_values(
@@ -51,4 +55,7 @@
file=filename,
severity=self.severity_map[issue['severity']],
line=issue['startLine'],
+ column=issue['startColumn'],
+ end_line=issue['endLine'],
+ end_column=issue['endColumn'],
diffs={filename: diff})
| {"golden_diff": "diff --git a/bears/haskell/HaskellLintBear.py b/bears/haskell/HaskellLintBear.py\n--- a/bears/haskell/HaskellLintBear.py\n+++ b/bears/haskell/HaskellLintBear.py\n@@ -38,11 +38,15 @@\n output = json.loads(output)\n \n for issue in output:\n- assert issue['startLine'] == issue['endLine']\n diff = Diff(file)\n+ from_lines = issue['from'].splitlines()\n+ to_lines = issue['to'].splitlines()\n+ assert len(from_lines) == len(to_lines)\n+ for other_lines in range(1, len(from_lines)):\n+ assert from_lines[other_lines] == to_lines[other_lines]\n line_nr = issue['startLine']\n line_to_change = file[line_nr-1]\n- newline = line_to_change.replace(issue['from'], issue['to'])\n+ newline = line_to_change.replace(from_lines[0], to_lines[0])\n diff.change_line(line_nr, line_to_change, newline)\n \n yield Result.from_values(\n@@ -51,4 +55,7 @@\n file=filename,\n severity=self.severity_map[issue['severity']],\n line=issue['startLine'],\n+ column=issue['startColumn'],\n+ end_line=issue['endLine'],\n+ end_column=issue['endColumn'],\n diffs={filename: diff})\n", "issue": "The bear HaskellLintBear raised an exception\nI've used HaskellLintBear to linting https://github.com/wisn/elm-reactor/\r\n\r\nHere is the log\r\nhttps://travis-ci.org/wisn/elm-reactor/builds/180417562\r\n\r\nThe build result is green, but the bear HaskellLintBear raised an exception.\r\n\r\nIt seems HaskellLintBear have a problem\r\n```\r\n[WARNING][14:56:00] Bear HaskellLintBear failed to run. Take a look at debug messages (`-V`) for further information.\r\n```\r\n\r\nI've collected the traceback information:\r\n```\r\nTraceback (most recent call last):\r\n File \"/coala-bears/bears/haskell/HaskellLintBear.py\", line 41, in process_output\r\n assert issue['startLine'] == issue['endLine']\r\n AssertionError\r\n\r\n File \"/coala-bears/bears/haskell/HaskellLintBear.py\", line 45, in process_output\r\n newline = line_to_change.replace(issue['from'], issue['to'])\r\n TypeError: Can't convert 'NoneType' object to str implicitly\r\n```\r\n\r\nI think `TypeError: Can't convert 'NoneType' object to str implicitly` is the main problem.\r\nThen, followed by `AssertionError`.\r\n\r\nUnfortunately, I can't trace manually with `hlint` because my PC freezes when compiling (in installing) it. Hope this information will be helpful. Thanks and sorry for my bad English...\n", "before_files": [{"content": "import json\n\nfrom coalib.bearlib.abstractions.Linter import linter\nfrom dependency_management.requirements.DistributionRequirement import (\n DistributionRequirement)\nfrom coalib.results.Diff import Diff\nfrom coalib.results.Result import Result\nfrom coalib.results.RESULT_SEVERITY import RESULT_SEVERITY\n\n\n@linter(executable='hlint')\nclass HaskellLintBear:\n \"\"\"\n Check Haskell code for possible problems. 
This bear can propose patches for\n using alternative functions, simplifying code and removing redundancies.\n\n See <http://community.haskell.org/~ndm/darcs/hlint/hlint.htm> for more\n information.\n \"\"\"\n\n LANGUAGES = {'Haskell'}\n REQUIREMENTS = {DistributionRequirement(apt_get='hlint')}\n AUTHORS = {'The coala developers'}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n CAN_DETECT = {'Duplication'}\n CAN_FIX = {'Unused Code', 'Code Simplification'}\n\n severity_map = {'Error': RESULT_SEVERITY.MAJOR,\n 'Warning': RESULT_SEVERITY.NORMAL,\n 'Suggestion': RESULT_SEVERITY.INFO}\n\n @staticmethod\n def create_arguments(filename, file, config_file):\n return '--json', filename\n\n def process_output(self, output, filename, file):\n output = json.loads(output)\n\n for issue in output:\n assert issue['startLine'] == issue['endLine']\n diff = Diff(file)\n line_nr = issue['startLine']\n line_to_change = file[line_nr-1]\n newline = line_to_change.replace(issue['from'], issue['to'])\n diff.change_line(line_nr, line_to_change, newline)\n\n yield Result.from_values(\n origin=self,\n message=issue['hint'],\n file=filename,\n severity=self.severity_map[issue['severity']],\n line=issue['startLine'],\n diffs={filename: diff})\n", "path": "bears/haskell/HaskellLintBear.py"}]} | 1,373 | 308 |
gh_patches_debug_23510 | rasdani/github-patches | git_diff | localstack__localstack-2499 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Edge Router doesn't handle S3 Presigned URL POSTs properly
# Bug Report
# Detailed description
This is a similar issue to #2329, specific to [S3 Presigned URLs](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-presigned-urls.html). Using the edge router port (`:4566`), it is possible to use `generate_presigned_post`, but attempting to use the resultant URL to upload a file to localstack's S3 fails with a 404.
## Expected behavior
Localstack's edge router port should accept POST requests with an S3 Presigned URL (generated from that same Localstack instance) in the same way that the old S3 port (`:4572`) does.
## Actual behavior
While the old S3 port (`:4572`) handles this fine, the edge router responds with a 404 and a message similar to the following:
```
2020-05-24T15:36:54:INFO:localstack.services.edge: Unable to find forwarding rule for host "localhost:4566", path "/local-job-documents", target header "", auth header ""
```
(borrowed from @philippmalkov's comment on #2329 )
```
aws_1 | 2020-05-27T16:24:38:INFO:localstack.services.edge: Unable to find forwarding rule for host "aws:4566", path "/test-bucket", target header "", auth header ""
```
(our observed case with the below setup)
# Steps to reproduce
## Command used to start LocalStack
```
# Used in docker-compose.yml as:
aws:
image: localstack/localstack-light
environment:
- SERVICES=dynamodb,s3
- HOSTNAME_EXTERNAL=aws
- DEBUG=1
- DATA_DIR=/tmp/localstack/data
expose:
- 4566
- 4572
volumes:
- ./.localstack:/tmp/localstack
```
## Client code (AWS SDK code snippet, or sequence of "awslocal" commands)
Client code is a Django app using `boto3` to generate a Presigned URL and then upload a file to it using `requests`, effectively identical to the examples in [the boto3 docs](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-presigned-urls.html)
</issue>
<code>
[start of localstack/services/edge.py]
1 import re
2 import os
3 import sys
4 import json
5 import logging
6 from requests.models import Response
7 from localstack import config
8 from localstack.constants import HEADER_LOCALSTACK_TARGET, HEADER_LOCALSTACK_EDGE_URL, LOCALSTACK_ROOT_FOLDER
9 from localstack.utils.common import run, is_root, TMP_THREADS, to_bytes
10 from localstack.utils.common import safe_requests as requests
11 from localstack.services.generic_proxy import ProxyListener, GenericProxy
12
13 LOG = logging.getLogger(__name__)
14
15 # Header to indicate that the process should kill itself. This is required because if
16 # this process is started as root, then we cannot kill it from a non-root process
17 HEADER_KILL_SIGNAL = 'x-localstack-kill'
18
19
20 class ProxyListenerEdge(ProxyListener):
21
22 def forward_request(self, method, path, data, headers):
23 if method == 'OPTIONS':
24 return 200
25
26 # kill the process if we receive this header
27 headers.get(HEADER_KILL_SIGNAL) and os._exit(0)
28
29 target = headers.get('x-amz-target', '')
30 auth_header = headers.get('authorization', '')
31 host = headers.get('host', '')
32 headers[HEADER_LOCALSTACK_EDGE_URL] = 'https://%s' % host
33
34 # extract API details
35 api, port, path, host = get_api_from_headers(headers, path)
36
37 if port and int(port) < 0:
38 return 404
39
40 if not port:
41 port = get_port_from_custom_rules(method, path, data, headers) or port
42
43 if not port:
44 if api in ['', None, '_unknown_']:
45 LOG.info(('Unable to find forwarding rule for host "%s", path "%s", '
46 'target header "%s", auth header "%s"') % (host, path, target, auth_header))
47 else:
48 LOG.info(('Unable to determine forwarding port for API "%s" - please '
49 'make sure this API is enabled via the SERVICES configuration') % api)
50 response = Response()
51 response.status_code = 404
52 response._content = '{"status": "running"}'
53 return response
54
55 use_ssl = config.USE_SSL
56
57 connect_host = '%s:%s' % (config.HOSTNAME, port)
58 url = 'http%s://%s%s' % ('s' if use_ssl else '', connect_host, path)
59 headers['Host'] = host
60 function = getattr(requests, method.lower())
61 if isinstance(data, dict):
62 data = json.dumps(data)
63
64 response = function(url, data=data, headers=headers, verify=False)
65 return response
66
67
68 def get_api_from_headers(headers, path=None):
69 """ Determine API and backend port based on Authorization headers. """
70
71 target = headers.get('x-amz-target', '')
72 host = headers.get('host', '')
73 auth_header = headers.get('authorization', '')
74 ls_target = headers.get(HEADER_LOCALSTACK_TARGET, '')
75 path = path or '/'
76
77 # initialize result
78 result = '_unknown_', 0
79
80 # https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
81 try:
82 credential_scope = auth_header.split(',')[0].split()[1]
83 _, _, _, service, _ = credential_scope.split('/')
84 result = service, get_service_port_for_account(service, headers)
85 except Exception:
86 pass
87
88 result_before = result
89
90 # Fallback rules and route customizations applied below
91
92 if host.endswith('cloudfront.net'):
93 path = path or '/'
94 result = 'cloudfront', config.PORT_CLOUDFRONT
95 elif target.startswith('AWSCognitoIdentityProviderService') or 'cognito-idp.' in host:
96 result = 'cognito-idp', config.PORT_COGNITO_IDP
97 elif target.startswith('AWSCognitoIdentityService') or 'cognito-identity.' in host:
98 result = 'cognito-identity', config.PORT_COGNITO_IDENTITY
99 elif result[0] == 's3' or re.match(r'.*s3(\-website)?\.([^\.]+\.)?amazonaws.com', host):
100 host = re.sub(r's3-website\..*\.amazonaws', 's3.amazonaws', host)
101 result = 's3', config.PORT_S3
102 elif result[0] == 'states' in auth_header or host.startswith('states.'):
103 result = 'stepfunctions', config.PORT_STEPFUNCTIONS
104 elif '.execute-api.' in host:
105 result = 'apigateway', config.PORT_APIGATEWAY
106 elif target.startswith('DynamoDBStreams') or host.startswith('streams.dynamodb.'):
107 result = 'dynamodbstreams', config.PORT_DYNAMODBSTREAMS
108 elif ls_target == 'web' or path == '/graph':
109 result = 'web', config.PORT_WEB_UI
110
111 return result[0], result_before[1] or result[1], path, host
112
113
114 def get_port_from_custom_rules(method, path, data, headers):
115 """ Determine backend port based on custom rules. """
116
117 # detect S3 presigned URLs
118 if 'AWSAccessKeyId=' in path or 'Signature=' in path:
119 return config.PORT_S3
120
121 # TODO: move S3 public URLs to a separate port/endpoint, OR check ACLs here first
122 stripped = path.strip('/')
123 data_bytes = to_bytes(data or '')
124 if method == 'GET' and '/' in stripped:
125 # assume that this is an S3 GET request with URL path `/<bucket>/<key ...>`
126 return config.PORT_S3
127 if stripped and '/' not in stripped:
128 if method == 'PUT':
129 # assume that this is an S3 PUT bucket request with URL path `/<bucket>`
130 return config.PORT_S3
131 if method == 'POST' and to_bytes('key=') in data_bytes:
132 # assume that this is an S3 POST request with form parameters in the body
133 return config.PORT_S3
134
135 if path == '/' and to_bytes('QueueName=') in data_bytes:
136 return config.PORT_SQS
137
138
139 def get_service_port_for_account(service, headers):
140 # assume we're only using a single account, hence return the static port mapping from config.py
141 return config.service_port(service)
142
143
144 def do_start_edge(port, use_ssl, asynchronous=False):
145 try:
146 # start local DNS server, if present
147 from localstack_ext.services import dns_server
148 dns_server.start_servers()
149 except Exception:
150 pass
151
152 # get port and start Edge
153 print('Starting edge router (http%s port %s)...' % ('s' if use_ssl else '', port))
154 # use use=True here because our proxy allows both, HTTP and HTTPS traffic
155 proxy = GenericProxy(port, ssl=True, update_listener=ProxyListenerEdge())
156 proxy.start()
157 if not asynchronous:
158 proxy.join()
159 return proxy
160
161
162 def can_use_sudo():
163 try:
164 run('echo | sudo -S echo', print_error=False)
165 return True
166 except Exception:
167 return False
168
169
170 def ensure_can_use_sudo():
171 if not is_root() and not can_use_sudo():
172 print('Please enter your sudo password (required to configure local network):')
173 run('sudo echo', stdin=True)
174
175
176 def start_edge(port=None, use_ssl=True, asynchronous=False):
177 if not port:
178 port = config.EDGE_PORT
179 if config.EDGE_PORT_HTTP:
180 do_start_edge(config.EDGE_PORT_HTTP, use_ssl=False, asynchronous=True)
181 if port > 1024 or is_root():
182 return do_start_edge(port, use_ssl, asynchronous=asynchronous)
183
184 # process requires priviledged port but we're not root -> try running as sudo
185
186 class Terminator(object):
187
188 def stop(self, quiet=True):
189 try:
190 url = 'http%s://localhost:%s' % ('s' if use_ssl else '', port)
191 requests.verify_ssl = False
192 requests.post(url, headers={HEADER_KILL_SIGNAL: 'kill'})
193 except Exception:
194 pass
195
196 # make sure we can run sudo commands
197 ensure_can_use_sudo()
198
199 # register a signal handler to terminate the sudo process later on
200 TMP_THREADS.append(Terminator())
201
202 # start the process as sudo
203 sudo_cmd = 'sudo '
204 python_cmd = sys.executable
205 cmd = '%sPYTHONPATH=.:%s %s %s %s' % (sudo_cmd, LOCALSTACK_ROOT_FOLDER, python_cmd, __file__, port)
206 process = run(cmd, asynchronous=asynchronous)
207 return process
208
209
210 if __name__ == '__main__':
211 logging.basicConfig()
212 start_edge(int(sys.argv[1]))
213
[end of localstack/services/edge.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/localstack/services/edge.py b/localstack/services/edge.py
--- a/localstack/services/edge.py
+++ b/localstack/services/edge.py
@@ -111,6 +111,16 @@
return result[0], result_before[1] or result[1], path, host
+def is_s3_form_data(data_bytes):
+ if(to_bytes('key=') in data_bytes):
+ return True
+
+ if(to_bytes('Content-Disposition: form-data') in data_bytes and to_bytes('name="key"') in data_bytes):
+ return True
+
+ return False
+
+
def get_port_from_custom_rules(method, path, data, headers):
""" Determine backend port based on custom rules. """
@@ -128,8 +138,8 @@
if method == 'PUT':
# assume that this is an S3 PUT bucket request with URL path `/<bucket>`
return config.PORT_S3
- if method == 'POST' and to_bytes('key=') in data_bytes:
- # assume that this is an S3 POST request with form parameters in the body
+ if method == 'POST' and is_s3_form_data(data_bytes):
+ # assume that this is an S3 POST request with form parameters or multipart form in the body
return config.PORT_S3
if path == '/' and to_bytes('QueueName=') in data_bytes:
| {"golden_diff": "diff --git a/localstack/services/edge.py b/localstack/services/edge.py\n--- a/localstack/services/edge.py\n+++ b/localstack/services/edge.py\n@@ -111,6 +111,16 @@\n return result[0], result_before[1] or result[1], path, host\n \n \n+def is_s3_form_data(data_bytes):\n+ if(to_bytes('key=') in data_bytes):\n+ return True\n+\n+ if(to_bytes('Content-Disposition: form-data') in data_bytes and to_bytes('name=\"key\"') in data_bytes):\n+ return True\n+\n+ return False\n+\n+\n def get_port_from_custom_rules(method, path, data, headers):\n \"\"\" Determine backend port based on custom rules. \"\"\"\n \n@@ -128,8 +138,8 @@\n if method == 'PUT':\n # assume that this is an S3 PUT bucket request with URL path `/<bucket>`\n return config.PORT_S3\n- if method == 'POST' and to_bytes('key=') in data_bytes:\n- # assume that this is an S3 POST request with form parameters in the body\n+ if method == 'POST' and is_s3_form_data(data_bytes):\n+ # assume that this is an S3 POST request with form parameters or multipart form in the body\n return config.PORT_S3\n \n if path == '/' and to_bytes('QueueName=') in data_bytes:\n", "issue": "Edge Router doesn't handle S3 Presigned URL POSTs properly\n# Bug Report\r\n\r\n# Detailed description\r\nThis is a similar issue to #2329, specific to [S3 Presigned URLs](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-presigned-urls.html). Using the edge router port (`:4566`), it is possible to use `generate_presigned_post`, but attempting to use the resultant URL to upload a file to localstack's S3 fails with a 404.\r\n\r\n## Expected behavior\r\nLocalstack's edge router port should accept POST requests with an S3 Presigned URL (generated from that same Localstack instance) in the same way that the old S3 port (`:4572`) does.\r\n\r\n## Actual behavior\r\nWhile the old S3 port (`:4572`) handles this fine, the edge router responds with a 404 and a message similar to the following:\r\n\r\n```\r\n2020-05-24T15:36:54:INFO:localstack.services.edge: Unable to find forwarding rule for host \"localhost:4566\", path \"/local-job-documents\", target header \"\", auth header \"\"\r\n```\r\n\r\n(borrowed from @philippmalkov's comment on #2329 )\r\n\r\n```\r\naws_1 | 2020-05-27T16:24:38:INFO:localstack.services.edge: Unable to find forwarding rule for host \"aws:4566\", path \"/test-bucket\", target header \"\", auth header \"\"\r\n```\r\n\r\n(our observed case with the below setup)\r\n\r\n# Steps to reproduce\r\n\r\n## Command used to start LocalStack\r\n```\r\n# Used in docker-compose.yml as:\r\n aws:\r\n image: localstack/localstack-light\r\n environment:\r\n - SERVICES=dynamodb,s3\r\n - HOSTNAME_EXTERNAL=aws\r\n - DEBUG=1\r\n - DATA_DIR=/tmp/localstack/data\r\n expose:\r\n - 4566\r\n - 4572\r\n volumes:\r\n - ./.localstack:/tmp/localstack\r\n```\r\n\r\n## Client code (AWS SDK code snippet, or sequence of \"awslocal\" commands)\r\nClient code is a Django app using `boto3` to generate a Presigned URL and then upload a file to it using `requests`, effectively identical to the examples in [the boto3 docs](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-presigned-urls.html)\n", "before_files": [{"content": "import re\nimport os\nimport sys\nimport json\nimport logging\nfrom requests.models import Response\nfrom localstack import config\nfrom localstack.constants import HEADER_LOCALSTACK_TARGET, HEADER_LOCALSTACK_EDGE_URL, LOCALSTACK_ROOT_FOLDER\nfrom localstack.utils.common import run, is_root, TMP_THREADS, to_bytes\nfrom 
localstack.utils.common import safe_requests as requests\nfrom localstack.services.generic_proxy import ProxyListener, GenericProxy\n\nLOG = logging.getLogger(__name__)\n\n# Header to indicate that the process should kill itself. This is required because if\n# this process is started as root, then we cannot kill it from a non-root process\nHEADER_KILL_SIGNAL = 'x-localstack-kill'\n\n\nclass ProxyListenerEdge(ProxyListener):\n\n def forward_request(self, method, path, data, headers):\n if method == 'OPTIONS':\n return 200\n\n # kill the process if we receive this header\n headers.get(HEADER_KILL_SIGNAL) and os._exit(0)\n\n target = headers.get('x-amz-target', '')\n auth_header = headers.get('authorization', '')\n host = headers.get('host', '')\n headers[HEADER_LOCALSTACK_EDGE_URL] = 'https://%s' % host\n\n # extract API details\n api, port, path, host = get_api_from_headers(headers, path)\n\n if port and int(port) < 0:\n return 404\n\n if not port:\n port = get_port_from_custom_rules(method, path, data, headers) or port\n\n if not port:\n if api in ['', None, '_unknown_']:\n LOG.info(('Unable to find forwarding rule for host \"%s\", path \"%s\", '\n 'target header \"%s\", auth header \"%s\"') % (host, path, target, auth_header))\n else:\n LOG.info(('Unable to determine forwarding port for API \"%s\" - please '\n 'make sure this API is enabled via the SERVICES configuration') % api)\n response = Response()\n response.status_code = 404\n response._content = '{\"status\": \"running\"}'\n return response\n\n use_ssl = config.USE_SSL\n\n connect_host = '%s:%s' % (config.HOSTNAME, port)\n url = 'http%s://%s%s' % ('s' if use_ssl else '', connect_host, path)\n headers['Host'] = host\n function = getattr(requests, method.lower())\n if isinstance(data, dict):\n data = json.dumps(data)\n\n response = function(url, data=data, headers=headers, verify=False)\n return response\n\n\ndef get_api_from_headers(headers, path=None):\n \"\"\" Determine API and backend port based on Authorization headers. \"\"\"\n\n target = headers.get('x-amz-target', '')\n host = headers.get('host', '')\n auth_header = headers.get('authorization', '')\n ls_target = headers.get(HEADER_LOCALSTACK_TARGET, '')\n path = path or '/'\n\n # initialize result\n result = '_unknown_', 0\n\n # https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html\n try:\n credential_scope = auth_header.split(',')[0].split()[1]\n _, _, _, service, _ = credential_scope.split('/')\n result = service, get_service_port_for_account(service, headers)\n except Exception:\n pass\n\n result_before = result\n\n # Fallback rules and route customizations applied below\n\n if host.endswith('cloudfront.net'):\n path = path or '/'\n result = 'cloudfront', config.PORT_CLOUDFRONT\n elif target.startswith('AWSCognitoIdentityProviderService') or 'cognito-idp.' in host:\n result = 'cognito-idp', config.PORT_COGNITO_IDP\n elif target.startswith('AWSCognitoIdentityService') or 'cognito-identity.' in host:\n result = 'cognito-identity', config.PORT_COGNITO_IDENTITY\n elif result[0] == 's3' or re.match(r'.*s3(\\-website)?\\.([^\\.]+\\.)?amazonaws.com', host):\n host = re.sub(r's3-website\\..*\\.amazonaws', 's3.amazonaws', host)\n result = 's3', config.PORT_S3\n elif result[0] == 'states' in auth_header or host.startswith('states.'):\n result = 'stepfunctions', config.PORT_STEPFUNCTIONS\n elif '.execute-api.' 
in host:\n result = 'apigateway', config.PORT_APIGATEWAY\n elif target.startswith('DynamoDBStreams') or host.startswith('streams.dynamodb.'):\n result = 'dynamodbstreams', config.PORT_DYNAMODBSTREAMS\n elif ls_target == 'web' or path == '/graph':\n result = 'web', config.PORT_WEB_UI\n\n return result[0], result_before[1] or result[1], path, host\n\n\ndef get_port_from_custom_rules(method, path, data, headers):\n \"\"\" Determine backend port based on custom rules. \"\"\"\n\n # detect S3 presigned URLs\n if 'AWSAccessKeyId=' in path or 'Signature=' in path:\n return config.PORT_S3\n\n # TODO: move S3 public URLs to a separate port/endpoint, OR check ACLs here first\n stripped = path.strip('/')\n data_bytes = to_bytes(data or '')\n if method == 'GET' and '/' in stripped:\n # assume that this is an S3 GET request with URL path `/<bucket>/<key ...>`\n return config.PORT_S3\n if stripped and '/' not in stripped:\n if method == 'PUT':\n # assume that this is an S3 PUT bucket request with URL path `/<bucket>`\n return config.PORT_S3\n if method == 'POST' and to_bytes('key=') in data_bytes:\n # assume that this is an S3 POST request with form parameters in the body\n return config.PORT_S3\n\n if path == '/' and to_bytes('QueueName=') in data_bytes:\n return config.PORT_SQS\n\n\ndef get_service_port_for_account(service, headers):\n # assume we're only using a single account, hence return the static port mapping from config.py\n return config.service_port(service)\n\n\ndef do_start_edge(port, use_ssl, asynchronous=False):\n try:\n # start local DNS server, if present\n from localstack_ext.services import dns_server\n dns_server.start_servers()\n except Exception:\n pass\n\n # get port and start Edge\n print('Starting edge router (http%s port %s)...' % ('s' if use_ssl else '', port))\n # use use=True here because our proxy allows both, HTTP and HTTPS traffic\n proxy = GenericProxy(port, ssl=True, update_listener=ProxyListenerEdge())\n proxy.start()\n if not asynchronous:\n proxy.join()\n return proxy\n\n\ndef can_use_sudo():\n try:\n run('echo | sudo -S echo', print_error=False)\n return True\n except Exception:\n return False\n\n\ndef ensure_can_use_sudo():\n if not is_root() and not can_use_sudo():\n print('Please enter your sudo password (required to configure local network):')\n run('sudo echo', stdin=True)\n\n\ndef start_edge(port=None, use_ssl=True, asynchronous=False):\n if not port:\n port = config.EDGE_PORT\n if config.EDGE_PORT_HTTP:\n do_start_edge(config.EDGE_PORT_HTTP, use_ssl=False, asynchronous=True)\n if port > 1024 or is_root():\n return do_start_edge(port, use_ssl, asynchronous=asynchronous)\n\n # process requires priviledged port but we're not root -> try running as sudo\n\n class Terminator(object):\n\n def stop(self, quiet=True):\n try:\n url = 'http%s://localhost:%s' % ('s' if use_ssl else '', port)\n requests.verify_ssl = False\n requests.post(url, headers={HEADER_KILL_SIGNAL: 'kill'})\n except Exception:\n pass\n\n # make sure we can run sudo commands\n ensure_can_use_sudo()\n\n # register a signal handler to terminate the sudo process later on\n TMP_THREADS.append(Terminator())\n\n # start the process as sudo\n sudo_cmd = 'sudo '\n python_cmd = sys.executable\n cmd = '%sPYTHONPATH=.:%s %s %s %s' % (sudo_cmd, LOCALSTACK_ROOT_FOLDER, python_cmd, __file__, port)\n process = run(cmd, asynchronous=asynchronous)\n return process\n\n\nif __name__ == '__main__':\n logging.basicConfig()\n start_edge(int(sys.argv[1]))\n", "path": "localstack/services/edge.py"}]} | 3,488 | 316 |
gh_patches_debug_56249 | rasdani/github-patches | git_diff | facebookresearch__xformers-151 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Follow up on #141 with a proper unit test
# 🐛 Bug
Make sure that #141 cannot happen anymore, add a small unit test to guard parity
</issue>
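A regression test guarding this parity could look roughly like the following sketch. The import path comes from the module shown below; the shapes, the 2D additive mask, and the tolerance are illustrative assumptions rather than the project's actual tests.

```python
import pytest
import torch

from xformers.triton.softmax import softmax  # module shown in the code section below

_devices = ["cpu"] + (["cuda"] if torch.cuda.is_available() else [])


@pytest.mark.parametrize("device", _devices)
def test_masked_softmax_parity_and_no_mutation(device):
    torch.manual_seed(0)
    x = torch.randn(2, 8, 64, device=device)
    mask = torch.randn(8, 64, device=device)  # additive mask, assumed broadcastable
    x_before = x.clone()

    y = softmax(x, mask=mask)
    y_ref = torch.softmax(x_before + mask, dim=-1)

    # Same result as the plain PyTorch reference...
    assert torch.allclose(y, y_ref, atol=1e-4)
    # ...and, the point of #141, the caller's tensor must come back unchanged,
    # i.e. the additive mask must not have been applied in place.
    assert torch.equal(x, x_before)
```

On the CPU leg this exercises exactly the fallback branch that #141 touched; on a CUDA machine the same assertions also cover the Triton kernel path.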
<code>
[start of xformers/triton/softmax.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6
7 import logging
8 from enum import Enum
9 from typing import Optional
10
11 import torch
12 import triton
13 from torch.cuda.amp import custom_bwd, custom_fwd
14
15 from xformers.triton.k_softmax import _softmax, _softmax_backward
16
17 # CREDITS: This is adapted from the vanilla Triton example. See https://openai.com/blog/triton/
18 # and https://triton-lang.org/getting-started/tutorials/02-fused-softmax.html
19
20
21 _triton_registered_overflow = False
22 _triton_registered_warnings = False
23 _triton_softmax_fp16_enabled = False # NOTE: PyTorch keeps softmax as fp32
24
25
26 class MaskType(str, Enum):
27 ADD = "add"
28 MUL = "mul"
29
30
31 # Helper to handle the SPMD launch grid and error cases
32 class _softmax_triton(torch.autograd.Function):
33 @staticmethod
34 @custom_fwd(cast_inputs=torch.float16 if _triton_softmax_fp16_enabled else None)
35 def forward(ctx, x, mask, log_outputs, causal):
36 """
37 Fused softmax implementation, using the Triton programming model.
38 This only supports a reduction over the last dimension for now
39 """
40
41 # Handle 2D/3D tensors
42 x_ = x.unsqueeze(0) if x.ndim == 2 else x
43
44 if not x_.is_contiguous():
45 x_ = x_.contiguous()
46
47 y = torch.empty_like(x_)
48 assert (
49 y.stride(2) == 1 and x_.stride(2) == 1
50 ), f"{x.shape} - {x_.shape} - {x_.stride()}"
51
52 # SPMD launch grid
53 grid_2d = (
54 x_.shape[0],
55 x_.shape[1],
56 )
57
58 # enqueue GPU kernel
59 use_mask = True
60 if mask is None:
61 # placeholder, will not be used
62 mask = x_
63 use_mask = False
64 else:
65 # Make sure that the mask is binary
66 assert mask.dtype == x.dtype, "An additive mask is requested"
67
68 _softmax[grid_2d](
69 y,
70 x_,
71 mask,
72 y.stride(0),
73 y.stride(1),
74 x_.stride(0),
75 x_.stride(1),
76 mask.stride(0),
77 x_.shape[2],
78 log=log_outputs,
79 use_mask=use_mask,
80 causal=causal,
81 )
82
83 ctx.save_for_backward(y)
84 ctx.log_outputs = log_outputs
85 ctx.causal = causal
86 return y.reshape_as(x)
87
88 @staticmethod
89 @custom_bwd
90 def backward(ctx, grad_out):
91 (out,) = ctx.saved_tensors
92
93 # Handle 2D/3D tensors
94 grad_out_ = grad_out.unsqueeze(0) if grad_out.ndim == 2 else grad_out
95
96 # SPMD launch grid
97 grid_2d = (
98 grad_out_.shape[0],
99 grad_out_.shape[1],
100 )
101
102 depth = triton.next_power_of_2(grad_out_.shape[2])
103 grad_in = torch.empty_like(
104 out
105 ) # torch.zeros is measurably slower, we'll zero out in the kernel
106
107 # Make sure that the tensor are contiguous
108 grad_in, grad_out, out = map(lambda x: x.contiguous(), [grad_in, grad_out, out])
109
110 # fmt: off
111 _softmax_backward[grid_2d](
112 grad_in, grad_out_, out,
113 grad_in.stride(0), grad_in.stride(1),
114 grad_out_.stride(0), grad_out_.stride(1),
115 out.stride(0), out.stride(1),
116 out.shape[2],
117 depth=depth,
118 log=ctx.log_outputs,
119 causal=ctx.causal
120 )
121 # fmt: on
122 return grad_in.reshape_as(grad_out), None, None, None
123
124
125 def softmax(
126 x: torch.Tensor, mask: Optional[torch.Tensor] = None, causal: bool = False
127 ) -> torch.Tensor:
128 r"""Applies the Softmax function to an 3-dimensional input Tensor
129 rescaling them so that the elements of the n-dimensional output Tensor
130 lie in the range [0,1] and sum to 1.
131
132 Softmax is defined as:
133
134 .. math::
135 \text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}
136
137 .. warning: softmax is computed on the last dimension of the input tensor.
138
139
140 Args:
141 x: input tensor.
142 mask: optional mask, its application will be fused to the softmax computation if triton is used
143 causal: optional performance optimization, if triton is used and the attention is causal
144
145 Returns:
146 a Tensor of the same dimension and shape as the input with
147 values in the range [0, 1] and sum to 1
148 """
149 return _softmax_dispatch(x, log=False, mask=mask, causal=causal)
150
151
152 def log_softmax(
153 x: torch.Tensor, mask: Optional[torch.Tensor] = None, causal: bool = False
154 ) -> torch.Tensor:
155 r"""Applies the :math:`\log(\text{Softmax}(x))` function to an 3-dimensional
156 input Tensor. The LogSoftmax formulation can be simplified as:
157
158 .. math::
159 \text{LogSoftmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)
160
161 Args:
162 x: input tensor.
163
164 Returns:
165 a Tensor of the same dimension and shape as the input with
166 values in the range [-inf, 0)
167 """
168 return _softmax_dispatch(x, log=True, mask=mask, causal=causal)
169
170
171 def _softmax_dispatch(
172 x: torch.Tensor, log: bool, mask: Optional[torch.Tensor], causal: bool = False
173 ) -> torch.Tensor:
174 # Triton is used if
175 # - CUDA
176 # - there's enough data to make it faster than pytorch. This could change over time, Triton is improving
177 # - there was no previous failure
178
179 global _triton_registered_overflow
180 global _triton_registered_warnings
181
182 try:
183 if torch.cuda.is_available() and x.is_cuda and not _triton_registered_overflow:
184 return _softmax_triton.apply(x, mask, log, causal)
185 except (triton.code_gen.OutOfResources, RuntimeError) as e:
186 # Catch cases where the current GPU does not have enough registers to hold a full tensor line
187 # fallback to PyTorch's implementation, which streams the tensor in and out
188 _triton_registered_overflow = True
189 logging.warning(
190 "Triton softmax kernel register spillover or invalid image caught."
191 "Deactivating this kernel, please file an issue int the xFormers repository"
192 )
193 logging.warning(e)
194
195 if causal and not _triton_registered_warnings:
196 logging.warning(
197 "Triton softmax could not be used. \
198 The causal flags is being passed but it does not provide any benefit with PyTorch softmax."
199 )
200 _triton_registered_warnings = True
201
202 if mask is not None:
203 x += mask
204
205 if log:
206 return torch.log_softmax(x, dim=-1)
207 else:
208 return torch.softmax(x, dim=-1)
209
[end of xformers/triton/softmax.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/xformers/triton/softmax.py b/xformers/triton/softmax.py
--- a/xformers/triton/softmax.py
+++ b/xformers/triton/softmax.py
@@ -200,7 +200,7 @@
_triton_registered_warnings = True
if mask is not None:
- x += mask
+ x = x + mask
if log:
return torch.log_softmax(x, dim=-1)
| {"golden_diff": "diff --git a/xformers/triton/softmax.py b/xformers/triton/softmax.py\n--- a/xformers/triton/softmax.py\n+++ b/xformers/triton/softmax.py\n@@ -200,7 +200,7 @@\n _triton_registered_warnings = True\n \n if mask is not None:\n- x += mask\n+ x = x + mask\n \n if log:\n return torch.log_softmax(x, dim=-1)\n", "issue": "Follow up on #141 with a proper unit test\n# \ud83d\udc1b Bug\r\nMake sure that #141 cannot happen anymore, add a small unit test to guard parity\r\n\nFollow up on #141 with a proper unit test\n# \ud83d\udc1b Bug\r\nMake sure that #141 cannot happen anymore, add a small unit test to guard parity\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport logging\nfrom enum import Enum\nfrom typing import Optional\n\nimport torch\nimport triton\nfrom torch.cuda.amp import custom_bwd, custom_fwd\n\nfrom xformers.triton.k_softmax import _softmax, _softmax_backward\n\n# CREDITS: This is adapted from the vanilla Triton example. See https://openai.com/blog/triton/\n# and https://triton-lang.org/getting-started/tutorials/02-fused-softmax.html\n\n\n_triton_registered_overflow = False\n_triton_registered_warnings = False\n_triton_softmax_fp16_enabled = False # NOTE: PyTorch keeps softmax as fp32\n\n\nclass MaskType(str, Enum):\n ADD = \"add\"\n MUL = \"mul\"\n\n\n# Helper to handle the SPMD launch grid and error cases\nclass _softmax_triton(torch.autograd.Function):\n @staticmethod\n @custom_fwd(cast_inputs=torch.float16 if _triton_softmax_fp16_enabled else None)\n def forward(ctx, x, mask, log_outputs, causal):\n \"\"\"\n Fused softmax implementation, using the Triton programming model.\n This only supports a reduction over the last dimension for now\n \"\"\"\n\n # Handle 2D/3D tensors\n x_ = x.unsqueeze(0) if x.ndim == 2 else x\n\n if not x_.is_contiguous():\n x_ = x_.contiguous()\n\n y = torch.empty_like(x_)\n assert (\n y.stride(2) == 1 and x_.stride(2) == 1\n ), f\"{x.shape} - {x_.shape} - {x_.stride()}\"\n\n # SPMD launch grid\n grid_2d = (\n x_.shape[0],\n x_.shape[1],\n )\n\n # enqueue GPU kernel\n use_mask = True\n if mask is None:\n # placeholder, will not be used\n mask = x_\n use_mask = False\n else:\n # Make sure that the mask is binary\n assert mask.dtype == x.dtype, \"An additive mask is requested\"\n\n _softmax[grid_2d](\n y,\n x_,\n mask,\n y.stride(0),\n y.stride(1),\n x_.stride(0),\n x_.stride(1),\n mask.stride(0),\n x_.shape[2],\n log=log_outputs,\n use_mask=use_mask,\n causal=causal,\n )\n\n ctx.save_for_backward(y)\n ctx.log_outputs = log_outputs\n ctx.causal = causal\n return y.reshape_as(x)\n\n @staticmethod\n @custom_bwd\n def backward(ctx, grad_out):\n (out,) = ctx.saved_tensors\n\n # Handle 2D/3D tensors\n grad_out_ = grad_out.unsqueeze(0) if grad_out.ndim == 2 else grad_out\n\n # SPMD launch grid\n grid_2d = (\n grad_out_.shape[0],\n grad_out_.shape[1],\n )\n\n depth = triton.next_power_of_2(grad_out_.shape[2])\n grad_in = torch.empty_like(\n out\n ) # torch.zeros is measurably slower, we'll zero out in the kernel\n\n # Make sure that the tensor are contiguous\n grad_in, grad_out, out = map(lambda x: x.contiguous(), [grad_in, grad_out, out])\n\n # fmt: off\n _softmax_backward[grid_2d](\n grad_in, grad_out_, out,\n grad_in.stride(0), grad_in.stride(1),\n grad_out_.stride(0), grad_out_.stride(1),\n out.stride(0), out.stride(1),\n out.shape[2],\n 
depth=depth,\n log=ctx.log_outputs,\n causal=ctx.causal\n )\n # fmt: on\n return grad_in.reshape_as(grad_out), None, None, None\n\n\ndef softmax(\n x: torch.Tensor, mask: Optional[torch.Tensor] = None, causal: bool = False\n) -> torch.Tensor:\n r\"\"\"Applies the Softmax function to an 3-dimensional input Tensor\n rescaling them so that the elements of the n-dimensional output Tensor\n lie in the range [0,1] and sum to 1.\n\n Softmax is defined as:\n\n .. math::\n \\text{Softmax}(x_{i}) = \\frac{\\exp(x_i)}{\\sum_j \\exp(x_j)}\n\n .. warning: softmax is computed on the last dimension of the input tensor.\n\n\n Args:\n x: input tensor.\n mask: optional mask, its application will be fused to the softmax computation if triton is used\n causal: optional performance optimization, if triton is used and the attention is causal\n\n Returns:\n a Tensor of the same dimension and shape as the input with\n values in the range [0, 1] and sum to 1\n \"\"\"\n return _softmax_dispatch(x, log=False, mask=mask, causal=causal)\n\n\ndef log_softmax(\n x: torch.Tensor, mask: Optional[torch.Tensor] = None, causal: bool = False\n) -> torch.Tensor:\n r\"\"\"Applies the :math:`\\log(\\text{Softmax}(x))` function to an 3-dimensional\n input Tensor. The LogSoftmax formulation can be simplified as:\n\n .. math::\n \\text{LogSoftmax}(x_{i}) = \\log\\left(\\frac{\\exp(x_i) }{ \\sum_j \\exp(x_j)} \\right)\n\n Args:\n x: input tensor.\n\n Returns:\n a Tensor of the same dimension and shape as the input with\n values in the range [-inf, 0)\n \"\"\"\n return _softmax_dispatch(x, log=True, mask=mask, causal=causal)\n\n\ndef _softmax_dispatch(\n x: torch.Tensor, log: bool, mask: Optional[torch.Tensor], causal: bool = False\n) -> torch.Tensor:\n # Triton is used if\n # - CUDA\n # - there's enough data to make it faster than pytorch. This could change over time, Triton is improving\n # - there was no previous failure\n\n global _triton_registered_overflow\n global _triton_registered_warnings\n\n try:\n if torch.cuda.is_available() and x.is_cuda and not _triton_registered_overflow:\n return _softmax_triton.apply(x, mask, log, causal)\n except (triton.code_gen.OutOfResources, RuntimeError) as e:\n # Catch cases where the current GPU does not have enough registers to hold a full tensor line\n # fallback to PyTorch's implementation, which streams the tensor in and out\n _triton_registered_overflow = True\n logging.warning(\n \"Triton softmax kernel register spillover or invalid image caught.\"\n \"Deactivating this kernel, please file an issue int the xFormers repository\"\n )\n logging.warning(e)\n\n if causal and not _triton_registered_warnings:\n logging.warning(\n \"Triton softmax could not be used. \\\n The causal flags is being passed but it does not provide any benefit with PyTorch softmax.\"\n )\n _triton_registered_warnings = True\n\n if mask is not None:\n x += mask\n\n if log:\n return torch.log_softmax(x, dim=-1)\n else:\n return torch.softmax(x, dim=-1)\n", "path": "xformers/triton/softmax.py"}]} | 2,814 | 103 |
gh_patches_debug_1616 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-3193 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Switching editions changes "shelved" date
**Describe the bug**
When switching editions of a book already on your "To Read" list, the "shelved" date is changed to today's date.
**To Reproduce**
Steps to reproduce the behavior:
1. Pick any book on your "To read" list with more than one edition
2. Pick another edition and switch to it
3. Observe that the book's shelved date is now today
**Expected behavior**
This shouldn't change the shelved date
**Instance**
https://books.theunseen.city
---
**Desktop (please complete the following information):**
- OS: MacOS 14.1
- Browser: Firefox
- Version: 20.0 (64-bit)
</issue>
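The reproduction steps point at `switch_edition` in the view below: when it moves a shelf entry to the new edition it re-creates the `ShelfBook` row and copies `created_date` but not `shelved_date`, so that field falls back to its default (presumably the current time). A minimal sketch of the idea, wrapped in a hypothetical helper and assuming `ShelfBook` exposes a `shelved_date` field:

```python
from bookwyrm import models


def _move_shelf_entries(shelfbooks, new_edition):
    """Hypothetical helper: re-shelve each entry against the new edition."""
    for shelfbook in shelfbooks.all():
        models.ShelfBook.objects.create(
            created_date=shelfbook.created_date,
            shelved_date=shelfbook.shelved_date,  # carry the original shelving date across
            user=shelfbook.user,
            shelf=shelfbook.shelf,
            book=new_edition,
        )
        shelfbook.delete()
```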
<code>
[start of bookwyrm/views/books/editions.py]
1 """ the good stuff! the books! """
2 from functools import reduce
3 import operator
4
5 from django.contrib.auth.decorators import login_required
6 from django.core.paginator import Paginator
7 from django.db import transaction
8 from django.db.models import Q
9 from django.shortcuts import get_object_or_404, redirect
10 from django.template.response import TemplateResponse
11 from django.views import View
12 from django.views.decorators.http import require_POST
13
14 from bookwyrm import forms, models
15 from bookwyrm.activitypub import ActivitypubResponse
16 from bookwyrm.settings import PAGE_LENGTH
17 from bookwyrm.views.helpers import is_api_request
18
19
20 # pylint: disable=no-self-use
21 class Editions(View):
22 """list of editions"""
23
24 def get(self, request, book_id):
25 """list of editions of a book"""
26 work = get_object_or_404(models.Work, id=book_id)
27
28 if is_api_request(request):
29 return ActivitypubResponse(work.to_edition_list(**request.GET))
30 filters = {}
31
32 if request.GET.get("language"):
33 filters["languages__contains"] = [request.GET.get("language")]
34 if request.GET.get("format"):
35 filters["physical_format__iexact"] = request.GET.get("format")
36
37 editions = work.editions.order_by("-edition_rank")
38 languages = set(sum(editions.values_list("languages", flat=True), []))
39
40 editions = editions.filter(**filters)
41
42 query = request.GET.get("q")
43 if query:
44 searchable_array_fields = ["languages", "publishers"]
45 searchable_fields = [
46 "title",
47 "physical_format",
48 "isbn_10",
49 "isbn_13",
50 "oclc_number",
51 "asin",
52 "aasin",
53 "isfdb",
54 ]
55 search_filter_entries = [
56 {f"{f}__icontains": query} for f in searchable_fields
57 ] + [{f"{f}__iexact": query} for f in searchable_array_fields]
58 editions = editions.filter(
59 reduce(operator.or_, (Q(**f) for f in search_filter_entries))
60 )
61
62 paginated = Paginator(editions, PAGE_LENGTH)
63 page = paginated.get_page(request.GET.get("page"))
64 data = {
65 "editions": page,
66 "page_range": paginated.get_elided_page_range(
67 page.number, on_each_side=2, on_ends=1
68 ),
69 "work": work,
70 "work_form": forms.EditionFromWorkForm(instance=work),
71 "languages": languages,
72 "formats": set(
73 e.physical_format.lower() for e in editions if e.physical_format
74 ),
75 }
76 return TemplateResponse(request, "book/editions/editions.html", data)
77
78
79 @login_required
80 @require_POST
81 @transaction.atomic
82 def switch_edition(request):
83 """switch your copy of a book to a different edition"""
84 edition_id = request.POST.get("edition")
85 new_edition = get_object_or_404(models.Edition, id=edition_id)
86 shelfbooks = models.ShelfBook.objects.filter(
87 book__parent_work=new_edition.parent_work, shelf__user=request.user
88 )
89 for shelfbook in shelfbooks.all():
90 with transaction.atomic():
91 models.ShelfBook.objects.create(
92 created_date=shelfbook.created_date,
93 user=shelfbook.user,
94 shelf=shelfbook.shelf,
95 book=new_edition,
96 )
97 shelfbook.delete()
98
99 readthroughs = models.ReadThrough.objects.filter(
100 book__parent_work=new_edition.parent_work, user=request.user
101 )
102 for readthrough in readthroughs.all():
103 readthrough.book = new_edition
104 readthrough.save()
105
106 return redirect(f"/book/{new_edition.id}")
107
[end of bookwyrm/views/books/editions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/views/books/editions.py b/bookwyrm/views/books/editions.py
--- a/bookwyrm/views/books/editions.py
+++ b/bookwyrm/views/books/editions.py
@@ -93,6 +93,7 @@
user=shelfbook.user,
shelf=shelfbook.shelf,
book=new_edition,
+ shelved_date=shelfbook.shelved_date,
)
shelfbook.delete()
| {"golden_diff": "diff --git a/bookwyrm/views/books/editions.py b/bookwyrm/views/books/editions.py\n--- a/bookwyrm/views/books/editions.py\n+++ b/bookwyrm/views/books/editions.py\n@@ -93,6 +93,7 @@\n user=shelfbook.user,\n shelf=shelfbook.shelf,\n book=new_edition,\n+ shelved_date=shelfbook.shelved_date,\n )\n shelfbook.delete()\n", "issue": "Switching editions changes \"shelved\" date\n**Describe the bug**\r\nWhen switching editions of a book already on your \"To Read\" list, the \"shelved\" date is changed to today's date.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Pick any book on your \"To read\" list with more than one edition\r\n2. Pick another edition and switch to this\r\n3. Observe that the book's shelved date is now today\r\n\r\n**Expected behavior**\r\nThis shouldn't changed the shelved date\r\n\r\n**Instance**\r\nhttps://books.theunseen.city\r\n\r\n---\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: MacOS 14.1\r\n - Browser: Firefox\r\n - Version: 20.0 (64-bit)\r\n\n", "before_files": [{"content": "\"\"\" the good stuff! the books! \"\"\"\nfrom functools import reduce\nimport operator\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.views import View\nfrom django.views.decorators.http import require_POST\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.activitypub import ActivitypubResponse\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom bookwyrm.views.helpers import is_api_request\n\n\n# pylint: disable=no-self-use\nclass Editions(View):\n \"\"\"list of editions\"\"\"\n\n def get(self, request, book_id):\n \"\"\"list of editions of a book\"\"\"\n work = get_object_or_404(models.Work, id=book_id)\n\n if is_api_request(request):\n return ActivitypubResponse(work.to_edition_list(**request.GET))\n filters = {}\n\n if request.GET.get(\"language\"):\n filters[\"languages__contains\"] = [request.GET.get(\"language\")]\n if request.GET.get(\"format\"):\n filters[\"physical_format__iexact\"] = request.GET.get(\"format\")\n\n editions = work.editions.order_by(\"-edition_rank\")\n languages = set(sum(editions.values_list(\"languages\", flat=True), []))\n\n editions = editions.filter(**filters)\n\n query = request.GET.get(\"q\")\n if query:\n searchable_array_fields = [\"languages\", \"publishers\"]\n searchable_fields = [\n \"title\",\n \"physical_format\",\n \"isbn_10\",\n \"isbn_13\",\n \"oclc_number\",\n \"asin\",\n \"aasin\",\n \"isfdb\",\n ]\n search_filter_entries = [\n {f\"{f}__icontains\": query} for f in searchable_fields\n ] + [{f\"{f}__iexact\": query} for f in searchable_array_fields]\n editions = editions.filter(\n reduce(operator.or_, (Q(**f) for f in search_filter_entries))\n )\n\n paginated = Paginator(editions, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data = {\n \"editions\": page,\n \"page_range\": paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n ),\n \"work\": work,\n \"work_form\": forms.EditionFromWorkForm(instance=work),\n \"languages\": languages,\n \"formats\": set(\n e.physical_format.lower() for e in editions if e.physical_format\n ),\n }\n return TemplateResponse(request, \"book/editions/editions.html\", data)\n\n\n@login_required\n@require_POST\[email protected]\ndef switch_edition(request):\n \"\"\"switch 
your copy of a book to a different edition\"\"\"\n edition_id = request.POST.get(\"edition\")\n new_edition = get_object_or_404(models.Edition, id=edition_id)\n shelfbooks = models.ShelfBook.objects.filter(\n book__parent_work=new_edition.parent_work, shelf__user=request.user\n )\n for shelfbook in shelfbooks.all():\n with transaction.atomic():\n models.ShelfBook.objects.create(\n created_date=shelfbook.created_date,\n user=shelfbook.user,\n shelf=shelfbook.shelf,\n book=new_edition,\n )\n shelfbook.delete()\n\n readthroughs = models.ReadThrough.objects.filter(\n book__parent_work=new_edition.parent_work, user=request.user\n )\n for readthrough in readthroughs.all():\n readthrough.book = new_edition\n readthrough.save()\n\n return redirect(f\"/book/{new_edition.id}\")\n", "path": "bookwyrm/views/books/editions.py"}]} | 1,723 | 101 |
gh_patches_debug_20183 | rasdani/github-patches | git_diff | saleor__saleor-2826 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Django 2.1 compatibility
We should switch our supported Django version to the following list:
* Django 1.11 (current LTS)
* Django 2.1 (latest stable)
Current blockers:
* [x] `graphene-django` depends on an old version of `django-filters` (https://github.com/graphql-python/graphene-django/pull/492)
* [x] WeightInput passes floats to its base class which is a DecimalField
* [x] Some form widgets pass `renderer` to functions that don't expect it
</issue>
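The third blocker is the easiest to illustrate: since Django 2.1 the framework calls `Widget.render()` with a `renderer` argument, so any custom widget that overrides `render` has to accept it and pass it along. A sketch of a 2.1-compatible override, reusing the class and template names from the module below:

```python
from django import forms
from django.template.loader import render_to_string


class WeightInput(forms.TextInput):
    template = "dashboard/shipping/weight_widget.html"
    input_type = "number"

    def render(self, name, value, attrs=None, renderer=None):
        # Accept the renderer that Django 2.1 passes in and forward it upstream;
        # Django 1.11 already understands the keyword, so this stays compatible.
        widget = super().render(name, value, attrs=attrs, renderer=renderer)
        return render_to_string(self.template, {"widget": widget, "value": value})
```

The WeightInput/DecimalField blocker goes the other way: since the widget hands back plain floats, basing `WeightField` on `forms.FloatField` rather than `forms.DecimalField` is one way to keep validation consistent.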
<code>
[start of saleor/core/weight.py]
1 """In Saleor we are using 'weight' instead of a 'mass'.
2
3 For those of us who are earth-bound, weight is what we usually experience.
4 Mass is a theoretical construct.
5 Unless we are dealing with inertia and momentum, we are encountering
6 the attractive force between ourselves and the earth,
7 the isolated effects of mass alone being a little more esoteric.
8
9 So even though mass is more fundamental, most people think
10 in terms of weight.
11
12 In the end, it does not really matter unless you travel between
13 different planets.
14 """
15 from decimal import Decimal
16 from enum import Enum
17
18 from django import forms
19 from django.contrib.sites.models import Site
20 from django.core.validators import MinValueValidator
21 from django.template.loader import render_to_string
22 from django.utils.translation import pgettext_lazy
23 from measurement.measures import Weight
24
25
26 class WeightUnits:
27 KILOGRAM = 'kg'
28 POUND = 'lb'
29 OUNCE = 'oz'
30 GRAM = 'g'
31
32 CHOICES = [
33 (KILOGRAM, pgettext_lazy('Kilogram weight unit symbol', 'kg')),
34 (POUND, pgettext_lazy('Pound weight unit symbol', 'lb')),
35 (OUNCE, pgettext_lazy('Ounce weight unit symbol', 'oz')),
36 (GRAM, pgettext_lazy('Gram weight unit symbol', 'g'))]
37
38
39 WeightUnitsEnum = Enum(
40 'WeightUnitsEnum',
41 {unit: unit for unit in WeightUnits.CHOICES})
42
43
44 def zero_weight():
45 """Function used as a model's default."""
46 return Weight(kg=0)
47
48
49 def convert_weight(weight, unit):
50 # Weight amount from the Weight instance can be retrived in serveral units
51 # via its properties. eg. Weight(lb=10).kg
52 converted_weight = getattr(weight, unit)
53 return Weight(**{unit: converted_weight})
54
55
56 def get_default_weight_unit():
57 site = Site.objects.get_current()
58 return site.settings.default_weight_unit
59
60
61 class WeightInput(forms.TextInput):
62 template = 'dashboard/shipping/weight_widget.html'
63 input_type = 'number'
64
65 def format_value(self, value):
66 if isinstance(value, Weight):
67 unit = get_default_weight_unit()
68 if value.unit != unit:
69 value = convert_weight(value, unit)
70 return value.value
71 return value
72
73 def render(self, name, value, attrs=None):
74 widget = super().render(name, value, attrs=attrs)
75 unit = get_default_weight_unit()
76 translated_unit = dict(WeightUnits.CHOICES)[unit]
77 return render_to_string(
78 self.template,
79 {'widget': widget, 'value': value, 'unit': translated_unit})
80
81
82 class WeightField(forms.DecimalField):
83 def __init__(self, *args, widget=WeightInput, min_value=0, **kwargs):
84 if isinstance(widget, type):
85 widget = widget(attrs={'type': 'number', 'step': 'any'})
86 super().__init__(*args, widget=widget, **kwargs)
87 if min_value is not None:
88 self.validators.append(MinValueValidator(min_value))
89
90 def to_python(self, value):
91 value = super().to_python(value)
92 if value is None:
93 return value
94 unit = get_default_weight_unit()
95 return Weight(**{unit: value})
96
97 def validate(self, weight):
98 if weight is None or weight in self.empty_values:
99 super().validate(weight)
100 else:
101 unit = get_default_weight_unit()
102 if not isinstance(weight, Weight):
103 raise Exception(
104 '%r is not a valid weight.' % (weight,))
105 if weight.unit != unit:
106 raise forms.ValidationError(
107 'Invalid unit: %r (expected %r).' % (
108 weight.unit, unit))
109 super().validate(weight.value)
110
111 def clean(self, value):
112 value = value_to_be_validated = self.to_python(value)
113 self.validate(value_to_be_validated)
114 if isinstance(value, Weight):
115 value_to_be_validated = Decimal(value.value)
116 # default decimal validators can be used for Weight's value only
117 self.run_validators(value_to_be_validated)
118 return value
119
[end of saleor/core/weight.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/core/weight.py b/saleor/core/weight.py
--- a/saleor/core/weight.py
+++ b/saleor/core/weight.py
@@ -70,8 +70,8 @@
return value.value
return value
- def render(self, name, value, attrs=None):
- widget = super().render(name, value, attrs=attrs)
+ def render(self, name, value, attrs=None, renderer=None):
+ widget = super().render(name, value, attrs=attrs, renderer=renderer)
unit = get_default_weight_unit()
translated_unit = dict(WeightUnits.CHOICES)[unit]
return render_to_string(
@@ -79,7 +79,7 @@
{'widget': widget, 'value': value, 'unit': translated_unit})
-class WeightField(forms.DecimalField):
+class WeightField(forms.FloatField):
def __init__(self, *args, widget=WeightInput, min_value=0, **kwargs):
if isinstance(widget, type):
widget = widget(attrs={'type': 'number', 'step': 'any'})
| {"golden_diff": "diff --git a/saleor/core/weight.py b/saleor/core/weight.py\n--- a/saleor/core/weight.py\n+++ b/saleor/core/weight.py\n@@ -70,8 +70,8 @@\n return value.value\n return value\n \n- def render(self, name, value, attrs=None):\n- widget = super().render(name, value, attrs=attrs)\n+ def render(self, name, value, attrs=None, renderer=None):\n+ widget = super().render(name, value, attrs=attrs, renderer=renderer)\n unit = get_default_weight_unit()\n translated_unit = dict(WeightUnits.CHOICES)[unit]\n return render_to_string(\n@@ -79,7 +79,7 @@\n {'widget': widget, 'value': value, 'unit': translated_unit})\n \n \n-class WeightField(forms.DecimalField):\n+class WeightField(forms.FloatField):\n def __init__(self, *args, widget=WeightInput, min_value=0, **kwargs):\n if isinstance(widget, type):\n widget = widget(attrs={'type': 'number', 'step': 'any'})\n", "issue": "Django 2.1 compatibility\nWe should switch our supported Django version to the following list:\r\n* Django 1.11 (current LTS)\r\n* Django 2.1 (latest stable)\r\n\r\nCurrent blockers:\r\n* [x] `graphene-django` depends on an old version of `django-filters` (https://github.com/graphql-python/graphene-django/pull/492)\r\n* [x] WeightInput passes floats to its base class which is a DecimalField\r\n* [x] Some form widgets pass `renderer` to functions that don't expect it\n", "before_files": [{"content": "\"\"\"In Saleor we are using 'weight' instead of a 'mass'.\n\nFor those of us who are earth-bound, weight is what we usually experience.\nMass is a theoretical construct.\nUnless we are dealing with inertia and momentum, we are encountering\nthe attractive force between ourselves and the earth,\nthe isolated effects of mass alone being a little more esoteric.\n\nSo even though mass is more fundamental, most people think\nin terms of weight.\n\nIn the end, it does not really matter unless you travel between\ndifferent planets.\n\"\"\"\nfrom decimal import Decimal\nfrom enum import Enum\n\nfrom django import forms\nfrom django.contrib.sites.models import Site\nfrom django.core.validators import MinValueValidator\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import pgettext_lazy\nfrom measurement.measures import Weight\n\n\nclass WeightUnits:\n KILOGRAM = 'kg'\n POUND = 'lb'\n OUNCE = 'oz'\n GRAM = 'g'\n\n CHOICES = [\n (KILOGRAM, pgettext_lazy('Kilogram weight unit symbol', 'kg')),\n (POUND, pgettext_lazy('Pound weight unit symbol', 'lb')),\n (OUNCE, pgettext_lazy('Ounce weight unit symbol', 'oz')),\n (GRAM, pgettext_lazy('Gram weight unit symbol', 'g'))]\n\n\nWeightUnitsEnum = Enum(\n 'WeightUnitsEnum',\n {unit: unit for unit in WeightUnits.CHOICES})\n\n\ndef zero_weight():\n \"\"\"Function used as a model's default.\"\"\"\n return Weight(kg=0)\n\n\ndef convert_weight(weight, unit):\n # Weight amount from the Weight instance can be retrived in serveral units\n # via its properties. eg. 
Weight(lb=10).kg\n converted_weight = getattr(weight, unit)\n return Weight(**{unit: converted_weight})\n\n\ndef get_default_weight_unit():\n site = Site.objects.get_current()\n return site.settings.default_weight_unit\n\n\nclass WeightInput(forms.TextInput):\n template = 'dashboard/shipping/weight_widget.html'\n input_type = 'number'\n\n def format_value(self, value):\n if isinstance(value, Weight):\n unit = get_default_weight_unit()\n if value.unit != unit:\n value = convert_weight(value, unit)\n return value.value\n return value\n\n def render(self, name, value, attrs=None):\n widget = super().render(name, value, attrs=attrs)\n unit = get_default_weight_unit()\n translated_unit = dict(WeightUnits.CHOICES)[unit]\n return render_to_string(\n self.template,\n {'widget': widget, 'value': value, 'unit': translated_unit})\n\n\nclass WeightField(forms.DecimalField):\n def __init__(self, *args, widget=WeightInput, min_value=0, **kwargs):\n if isinstance(widget, type):\n widget = widget(attrs={'type': 'number', 'step': 'any'})\n super().__init__(*args, widget=widget, **kwargs)\n if min_value is not None:\n self.validators.append(MinValueValidator(min_value))\n\n def to_python(self, value):\n value = super().to_python(value)\n if value is None:\n return value\n unit = get_default_weight_unit()\n return Weight(**{unit: value})\n\n def validate(self, weight):\n if weight is None or weight in self.empty_values:\n super().validate(weight)\n else:\n unit = get_default_weight_unit()\n if not isinstance(weight, Weight):\n raise Exception(\n '%r is not a valid weight.' % (weight,))\n if weight.unit != unit:\n raise forms.ValidationError(\n 'Invalid unit: %r (expected %r).' % (\n weight.unit, unit))\n super().validate(weight.value)\n\n def clean(self, value):\n value = value_to_be_validated = self.to_python(value)\n self.validate(value_to_be_validated)\n if isinstance(value, Weight):\n value_to_be_validated = Decimal(value.value)\n # default decimal validators can be used for Weight's value only\n self.run_validators(value_to_be_validated)\n return value\n", "path": "saleor/core/weight.py"}]} | 1,793 | 242 |
gh_patches_debug_10620 | rasdani/github-patches | git_diff | apache__tvm-5870 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Some Windows and MSVC fixes
ref: https://github.com/apache/incubator-tvm/issues/4529
</issue>
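The `py_str` helper in the module below decodes FFI strings on Windows with the active ANSI code page; messages that are actually UTF-8 then come out mangled or fail to decode. A more tolerant approach is to try UTF-8 first and only fall back to the code page, roughly:

```python
import ctypes
import sys


def py_str(x: bytes) -> str:
    """Decode a C string from the FFI; sketch of a Windows-tolerant variant."""
    if sys.platform == "win32":
        try:
            return x.decode("utf-8")
        except UnicodeDecodeError:
            # Fall back to the active ANSI code page for legacy-encoded text.
            encoding = "cp" + str(ctypes.cdll.kernel32.GetACP())
            return x.decode(encoding)
    return x.decode("utf-8")
```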
<code>
[start of python/tvm/_ffi/base.py]
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17 # coding: utf-8
18 # pylint: disable=invalid-name
19 """Base library for TVM FFI."""
20 from __future__ import absolute_import
21
22 import sys
23 import os
24 import ctypes
25 import numpy as np
26 from . import libinfo
27
28 #----------------------------
29 # library loading
30 #----------------------------
31 if sys.version_info[0] == 3:
32 string_types = (str,)
33 integer_types = (int, np.int32)
34 numeric_types = integer_types + (float, np.float32)
35 # this function is needed for python3
36 # to convert ctypes.char_p .value back to python str
37 if sys.platform == "win32":
38 encoding = 'cp' + str(ctypes.cdll.kernel32.GetACP())
39 py_str = lambda x: x.decode(encoding)
40 else:
41 py_str = lambda x: x.decode('utf-8')
42 else:
43 string_types = (basestring,)
44 integer_types = (int, long, np.int32)
45 numeric_types = integer_types + (float, np.float32)
46 py_str = lambda x: x
47
48
49 def _load_lib():
50 """Load libary by searching possible path."""
51 lib_path = libinfo.find_lib_path()
52 lib = ctypes.CDLL(lib_path[0], ctypes.RTLD_GLOBAL)
53 # DMatrix functions
54 lib.TVMGetLastError.restype = ctypes.c_char_p
55 return lib, os.path.basename(lib_path[0])
56
57 try:
58 import readline # pylint: disable=unused-import
59 except ImportError:
60 pass
61
62 # version number
63 __version__ = libinfo.__version__
64 # library instance of nnvm
65 _LIB, _LIB_NAME = _load_lib()
66
67 # Whether we are runtime only
68 _RUNTIME_ONLY = "runtime" in _LIB_NAME
69
70 # The FFI mode of TVM
71 _FFI_MODE = os.environ.get("TVM_FFI", "auto")
72
73 #----------------------------
74 # helper function in ctypes.
75 #----------------------------
76 def c_str(string):
77 """Create ctypes char * from a python string
78 Parameters
79 ----------
80 string : string type
81 python string
82
83 Returns
84 -------
85 str : c_char_p
86 A char pointer that can be passed to C API
87 """
88 return ctypes.c_char_p(string.encode('utf-8'))
89
90
91 def c_array(ctype, values):
92 """Create ctypes array from a python array
93
94 Parameters
95 ----------
96 ctype : ctypes data type
97 data type of the array we want to convert to
98
99 values : tuple or list
100 data content
101
102 Returns
103 -------
104 out : ctypes array
105 Created ctypes array
106 """
107 return (ctype * len(values))(*values)
108
109
110 def decorate(func, fwrapped):
111 """A wrapper call of decorator package, differs to call time
112
113 Parameters
114 ----------
115 func : function
116 The original function
117
118 fwrapped : function
119 The wrapped function
120 """
121 import decorator
122 return decorator.decorate(func, fwrapped)
123
124
125 #-----------------------------------------
126 # Base code for structured error handling.
127 #-----------------------------------------
128 # Maps error type to its constructor
129 ERROR_TYPE = {}
130
131
132 class TVMError(RuntimeError):
133 """Default error thrown by TVM functions.
134
135 TVMError will be raised if you do not give any error type specification,
136 """
137
138
139 def register_error(func_name=None, cls=None):
140 """Register an error class so it can be recognized by the ffi error handler.
141
142 Parameters
143 ----------
144 func_name : str or function or class
145 The name of the error function.
146
147 cls : function
148 The function to create the class
149
150 Returns
151 -------
152 fregister : function
153 Register function if f is not specified.
154
155 Examples
156 --------
157 .. code-block:: python
158
159 @tvm.error.register_error
160 class MyError(RuntimeError):
161 pass
162
163 err_inst = tvm.error.create_ffi_error("MyError: xyz")
164 assert isinstance(err_inst, MyError)
165 """
166 if callable(func_name):
167 cls = func_name
168 func_name = cls.__name__
169
170 def register(mycls):
171 """internal register function"""
172 err_name = func_name if isinstance(func_name, str) else mycls.__name__
173 ERROR_TYPE[err_name] = mycls
174 return mycls
175 if cls is None:
176 return register
177 return register(cls)
178
179
180 def _valid_error_name(name):
181 """Check whether name is a valid error name."""
182 return all(x.isalnum() or x in "_." for x in name)
183
184
185 def _find_error_type(line):
186 """Find the error name given the first line of the error message.
187
188 Parameters
189 ----------
190 line : str
191 The first line of error message.
192
193 Returns
194 -------
195 name : str The error name
196 """
197 end_pos = line.find(":")
198 if end_pos == -1:
199 return None
200 err_name = line[:end_pos]
201 if _valid_error_name(err_name):
202 return err_name
203 return None
204
205
206 def c2pyerror(err_msg):
207 """Translate C API error message to python style.
208
209 Parameters
210 ----------
211 err_msg : str
212 The error message.
213
214 Returns
215 -------
216 new_msg : str
217 Translated message.
218
219 err_type : str
220 Detected error type.
221 """
222 arr = err_msg.split("\n")
223 if arr[-1] == "":
224 arr.pop()
225 err_type = _find_error_type(arr[0])
226 trace_mode = False
227 stack_trace = []
228 message = []
229 for line in arr:
230 if trace_mode:
231 if line.startswith(" "):
232 stack_trace.append(line)
233 else:
234 trace_mode = False
235 if not trace_mode:
236 if line.startswith("Stack trace"):
237 trace_mode = True
238 else:
239 message.append(line)
240 out_msg = ""
241 if stack_trace:
242 out_msg += "Traceback (most recent call last):\n"
243 out_msg += "\n".join(reversed(stack_trace)) + "\n"
244 out_msg += "\n".join(message)
245 return out_msg, err_type
246
247
248 def py2cerror(err_msg):
249 """Translate python style error message to C style.
250
251 Parameters
252 ----------
253 err_msg : str
254 The error message.
255
256 Returns
257 -------
258 new_msg : str
259 Translated message.
260 """
261 arr = err_msg.split("\n")
262 if arr[-1] == "":
263 arr.pop()
264 trace_mode = False
265 stack_trace = []
266 message = []
267 for line in arr:
268 if trace_mode:
269 if line.startswith(" "):
270 stack_trace.append(line)
271 else:
272 trace_mode = False
273 if not trace_mode:
274 if line.find("Traceback") != -1:
275 trace_mode = True
276 else:
277 message.append(line)
278 # Remove the first error name if there are two of them.
279 # RuntimeError: MyErrorName: message => MyErrorName: message
280 head_arr = message[0].split(":", 3)
281 if len(head_arr) >= 3 and _valid_error_name(head_arr[1].strip()):
282 head_arr[1] = head_arr[1].strip()
283 message[0] = ":".join(head_arr[1:])
284 # reverse the stack trace.
285 out_msg = "\n".join(message)
286 if stack_trace:
287 out_msg += "\nStack trace:\n"
288 out_msg += "\n".join(reversed(stack_trace)) + "\n"
289 return out_msg
290
291
292 def get_last_ffi_error():
293 """Create error object given result of TVMGetLastError.
294
295 Returns
296 -------
297 err : object
298 The error object based on the err_msg
299 """
300 c_err_msg = py_str(_LIB.TVMGetLastError())
301 py_err_msg, err_type = c2pyerror(c_err_msg)
302 if err_type is not None and err_type.startswith("tvm.error."):
303 err_type = err_type[10:]
304 return ERROR_TYPE.get(err_type, TVMError)(py_err_msg)
305
306
307 def check_call(ret):
308 """Check the return value of C API call
309
310 This function will raise exception when error occurs.
311 Wrap every API call with this function
312
313 Parameters
314 ----------
315 ret : int
316 return value from API calls
317 """
318 if ret != 0:
319 raise get_last_ffi_error()
320
[end of python/tvm/_ffi/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/tvm/_ffi/base.py b/python/tvm/_ffi/base.py
--- a/python/tvm/_ffi/base.py
+++ b/python/tvm/_ffi/base.py
@@ -35,8 +35,13 @@
# this function is needed for python3
# to convert ctypes.char_p .value back to python str
if sys.platform == "win32":
- encoding = 'cp' + str(ctypes.cdll.kernel32.GetACP())
- py_str = lambda x: x.decode(encoding)
+ def _py_str(x):
+ try:
+ return x.decode('utf-8')
+ except UnicodeDecodeError:
+ encoding = 'cp' + str(ctypes.cdll.kernel32.GetACP())
+ return x.decode(encoding)
+ py_str = _py_str
else:
py_str = lambda x: x.decode('utf-8')
else:
| {"golden_diff": "diff --git a/python/tvm/_ffi/base.py b/python/tvm/_ffi/base.py\n--- a/python/tvm/_ffi/base.py\n+++ b/python/tvm/_ffi/base.py\n@@ -35,8 +35,13 @@\n # this function is needed for python3\n # to convert ctypes.char_p .value back to python str\n if sys.platform == \"win32\":\n- encoding = 'cp' + str(ctypes.cdll.kernel32.GetACP())\n- py_str = lambda x: x.decode(encoding)\n+ def _py_str(x):\n+ try:\n+ return x.decode('utf-8')\n+ except UnicodeDecodeError:\n+ encoding = 'cp' + str(ctypes.cdll.kernel32.GetACP())\n+ return x.decode(encoding)\n+ py_str = _py_str\n else:\n py_str = lambda x: x.decode('utf-8')\n else:\n", "issue": "Some Windows and MSVC fixes\nref: https://github.com/apache/incubator-tvm/issues/4529\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# coding: utf-8\n# pylint: disable=invalid-name\n\"\"\"Base library for TVM FFI.\"\"\"\nfrom __future__ import absolute_import\n\nimport sys\nimport os\nimport ctypes\nimport numpy as np\nfrom . import libinfo\n\n#----------------------------\n# library loading\n#----------------------------\nif sys.version_info[0] == 3:\n string_types = (str,)\n integer_types = (int, np.int32)\n numeric_types = integer_types + (float, np.float32)\n # this function is needed for python3\n # to convert ctypes.char_p .value back to python str\n if sys.platform == \"win32\":\n encoding = 'cp' + str(ctypes.cdll.kernel32.GetACP())\n py_str = lambda x: x.decode(encoding)\n else:\n py_str = lambda x: x.decode('utf-8')\nelse:\n string_types = (basestring,)\n integer_types = (int, long, np.int32)\n numeric_types = integer_types + (float, np.float32)\n py_str = lambda x: x\n\n\ndef _load_lib():\n \"\"\"Load libary by searching possible path.\"\"\"\n lib_path = libinfo.find_lib_path()\n lib = ctypes.CDLL(lib_path[0], ctypes.RTLD_GLOBAL)\n # DMatrix functions\n lib.TVMGetLastError.restype = ctypes.c_char_p\n return lib, os.path.basename(lib_path[0])\n\ntry:\n import readline # pylint: disable=unused-import\nexcept ImportError:\n pass\n\n# version number\n__version__ = libinfo.__version__\n# library instance of nnvm\n_LIB, _LIB_NAME = _load_lib()\n\n# Whether we are runtime only\n_RUNTIME_ONLY = \"runtime\" in _LIB_NAME\n\n# The FFI mode of TVM\n_FFI_MODE = os.environ.get(\"TVM_FFI\", \"auto\")\n\n#----------------------------\n# helper function in ctypes.\n#----------------------------\ndef c_str(string):\n \"\"\"Create ctypes char * from a python string\n Parameters\n ----------\n string : string type\n python string\n\n Returns\n -------\n str : c_char_p\n A char pointer that can be passed to C API\n \"\"\"\n return ctypes.c_char_p(string.encode('utf-8'))\n\n\ndef c_array(ctype, values):\n \"\"\"Create ctypes array from a python array\n\n Parameters\n ----------\n ctype : ctypes 
data type\n data type of the array we want to convert to\n\n values : tuple or list\n data content\n\n Returns\n -------\n out : ctypes array\n Created ctypes array\n \"\"\"\n return (ctype * len(values))(*values)\n\n\ndef decorate(func, fwrapped):\n \"\"\"A wrapper call of decorator package, differs to call time\n\n Parameters\n ----------\n func : function\n The original function\n\n fwrapped : function\n The wrapped function\n \"\"\"\n import decorator\n return decorator.decorate(func, fwrapped)\n\n\n#-----------------------------------------\n# Base code for structured error handling.\n#-----------------------------------------\n# Maps error type to its constructor\nERROR_TYPE = {}\n\n\nclass TVMError(RuntimeError):\n \"\"\"Default error thrown by TVM functions.\n\n TVMError will be raised if you do not give any error type specification,\n \"\"\"\n\n\ndef register_error(func_name=None, cls=None):\n \"\"\"Register an error class so it can be recognized by the ffi error handler.\n\n Parameters\n ----------\n func_name : str or function or class\n The name of the error function.\n\n cls : function\n The function to create the class\n\n Returns\n -------\n fregister : function\n Register function if f is not specified.\n\n Examples\n --------\n .. code-block:: python\n\n @tvm.error.register_error\n class MyError(RuntimeError):\n pass\n\n err_inst = tvm.error.create_ffi_error(\"MyError: xyz\")\n assert isinstance(err_inst, MyError)\n \"\"\"\n if callable(func_name):\n cls = func_name\n func_name = cls.__name__\n\n def register(mycls):\n \"\"\"internal register function\"\"\"\n err_name = func_name if isinstance(func_name, str) else mycls.__name__\n ERROR_TYPE[err_name] = mycls\n return mycls\n if cls is None:\n return register\n return register(cls)\n\n\ndef _valid_error_name(name):\n \"\"\"Check whether name is a valid error name.\"\"\"\n return all(x.isalnum() or x in \"_.\" for x in name)\n\n\ndef _find_error_type(line):\n \"\"\"Find the error name given the first line of the error message.\n\n Parameters\n ----------\n line : str\n The first line of error message.\n\n Returns\n -------\n name : str The error name\n \"\"\"\n end_pos = line.find(\":\")\n if end_pos == -1:\n return None\n err_name = line[:end_pos]\n if _valid_error_name(err_name):\n return err_name\n return None\n\n\ndef c2pyerror(err_msg):\n \"\"\"Translate C API error message to python style.\n\n Parameters\n ----------\n err_msg : str\n The error message.\n\n Returns\n -------\n new_msg : str\n Translated message.\n\n err_type : str\n Detected error type.\n \"\"\"\n arr = err_msg.split(\"\\n\")\n if arr[-1] == \"\":\n arr.pop()\n err_type = _find_error_type(arr[0])\n trace_mode = False\n stack_trace = []\n message = []\n for line in arr:\n if trace_mode:\n if line.startswith(\" \"):\n stack_trace.append(line)\n else:\n trace_mode = False\n if not trace_mode:\n if line.startswith(\"Stack trace\"):\n trace_mode = True\n else:\n message.append(line)\n out_msg = \"\"\n if stack_trace:\n out_msg += \"Traceback (most recent call last):\\n\"\n out_msg += \"\\n\".join(reversed(stack_trace)) + \"\\n\"\n out_msg += \"\\n\".join(message)\n return out_msg, err_type\n\n\ndef py2cerror(err_msg):\n \"\"\"Translate python style error message to C style.\n\n Parameters\n ----------\n err_msg : str\n The error message.\n\n Returns\n -------\n new_msg : str\n Translated message.\n \"\"\"\n arr = err_msg.split(\"\\n\")\n if arr[-1] == \"\":\n arr.pop()\n trace_mode = False\n stack_trace = []\n message = []\n for line in arr:\n if 
trace_mode:\n if line.startswith(\" \"):\n stack_trace.append(line)\n else:\n trace_mode = False\n if not trace_mode:\n if line.find(\"Traceback\") != -1:\n trace_mode = True\n else:\n message.append(line)\n # Remove the first error name if there are two of them.\n # RuntimeError: MyErrorName: message => MyErrorName: message\n head_arr = message[0].split(\":\", 3)\n if len(head_arr) >= 3 and _valid_error_name(head_arr[1].strip()):\n head_arr[1] = head_arr[1].strip()\n message[0] = \":\".join(head_arr[1:])\n # reverse the stack trace.\n out_msg = \"\\n\".join(message)\n if stack_trace:\n out_msg += \"\\nStack trace:\\n\"\n out_msg += \"\\n\".join(reversed(stack_trace)) + \"\\n\"\n return out_msg\n\n\ndef get_last_ffi_error():\n \"\"\"Create error object given result of TVMGetLastError.\n\n Returns\n -------\n err : object\n The error object based on the err_msg\n \"\"\"\n c_err_msg = py_str(_LIB.TVMGetLastError())\n py_err_msg, err_type = c2pyerror(c_err_msg)\n if err_type is not None and err_type.startswith(\"tvm.error.\"):\n err_type = err_type[10:]\n return ERROR_TYPE.get(err_type, TVMError)(py_err_msg)\n\n\ndef check_call(ret):\n \"\"\"Check the return value of C API call\n\n This function will raise exception when error occurs.\n Wrap every API call with this function\n\n Parameters\n ----------\n ret : int\n return value from API calls\n \"\"\"\n if ret != 0:\n raise get_last_ffi_error()\n", "path": "python/tvm/_ffi/base.py"}]} | 3,422 | 201 |
gh_patches_debug_7856 | rasdani/github-patches | git_diff | networkx__networkx-4579 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve nx_pylab testing
Currently our visualization test suite consists of smoke tests. We should investigate using ``pytest-mpl``
- https://github.com/matplotlib/pytest-mpl
See #4375.
</issue>
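With pytest-mpl, a drawing test stops being a smoke test and instead compares the rendered figure against a stored baseline image. A sketch of what that could look like for `nx_pylab` (graph, layout, and tolerance chosen only for illustration):

```python
import matplotlib.pyplot as plt
import pytest

import networkx as nx


@pytest.mark.mpl_image_compare(baseline_dir="baseline", tolerance=5)
def test_draw_networkx_house_graph():
    G = nx.house_graph()
    pos = nx.spring_layout(G, seed=42)  # fixed seed so the image is reproducible
    fig, ax = plt.subplots(figsize=(4, 4))
    nx.draw_networkx(G, pos=pos, ax=ax, with_labels=True)
    ax.set_axis_off()
    return fig  # pytest-mpl compares the returned figure with the baseline
```

Baselines are generated once with `pytest --mpl-generate-path=baseline`, and every later run compares against them with `pytest --mpl`.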
<code>
[start of setup.py]
1 from glob import glob
2 import os
3 import sys
4 from setuptools import setup
5
6 if sys.version_info[:2] < (3, 8):
7 error = (
8 "NetworkX 2.7+ requires Python 3.8 or later (%d.%d detected). \n"
9 "For Python 2.7, please install version 2.2 using: \n"
10 "$ pip install 'networkx==2.2'" % sys.version_info[:2]
11 )
12 sys.stderr.write(error + "\n")
13 sys.exit(1)
14
15
16 name = "networkx"
17 description = "Python package for creating and manipulating graphs and networks"
18 authors = {
19 "Hagberg": ("Aric Hagberg", "[email protected]"),
20 "Schult": ("Dan Schult", "[email protected]"),
21 "Swart": ("Pieter Swart", "[email protected]"),
22 }
23 maintainer = "NetworkX Developers"
24 maintainer_email = "[email protected]"
25 url = "https://networkx.org/"
26 project_urls = {
27 "Bug Tracker": "https://github.com/networkx/networkx/issues",
28 "Documentation": "https://networkx.org/documentation/stable/",
29 "Source Code": "https://github.com/networkx/networkx",
30 }
31 platforms = ["Linux", "Mac OSX", "Windows", "Unix"]
32 keywords = [
33 "Networks",
34 "Graph Theory",
35 "Mathematics",
36 "network",
37 "graph",
38 "discrete mathematics",
39 "math",
40 ]
41 classifiers = [
42 "Development Status :: 5 - Production/Stable",
43 "Intended Audience :: Developers",
44 "Intended Audience :: Science/Research",
45 "License :: OSI Approved :: BSD License",
46 "Operating System :: OS Independent",
47 "Programming Language :: Python :: 3",
48 "Programming Language :: Python :: 3.8",
49 "Programming Language :: Python :: 3.9",
50 "Programming Language :: Python :: 3.10",
51 "Programming Language :: Python :: 3 :: Only",
52 "Topic :: Software Development :: Libraries :: Python Modules",
53 "Topic :: Scientific/Engineering :: Bio-Informatics",
54 "Topic :: Scientific/Engineering :: Information Analysis",
55 "Topic :: Scientific/Engineering :: Mathematics",
56 "Topic :: Scientific/Engineering :: Physics",
57 ]
58
59 with open("networkx/__init__.py") as fid:
60 for line in fid:
61 if line.startswith("__version__"):
62 version = line.strip().split()[-1][1:-1]
63 break
64
65 packages = [
66 "networkx",
67 "networkx.algorithms",
68 "networkx.algorithms.assortativity",
69 "networkx.algorithms.bipartite",
70 "networkx.algorithms.node_classification",
71 "networkx.algorithms.centrality",
72 "networkx.algorithms.community",
73 "networkx.algorithms.components",
74 "networkx.algorithms.connectivity",
75 "networkx.algorithms.coloring",
76 "networkx.algorithms.flow",
77 "networkx.algorithms.minors",
78 "networkx.algorithms.traversal",
79 "networkx.algorithms.isomorphism",
80 "networkx.algorithms.shortest_paths",
81 "networkx.algorithms.link_analysis",
82 "networkx.algorithms.operators",
83 "networkx.algorithms.approximation",
84 "networkx.algorithms.tree",
85 "networkx.classes",
86 "networkx.generators",
87 "networkx.drawing",
88 "networkx.linalg",
89 "networkx.readwrite",
90 "networkx.readwrite.json_graph",
91 "networkx.tests",
92 "networkx.testing",
93 "networkx.utils",
94 ]
95
96 docdirbase = "share/doc/networkx-%s" % version
97 # add basic documentation
98 data = [(docdirbase, glob("*.txt"))]
99 # add examples
100 for d in [
101 ".",
102 "advanced",
103 "algorithms",
104 "basic",
105 "3d_drawing",
106 "drawing",
107 "graph",
108 "javascript",
109 "jit",
110 "pygraphviz",
111 "subclass",
112 ]:
113 dd = os.path.join(docdirbase, "examples", d)
114 pp = os.path.join("examples", d)
115 data.append((dd, glob(os.path.join(pp, "*.txt"))))
116 data.append((dd, glob(os.path.join(pp, "*.py"))))
117 data.append((dd, glob(os.path.join(pp, "*.bz2"))))
118 data.append((dd, glob(os.path.join(pp, "*.gz"))))
119 data.append((dd, glob(os.path.join(pp, "*.mbox"))))
120 data.append((dd, glob(os.path.join(pp, "*.edgelist"))))
121 # add js force examples
122 dd = os.path.join(docdirbase, "examples", "javascript/force")
123 pp = os.path.join("examples", "javascript/force")
124 data.append((dd, glob(os.path.join(pp, "*"))))
125
126 # add the tests
127 package_data = {
128 "networkx": ["tests/*.py"],
129 "networkx.algorithms": ["tests/*.py"],
130 "networkx.algorithms.assortativity": ["tests/*.py"],
131 "networkx.algorithms.bipartite": ["tests/*.py"],
132 "networkx.algorithms.node_classification": ["tests/*.py"],
133 "networkx.algorithms.centrality": ["tests/*.py"],
134 "networkx.algorithms.community": ["tests/*.py"],
135 "networkx.algorithms.components": ["tests/*.py"],
136 "networkx.algorithms.connectivity": ["tests/*.py"],
137 "networkx.algorithms.coloring": ["tests/*.py"],
138 "networkx.algorithms.minors": ["tests/*.py"],
139 "networkx.algorithms.flow": ["tests/*.py", "tests/*.bz2"],
140 "networkx.algorithms.isomorphism": ["tests/*.py", "tests/*.*99"],
141 "networkx.algorithms.link_analysis": ["tests/*.py"],
142 "networkx.algorithms.approximation": ["tests/*.py"],
143 "networkx.algorithms.operators": ["tests/*.py"],
144 "networkx.algorithms.shortest_paths": ["tests/*.py"],
145 "networkx.algorithms.traversal": ["tests/*.py"],
146 "networkx.algorithms.tree": ["tests/*.py"],
147 "networkx.classes": ["tests/*.py"],
148 "networkx.generators": ["tests/*.py", "atlas.dat.gz"],
149 "networkx.drawing": ["tests/*.py"],
150 "networkx.linalg": ["tests/*.py"],
151 "networkx.readwrite": ["tests/*.py"],
152 "networkx.readwrite.json_graph": ["tests/*.py"],
153 "networkx.testing": ["tests/*.py"],
154 "networkx.utils": ["tests/*.py"],
155 }
156
157
158 def parse_requirements_file(filename):
159 with open(filename) as fid:
160 requires = [l.strip() for l in fid.readlines() if not l.startswith("#")]
161
162 return requires
163
164
165 install_requires = []
166 extras_require = {
167 dep: parse_requirements_file("requirements/" + dep + ".txt")
168 for dep in ["default", "developer", "doc", "extra", "test"]
169 }
170
171 with open("README.rst") as fh:
172 long_description = fh.read()
173
174 if __name__ == "__main__":
175
176 setup(
177 name=name,
178 version=version,
179 maintainer=maintainer,
180 maintainer_email=maintainer_email,
181 author=authors["Hagberg"][0],
182 author_email=authors["Hagberg"][1],
183 description=description,
184 keywords=keywords,
185 long_description=long_description,
186 platforms=platforms,
187 url=url,
188 project_urls=project_urls,
189 classifiers=classifiers,
190 packages=packages,
191 data_files=data,
192 package_data=package_data,
193 install_requires=install_requires,
194 extras_require=extras_require,
195 python_requires=">=3.8",
196 zip_safe=False,
197 )
198
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -146,7 +146,7 @@
"networkx.algorithms.tree": ["tests/*.py"],
"networkx.classes": ["tests/*.py"],
"networkx.generators": ["tests/*.py", "atlas.dat.gz"],
- "networkx.drawing": ["tests/*.py"],
+ "networkx.drawing": ["tests/*.py", "tests/baseline/*png"],
"networkx.linalg": ["tests/*.py"],
"networkx.readwrite": ["tests/*.py"],
"networkx.readwrite.json_graph": ["tests/*.py"],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -146,7 +146,7 @@\n \"networkx.algorithms.tree\": [\"tests/*.py\"],\n \"networkx.classes\": [\"tests/*.py\"],\n \"networkx.generators\": [\"tests/*.py\", \"atlas.dat.gz\"],\n- \"networkx.drawing\": [\"tests/*.py\"],\n+ \"networkx.drawing\": [\"tests/*.py\", \"tests/baseline/*png\"],\n \"networkx.linalg\": [\"tests/*.py\"],\n \"networkx.readwrite\": [\"tests/*.py\"],\n \"networkx.readwrite.json_graph\": [\"tests/*.py\"],\n", "issue": "Improve nx_pylab testing\nCurrently our visualization test suite are smoke tests. We should investigate using ``pytest-mpl``\r\n- https://github.com/matplotlib/pytest-mpl\r\n\r\nSee #4375.\n", "before_files": [{"content": "from glob import glob\nimport os\nimport sys\nfrom setuptools import setup\n\nif sys.version_info[:2] < (3, 8):\n error = (\n \"NetworkX 2.7+ requires Python 3.8 or later (%d.%d detected). \\n\"\n \"For Python 2.7, please install version 2.2 using: \\n\"\n \"$ pip install 'networkx==2.2'\" % sys.version_info[:2]\n )\n sys.stderr.write(error + \"\\n\")\n sys.exit(1)\n\n\nname = \"networkx\"\ndescription = \"Python package for creating and manipulating graphs and networks\"\nauthors = {\n \"Hagberg\": (\"Aric Hagberg\", \"[email protected]\"),\n \"Schult\": (\"Dan Schult\", \"[email protected]\"),\n \"Swart\": (\"Pieter Swart\", \"[email protected]\"),\n}\nmaintainer = \"NetworkX Developers\"\nmaintainer_email = \"[email protected]\"\nurl = \"https://networkx.org/\"\nproject_urls = {\n \"Bug Tracker\": \"https://github.com/networkx/networkx/issues\",\n \"Documentation\": \"https://networkx.org/documentation/stable/\",\n \"Source Code\": \"https://github.com/networkx/networkx\",\n}\nplatforms = [\"Linux\", \"Mac OSX\", \"Windows\", \"Unix\"]\nkeywords = [\n \"Networks\",\n \"Graph Theory\",\n \"Mathematics\",\n \"network\",\n \"graph\",\n \"discrete mathematics\",\n \"math\",\n]\nclassifiers = [\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Scientific/Engineering :: Bio-Informatics\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Physics\",\n]\n\nwith open(\"networkx/__init__.py\") as fid:\n for line in fid:\n if line.startswith(\"__version__\"):\n version = line.strip().split()[-1][1:-1]\n break\n\npackages = [\n \"networkx\",\n \"networkx.algorithms\",\n \"networkx.algorithms.assortativity\",\n \"networkx.algorithms.bipartite\",\n \"networkx.algorithms.node_classification\",\n \"networkx.algorithms.centrality\",\n \"networkx.algorithms.community\",\n \"networkx.algorithms.components\",\n \"networkx.algorithms.connectivity\",\n \"networkx.algorithms.coloring\",\n \"networkx.algorithms.flow\",\n \"networkx.algorithms.minors\",\n \"networkx.algorithms.traversal\",\n \"networkx.algorithms.isomorphism\",\n \"networkx.algorithms.shortest_paths\",\n \"networkx.algorithms.link_analysis\",\n \"networkx.algorithms.operators\",\n \"networkx.algorithms.approximation\",\n 
\"networkx.algorithms.tree\",\n \"networkx.classes\",\n \"networkx.generators\",\n \"networkx.drawing\",\n \"networkx.linalg\",\n \"networkx.readwrite\",\n \"networkx.readwrite.json_graph\",\n \"networkx.tests\",\n \"networkx.testing\",\n \"networkx.utils\",\n]\n\ndocdirbase = \"share/doc/networkx-%s\" % version\n# add basic documentation\ndata = [(docdirbase, glob(\"*.txt\"))]\n# add examples\nfor d in [\n \".\",\n \"advanced\",\n \"algorithms\",\n \"basic\",\n \"3d_drawing\",\n \"drawing\",\n \"graph\",\n \"javascript\",\n \"jit\",\n \"pygraphviz\",\n \"subclass\",\n]:\n dd = os.path.join(docdirbase, \"examples\", d)\n pp = os.path.join(\"examples\", d)\n data.append((dd, glob(os.path.join(pp, \"*.txt\"))))\n data.append((dd, glob(os.path.join(pp, \"*.py\"))))\n data.append((dd, glob(os.path.join(pp, \"*.bz2\"))))\n data.append((dd, glob(os.path.join(pp, \"*.gz\"))))\n data.append((dd, glob(os.path.join(pp, \"*.mbox\"))))\n data.append((dd, glob(os.path.join(pp, \"*.edgelist\"))))\n# add js force examples\ndd = os.path.join(docdirbase, \"examples\", \"javascript/force\")\npp = os.path.join(\"examples\", \"javascript/force\")\ndata.append((dd, glob(os.path.join(pp, \"*\"))))\n\n# add the tests\npackage_data = {\n \"networkx\": [\"tests/*.py\"],\n \"networkx.algorithms\": [\"tests/*.py\"],\n \"networkx.algorithms.assortativity\": [\"tests/*.py\"],\n \"networkx.algorithms.bipartite\": [\"tests/*.py\"],\n \"networkx.algorithms.node_classification\": [\"tests/*.py\"],\n \"networkx.algorithms.centrality\": [\"tests/*.py\"],\n \"networkx.algorithms.community\": [\"tests/*.py\"],\n \"networkx.algorithms.components\": [\"tests/*.py\"],\n \"networkx.algorithms.connectivity\": [\"tests/*.py\"],\n \"networkx.algorithms.coloring\": [\"tests/*.py\"],\n \"networkx.algorithms.minors\": [\"tests/*.py\"],\n \"networkx.algorithms.flow\": [\"tests/*.py\", \"tests/*.bz2\"],\n \"networkx.algorithms.isomorphism\": [\"tests/*.py\", \"tests/*.*99\"],\n \"networkx.algorithms.link_analysis\": [\"tests/*.py\"],\n \"networkx.algorithms.approximation\": [\"tests/*.py\"],\n \"networkx.algorithms.operators\": [\"tests/*.py\"],\n \"networkx.algorithms.shortest_paths\": [\"tests/*.py\"],\n \"networkx.algorithms.traversal\": [\"tests/*.py\"],\n \"networkx.algorithms.tree\": [\"tests/*.py\"],\n \"networkx.classes\": [\"tests/*.py\"],\n \"networkx.generators\": [\"tests/*.py\", \"atlas.dat.gz\"],\n \"networkx.drawing\": [\"tests/*.py\"],\n \"networkx.linalg\": [\"tests/*.py\"],\n \"networkx.readwrite\": [\"tests/*.py\"],\n \"networkx.readwrite.json_graph\": [\"tests/*.py\"],\n \"networkx.testing\": [\"tests/*.py\"],\n \"networkx.utils\": [\"tests/*.py\"],\n}\n\n\ndef parse_requirements_file(filename):\n with open(filename) as fid:\n requires = [l.strip() for l in fid.readlines() if not l.startswith(\"#\")]\n\n return requires\n\n\ninstall_requires = []\nextras_require = {\n dep: parse_requirements_file(\"requirements/\" + dep + \".txt\")\n for dep in [\"default\", \"developer\", \"doc\", \"extra\", \"test\"]\n}\n\nwith open(\"README.rst\") as fh:\n long_description = fh.read()\n\nif __name__ == \"__main__\":\n\n setup(\n name=name,\n version=version,\n maintainer=maintainer,\n maintainer_email=maintainer_email,\n author=authors[\"Hagberg\"][0],\n author_email=authors[\"Hagberg\"][1],\n description=description,\n keywords=keywords,\n long_description=long_description,\n platforms=platforms,\n url=url,\n project_urls=project_urls,\n classifiers=classifiers,\n packages=packages,\n data_files=data,\n 
package_data=package_data,\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires=\">=3.8\",\n zip_safe=False,\n )\n", "path": "setup.py"}]} | 2,746 | 146 |
gh_patches_debug_61923 | rasdani/github-patches | git_diff | ray-project__ray-3109 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ship Modin with Ray
### Describe the problem
<!-- Describe the problem clearly here. -->
I think it makes sense to ship Modin with Ray. I suggest doing this similarly to how pyarrow is shipped with Ray.
We don't need to rely on the dependencies of Modin, but some of the Modin source will have to be updated to make sure that the pandas version is correct.
</issue>
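The pyarrow pattern the issue refers to is visible in the listing below: the vendored directory is prepended to ``sys.path`` before the first import. A minimal sketch of the same idea for a bundled Modin copy, assuming it would live in a ``modin`` directory next to ``__init__.py``, could look like this:

```python
import os
import sys

# Sketch only: mirror the pyarrow vendoring approach for a bundled Modin.
# The "modin" directory name is an assumption, not something fixed by the issue.
modin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "modin")
if modin_path not in sys.path:
    sys.path.insert(0, modin_path)
```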
<code>
[start of python/ray/__init__.py]
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 import os
6 import sys
7
8 if "pyarrow" in sys.modules:
9 raise ImportError("Ray must be imported before pyarrow because Ray "
10 "requires a specific version of pyarrow (which is "
11 "packaged along with Ray).")
12
13 # Add the directory containing pyarrow to the Python path so that we find the
14 # pyarrow version packaged with ray and not a pre-existing pyarrow.
15 pyarrow_path = os.path.join(
16 os.path.abspath(os.path.dirname(__file__)), "pyarrow_files")
17 sys.path.insert(0, pyarrow_path)
18
19 # See https://github.com/ray-project/ray/issues/131.
20 helpful_message = """
21
22 If you are using Anaconda, try fixing this problem by running:
23
24 conda install libgcc
25 """
26
27 try:
28 import pyarrow # noqa: F401
29 except ImportError as e:
30 if ((hasattr(e, "msg") and isinstance(e.msg, str)
31 and ("libstdc++" in e.msg or "CXX" in e.msg))):
32 # This code path should be taken with Python 3.
33 e.msg += helpful_message
34 elif (hasattr(e, "message") and isinstance(e.message, str)
35 and ("libstdc++" in e.message or "CXX" in e.message)):
36 # This code path should be taken with Python 2.
37 condition = (hasattr(e, "args") and isinstance(e.args, tuple)
38 and len(e.args) == 1 and isinstance(e.args[0], str))
39 if condition:
40 e.args = (e.args[0] + helpful_message, )
41 else:
42 if not hasattr(e, "args"):
43 e.args = ()
44 elif not isinstance(e.args, tuple):
45 e.args = (e.args, )
46 e.args += (helpful_message, )
47 raise
48
49 from ray.raylet import ObjectID, _config # noqa: E402
50 from ray.profiling import profile # noqa: E402
51 from ray.worker import (error_info, init, connect, disconnect, get, put, wait,
52 remote, get_gpu_ids, get_resource_ids, get_webui_url,
53 register_custom_serializer, shutdown,
54 is_initialized) # noqa: E402
55 from ray.worker import (SCRIPT_MODE, WORKER_MODE, LOCAL_MODE,
56 PYTHON_MODE) # noqa: E402
57 from ray.worker import global_state # noqa: E402
58 import ray.internal # noqa: E402
59 # We import ray.actor because some code is run in actor.py which initializes
60 # some functions in the worker.
61 import ray.actor # noqa: F401
62 from ray.actor import method # noqa: E402
63
64 # Ray version string.
65 __version__ = "0.5.3"
66
67 __all__ = [
68 "error_info", "init", "connect", "disconnect", "get", "put", "wait",
69 "remote", "profile", "actor", "method", "get_gpu_ids", "get_resource_ids",
70 "get_webui_url", "register_custom_serializer", "shutdown",
71 "is_initialized", "SCRIPT_MODE", "WORKER_MODE", "LOCAL_MODE",
72 "PYTHON_MODE", "global_state", "ObjectID", "_config", "__version__",
73 "internal"
74 ]
75
76 import ctypes # noqa: E402
77 # Windows only
78 if hasattr(ctypes, "windll"):
79 # Makes sure that all child processes die when we die. Also makes sure that
80 # fatal crashes result in process termination rather than an error dialog
81 # (the latter is annoying since we have a lot of processes). This is done
82 # by associating all child processes with a "job" object that imposes this
83 # behavior.
84 (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, "\0" * 17 + chr(0x8 | 0x4 | 0x20) + "\0" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32) # noqa: E501
85
[end of python/ray/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/ray/__init__.py b/python/ray/__init__.py
--- a/python/ray/__init__.py
+++ b/python/ray/__init__.py
@@ -46,6 +46,9 @@
e.args += (helpful_message, )
raise
+modin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "modin")
+sys.path.insert(0, modin_path)
+
from ray.raylet import ObjectID, _config # noqa: E402
from ray.profiling import profile # noqa: E402
from ray.worker import (error_info, init, connect, disconnect, get, put, wait,
| {"golden_diff": "diff --git a/python/ray/__init__.py b/python/ray/__init__.py\n--- a/python/ray/__init__.py\n+++ b/python/ray/__init__.py\n@@ -46,6 +46,9 @@\n e.args += (helpful_message, )\n raise\n \n+modin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"modin\")\n+sys.path.insert(0, modin_path)\n+\n from ray.raylet import ObjectID, _config # noqa: E402\n from ray.profiling import profile # noqa: E402\n from ray.worker import (error_info, init, connect, disconnect, get, put, wait,\n", "issue": "Ship Modin with Ray\n### Describe the problem\r\n<!-- Describe the problem clearly here. -->\r\nI think it makes sense to ship Modin with Ray. I suggest doing this similar to how pyarrow is shipped with Ray.\r\n\r\nWe don't need to rely on the dependencies of Modin, but some of the Modin source will have to be updated to make sure that the pandas version is correct.\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nif \"pyarrow\" in sys.modules:\n raise ImportError(\"Ray must be imported before pyarrow because Ray \"\n \"requires a specific version of pyarrow (which is \"\n \"packaged along with Ray).\")\n\n# Add the directory containing pyarrow to the Python path so that we find the\n# pyarrow version packaged with ray and not a pre-existing pyarrow.\npyarrow_path = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"pyarrow_files\")\nsys.path.insert(0, pyarrow_path)\n\n# See https://github.com/ray-project/ray/issues/131.\nhelpful_message = \"\"\"\n\nIf you are using Anaconda, try fixing this problem by running:\n\n conda install libgcc\n\"\"\"\n\ntry:\n import pyarrow # noqa: F401\nexcept ImportError as e:\n if ((hasattr(e, \"msg\") and isinstance(e.msg, str)\n and (\"libstdc++\" in e.msg or \"CXX\" in e.msg))):\n # This code path should be taken with Python 3.\n e.msg += helpful_message\n elif (hasattr(e, \"message\") and isinstance(e.message, str)\n and (\"libstdc++\" in e.message or \"CXX\" in e.message)):\n # This code path should be taken with Python 2.\n condition = (hasattr(e, \"args\") and isinstance(e.args, tuple)\n and len(e.args) == 1 and isinstance(e.args[0], str))\n if condition:\n e.args = (e.args[0] + helpful_message, )\n else:\n if not hasattr(e, \"args\"):\n e.args = ()\n elif not isinstance(e.args, tuple):\n e.args = (e.args, )\n e.args += (helpful_message, )\n raise\n\nfrom ray.raylet import ObjectID, _config # noqa: E402\nfrom ray.profiling import profile # noqa: E402\nfrom ray.worker import (error_info, init, connect, disconnect, get, put, wait,\n remote, get_gpu_ids, get_resource_ids, get_webui_url,\n register_custom_serializer, shutdown,\n is_initialized) # noqa: E402\nfrom ray.worker import (SCRIPT_MODE, WORKER_MODE, LOCAL_MODE,\n PYTHON_MODE) # noqa: E402\nfrom ray.worker import global_state # noqa: E402\nimport ray.internal # noqa: E402\n# We import ray.actor because some code is run in actor.py which initializes\n# some functions in the worker.\nimport ray.actor # noqa: F401\nfrom ray.actor import method # noqa: E402\n\n# Ray version string.\n__version__ = \"0.5.3\"\n\n__all__ = [\n \"error_info\", \"init\", \"connect\", \"disconnect\", \"get\", \"put\", \"wait\",\n \"remote\", \"profile\", \"actor\", \"method\", \"get_gpu_ids\", \"get_resource_ids\",\n \"get_webui_url\", \"register_custom_serializer\", \"shutdown\",\n \"is_initialized\", \"SCRIPT_MODE\", \"WORKER_MODE\", \"LOCAL_MODE\",\n \"PYTHON_MODE\", 
\"global_state\", \"ObjectID\", \"_config\", \"__version__\",\n \"internal\"\n]\n\nimport ctypes # noqa: E402\n# Windows only\nif hasattr(ctypes, \"windll\"):\n # Makes sure that all child processes die when we die. Also makes sure that\n # fatal crashes result in process termination rather than an error dialog\n # (the latter is annoying since we have a lot of processes). This is done\n # by associating all child processes with a \"job\" object that imposes this\n # behavior.\n (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, \"\\0\" * 17 + chr(0x8 | 0x4 | 0x20) + \"\\0\" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32) # noqa: E501\n", "path": "python/ray/__init__.py"}]} | 1,784 | 154 |
gh_patches_debug_15080 | rasdani/github-patches | git_diff | pulp__pulpcore-5190 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix import in wsgi preventing startup
**Version**
Confirmed with Katello folks using 3.49 branch.
**Describe the bug**
We're getting an error during the startup stage:
```python
Starting Pulp API Server...
Traceback (most recent call last):
File "/usr/bin/pulpcore-api", line 33, in <module>
sys.exit(load_entry_point('pulpcore==3.49.1', 'console_scripts', 'pulpcore-api')())
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/click/core.py", line 1130, in __call__
return self.main(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/click/core.py", line 1055, in main
rv = self.invoke(ctx)
^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/click/core.py", line 1404, in invoke
return ctx.invoke(self.callback, **ctx.params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/click/core.py", line 760, in invoke
return __callback(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/pulpcore/app/entrypoint.py", line 140, in main
PulpcoreApiApplication(options).run()
File "/usr/lib/python3.11/site-packages/gunicorn/app/base.py", line 231, in run
super().run()
File "/usr/lib/python3.11/site-packages/gunicorn/app/base.py", line 72, in run
Arbiter(self).run()
^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/gunicorn/arbiter.py", line 58, in __init__
self.setup(app)
File "/usr/lib/python3.11/site-packages/gunicorn/arbiter.py", line 118, in setup
self.app.wsgi()
File "/usr/lib/python3.11/site-packages/gunicorn/app/base.py", line 67, in wsgi
self.callable = self.load()
^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/pulpcore/app/entrypoint.py", line 95, in load
import pulpcore.app.wsgi
File "/usr/lib/python3.11/site-packages/pulpcore/app/wsgi.py", line 14, in <module>
from pulpcore.app.util import init_domain_metrics_exporter
File "/usr/lib/python3.11/site-packages/pulpcore/app/util.py", line 24, in <module>
from pulpcore.app import models
File "/usr/lib/python3.11/site-packages/pulpcore/app/models/__init__.py", line 4, in <module>
from .base import (
File "/usr/lib/python3.11/site-packages/pulpcore/app/models/base.py", line 3, in <module>
from django.contrib.contenttypes.fields import GenericRelation
File "/usr/lib/python3.11/site-packages/django/contrib/contenttypes/fields.py", line 7, in <module>
from django.contrib.contenttypes.models import ContentType
File "/usr/lib/python3.11/site-packages/django/contrib/contenttypes/models.py", line 139, in <module>
class ContentType(models.Model):
File "/usr/lib/python3.11/site-packages/django/db/models/base.py", line 129, in __new__
app_config = apps.get_containing_app_config(module)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/django/apps/registry.py", line 260, in get_containing_app_config
```
and what got our eye was this line:
```python
File "/usr/lib/python3.11/site-packages/pulpcore/app/wsgi.py", line 14, in <module>
from pulpcore.app.util import init_domain_metrics_exporter
```
Also, there's already a fix for this in the main branch #5178
**To Reproduce**
Installing using pip and rpm packages.
**Expected behavior**
The application should start without issues
</issue>
<code>
[start of pulpcore/app/wsgi.py]
1 """
2 WSGI config for pulp project.
3
4 It exposes the WSGI callable as a module-level variable named ``application``.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
8 """
9
10 from django.core.wsgi import get_wsgi_application
11 from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware
12
13 from pulpcore.app.entrypoint import using_pulp_api_worker
14 from pulpcore.app.util import init_domain_metrics_exporter
15
16 if not using_pulp_api_worker.get(False):
17 raise RuntimeError("This app must be executed using pulpcore-api entrypoint.")
18
19 application = get_wsgi_application()
20 application = OpenTelemetryMiddleware(application)
21
22 init_domain_metrics_exporter()
23
[end of pulpcore/app/wsgi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pulpcore/app/wsgi.py b/pulpcore/app/wsgi.py
--- a/pulpcore/app/wsgi.py
+++ b/pulpcore/app/wsgi.py
@@ -11,7 +11,6 @@
from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware
from pulpcore.app.entrypoint import using_pulp_api_worker
-from pulpcore.app.util import init_domain_metrics_exporter
if not using_pulp_api_worker.get(False):
raise RuntimeError("This app must be executed using pulpcore-api entrypoint.")
@@ -19,4 +18,6 @@
application = get_wsgi_application()
application = OpenTelemetryMiddleware(application)
+from pulpcore.app.util import init_domain_metrics_exporter # noqa: E402
+
init_domain_metrics_exporter()
| {"golden_diff": "diff --git a/pulpcore/app/wsgi.py b/pulpcore/app/wsgi.py\n--- a/pulpcore/app/wsgi.py\n+++ b/pulpcore/app/wsgi.py\n@@ -11,7 +11,6 @@\n from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n \n from pulpcore.app.entrypoint import using_pulp_api_worker\n-from pulpcore.app.util import init_domain_metrics_exporter\n \n if not using_pulp_api_worker.get(False):\n raise RuntimeError(\"This app must be executed using pulpcore-api entrypoint.\")\n@@ -19,4 +18,6 @@\n application = get_wsgi_application()\n application = OpenTelemetryMiddleware(application)\n \n+from pulpcore.app.util import init_domain_metrics_exporter # noqa: E402\n+\n init_domain_metrics_exporter()\n", "issue": "Fix import in wsgi preventing startup\n**Version**\r\nConfirmed with Katello folks using 3.49 branch.\r\n\r\n**Describe the bug**\r\nWe're getting an error during the startup stage:\r\n```python\r\nStarting Pulp API Server...\r\nTraceback (most recent call last):\r\n File \"/usr/bin/pulpcore-api\", line 33, in <module>\r\n sys.exit(load_entry_point('pulpcore==3.49.1', 'console_scripts', 'pulpcore-api')())\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/click/core.py\", line 1130, in __call__\r\n return self.main(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/click/core.py\", line 1055, in main\r\n rv = self.invoke(ctx)\r\n ^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/click/core.py\", line 1404, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/click/core.py\", line 760, in invoke\r\n return __callback(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/pulpcore/app/entrypoint.py\", line 140, in main\r\n PulpcoreApiApplication(options).run()\r\n File \"/usr/lib/python3.11/site-packages/gunicorn/app/base.py\", line 231, in run\r\n super().run()\r\n File \"/usr/lib/python3.11/site-packages/gunicorn/app/base.py\", line 72, in run\r\n Arbiter(self).run()\r\n ^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/gunicorn/arbiter.py\", line 58, in __init__\r\n self.setup(app)\r\n File \"/usr/lib/python3.11/site-packages/gunicorn/arbiter.py\", line 118, in setup\r\n self.app.wsgi()\r\n File \"/usr/lib/python3.11/site-packages/gunicorn/app/base.py\", line 67, in wsgi\r\n self.callable = self.load()\r\n ^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/pulpcore/app/entrypoint.py\", line 95, in load\r\n import pulpcore.app.wsgi\r\n File \"/usr/lib/python3.11/site-packages/pulpcore/app/wsgi.py\", line 14, in <module>\r\n from pulpcore.app.util import init_domain_metrics_exporter\r\n File \"/usr/lib/python3.11/site-packages/pulpcore/app/util.py\", line 24, in <module>\r\n from pulpcore.app import models\r\n File \"/usr/lib/python3.11/site-packages/pulpcore/app/models/__init__.py\", line 4, in <module>\r\n from .base import (\r\n File \"/usr/lib/python3.11/site-packages/pulpcore/app/models/base.py\", line 3, in <module>\r\n from django.contrib.contenttypes.fields import GenericRelation\r\n File \"/usr/lib/python3.11/site-packages/django/contrib/contenttypes/fields.py\", line 7, in <module>\r\n from django.contrib.contenttypes.models import ContentType\r\n File \"/usr/lib/python3.11/site-packages/django/contrib/contenttypes/models.py\", line 139, in <module>\r\n class ContentType(models.Model):\r\n File 
\"/usr/lib/python3.11/site-packages/django/db/models/base.py\", line 129, in __new__\r\n app_config = apps.get_containing_app_config(module)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/django/apps/registry.py\", line 260, in get_containing_app_config\r\n```\r\n\r\nand what got our eye was this line:\r\n```python\r\nFile \"/usr/lib/python3.11/site-packages/pulpcore/app/wsgi.py\", line 14, in <module>\r\n from pulpcore.app.util import init_domain_metrics_exporter\r\n```\r\n\r\nAlso, there's already a fix for this in the main branch #5178\r\n\r\n**To Reproduce**\r\nInstalling using pip and rpm packages.\r\n\r\n**Expected behavior**\r\nThe application should start without issues\r\n\n", "before_files": [{"content": "\"\"\"\nWSGI config for pulp project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/\n\"\"\"\n\nfrom django.core.wsgi import get_wsgi_application\nfrom opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n\nfrom pulpcore.app.entrypoint import using_pulp_api_worker\nfrom pulpcore.app.util import init_domain_metrics_exporter\n\nif not using_pulp_api_worker.get(False):\n raise RuntimeError(\"This app must be executed using pulpcore-api entrypoint.\")\n\napplication = get_wsgi_application()\napplication = OpenTelemetryMiddleware(application)\n\ninit_domain_metrics_exporter()\n", "path": "pulpcore/app/wsgi.py"}]} | 1,715 | 173 |
gh_patches_debug_24448 | rasdani/github-patches | git_diff | conan-io__conan-center-index-9862 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Package]OpenSUSE Tumbleweed fix problem - glu/system
https://github.com/conan-io/conan-center-index/blob/8658ae021ce225d889fa4ee38d30cb80877a7c75/recipes/glu/all/conanfile.py#L17-L32
This fixes the problem in openSUSE Tumbleweed:
```
elif tools.os_info.with_zypper:
packages = ["glu-devel"]
```
</issue>
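As a rough, self-contained sketch of the suggested change, the distro-to-package mapping from the recipe below could be factored out with the Tumbleweed name swapped in; the ``glu-devel`` choice is the reporter's suggestion and the helper function is purely illustrative:

```python
from conans import tools


def glu_system_packages():
    """Illustrative helper: GLU development packages per detected package manager."""
    if tools.os_info.with_yum or tools.os_info.with_dnf:
        return ["mesa-libGLU-devel"]
    if tools.os_info.with_apt:
        return ["libglu1-mesa-dev"]
    if tools.os_info.with_pacman:
        return ["glu"]
    if tools.os_info.with_zypper:
        # openSUSE Tumbleweed provides the headers as "glu-devel" (per this report).
        return ["glu-devel"]
    return []
```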
<code>
[start of recipes/glu/all/conanfile.py]
1 from conans import ConanFile, tools
2 from conans.errors import ConanException
3 import os
4
5
6 class SysConfigGLUConan(ConanFile):
7 name = "glu"
8 version = "system"
9 description = "cross-platform virtual conan package for the GLU support"
10 topics = ("conan", "opengl", "glu")
11 url = "https://github.com/conan-io/conan-center-index"
12 homepage = "https://cgit.freedesktop.org/mesa/glu/"
13 license = "SGI-B-2.0"
14 settings = "os"
15 requires = "opengl/system"
16
17 def system_requirements(self):
18 packages = []
19 if tools.os_info.is_linux and self.settings.os == "Linux":
20 if tools.os_info.with_yum or tools.os_info.with_dnf:
21 packages = ["mesa-libGLU-devel"]
22 elif tools.os_info.with_apt:
23 packages = ["libglu1-mesa-dev"]
24 elif tools.os_info.with_pacman:
25 packages = ["glu"]
26 elif tools.os_info.with_zypper:
27 packages = ["Mesa-libGLU-devel"]
28 else:
29 self.output.warn("Don't know how to install GLU for your distro")
30 if tools.os_info.is_freebsd and self.settings.os == "FreeBSD":
31 packages = ["libGLU"]
32 if packages:
33 package_tool = tools.SystemPackageTool(conanfile=self, default_mode='verify')
34 for p in packages:
35 package_tool.install(update=True, packages=p)
36
37 def _fill_cppinfo_from_pkgconfig(self, name):
38 pkg_config = tools.PkgConfig(name)
39 if not pkg_config.provides:
40 raise ConanException("GLU development files aren't available, giving up")
41 libs = [lib[2:] for lib in pkg_config.libs_only_l]
42 lib_dirs = [lib[2:] for lib in pkg_config.libs_only_L]
43 ldflags = [flag for flag in pkg_config.libs_only_other]
44 include_dirs = [include[2:] for include in pkg_config.cflags_only_I]
45 cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith("-D")]
46 defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith("-D")]
47
48 self.cpp_info.system_libs.extend(libs)
49 self.cpp_info.libdirs.extend(lib_dirs)
50 self.cpp_info.sharedlinkflags.extend(ldflags)
51 self.cpp_info.exelinkflags.extend(ldflags)
52 self.cpp_info.defines.extend(defines)
53 self.cpp_info.includedirs.extend(include_dirs)
54 self.cpp_info.cflags.extend(cflags)
55 self.cpp_info.cxxflags.extend(cflags)
56
57 def package_info(self):
58 self.cpp_info.includedirs = []
59 self.cpp_info.libdirs = []
60
61 if self.settings.os == "Windows":
62 self.cpp_info.system_libs = ["Glu32"]
63 elif self.settings.os in ["Linux", "FreeBSD"]:
64 self._fill_cppinfo_from_pkgconfig("glu")
65
66 def package_id(self):
67 self.info.header_only()
68
[end of recipes/glu/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/glu/all/conanfile.py b/recipes/glu/all/conanfile.py
--- a/recipes/glu/all/conanfile.py
+++ b/recipes/glu/all/conanfile.py
@@ -1,13 +1,12 @@
from conans import ConanFile, tools
from conans.errors import ConanException
-import os
class SysConfigGLUConan(ConanFile):
name = "glu"
version = "system"
description = "cross-platform virtual conan package for the GLU support"
- topics = ("conan", "opengl", "glu")
+ topics = ("opengl", "glu")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://cgit.freedesktop.org/mesa/glu/"
license = "SGI-B-2.0"
@@ -24,7 +23,7 @@
elif tools.os_info.with_pacman:
packages = ["glu"]
elif tools.os_info.with_zypper:
- packages = ["Mesa-libGLU-devel"]
+ packages = ["glu-devel"]
else:
self.output.warn("Don't know how to install GLU for your distro")
if tools.os_info.is_freebsd and self.settings.os == "FreeBSD":
| {"golden_diff": "diff --git a/recipes/glu/all/conanfile.py b/recipes/glu/all/conanfile.py\n--- a/recipes/glu/all/conanfile.py\n+++ b/recipes/glu/all/conanfile.py\n@@ -1,13 +1,12 @@\n from conans import ConanFile, tools\n from conans.errors import ConanException\n-import os\n \n \n class SysConfigGLUConan(ConanFile):\n name = \"glu\"\n version = \"system\"\n description = \"cross-platform virtual conan package for the GLU support\"\n- topics = (\"conan\", \"opengl\", \"glu\")\n+ topics = (\"opengl\", \"glu\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://cgit.freedesktop.org/mesa/glu/\"\n license = \"SGI-B-2.0\"\n@@ -24,7 +23,7 @@\n elif tools.os_info.with_pacman:\n packages = [\"glu\"]\n elif tools.os_info.with_zypper:\n- packages = [\"Mesa-libGLU-devel\"]\n+ packages = [\"glu-devel\"]\n else:\n self.output.warn(\"Don't know how to install GLU for your distro\")\n if tools.os_info.is_freebsd and self.settings.os == \"FreeBSD\":\n", "issue": "[Package]OpenSUSE Tumbleweed fix problem - glu/system\nhttps://github.com/conan-io/conan-center-index/blob/8658ae021ce225d889fa4ee38d30cb80877a7c75/recipes/glu/all/conanfile.py#L17-L32\r\n\r\nThis fix the problem in openSUSE Tumbleweed:\r\n```\r\nelif tools.os_info.with_zypper:\r\n packages = [\"glu-devel\"]\r\n```\n", "before_files": [{"content": "from conans import ConanFile, tools\nfrom conans.errors import ConanException\nimport os\n\n\nclass SysConfigGLUConan(ConanFile):\n name = \"glu\"\n version = \"system\"\n description = \"cross-platform virtual conan package for the GLU support\"\n topics = (\"conan\", \"opengl\", \"glu\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://cgit.freedesktop.org/mesa/glu/\"\n license = \"SGI-B-2.0\"\n settings = \"os\"\n requires = \"opengl/system\"\n\n def system_requirements(self):\n packages = []\n if tools.os_info.is_linux and self.settings.os == \"Linux\":\n if tools.os_info.with_yum or tools.os_info.with_dnf:\n packages = [\"mesa-libGLU-devel\"]\n elif tools.os_info.with_apt:\n packages = [\"libglu1-mesa-dev\"]\n elif tools.os_info.with_pacman:\n packages = [\"glu\"]\n elif tools.os_info.with_zypper:\n packages = [\"Mesa-libGLU-devel\"]\n else:\n self.output.warn(\"Don't know how to install GLU for your distro\")\n if tools.os_info.is_freebsd and self.settings.os == \"FreeBSD\":\n packages = [\"libGLU\"]\n if packages:\n package_tool = tools.SystemPackageTool(conanfile=self, default_mode='verify')\n for p in packages:\n package_tool.install(update=True, packages=p)\n\n def _fill_cppinfo_from_pkgconfig(self, name):\n pkg_config = tools.PkgConfig(name)\n if not pkg_config.provides:\n raise ConanException(\"GLU development files aren't available, giving up\")\n libs = [lib[2:] for lib in pkg_config.libs_only_l]\n lib_dirs = [lib[2:] for lib in pkg_config.libs_only_L]\n ldflags = [flag for flag in pkg_config.libs_only_other]\n include_dirs = [include[2:] for include in pkg_config.cflags_only_I]\n cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith(\"-D\")]\n defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith(\"-D\")]\n\n self.cpp_info.system_libs.extend(libs)\n self.cpp_info.libdirs.extend(lib_dirs)\n self.cpp_info.sharedlinkflags.extend(ldflags)\n self.cpp_info.exelinkflags.extend(ldflags)\n self.cpp_info.defines.extend(defines)\n self.cpp_info.includedirs.extend(include_dirs)\n self.cpp_info.cflags.extend(cflags)\n self.cpp_info.cxxflags.extend(cflags)\n\n def package_info(self):\n 
self.cpp_info.includedirs = []\n self.cpp_info.libdirs = []\n\n if self.settings.os == \"Windows\":\n self.cpp_info.system_libs = [\"Glu32\"]\n elif self.settings.os in [\"Linux\", \"FreeBSD\"]:\n self._fill_cppinfo_from_pkgconfig(\"glu\")\n\n def package_id(self):\n self.info.header_only()\n", "path": "recipes/glu/all/conanfile.py"}]} | 1,449 | 292 |
gh_patches_debug_18205 | rasdani/github-patches | git_diff | fossasia__open-event-server-3539 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
If speaker edit their profile or session they are unassigned
If a speaker edits their profile or session, they are unassigned from their session. Result: the organizer needs to re-add them to the session.
Expected: if a speaker edits their profile or session, nothing should change. Sessions should still have the same speaker and the same status.
</issue>
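One way to read this (stated here as an assumption, since the report does not name the root cause) is that the session-edit view renders the form without the event's speaker list, so saving the form drops the existing assignment. A hedged sketch of a GET handler that keeps the speakers available to the template, reusing the helpers from the view module listed below:

```python
from flask import render_template
from flask.ext import login

from app.helpers.data_getter import DataGetter


def render_session_edit(session_id, session_form, event):
    """Hypothetical helper: render the session edit form without losing speakers."""
    session = DataGetter.get_sessions_of_user_by_id(session_id)
    speaker = DataGetter.get_speakers(session.event_id).filter_by(
        user_id=login.current_user.id).first()
    # Handing the full speaker list to the template lets the form re-submit the
    # current assignment instead of silently clearing it on save.
    speakers = DataGetter.get_speakers(session.event_id).all()
    return render_template(
        'gentelella/users/mysessions/mysession_session_edit.html',
        session=session,
        speaker=speaker,
        session_form=session_form,
        event=event,
        speakers=speakers)
```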
<code>
[start of app/views/users/my_sessions.py]
1 import json
2 from datetime import datetime
3
4 from flask import Blueprint, jsonify
5 from flask import flash, redirect, url_for, request
6 from flask import render_template
7 from flask.ext.restplus import abort
8 from flask.ext import login
9 from markupsafe import Markup
10
11 from app.helpers.data import DataManager, save_to_db
12 from app.helpers.data_getter import DataGetter
13 from app.helpers.auth import AuthManager
14
15 my_sessions = Blueprint('my_sessions', __name__, url_prefix='/events/mysessions')
16
17
18 @my_sessions.route('/')
19 def display_my_sessions_view():
20 placeholder_images = DataGetter.get_event_default_images()
21 custom_placeholder = DataGetter.get_custom_placeholders()
22 upcoming_events_sessions = DataGetter.get_sessions_of_user(upcoming_events=True)
23 im_config = DataGetter.get_image_configs()
24 im_size = ''
25 for config in im_config:
26 if config.page == 'mysession':
27 im_size = config.size
28 past_events_sessions = DataGetter.get_sessions_of_user(upcoming_events=False)
29 page_content = {"tab_upcoming_events": "Upcoming Sessions",
30 "tab_past_events": "Past Sessions",
31 "title": "My Session Proposals"}
32 if not AuthManager.is_verified_user():
33 flash(Markup("Your account is unverified. "
34 "Please verify by clicking on the confirmation link that has been emailed to you."
35 '<br>Did not get the email? Please <a href="/resend_email/" class="alert-link"> '
36 'click here to resend the confirmation.</a>'))
37 return render_template('gentelella/users/mysessions/mysessions_list.html',
38 upcoming_events_sessions=upcoming_events_sessions,
39 past_events_sessions=past_events_sessions,
40 page_content=page_content,
41 placeholder_images=placeholder_images,
42 custom_placeholder=custom_placeholder,
43 im_size=im_size)
44
45
46 @my_sessions.route('/<int:session_id>/')
47 def display_session_view(session_id):
48 session = DataGetter.get_sessions_of_user_by_id(session_id)
49 if not session:
50 abort(404)
51 form_elems = DataGetter.get_custom_form_elements(session.event_id)
52 if not form_elems:
53 flash("Speaker and Session forms have been incorrectly configured for this event."
54 " Session creation has been disabled", "danger")
55 return redirect(url_for('.display_my_sessions_view', event_id=session.event_id))
56 speaker_form = json.loads(form_elems.speaker_form)
57 session_form = json.loads(form_elems.session_form)
58 event = DataGetter.get_event(session.event_id)
59 speakers = DataGetter.get_speakers(session.event_id).all()
60 user_speaker = DataGetter.get_speakers(session.event_id).filter_by(user_id=login.current_user.id).first()
61 return render_template('gentelella/users/mysessions/mysession_detail.html',
62 session=session,
63 speaker_form=speaker_form,
64 session_form=session_form,
65 event=event,
66 speakers=speakers,
67 user_speaker=user_speaker)
68
69
70 @my_sessions.route('/<int:session_id>/session-edit/', methods=('POST', 'GET'))
71 def process_session_view(session_id):
72 if request.method == 'GET':
73 session = DataGetter.get_sessions_of_user_by_id(session_id)
74 speaker = DataGetter.get_speakers(session.event_id).filter_by(user_id=login.current_user.id).first()
75 if not session:
76 abort(404)
77 form_elems = DataGetter.get_custom_form_elements(session.event_id)
78 if not form_elems:
79 flash("Speaker and Session forms have been incorrectly configured for this event."
80 " Session creation has been disabled", "danger")
81 return redirect(url_for('.display_my_sessions_view', event_id=session.event_id))
82 session_form = json.loads(form_elems.session_form)
83 event = DataGetter.get_event(session.event_id)
84 return render_template(
85 'gentelella/users/mysessions/mysession_session_edit.html',
86 session=session,
87 speaker=speaker,
88 session_form=session_form,
89 event=event)
90
91 if request.method == 'POST':
92 session = DataGetter.get_sessions_of_user_by_id(session_id)
93 speaker = DataGetter.get_speakers(session.event_id).filter_by(user_id=login.current_user.id).first()
94 DataManager.edit_session(request, session, speaker)
95 flash("The session has been updated successfully", "success")
96 return redirect(url_for('.display_session_view', session_id=session_id))
97
98
99 @my_sessions.route('/<int:speaker_id>/speaker-edit/', methods=('POST', 'GET'))
100 def process_speaker_view(speaker_id):
101 if request.method == 'GET':
102 speaker = DataGetter.get_speaker(speaker_id)
103 if not speaker or speaker.name == '':
104 abort(404)
105 form_elems = DataGetter.get_custom_form_elements(speaker.event_id)
106 if not form_elems:
107 flash("Speaker and Session forms have been incorrectly configured for this event."
108 " Session creation has been disabled", "danger")
109 return redirect(url_for('.display_my_sessions_view', event_id=speaker.event_id))
110 speaker_form = json.loads(form_elems.speaker_form)
111 event = DataGetter.get_event(speaker.event_id)
112 return render_template(
113 'gentelella/users/mysessions/mysession_speaker_edit.html',
114 photo_delete_url=url_for('.avatar_delete',
115 event_id=event.id,
116 speaker_id=speaker.id),
117 speaker_form=speaker_form,
118 event=event,
119 speaker=speaker)
120
121 if request.method == 'POST':
122 speaker = DataGetter.get_speaker(speaker_id)
123 DataManager.edit_speaker(request, speaker)
124 flash("The speaker has been updated successfully", "success")
125 return redirect(url_for('.display_my_sessions_view', event_id=speaker.event_id))
126
127
128 @my_sessions.route('/<int:event_id>/speakers/<int:speaker_id>/avatar', methods=('DELETE',))
129 def avatar_delete(event_id, speaker_id):
130 if request.method == 'DELETE':
131 speaker = DataGetter.get_speakers(event_id).filter_by(user_id=login.current_user.id, id=speaker_id).first()
132 if speaker:
133 speaker.photo = ''
134 speaker.small = ''
135 speaker.thumbnail = ''
136 speaker.icon = ''
137 save_to_db(speaker)
138 return jsonify({'status': 'ok'})
139 else:
140 abort(403)
141
142
143 @my_sessions.route('/<int:session_id>/withdraw/')
144 def withdraw_session_view(session_id):
145 session = DataGetter.get_sessions_of_user_by_id(session_id)
146 session.deleted_at = datetime.now()
147 save_to_db(session)
148 flash("The session has been withdrawn", "success")
149 return redirect(url_for('.display_my_sessions_view', session_id=session_id))
150
[end of app/views/users/my_sessions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/views/users/my_sessions.py b/app/views/users/my_sessions.py
--- a/app/views/users/my_sessions.py
+++ b/app/views/users/my_sessions.py
@@ -81,12 +81,14 @@
return redirect(url_for('.display_my_sessions_view', event_id=session.event_id))
session_form = json.loads(form_elems.session_form)
event = DataGetter.get_event(session.event_id)
+ speakers = DataGetter.get_speakers(session.event_id).all()
return render_template(
'gentelella/users/mysessions/mysession_session_edit.html',
session=session,
speaker=speaker,
session_form=session_form,
- event=event)
+ event=event,
+ speakers=speakers)
if request.method == 'POST':
session = DataGetter.get_sessions_of_user_by_id(session_id)
| {"golden_diff": "diff --git a/app/views/users/my_sessions.py b/app/views/users/my_sessions.py\n--- a/app/views/users/my_sessions.py\n+++ b/app/views/users/my_sessions.py\n@@ -81,12 +81,14 @@\n return redirect(url_for('.display_my_sessions_view', event_id=session.event_id))\n session_form = json.loads(form_elems.session_form)\n event = DataGetter.get_event(session.event_id)\n+ speakers = DataGetter.get_speakers(session.event_id).all()\n return render_template(\n 'gentelella/users/mysessions/mysession_session_edit.html',\n session=session,\n speaker=speaker,\n session_form=session_form,\n- event=event)\n+ event=event,\n+ speakers=speakers)\n \n if request.method == 'POST':\n session = DataGetter.get_sessions_of_user_by_id(session_id)\n", "issue": "If speaker edit their profile or session they are unassigned\nIf speaker edit their profile or session they are unassigned from their session. Result: Organizer needs to re-add them to the session.\r\n\r\nExpected: If speaker edit profile or session nothing should change. Sessions should still have the same speaker and same status.\n", "before_files": [{"content": "import json\nfrom datetime import datetime\n\nfrom flask import Blueprint, jsonify\nfrom flask import flash, redirect, url_for, request\nfrom flask import render_template\nfrom flask.ext.restplus import abort\nfrom flask.ext import login\nfrom markupsafe import Markup\n\nfrom app.helpers.data import DataManager, save_to_db\nfrom app.helpers.data_getter import DataGetter\nfrom app.helpers.auth import AuthManager\n\nmy_sessions = Blueprint('my_sessions', __name__, url_prefix='/events/mysessions')\n\n\n@my_sessions.route('/')\ndef display_my_sessions_view():\n placeholder_images = DataGetter.get_event_default_images()\n custom_placeholder = DataGetter.get_custom_placeholders()\n upcoming_events_sessions = DataGetter.get_sessions_of_user(upcoming_events=True)\n im_config = DataGetter.get_image_configs()\n im_size = ''\n for config in im_config:\n if config.page == 'mysession':\n im_size = config.size\n past_events_sessions = DataGetter.get_sessions_of_user(upcoming_events=False)\n page_content = {\"tab_upcoming_events\": \"Upcoming Sessions\",\n \"tab_past_events\": \"Past Sessions\",\n \"title\": \"My Session Proposals\"}\n if not AuthManager.is_verified_user():\n flash(Markup(\"Your account is unverified. \"\n \"Please verify by clicking on the confirmation link that has been emailed to you.\"\n '<br>Did not get the email? 
Please <a href=\"/resend_email/\" class=\"alert-link\"> '\n 'click here to resend the confirmation.</a>'))\n return render_template('gentelella/users/mysessions/mysessions_list.html',\n upcoming_events_sessions=upcoming_events_sessions,\n past_events_sessions=past_events_sessions,\n page_content=page_content,\n placeholder_images=placeholder_images,\n custom_placeholder=custom_placeholder,\n im_size=im_size)\n\n\n@my_sessions.route('/<int:session_id>/')\ndef display_session_view(session_id):\n session = DataGetter.get_sessions_of_user_by_id(session_id)\n if not session:\n abort(404)\n form_elems = DataGetter.get_custom_form_elements(session.event_id)\n if not form_elems:\n flash(\"Speaker and Session forms have been incorrectly configured for this event.\"\n \" Session creation has been disabled\", \"danger\")\n return redirect(url_for('.display_my_sessions_view', event_id=session.event_id))\n speaker_form = json.loads(form_elems.speaker_form)\n session_form = json.loads(form_elems.session_form)\n event = DataGetter.get_event(session.event_id)\n speakers = DataGetter.get_speakers(session.event_id).all()\n user_speaker = DataGetter.get_speakers(session.event_id).filter_by(user_id=login.current_user.id).first()\n return render_template('gentelella/users/mysessions/mysession_detail.html',\n session=session,\n speaker_form=speaker_form,\n session_form=session_form,\n event=event,\n speakers=speakers,\n user_speaker=user_speaker)\n\n\n@my_sessions.route('/<int:session_id>/session-edit/', methods=('POST', 'GET'))\ndef process_session_view(session_id):\n if request.method == 'GET':\n session = DataGetter.get_sessions_of_user_by_id(session_id)\n speaker = DataGetter.get_speakers(session.event_id).filter_by(user_id=login.current_user.id).first()\n if not session:\n abort(404)\n form_elems = DataGetter.get_custom_form_elements(session.event_id)\n if not form_elems:\n flash(\"Speaker and Session forms have been incorrectly configured for this event.\"\n \" Session creation has been disabled\", \"danger\")\n return redirect(url_for('.display_my_sessions_view', event_id=session.event_id))\n session_form = json.loads(form_elems.session_form)\n event = DataGetter.get_event(session.event_id)\n return render_template(\n 'gentelella/users/mysessions/mysession_session_edit.html',\n session=session,\n speaker=speaker,\n session_form=session_form,\n event=event)\n\n if request.method == 'POST':\n session = DataGetter.get_sessions_of_user_by_id(session_id)\n speaker = DataGetter.get_speakers(session.event_id).filter_by(user_id=login.current_user.id).first()\n DataManager.edit_session(request, session, speaker)\n flash(\"The session has been updated successfully\", \"success\")\n return redirect(url_for('.display_session_view', session_id=session_id))\n\n\n@my_sessions.route('/<int:speaker_id>/speaker-edit/', methods=('POST', 'GET'))\ndef process_speaker_view(speaker_id):\n if request.method == 'GET':\n speaker = DataGetter.get_speaker(speaker_id)\n if not speaker or speaker.name == '':\n abort(404)\n form_elems = DataGetter.get_custom_form_elements(speaker.event_id)\n if not form_elems:\n flash(\"Speaker and Session forms have been incorrectly configured for this event.\"\n \" Session creation has been disabled\", \"danger\")\n return redirect(url_for('.display_my_sessions_view', event_id=speaker.event_id))\n speaker_form = json.loads(form_elems.speaker_form)\n event = DataGetter.get_event(speaker.event_id)\n return render_template(\n 'gentelella/users/mysessions/mysession_speaker_edit.html',\n 
photo_delete_url=url_for('.avatar_delete',\n event_id=event.id,\n speaker_id=speaker.id),\n speaker_form=speaker_form,\n event=event,\n speaker=speaker)\n\n if request.method == 'POST':\n speaker = DataGetter.get_speaker(speaker_id)\n DataManager.edit_speaker(request, speaker)\n flash(\"The speaker has been updated successfully\", \"success\")\n return redirect(url_for('.display_my_sessions_view', event_id=speaker.event_id))\n\n\n@my_sessions.route('/<int:event_id>/speakers/<int:speaker_id>/avatar', methods=('DELETE',))\ndef avatar_delete(event_id, speaker_id):\n if request.method == 'DELETE':\n speaker = DataGetter.get_speakers(event_id).filter_by(user_id=login.current_user.id, id=speaker_id).first()\n if speaker:\n speaker.photo = ''\n speaker.small = ''\n speaker.thumbnail = ''\n speaker.icon = ''\n save_to_db(speaker)\n return jsonify({'status': 'ok'})\n else:\n abort(403)\n\n\n@my_sessions.route('/<int:session_id>/withdraw/')\ndef withdraw_session_view(session_id):\n session = DataGetter.get_sessions_of_user_by_id(session_id)\n session.deleted_at = datetime.now()\n save_to_db(session)\n flash(\"The session has been withdrawn\", \"success\")\n return redirect(url_for('.display_my_sessions_view', session_id=session_id))\n", "path": "app/views/users/my_sessions.py"}]} | 2,350 | 182 |
gh_patches_debug_15440 | rasdani/github-patches | git_diff | akvo__akvo-rsr-3372 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Google maps API requests should use an API key
The `For development purposes only` watermark is being shown on our maps, as Google has made it mandatory to use an API key to talk to the Maps API.
</issue>
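A minimal sketch of the idea — pull a Maps API key out of Django settings and hand it to templates through a context processor. The setting name, context key, and fallback value are illustrative assumptions, not necessarily what the project uses:

```
from django.conf import settings

def google_maps_context(request):
    # Illustrative context processor; the setting and context-variable names
    # are assumptions, not necessarily the ones the project uses.
    api_key = getattr(settings, 'GOOGLE_MAPS_API_KEY', 'NO_API_KEY')
    return {'gmaps_api_key': api_key}
```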
<code>
[start of akvo/rsr/context_processors.py]
1 # -*- coding: utf-8 -*-
2 """
3 Akvo RSR is covered by the GNU Affero General Public License.
4
5 See more details in the license.txt file located at the root folder of the
6 Akvo RSR module. For additional details on the GNU license please see
7 < http://www.gnu.org/licenses/agpl.html >.
8 """
9
10 import re
11 import django
12
13 from django.conf import settings
14 from django.core.exceptions import DisallowedHost
15 from django.contrib.sites.models import get_current_site
16
17
18 def extra_context(request, protocol="http"):
19 """Add information to the request context."""
20 try:
21 current_site = get_current_site(request)
22 except DisallowedHost:
23 current_site = None
24
25 django_version = django.get_version()
26 debug = getattr(settings, 'DEBUG', False)
27 deploy_tag = getattr(settings, 'DEPLOY_TAG', 'Unknown')
28 deploy_branch = getattr(settings, 'DEPLOY_BRANCH', 'Unknown')
29 deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown')
30 deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown')
31 sentry_dsn = get_sentry_dsn(settings)
32
33 return dict(
34 current_site=current_site,
35 django_version=django_version,
36 debug=debug,
37 deploy_tag=deploy_tag,
38 deploy_branch=deploy_branch,
39 deploy_commit_id=deploy_commit_id,
40 deploy_commit_full_id=deploy_commit_full_id,
41 sentry_dsn=sentry_dsn,
42 )
43
44
45 def get_sentry_dsn(settings):
46 sentry_dsn = getattr(settings, 'RAVEN_CONFIG', {}).get('dsn', '')
47 sentry_dsn = re.sub('(:\w*?)@', '@', sentry_dsn)
48 # Always use https!
49 sentry_dsn = sentry_dsn.replace('http://', 'https://')
50 return sentry_dsn
51
52
53 def get_current_path_without_lang(request):
54 """Return current path without lang."""
55 path = request.get_full_path()
56 path_bits = path.split('/')
57 path = '/'.join(path_bits[2:])
58 return {'current_path_without_lang': path}
59
60
61 def extra_pages_context(request):
62 """Add context information of an RSR Page."""
63 if request.rsr_page:
64 page = request.rsr_page
65 return {
66 'rsr_page': page,
67 'favicon': page.favicon,
68 'logo': page.logo,
69 'organisation': page.organisation,
70 'return_url': page.return_url,
71 'return_url_text': page.custom_return_url_text,
72 'page_stylesheet': page.stylesheet,
73 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),
74 'domain_url': '//{}'.format(settings.RSR_DOMAIN),
75 'no_facebook': not page.facebook_button,
76 'facebook_app_id': page.facebook_app_id,
77 'no_twitter': not page.twitter_button,
78 }
79
80 return {}
81
[end of akvo/rsr/context_processors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rsr/context_processors.py b/akvo/rsr/context_processors.py
--- a/akvo/rsr/context_processors.py
+++ b/akvo/rsr/context_processors.py
@@ -29,6 +29,7 @@
deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown')
deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown')
sentry_dsn = get_sentry_dsn(settings)
+ gmaps_api_key = getattr(settings, 'GOOGLE_MAPS_API_KEY', 'NO_API_KEY')
return dict(
current_site=current_site,
@@ -39,6 +40,7 @@
deploy_commit_id=deploy_commit_id,
deploy_commit_full_id=deploy_commit_full_id,
sentry_dsn=sentry_dsn,
+ gmaps_api_key=gmaps_api_key,
)
| {"golden_diff": "diff --git a/akvo/rsr/context_processors.py b/akvo/rsr/context_processors.py\n--- a/akvo/rsr/context_processors.py\n+++ b/akvo/rsr/context_processors.py\n@@ -29,6 +29,7 @@\n deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown')\n deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown')\n sentry_dsn = get_sentry_dsn(settings)\n+ gmaps_api_key = getattr(settings, 'GOOGLE_MAPS_API_KEY', 'NO_API_KEY')\n \n return dict(\n current_site=current_site,\n@@ -39,6 +40,7 @@\n deploy_commit_id=deploy_commit_id,\n deploy_commit_full_id=deploy_commit_full_id,\n sentry_dsn=sentry_dsn,\n+ gmaps_api_key=gmaps_api_key,\n )\n", "issue": "Google maps API requests should use an API key\n`For development purposes only` watermark is being shown on our maps as Google has made it mandatory to use an API key to talk to the maps API. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nAkvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. For additional details on the GNU license please see\n< http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nimport re\nimport django\n\nfrom django.conf import settings\nfrom django.core.exceptions import DisallowedHost\nfrom django.contrib.sites.models import get_current_site\n\n\ndef extra_context(request, protocol=\"http\"):\n \"\"\"Add information to the request context.\"\"\"\n try:\n current_site = get_current_site(request)\n except DisallowedHost:\n current_site = None\n\n django_version = django.get_version()\n debug = getattr(settings, 'DEBUG', False)\n deploy_tag = getattr(settings, 'DEPLOY_TAG', 'Unknown')\n deploy_branch = getattr(settings, 'DEPLOY_BRANCH', 'Unknown')\n deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown')\n deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown')\n sentry_dsn = get_sentry_dsn(settings)\n\n return dict(\n current_site=current_site,\n django_version=django_version,\n debug=debug,\n deploy_tag=deploy_tag,\n deploy_branch=deploy_branch,\n deploy_commit_id=deploy_commit_id,\n deploy_commit_full_id=deploy_commit_full_id,\n sentry_dsn=sentry_dsn,\n )\n\n\ndef get_sentry_dsn(settings):\n sentry_dsn = getattr(settings, 'RAVEN_CONFIG', {}).get('dsn', '')\n sentry_dsn = re.sub('(:\\w*?)@', '@', sentry_dsn)\n # Always use https!\n sentry_dsn = sentry_dsn.replace('http://', 'https://')\n return sentry_dsn\n\n\ndef get_current_path_without_lang(request):\n \"\"\"Return current path without lang.\"\"\"\n path = request.get_full_path()\n path_bits = path.split('/')\n path = '/'.join(path_bits[2:])\n return {'current_path_without_lang': path}\n\n\ndef extra_pages_context(request):\n \"\"\"Add context information of an RSR Page.\"\"\"\n if request.rsr_page:\n page = request.rsr_page\n return {\n 'rsr_page': page,\n 'favicon': page.favicon,\n 'logo': page.logo,\n 'organisation': page.organisation,\n 'return_url': page.return_url,\n 'return_url_text': page.custom_return_url_text,\n 'page_stylesheet': page.stylesheet,\n 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),\n 'domain_url': '//{}'.format(settings.RSR_DOMAIN),\n 'no_facebook': not page.facebook_button,\n 'facebook_app_id': page.facebook_app_id,\n 'no_twitter': not page.twitter_button,\n }\n\n return {}\n", "path": "akvo/rsr/context_processors.py"}]} | 1,354 | 194 |
gh_patches_debug_30947 | rasdani/github-patches | git_diff | allegro__ralph-1541 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ubuntu package
We should build Ubuntu packages for ralph (without most of the scan plugins) so that it can be installed easily by anyone reluctant to use Docker.
- all js and components integrated into the package
- /etc/ralph for system configuration
- only ubuntu supported
</issue>
<code>
[start of src/ralph/__main__.py]
1 #!/usr/bin/env python
2 import os
3 import sys
4
5
6 def main(settings_module='ralph.settings'):
7 os.environ.setdefault('DJANGO_SETTINGS_MODULE', settings_module)
8
9 from django.core.management import execute_from_command_line
10
11 execute_from_command_line(sys.argv)
12
13
14 def dev():
15 main('ralph.settings.dev')
16
17
18 def test():
19 main('ralph.settings.test')
20
21
22 if __name__ == '__main__':
23 main()
24
[end of src/ralph/__main__.py]
[start of setup.py]
1 # -*- encoding: utf-8 -*-
2
3 import os
4 import sys
5 from setuptools import setup, find_packages
6
7 assert sys.version_info >= (3, 3), 'Python 3.3+ required.'
8
9
10 def read(fname):
11 return open(os.path.join(os.path.dirname(__file__), fname)).read()
12
13 setup(
14 name='ralph',
15 version='3.0.0', # TODO: import from ralph
16 author='Grupa Allegro Sp. z o.o. and Contributors',
17 author_email='[email protected]',
18 description="Advanced Asset Management and DCIM system for data center and back office.",
19 long_description='\n'.join([read('README.md'), read('CHANGES')]),
20 url='http://ralph.allegrogroup.com/',
21 keywords='',
22 platforms=['any'],
23 license='Apache Software License v2.0',
24 packages=find_packages('src'), # TODO: remove src intermediate directory
25 include_package_data=True,
26 package_dir={'': 'src'},
27 zip_safe=False, # because templates are loaded from file path
28 entry_points={
29 'console_scripts': [
30 'ralph = ralph.__main__:main',
31 'dev_ralph = ralph.__main__:dev',
32 'test_ralph = ralph.__main__:test',
33 ],
34 },
35 classifiers=[
36 'Development Status :: 4 - Beta',
37 'Framework :: Django',
38 'Intended Audience :: System Administrators',
39 'Intended Audience :: Information Technology',
40 'License :: OSI Approved :: Apache Software License',
41 'Natural Language :: English',
42 'Operating System :: POSIX',
43 'Operating System :: MacOS :: MacOS X',
44 'Operating System :: Microsoft :: Windows :: Windows NT/2000',
45 'Programming Language :: Python',
46 'Programming Language :: Python :: 3',
47 'Programming Language :: Python :: 3.4',
48 'Topic :: Internet :: WWW/HTTP',
49 ]
50 )
51
[end of setup.py]
[start of src/ralph/settings/prod.py]
1 from ralph.settings import * # noqa
2
3 STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage' # noqa
4 STATIC_ROOT = os.path.join(BASE_DIR, 'var', 'static')
5
6 LDAP_SERVER_OBJECT_USER_CLASS = 'user' # possible values: "user, person
7
[end of src/ralph/settings/prod.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,7 @@
setup(
name='ralph',
- version='3.0.0', # TODO: import from ralph
+ version=read('./VERSION').strip(),
author='Grupa Allegro Sp. z o.o. and Contributors',
author_email='[email protected]',
description="Advanced Asset Management and DCIM system for data center and back office.",
@@ -21,13 +21,13 @@
keywords='',
platforms=['any'],
license='Apache Software License v2.0',
- packages=find_packages('src'), # TODO: remove src intermediate directory
+ packages=find_packages('src'),
include_package_data=True,
package_dir={'': 'src'},
zip_safe=False, # because templates are loaded from file path
entry_points={
'console_scripts': [
- 'ralph = ralph.__main__:main',
+ 'ralph = ralph.__main__:prod',
'dev_ralph = ralph.__main__:dev',
'test_ralph = ralph.__main__:test',
],
diff --git a/src/ralph/__main__.py b/src/ralph/__main__.py
--- a/src/ralph/__main__.py
+++ b/src/ralph/__main__.py
@@ -19,5 +19,9 @@
main('ralph.settings.test')
+def prod():
+ main('ralph.settings.prod')
+
+
if __name__ == '__main__':
- main()
+ main('ralph.settings.prod')
diff --git a/src/ralph/settings/prod.py b/src/ralph/settings/prod.py
--- a/src/ralph/settings/prod.py
+++ b/src/ralph/settings/prod.py
@@ -4,3 +4,7 @@
STATIC_ROOT = os.path.join(BASE_DIR, 'var', 'static')
LDAP_SERVER_OBJECT_USER_CLASS = 'user' # possible values: "user, person
+
+# FIXME: when going for full production, change it to False
+
+DEBUG = True
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,7 +12,7 @@\n \n setup(\n name='ralph',\n- version='3.0.0', # TODO: import from ralph\n+ version=read('./VERSION').strip(),\n author='Grupa Allegro Sp. z o.o. and Contributors',\n author_email='[email protected]',\n description=\"Advanced Asset Management and DCIM system for data center and back office.\",\n@@ -21,13 +21,13 @@\n keywords='',\n platforms=['any'],\n license='Apache Software License v2.0',\n- packages=find_packages('src'), # TODO: remove src intermediate directory\n+ packages=find_packages('src'),\n include_package_data=True,\n package_dir={'': 'src'},\n zip_safe=False, # because templates are loaded from file path\n entry_points={\n 'console_scripts': [\n- 'ralph = ralph.__main__:main',\n+ 'ralph = ralph.__main__:prod',\n 'dev_ralph = ralph.__main__:dev',\n 'test_ralph = ralph.__main__:test',\n ],\ndiff --git a/src/ralph/__main__.py b/src/ralph/__main__.py\n--- a/src/ralph/__main__.py\n+++ b/src/ralph/__main__.py\n@@ -19,5 +19,9 @@\n main('ralph.settings.test')\n \n \n+def prod():\n+ main('ralph.settings.prod')\n+\n+\n if __name__ == '__main__':\n- main()\n+ main('ralph.settings.prod')\ndiff --git a/src/ralph/settings/prod.py b/src/ralph/settings/prod.py\n--- a/src/ralph/settings/prod.py\n+++ b/src/ralph/settings/prod.py\n@@ -4,3 +4,7 @@\n STATIC_ROOT = os.path.join(BASE_DIR, 'var', 'static')\n \n LDAP_SERVER_OBJECT_USER_CLASS = 'user' # possible values: \"user, person\n+\n+# FIXME: when going for full production, change it to False\n+\n+DEBUG = True\n", "issue": "Ubuntu package\nWe should build ubuntu packages for ralph (without much of scan plugins) to be able to install easily if you're reluctant to use docker.\n- all js and components integrated into the package\n- /etc/ralph for system configuration\n- only ubuntu supported\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\n\ndef main(settings_module='ralph.settings'):\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', settings_module)\n\n from django.core.management import execute_from_command_line\n\n execute_from_command_line(sys.argv)\n\n\ndef dev():\n main('ralph.settings.dev')\n\n\ndef test():\n main('ralph.settings.test')\n\n\nif __name__ == '__main__':\n main()\n", "path": "src/ralph/__main__.py"}, {"content": "# -*- encoding: utf-8 -*-\n\nimport os\nimport sys\nfrom setuptools import setup, find_packages\n\nassert sys.version_info >= (3, 3), 'Python 3.3+ required.'\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n name='ralph',\n version='3.0.0', # TODO: import from ralph\n author='Grupa Allegro Sp. z o.o. 
and Contributors',\n author_email='[email protected]',\n description=\"Advanced Asset Management and DCIM system for data center and back office.\",\n long_description='\\n'.join([read('README.md'), read('CHANGES')]),\n url='http://ralph.allegrogroup.com/',\n keywords='',\n platforms=['any'],\n license='Apache Software License v2.0',\n packages=find_packages('src'), # TODO: remove src intermediate directory\n include_package_data=True,\n package_dir={'': 'src'},\n zip_safe=False, # because templates are loaded from file path\n entry_points={\n 'console_scripts': [\n 'ralph = ralph.__main__:main',\n 'dev_ralph = ralph.__main__:dev',\n 'test_ralph = ralph.__main__:test',\n ],\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Framework :: Django',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Operating System :: POSIX',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows :: Windows NT/2000',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Internet :: WWW/HTTP',\n ]\n)\n", "path": "setup.py"}, {"content": "from ralph.settings import * # noqa\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage' # noqa\nSTATIC_ROOT = os.path.join(BASE_DIR, 'var', 'static')\n\nLDAP_SERVER_OBJECT_USER_CLASS = 'user' # possible values: \"user, person\n", "path": "src/ralph/settings/prod.py"}]} | 1,351 | 476 |
gh_patches_debug_39660 | rasdani/github-patches | git_diff | streamlink__streamlink-141 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Euronews plugin broken
I dug up the EuroNews plugin, which has been broken since December 2014.
https://github.com/chrippa/livestreamer/issues/626
</issue>
<code>
[start of src/streamlink/plugins/euronews.py]
1 import re
2
3 from itertools import chain
4
5 from streamlink.compat import urlparse
6 from streamlink.plugin import Plugin
7 from streamlink.plugin.api import http
8 from streamlink.stream import HLSStream, HTTPStream
9
10 from streamlink.plugin.api.support_plugin import common_jwplayer as jwplayer
11
12 _url_re = re.compile("http(s)?://(\w+\.)?euronews.com")
13
14
15 class Euronews(Plugin):
16 @classmethod
17 def can_handle_url(self, url):
18 return _url_re.match(url)
19
20 def _create_stream(self, source):
21 url = source["file"]
22
23 if urlparse(url).path.endswith("m3u8"):
24 streams = HLSStream.parse_variant_playlist(self.session, url)
25
26 # TODO: Replace with "yield from" when dropping Python 2.
27 for stream in streams.items():
28 yield stream
29 else:
30 name = source.get("label", "vod")
31 yield name, HTTPStream(self.session, url)
32
33 def _get_streams(self):
34 res = http.get(self.url)
35 playlist = jwplayer.parse_playlist(res)
36 if not playlist:
37 return
38
39 for item in playlist:
40 streams = map(self._create_stream, item["sources"])
41
42 # TODO: Replace with "yield from" when dropping Python 2.
43 for stream in chain.from_iterable(streams):
44 yield stream
45
46 __plugin__ = Euronews
47
[end of src/streamlink/plugins/euronews.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/euronews.py b/src/streamlink/plugins/euronews.py
--- a/src/streamlink/plugins/euronews.py
+++ b/src/streamlink/plugins/euronews.py
@@ -1,46 +1,77 @@
import re
-from itertools import chain
-
-from streamlink.compat import urlparse
from streamlink.plugin import Plugin
from streamlink.plugin.api import http
+from streamlink.plugin.api import validate
from streamlink.stream import HLSStream, HTTPStream
-from streamlink.plugin.api.support_plugin import common_jwplayer as jwplayer
-
-_url_re = re.compile("http(s)?://(\w+\.)?euronews.com")
-
class Euronews(Plugin):
- @classmethod
- def can_handle_url(self, url):
- return _url_re.match(url)
+ _url_re = re.compile("http(?:s)?://(\w+)\.?euronews.com/(live|.*)")
+ _re_vod = re.compile(r'<meta\s+property="og:video"\s+content="(http.*?)"\s*/>')
+ _live_api_url = "http://fr.euronews.com/api/watchlive.json"
+ _live_schema = validate.Schema({
+ u"url": validate.url()
+ })
+ _stream_api_schema = validate.Schema({
+ u'status': u'ok',
+ u'primary': {
+ validate.text: {
+ validate.optional(u'hls'): validate.url(),
+ validate.optional(u'rtsp'): validate.url(scheme="rtsp")
+ }
+ },
+ validate.optional(u'backup'): {
+ validate.text: {
+ validate.optional(u'hls'): validate.url(),
+ validate.optional(u'rtsp'): validate.url(scheme="rtsp")
+ }
+ }
+ })
- def _create_stream(self, source):
- url = source["file"]
+ @classmethod
+ def can_handle_url(cls, url):
+ return cls._url_re.match(url)
- if urlparse(url).path.endswith("m3u8"):
- streams = HLSStream.parse_variant_playlist(self.session, url)
+ def _get_vod_stream(self):
+ """
+ Find the VOD video url
+ :return: video url
+ """
+ res = http.get(self.url)
+ video_urls = self._re_vod.findall(res.text)
+ if len(video_urls):
+ return dict(vod=HTTPStream(self.session, video_urls[0]))
- # TODO: Replace with "yield from" when dropping Python 2.
- for stream in streams.items():
- yield stream
- else:
- name = source.get("label", "vod")
- yield name, HTTPStream(self.session, url)
+ def _get_live_streams(self, language):
+ """
+ Get the live stream in a particular language
+ :param language:
+ :return:
+ """
+ res = http.get(self._live_api_url)
+ live_res = http.json(res, schema=self._live_schema)
+ api_res = http.get(live_res[u"url"])
+ stream_data = http.json(api_res, schema=self._stream_api_schema)
+ # find the stream in the requested language
+ if language in stream_data[u'primary']:
+ playlist_url = stream_data[u'primary'][language][u"hls"]
+ return HLSStream.parse_variant_playlist(self.session, playlist_url)
def _get_streams(self):
- res = http.get(self.url)
- playlist = jwplayer.parse_playlist(res)
- if not playlist:
- return
+ """
+ Find the streams for euronews
+ :return:
+ """
+ match = self._url_re.match(self.url)
+ language, path = match.groups()
- for item in playlist:
- streams = map(self._create_stream, item["sources"])
+ # remap domain to language (default to english)
+ language = {"www": "en", "": "en", "arabic": "ar"}.get(language, language)
- # TODO: Replace with "yield from" when dropping Python 2.
- for stream in chain.from_iterable(streams):
- yield stream
+ if path == "live":
+ return self._get_live_streams(language)
+ else:
+ return self._get_vod_stream()
__plugin__ = Euronews
| {"golden_diff": "diff --git a/src/streamlink/plugins/euronews.py b/src/streamlink/plugins/euronews.py\n--- a/src/streamlink/plugins/euronews.py\n+++ b/src/streamlink/plugins/euronews.py\n@@ -1,46 +1,77 @@\n import re\n \n-from itertools import chain\n-\n-from streamlink.compat import urlparse\n from streamlink.plugin import Plugin\n from streamlink.plugin.api import http\n+from streamlink.plugin.api import validate\n from streamlink.stream import HLSStream, HTTPStream\n \n-from streamlink.plugin.api.support_plugin import common_jwplayer as jwplayer\n-\n-_url_re = re.compile(\"http(s)?://(\\w+\\.)?euronews.com\")\n-\n \n class Euronews(Plugin):\n- @classmethod\n- def can_handle_url(self, url):\n- return _url_re.match(url)\n+ _url_re = re.compile(\"http(?:s)?://(\\w+)\\.?euronews.com/(live|.*)\")\n+ _re_vod = re.compile(r'<meta\\s+property=\"og:video\"\\s+content=\"(http.*?)\"\\s*/>')\n+ _live_api_url = \"http://fr.euronews.com/api/watchlive.json\"\n+ _live_schema = validate.Schema({\n+ u\"url\": validate.url()\n+ })\n+ _stream_api_schema = validate.Schema({\n+ u'status': u'ok',\n+ u'primary': {\n+ validate.text: {\n+ validate.optional(u'hls'): validate.url(),\n+ validate.optional(u'rtsp'): validate.url(scheme=\"rtsp\")\n+ }\n+ },\n+ validate.optional(u'backup'): {\n+ validate.text: {\n+ validate.optional(u'hls'): validate.url(),\n+ validate.optional(u'rtsp'): validate.url(scheme=\"rtsp\")\n+ }\n+ }\n+ })\n \n- def _create_stream(self, source):\n- url = source[\"file\"]\n+ @classmethod\n+ def can_handle_url(cls, url):\n+ return cls._url_re.match(url)\n \n- if urlparse(url).path.endswith(\"m3u8\"):\n- streams = HLSStream.parse_variant_playlist(self.session, url)\n+ def _get_vod_stream(self):\n+ \"\"\"\n+ Find the VOD video url\n+ :return: video url\n+ \"\"\"\n+ res = http.get(self.url)\n+ video_urls = self._re_vod.findall(res.text)\n+ if len(video_urls):\n+ return dict(vod=HTTPStream(self.session, video_urls[0]))\n \n- # TODO: Replace with \"yield from\" when dropping Python 2.\n- for stream in streams.items():\n- yield stream\n- else:\n- name = source.get(\"label\", \"vod\")\n- yield name, HTTPStream(self.session, url)\n+ def _get_live_streams(self, language):\n+ \"\"\"\n+ Get the live stream in a particular language\n+ :param language:\n+ :return:\n+ \"\"\"\n+ res = http.get(self._live_api_url)\n+ live_res = http.json(res, schema=self._live_schema)\n+ api_res = http.get(live_res[u\"url\"])\n+ stream_data = http.json(api_res, schema=self._stream_api_schema)\n+ # find the stream in the requested language\n+ if language in stream_data[u'primary']:\n+ playlist_url = stream_data[u'primary'][language][u\"hls\"]\n+ return HLSStream.parse_variant_playlist(self.session, playlist_url)\n \n def _get_streams(self):\n- res = http.get(self.url)\n- playlist = jwplayer.parse_playlist(res)\n- if not playlist:\n- return\n+ \"\"\"\n+ Find the streams for euronews\n+ :return:\n+ \"\"\"\n+ match = self._url_re.match(self.url)\n+ language, path = match.groups()\n \n- for item in playlist:\n- streams = map(self._create_stream, item[\"sources\"])\n+ # remap domain to language (default to english)\n+ language = {\"www\": \"en\", \"\": \"en\", \"arabic\": \"ar\"}.get(language, language)\n \n- # TODO: Replace with \"yield from\" when dropping Python 2.\n- for stream in chain.from_iterable(streams):\n- yield stream\n+ if path == \"live\":\n+ return self._get_live_streams(language)\n+ else:\n+ return self._get_vod_stream()\n \n __plugin__ = Euronews\n", "issue": "Euronews plugin broken\nI dig up EuroNews plugin which 
is broken since December 2014.\r\n\r\nhttps://github.com/chrippa/livestreamer/issues/626\n", "before_files": [{"content": "import re\n\nfrom itertools import chain\n\nfrom streamlink.compat import urlparse\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.stream import HLSStream, HTTPStream\n\nfrom streamlink.plugin.api.support_plugin import common_jwplayer as jwplayer\n\n_url_re = re.compile(\"http(s)?://(\\w+\\.)?euronews.com\")\n\n\nclass Euronews(Plugin):\n @classmethod\n def can_handle_url(self, url):\n return _url_re.match(url)\n\n def _create_stream(self, source):\n url = source[\"file\"]\n\n if urlparse(url).path.endswith(\"m3u8\"):\n streams = HLSStream.parse_variant_playlist(self.session, url)\n\n # TODO: Replace with \"yield from\" when dropping Python 2.\n for stream in streams.items():\n yield stream\n else:\n name = source.get(\"label\", \"vod\")\n yield name, HTTPStream(self.session, url)\n\n def _get_streams(self):\n res = http.get(self.url)\n playlist = jwplayer.parse_playlist(res)\n if not playlist:\n return\n\n for item in playlist:\n streams = map(self._create_stream, item[\"sources\"])\n\n # TODO: Replace with \"yield from\" when dropping Python 2.\n for stream in chain.from_iterable(streams):\n yield stream\n\n__plugin__ = Euronews\n", "path": "src/streamlink/plugins/euronews.py"}]} | 974 | 981 |
gh_patches_debug_3261 | rasdani/github-patches | git_diff | Kinto__kinto-476 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error while trying to generate a configuration file without a subfolder using the CLI.
```
$ kinto --ini kinto.ini
Traceback (most recent call last):
File "~/.virtualenvs/kinto/bin/kinto", line 9, in <module>
load_entry_point('kinto', 'console_scripts', 'kinto')()
File "~/mozilla/kinto/kinto/__main__.py", line 72, in main
init(config_file, backend)
File "~/mozilla/kinto/kinto/config/__init__.py", line 50, in init
render_template("kinto.tpl", config_file, **values)
File "~/mozilla/kinto/kinto/config/__init__.py", line 14, in render_template
os.makedirs(folder)
File "~/.virtualenvs/kinto/lib/python2.7/os.py", line 157, in makedirs
mkdir(name, mode)
OSError: [Errno 2] No such file or directory: ''
```
</issue>
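The traceback reduces to `os.path.dirname('kinto.ini')` returning an empty string, which `os.makedirs('')` refuses. A small sketch of the failure and the kind of guard that avoids it (illustrative, not the project's exact code):

```
import os

config_file = 'kinto.ini'               # bare filename, no subfolder
folder = os.path.dirname(config_file)   # -> ''
if folder and not os.path.exists(folder):
    # Only create a directory when the path actually contains one;
    # os.makedirs('') raises OSError.
    os.makedirs(folder)
```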
<code>
[start of kinto/config/__init__.py]
1 import os
2 import codecs
3
4 from cliquet import utils as cliquet_utils
5
6 from kinto import logger
7
8 HERE = os.path.abspath(os.path.dirname(__file__))
9
10
11 def render_template(template, destination, **kwargs):
12 template = os.path.join(HERE, template)
13 folder = os.path.dirname(destination)
14
15 if not os.path.exists(folder):
16 os.makedirs(folder)
17
18 logger.info("Created config {}".format(os.path.abspath(destination)))
19
20 with codecs.open(template, 'r', encoding='utf-8') as f:
21 raw_template = f.read()
22 rendered = raw_template.format(**kwargs)
23 with codecs.open(destination, 'w+', encoding='utf-8') as output:
24 output.write(rendered)
25
26
27 def init(config_file, backend):
28 values = {}
29
30 values['secret'] = cliquet_utils.random_bytes_hex(32)
31
32 values['storage_backend'] = "cliquet.storage.%s" % backend
33 values['cache_backend'] = "cliquet.cache.%s" % backend
34 values['permission_backend'] = "cliquet.permission.%s" % backend
35
36 if backend == 'postgresql':
37 postgresql_url = "postgres://postgres:postgres@localhost/postgres"
38 values['storage_url'] = postgresql_url
39 values['cache_url'] = postgresql_url
40 values['permission_url'] = postgresql_url
41
42 elif backend == 'redis':
43 redis_url = "redis://localhost:6379"
44 values['storage_url'] = redis_url + "/1"
45 values['cache_url'] = redis_url + "/2"
46 values['permission_url'] = redis_url + "/3"
47
48 else:
49 values['storage_url'] = ''
50 values['cache_url'] = ''
51 values['permission_url'] = ''
52
53 render_template("kinto.tpl", config_file, **values)
54
[end of kinto/config/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/config/__init__.py b/kinto/config/__init__.py
--- a/kinto/config/__init__.py
+++ b/kinto/config/__init__.py
@@ -12,7 +12,7 @@
template = os.path.join(HERE, template)
folder = os.path.dirname(destination)
- if not os.path.exists(folder):
+ if folder and not os.path.exists(folder):
os.makedirs(folder)
logger.info("Created config {}".format(os.path.abspath(destination)))
| {"golden_diff": "diff --git a/kinto/config/__init__.py b/kinto/config/__init__.py\n--- a/kinto/config/__init__.py\n+++ b/kinto/config/__init__.py\n@@ -12,7 +12,7 @@\n template = os.path.join(HERE, template)\n folder = os.path.dirname(destination)\n \n- if not os.path.exists(folder):\n+ if folder and not os.path.exists(folder):\n os.makedirs(folder)\n \n logger.info(\"Created config {}\".format(os.path.abspath(destination)))\n", "issue": "Error while trying to generate a configuration file without subfolder with CLI.\n```\n$ kinto --ini kinto.ini\n\nTraceback (most recent call last):\n File \"~/.virtualenvs/kinto/bin/kinto\", line 9, in <module>\n load_entry_point('kinto', 'console_scripts', 'kinto')()\n File \"~/mozilla/kinto/kinto/__main__.py\", line 72, in main\n init(config_file, backend)\n File \"~/mozilla/kinto/kinto/config/__init__.py\", line 50, in init\n render_template(\"kinto.tpl\", config_file, **values)\n File \"~/mozilla/kinto/kinto/config/__init__.py\", line 14, in render_template\n os.makedirs(folder)\n File \"~/.virtualenvs/kinto/lib/python2.7/os.py\", line 157, in makedirs\n mkdir(name, mode)\nOSError: [Errno 2] No such file or directory: ''\n```\n\n", "before_files": [{"content": "import os\nimport codecs\n\nfrom cliquet import utils as cliquet_utils\n\nfrom kinto import logger\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef render_template(template, destination, **kwargs):\n template = os.path.join(HERE, template)\n folder = os.path.dirname(destination)\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n logger.info(\"Created config {}\".format(os.path.abspath(destination)))\n\n with codecs.open(template, 'r', encoding='utf-8') as f:\n raw_template = f.read()\n rendered = raw_template.format(**kwargs)\n with codecs.open(destination, 'w+', encoding='utf-8') as output:\n output.write(rendered)\n\n\ndef init(config_file, backend):\n values = {}\n\n values['secret'] = cliquet_utils.random_bytes_hex(32)\n\n values['storage_backend'] = \"cliquet.storage.%s\" % backend\n values['cache_backend'] = \"cliquet.cache.%s\" % backend\n values['permission_backend'] = \"cliquet.permission.%s\" % backend\n\n if backend == 'postgresql':\n postgresql_url = \"postgres://postgres:postgres@localhost/postgres\"\n values['storage_url'] = postgresql_url\n values['cache_url'] = postgresql_url\n values['permission_url'] = postgresql_url\n\n elif backend == 'redis':\n redis_url = \"redis://localhost:6379\"\n values['storage_url'] = redis_url + \"/1\"\n values['cache_url'] = redis_url + \"/2\"\n values['permission_url'] = redis_url + \"/3\"\n\n else:\n values['storage_url'] = ''\n values['cache_url'] = ''\n values['permission_url'] = ''\n\n render_template(\"kinto.tpl\", config_file, **values)\n", "path": "kinto/config/__init__.py"}]} | 1,252 | 111 |
gh_patches_debug_26861 | rasdani/github-patches | git_diff | pyca__cryptography-1462 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove pragma nocovers from Windows specific code
See #502
</issue>
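A common way to drop `# pragma: no cover` from platform-specific branches is to move the decision into a small pure function that takes the platform as an argument, so both paths can be unit-tested on any OS. A hedged sketch of that pattern (names and return values mirror the module above, but this is illustrative, not the final refactor; error handling for bad `PYCA_WINDOWS_LINK_TYPE` values is omitted):

```
import os

def get_libraries(platform):
    # Testable on any machine by passing 'win32' or 'linux2' explicitly.
    if platform != "win32":
        return ["ssl", "crypto"]
    link_type = os.environ.get("PYCA_WINDOWS_LINK_TYPE", "static")
    if link_type == "dynamic":
        return ["libeay32", "ssleay32", "advapi32"]
    return ["libeay32mt", "ssleay32mt", "advapi32",
            "crypt32", "gdi32", "user32", "ws2_32"]
```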
<code>
[start of cryptography/hazmat/bindings/openssl/binding.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
10 # implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from __future__ import absolute_import, division, print_function
15
16 import os
17 import sys
18 import threading
19
20 from cryptography.hazmat.bindings.utils import build_ffi_for_binding
21
22
23 _OSX_PRE_INCLUDE = """
24 #ifdef __APPLE__
25 #include <AvailabilityMacros.h>
26 #define __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \
27 DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
28 #undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
29 #define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
30 #endif
31 """
32
33 _OSX_POST_INCLUDE = """
34 #ifdef __APPLE__
35 #undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
36 #define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \
37 __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
38 #endif
39 """
40
41
42 class Binding(object):
43 """
44 OpenSSL API wrapper.
45 """
46 _module_prefix = "cryptography.hazmat.bindings.openssl."
47 _modules = [
48 "aes",
49 "asn1",
50 "bignum",
51 "bio",
52 "cmac",
53 "cms",
54 "conf",
55 "crypto",
56 "dh",
57 "dsa",
58 "ec",
59 "ecdh",
60 "ecdsa",
61 "engine",
62 "err",
63 "evp",
64 "hmac",
65 "nid",
66 "objects",
67 "opensslv",
68 "osrandom_engine",
69 "pem",
70 "pkcs7",
71 "pkcs12",
72 "rand",
73 "rsa",
74 "ssl",
75 "x509",
76 "x509name",
77 "x509v3",
78 "x509_vfy"
79 ]
80
81 _locks = None
82 _lock_cb_handle = None
83 _lock_init_lock = threading.Lock()
84
85 ffi = None
86 lib = None
87
88 def __init__(self):
89 self._ensure_ffi_initialized()
90
91 @classmethod
92 def _ensure_ffi_initialized(cls):
93 if cls.ffi is not None and cls.lib is not None:
94 return
95
96 # OpenSSL goes by a different library name on different operating
97 # systems.
98 if sys.platform != "win32":
99 # In some circumstances, the order in which these libs are
100 # specified on the linker command-line is significant;
101 # libssl must come before libcrypto
102 # (http://marc.info/?l=openssl-users&m=135361825921871)
103 libraries = ["ssl", "crypto"]
104 else: # pragma: no cover
105 link_type = os.environ.get("PYCA_WINDOWS_LINK_TYPE", "static")
106 libraries = _get_windows_libraries(link_type)
107
108 cls.ffi, cls.lib = build_ffi_for_binding(
109 module_prefix=cls._module_prefix,
110 modules=cls._modules,
111 pre_include=_OSX_PRE_INCLUDE,
112 post_include=_OSX_POST_INCLUDE,
113 libraries=libraries,
114 )
115 res = cls.lib.Cryptography_add_osrandom_engine()
116 assert res != 0
117
118 @classmethod
119 def init_static_locks(cls):
120 with cls._lock_init_lock:
121 cls._ensure_ffi_initialized()
122
123 if not cls._lock_cb_handle:
124 cls._lock_cb_handle = cls.ffi.callback(
125 "void(int, int, const char *, int)",
126 cls._lock_cb
127 )
128
129 # Use Python's implementation if available, importing _ssl triggers
130 # the setup for this.
131 __import__("_ssl")
132
133 if cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL:
134 return
135
136 # If nothing else has setup a locking callback already, we set up
137 # our own
138 num_locks = cls.lib.CRYPTO_num_locks()
139 cls._locks = [threading.Lock() for n in range(num_locks)]
140
141 cls.lib.CRYPTO_set_locking_callback(cls._lock_cb_handle)
142
143 @classmethod
144 def _lock_cb(cls, mode, n, file, line):
145 lock = cls._locks[n]
146
147 if mode & cls.lib.CRYPTO_LOCK:
148 lock.acquire()
149 elif mode & cls.lib.CRYPTO_UNLOCK:
150 lock.release()
151 else:
152 raise RuntimeError(
153 "Unknown lock mode {0}: lock={1}, file={2}, line={3}.".format(
154 mode, n, file, line
155 )
156 )
157
158
159 def _get_windows_libraries(link_type):
160 if link_type == "dynamic":
161 return ["libeay32", "ssleay32", "advapi32"]
162 elif link_type == "static" or link_type == "":
163 return ["libeay32mt", "ssleay32mt", "advapi32",
164 "crypt32", "gdi32", "user32", "ws2_32"]
165 else:
166 raise ValueError(
167 "PYCA_WINDOWS_LINK_TYPE must be 'static' or 'dynamic'"
168 )
169
[end of cryptography/hazmat/bindings/openssl/binding.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cryptography/hazmat/bindings/openssl/binding.py b/cryptography/hazmat/bindings/openssl/binding.py
--- a/cryptography/hazmat/bindings/openssl/binding.py
+++ b/cryptography/hazmat/bindings/openssl/binding.py
@@ -95,15 +95,7 @@
# OpenSSL goes by a different library name on different operating
# systems.
- if sys.platform != "win32":
- # In some circumstances, the order in which these libs are
- # specified on the linker command-line is significant;
- # libssl must come before libcrypto
- # (http://marc.info/?l=openssl-users&m=135361825921871)
- libraries = ["ssl", "crypto"]
- else: # pragma: no cover
- link_type = os.environ.get("PYCA_WINDOWS_LINK_TYPE", "static")
- libraries = _get_windows_libraries(link_type)
+ libraries = _get_libraries(sys.platform)
cls.ffi, cls.lib = build_ffi_for_binding(
module_prefix=cls._module_prefix,
@@ -156,6 +148,18 @@
)
+def _get_libraries(platform):
+ if platform != "win32":
+ # In some circumstances, the order in which these libs are
+ # specified on the linker command-line is significant;
+ # libssl must come before libcrypto
+ # (http://marc.info/?l=openssl-users&m=135361825921871)
+ return ["ssl", "crypto"]
+ else:
+ link_type = os.environ.get("PYCA_WINDOWS_LINK_TYPE", "static")
+ return _get_windows_libraries(link_type)
+
+
def _get_windows_libraries(link_type):
if link_type == "dynamic":
return ["libeay32", "ssleay32", "advapi32"]
| {"golden_diff": "diff --git a/cryptography/hazmat/bindings/openssl/binding.py b/cryptography/hazmat/bindings/openssl/binding.py\n--- a/cryptography/hazmat/bindings/openssl/binding.py\n+++ b/cryptography/hazmat/bindings/openssl/binding.py\n@@ -95,15 +95,7 @@\n \n # OpenSSL goes by a different library name on different operating\n # systems.\n- if sys.platform != \"win32\":\n- # In some circumstances, the order in which these libs are\n- # specified on the linker command-line is significant;\n- # libssl must come before libcrypto\n- # (http://marc.info/?l=openssl-users&m=135361825921871)\n- libraries = [\"ssl\", \"crypto\"]\n- else: # pragma: no cover\n- link_type = os.environ.get(\"PYCA_WINDOWS_LINK_TYPE\", \"static\")\n- libraries = _get_windows_libraries(link_type)\n+ libraries = _get_libraries(sys.platform)\n \n cls.ffi, cls.lib = build_ffi_for_binding(\n module_prefix=cls._module_prefix,\n@@ -156,6 +148,18 @@\n )\n \n \n+def _get_libraries(platform):\n+ if platform != \"win32\":\n+ # In some circumstances, the order in which these libs are\n+ # specified on the linker command-line is significant;\n+ # libssl must come before libcrypto\n+ # (http://marc.info/?l=openssl-users&m=135361825921871)\n+ return [\"ssl\", \"crypto\"]\n+ else:\n+ link_type = os.environ.get(\"PYCA_WINDOWS_LINK_TYPE\", \"static\")\n+ return _get_windows_libraries(link_type)\n+\n+\n def _get_windows_libraries(link_type):\n if link_type == \"dynamic\":\n return [\"libeay32\", \"ssleay32\", \"advapi32\"]\n", "issue": "Remove pragma nocovers from Windows specific code\nSee #502 \n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nimport threading\n\nfrom cryptography.hazmat.bindings.utils import build_ffi_for_binding\n\n\n_OSX_PRE_INCLUDE = \"\"\"\n#ifdef __APPLE__\n#include <AvailabilityMacros.h>\n#define __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \\\n DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#endif\n\"\"\"\n\n_OSX_POST_INCLUDE = \"\"\"\n#ifdef __APPLE__\n#undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \\\n __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#endif\n\"\"\"\n\n\nclass Binding(object):\n \"\"\"\n OpenSSL API wrapper.\n \"\"\"\n _module_prefix = \"cryptography.hazmat.bindings.openssl.\"\n _modules = [\n \"aes\",\n \"asn1\",\n \"bignum\",\n \"bio\",\n \"cmac\",\n \"cms\",\n \"conf\",\n \"crypto\",\n \"dh\",\n \"dsa\",\n \"ec\",\n \"ecdh\",\n \"ecdsa\",\n \"engine\",\n \"err\",\n \"evp\",\n \"hmac\",\n \"nid\",\n \"objects\",\n \"opensslv\",\n \"osrandom_engine\",\n \"pem\",\n \"pkcs7\",\n \"pkcs12\",\n \"rand\",\n \"rsa\",\n \"ssl\",\n \"x509\",\n \"x509name\",\n \"x509v3\",\n \"x509_vfy\"\n ]\n\n _locks = None\n _lock_cb_handle = None\n _lock_init_lock = threading.Lock()\n\n ffi = None\n lib = 
None\n\n def __init__(self):\n self._ensure_ffi_initialized()\n\n @classmethod\n def _ensure_ffi_initialized(cls):\n if cls.ffi is not None and cls.lib is not None:\n return\n\n # OpenSSL goes by a different library name on different operating\n # systems.\n if sys.platform != \"win32\":\n # In some circumstances, the order in which these libs are\n # specified on the linker command-line is significant;\n # libssl must come before libcrypto\n # (http://marc.info/?l=openssl-users&m=135361825921871)\n libraries = [\"ssl\", \"crypto\"]\n else: # pragma: no cover\n link_type = os.environ.get(\"PYCA_WINDOWS_LINK_TYPE\", \"static\")\n libraries = _get_windows_libraries(link_type)\n\n cls.ffi, cls.lib = build_ffi_for_binding(\n module_prefix=cls._module_prefix,\n modules=cls._modules,\n pre_include=_OSX_PRE_INCLUDE,\n post_include=_OSX_POST_INCLUDE,\n libraries=libraries,\n )\n res = cls.lib.Cryptography_add_osrandom_engine()\n assert res != 0\n\n @classmethod\n def init_static_locks(cls):\n with cls._lock_init_lock:\n cls._ensure_ffi_initialized()\n\n if not cls._lock_cb_handle:\n cls._lock_cb_handle = cls.ffi.callback(\n \"void(int, int, const char *, int)\",\n cls._lock_cb\n )\n\n # Use Python's implementation if available, importing _ssl triggers\n # the setup for this.\n __import__(\"_ssl\")\n\n if cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL:\n return\n\n # If nothing else has setup a locking callback already, we set up\n # our own\n num_locks = cls.lib.CRYPTO_num_locks()\n cls._locks = [threading.Lock() for n in range(num_locks)]\n\n cls.lib.CRYPTO_set_locking_callback(cls._lock_cb_handle)\n\n @classmethod\n def _lock_cb(cls, mode, n, file, line):\n lock = cls._locks[n]\n\n if mode & cls.lib.CRYPTO_LOCK:\n lock.acquire()\n elif mode & cls.lib.CRYPTO_UNLOCK:\n lock.release()\n else:\n raise RuntimeError(\n \"Unknown lock mode {0}: lock={1}, file={2}, line={3}.\".format(\n mode, n, file, line\n )\n )\n\n\ndef _get_windows_libraries(link_type):\n if link_type == \"dynamic\":\n return [\"libeay32\", \"ssleay32\", \"advapi32\"]\n elif link_type == \"static\" or link_type == \"\":\n return [\"libeay32mt\", \"ssleay32mt\", \"advapi32\",\n \"crypt32\", \"gdi32\", \"user32\", \"ws2_32\"]\n else:\n raise ValueError(\n \"PYCA_WINDOWS_LINK_TYPE must be 'static' or 'dynamic'\"\n )\n", "path": "cryptography/hazmat/bindings/openssl/binding.py"}]} | 2,203 | 447 |
gh_patches_debug_29540 | rasdani/github-patches | git_diff | rasterio__rasterio-506 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Extract data bounds (excluding NODATA) from raster
I have a need for auto-cropping rasters that have large amounts of `NODATA` around the periphery. To support that, it seems like we could use a general function similar to `bounds`, e.g., `data_bounds`, which would return the minimum bounding rectangle of non-`NODATA` pixels.
I looked for prior work here and in scipy and didn't see anything obvious.
@sgillies any issues with adding this here?
</issue>
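The core of the request — the smallest window that still contains every non-`NODATA` pixel — can be sketched with plain NumPy. The function name and the `((row_start, row_stop), (col_start, col_stop))` return shape are illustrative assumptions, not a committed rasterio API:

```
import numpy as np

def data_bounds_window(arr, nodata):
    # Row/column indices of pixels that hold real data.
    rows, cols = np.nonzero(arr != nodata)
    if rows.size == 0:
        return ((0, 0), (0, 0))  # raster is entirely NODATA
    return ((int(rows.min()), int(rows.max()) + 1),
            (int(cols.min()), int(cols.max()) + 1))
```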
<code>
[start of rasterio/__init__.py]
1 # rasterio
2
3 from collections import namedtuple
4 import logging
5 import os
6 import warnings
7
8 from rasterio._base import eval_window, window_shape, window_index
9 from rasterio._drivers import driver_count, GDALEnv
10 import rasterio.dtypes
11 from rasterio.dtypes import (
12 bool_, ubyte, uint8, uint16, int16, uint32, int32, float32, float64,
13 complex_)
14 from rasterio.five import string_types
15 from rasterio.profiles import default_gtiff_profile
16 from rasterio.transform import Affine, guard_transform
17
18 # These modules are imported from the Cython extensions, but are also import
19 # here to help tools like cx_Freeze find them automatically
20 from rasterio import _err, coords, enums
21
22 # Classes in rasterio._io are imported below just before we need them.
23
24 __all__ = [
25 'band', 'open', 'drivers', 'copy', 'pad']
26 __version__ = "0.29.0"
27
28 log = logging.getLogger('rasterio')
29 class NullHandler(logging.Handler):
30 def emit(self, record):
31 pass
32 log.addHandler(NullHandler())
33
34
35 def open(
36 path, mode='r',
37 driver=None,
38 width=None, height=None,
39 count=None,
40 crs=None, transform=None,
41 dtype=None,
42 nodata=None,
43 **kwargs):
44 """Open file at ``path`` in ``mode`` "r" (read), "r+" (read/write),
45 or "w" (write) and return a ``Reader`` or ``Updater`` object.
46
47 In write mode, a driver name such as "GTiff" or "JPEG" (see GDAL
48 docs or ``gdal_translate --help`` on the command line), ``width``
49 (number of pixels per line) and ``height`` (number of lines), the
50 ``count`` number of bands in the new file must be specified.
51 Additionally, the data type for bands such as ``rasterio.ubyte`` for
52 8-bit bands or ``rasterio.uint16`` for 16-bit bands must be
53 specified using the ``dtype`` argument.
54
55 A coordinate reference system for raster datasets in write mode can
56 be defined by the ``crs`` argument. It takes Proj4 style mappings
57 like
58
59 {'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84',
60 'no_defs': True}
61
62 An affine transformation that maps ``col,row`` pixel coordinates to
63 ``x,y`` coordinates in the coordinate reference system can be
64 specified using the ``transform`` argument. The value may be either
65 an instance of ``affine.Affine`` or a 6-element sequence of the
66 affine transformation matrix coefficients ``a, b, c, d, e, f``.
67 These coefficients are shown in the figure below.
68
69 | x | | a b c | | c |
70 | y | = | d e f | | r |
71 | 1 | | 0 0 1 | | 1 |
72
73 a: rate of change of X with respect to increasing column, i.e.
74 pixel width
75 b: rotation, 0 if the raster is oriented "north up"
76 c: X coordinate of the top left corner of the top left pixel
77 f: Y coordinate of the top left corner of the top left pixel
78 d: rotation, 0 if the raster is oriented "north up"
79 e: rate of change of Y with respect to increasing row, usually
80 a negative number i.e. -1 * pixel height
81 f: Y coordinate of the top left corner of the top left pixel
82
83 Finally, additional kwargs are passed to GDAL as driver-specific
84 dataset creation parameters.
85 """
86 if not isinstance(path, string_types):
87 raise TypeError("invalid path: %r" % path)
88 if mode and not isinstance(mode, string_types):
89 raise TypeError("invalid mode: %r" % mode)
90 if driver and not isinstance(driver, string_types):
91 raise TypeError("invalid driver: %r" % driver)
92
93 if transform:
94 transform = guard_transform(transform)
95 elif 'affine' in kwargs:
96 affine = kwargs.pop('affine')
97 transform = guard_transform(affine)
98
99 if mode == 'r':
100 from rasterio._io import RasterReader
101 s = RasterReader(path)
102 elif mode == 'r+':
103 from rasterio._io import writer
104 s = writer(path, mode)
105 elif mode == 'r-':
106 from rasterio._base import DatasetReader
107 s = DatasetReader(path)
108 elif mode == 'w':
109 from rasterio._io import writer
110 s = writer(path, mode, driver=driver,
111 width=width, height=height, count=count,
112 crs=crs, transform=transform, dtype=dtype,
113 nodata=nodata,
114 **kwargs)
115 else:
116 raise ValueError(
117 "mode string must be one of 'r', 'r+', or 'w', not %s" % mode)
118 s.start()
119 return s
120
121
122 def copy(src, dst, **kw):
123 """Copy a source dataset to a new destination with driver specific
124 creation options.
125
126 ``src`` must be an existing file and ``dst`` a valid output file.
127
128 A ``driver`` keyword argument with value like 'GTiff' or 'JPEG' is
129 used to control the output format.
130
131 This is the one way to create write-once files like JPEGs.
132 """
133 from rasterio._copy import RasterCopier
134 with drivers():
135 return RasterCopier()(src, dst, **kw)
136
137
138 def drivers(**kwargs):
139 """Returns a gdal environment with registered drivers."""
140 if driver_count() == 0:
141 log.debug("Creating a chief GDALEnv in drivers()")
142 return GDALEnv(True, **kwargs)
143 else:
144 log.debug("Creating a not-responsible GDALEnv in drivers()")
145 return GDALEnv(False, **kwargs)
146
147
148 Band = namedtuple('Band', ['ds', 'bidx', 'dtype', 'shape'])
149
150 def band(ds, bidx):
151 """Wraps a dataset and a band index up as a 'Band'"""
152 return Band(
153 ds,
154 bidx,
155 set(ds.dtypes).pop(),
156 ds.shape)
157
158
159 def pad(array, transform, pad_width, mode=None, **kwargs):
160 """Returns a padded array and shifted affine transform matrix.
161
162 Array is padded using `numpy.pad()`."""
163 import numpy
164 transform = guard_transform(transform)
165 padded_array = numpy.pad(array, pad_width, mode, **kwargs)
166 padded_trans = list(transform)
167 padded_trans[2] -= pad_width*padded_trans[0]
168 padded_trans[5] -= pad_width*padded_trans[4]
169 return padded_array, Affine(*padded_trans[:6])
170
[end of rasterio/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/rasterio/__init__.py b/rasterio/__init__.py
--- a/rasterio/__init__.py
+++ b/rasterio/__init__.py
@@ -167,3 +167,83 @@
padded_trans[2] -= pad_width*padded_trans[0]
padded_trans[5] -= pad_width*padded_trans[4]
return padded_array, Affine(*padded_trans[:6])
+
+
+def get_data_window(arr, nodata=None):
+ """
+ Returns a window for the non-nodata pixels within the input array.
+
+ Parameters
+ ----------
+ arr: numpy ndarray, <= 3 dimensions
+ nodata: number
+ If None, will either return a full window if arr is not a masked
+ array, or will use the mask to determine non-nodata pixels.
+ If provided, it must be a number within the valid range of the dtype
+ of the input array.
+
+ Returns
+ -------
+ ((row_start, row_stop), (col_start, col_stop))
+
+ """
+
+ from rasterio._io import get_data_window
+ return get_data_window(arr, nodata)
+
+
+def window_union(windows):
+ """
+ Union windows and return the outermost extent they cover.
+
+ Parameters
+ ----------
+ windows: list-like of window objects
+ ((row_start, row_stop), (col_start, col_stop))
+
+ Returns
+ -------
+ ((row_start, row_stop), (col_start, col_stop))
+ """
+
+ from rasterio._io import window_union
+ return window_union(windows)
+
+
+def window_intersection(windows):
+ """
+ Intersect windows and return the innermost extent they cover.
+
+ Will raise ValueError if windows do not intersect.
+
+ Parameters
+ ----------
+ windows: list-like of window objects
+ ((row_start, row_stop), (col_start, col_stop))
+
+ Returns
+ -------
+ ((row_start, row_stop), (col_start, col_stop))
+ """
+
+ from rasterio._io import window_intersection
+ return window_intersection(windows)
+
+
+def windows_intersect(windows):
+ """
+ Test if windows intersect.
+
+ Parameters
+ ----------
+ windows: list-like of window objects
+ ((row_start, row_stop), (col_start, col_stop))
+
+ Returns
+ -------
+ boolean:
+ True if all windows intersect.
+ """
+
+ from rasterio._io import windows_intersect
+ return windows_intersect(windows)
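A rough usage sketch of the helpers this diff exposes (invented data; the real work happens in `rasterio._io`, which is not shown here):

```python
import numpy as np
import rasterio

# Masked array whose outer ring is nodata; only the centre pixel carries data.
arr = np.ma.masked_equal(np.zeros((3, 3), dtype="uint8"), 0)
arr[1, 1] = 7

window = rasterio.get_data_window(arr)            # e.g. ((1, 2), (1, 2))
merged = rasterio.window_union([window, window])
print(rasterio.windows_intersect([window, merged]))
```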
| {"golden_diff": "diff --git a/rasterio/__init__.py b/rasterio/__init__.py\n--- a/rasterio/__init__.py\n+++ b/rasterio/__init__.py\n@@ -167,3 +167,83 @@\n padded_trans[2] -= pad_width*padded_trans[0]\n padded_trans[5] -= pad_width*padded_trans[4]\n return padded_array, Affine(*padded_trans[:6])\n+\n+\n+def get_data_window(arr, nodata=None):\n+ \"\"\"\n+ Returns a window for the non-nodata pixels within the input array.\n+\n+ Parameters\n+ ----------\n+ arr: numpy ndarray, <= 3 dimensions\n+ nodata: number\n+ If None, will either return a full window if arr is not a masked\n+ array, or will use the mask to determine non-nodata pixels.\n+ If provided, it must be a number within the valid range of the dtype\n+ of the input array.\n+\n+ Returns\n+ -------\n+ ((row_start, row_stop), (col_start, col_stop))\n+\n+ \"\"\"\n+\n+ from rasterio._io import get_data_window\n+ return get_data_window(arr, nodata)\n+\n+\n+def window_union(windows):\n+ \"\"\"\n+ Union windows and return the outermost extent they cover.\n+\n+ Parameters\n+ ----------\n+ windows: list-like of window objects\n+ ((row_start, row_stop), (col_start, col_stop))\n+\n+ Returns\n+ -------\n+ ((row_start, row_stop), (col_start, col_stop))\n+ \"\"\"\n+\n+ from rasterio._io import window_union\n+ return window_union(windows)\n+\n+\n+def window_intersection(windows):\n+ \"\"\"\n+ Intersect windows and return the innermost extent they cover.\n+\n+ Will raise ValueError if windows do not intersect.\n+\n+ Parameters\n+ ----------\n+ windows: list-like of window objects\n+ ((row_start, row_stop), (col_start, col_stop))\n+\n+ Returns\n+ -------\n+ ((row_start, row_stop), (col_start, col_stop))\n+ \"\"\"\n+\n+ from rasterio._io import window_intersection\n+ return window_intersection(windows)\n+\n+\n+def windows_intersect(windows):\n+ \"\"\"\n+ Test if windows intersect.\n+\n+ Parameters\n+ ----------\n+ windows: list-like of window objects\n+ ((row_start, row_stop), (col_start, col_stop))\n+\n+ Returns\n+ -------\n+ boolean:\n+ True if all windows intersect.\n+ \"\"\"\n+\n+ from rasterio._io import windows_intersect\n+ return windows_intersect(windows)\n", "issue": "Extract data bounds (excluding NODATA) from raster\nI have a need for auto-cropping rasters that have large amounts of `NODATA` around the periphery. 
To support that, it seems like we could use a general function similar to `bounds`, e.g., `data_bounds` which would return the minimum bounding rectangle of non `NODATA` pixels.\n\nI looked for prior works here and in scipy and didn't see anything obvious.\n\n@sgillies any issues with adding this here?\n\n", "before_files": [{"content": "# rasterio\n\nfrom collections import namedtuple\nimport logging\nimport os\nimport warnings\n\nfrom rasterio._base import eval_window, window_shape, window_index\nfrom rasterio._drivers import driver_count, GDALEnv\nimport rasterio.dtypes\nfrom rasterio.dtypes import (\n bool_, ubyte, uint8, uint16, int16, uint32, int32, float32, float64,\n complex_)\nfrom rasterio.five import string_types\nfrom rasterio.profiles import default_gtiff_profile\nfrom rasterio.transform import Affine, guard_transform\n\n# These modules are imported from the Cython extensions, but are also import\n# here to help tools like cx_Freeze find them automatically\nfrom rasterio import _err, coords, enums\n\n# Classes in rasterio._io are imported below just before we need them.\n\n__all__ = [\n 'band', 'open', 'drivers', 'copy', 'pad']\n__version__ = \"0.29.0\"\n\nlog = logging.getLogger('rasterio')\nclass NullHandler(logging.Handler):\n def emit(self, record):\n pass\nlog.addHandler(NullHandler())\n\n\ndef open(\n path, mode='r', \n driver=None,\n width=None, height=None,\n count=None,\n crs=None, transform=None,\n dtype=None,\n nodata=None,\n **kwargs):\n \"\"\"Open file at ``path`` in ``mode`` \"r\" (read), \"r+\" (read/write),\n or \"w\" (write) and return a ``Reader`` or ``Updater`` object.\n \n In write mode, a driver name such as \"GTiff\" or \"JPEG\" (see GDAL\n docs or ``gdal_translate --help`` on the command line), ``width``\n (number of pixels per line) and ``height`` (number of lines), the\n ``count`` number of bands in the new file must be specified.\n Additionally, the data type for bands such as ``rasterio.ubyte`` for\n 8-bit bands or ``rasterio.uint16`` for 16-bit bands must be\n specified using the ``dtype`` argument.\n\n A coordinate reference system for raster datasets in write mode can\n be defined by the ``crs`` argument. It takes Proj4 style mappings\n like\n \n {'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84',\n 'no_defs': True}\n\n An affine transformation that maps ``col,row`` pixel coordinates to\n ``x,y`` coordinates in the coordinate reference system can be\n specified using the ``transform`` argument. The value may be either\n an instance of ``affine.Affine`` or a 6-element sequence of the\n affine transformation matrix coefficients ``a, b, c, d, e, f``.\n These coefficients are shown in the figure below.\n\n | x | | a b c | | c |\n | y | = | d e f | | r |\n | 1 | | 0 0 1 | | 1 |\n\n a: rate of change of X with respect to increasing column, i.e.\n pixel width\n b: rotation, 0 if the raster is oriented \"north up\" \n c: X coordinate of the top left corner of the top left pixel \n f: Y coordinate of the top left corner of the top left pixel \n d: rotation, 0 if the raster is oriented \"north up\"\n e: rate of change of Y with respect to increasing row, usually\n a negative number i.e. 
-1 * pixel height\n f: Y coordinate of the top left corner of the top left pixel \n\n Finally, additional kwargs are passed to GDAL as driver-specific\n dataset creation parameters.\n \"\"\"\n if not isinstance(path, string_types):\n raise TypeError(\"invalid path: %r\" % path)\n if mode and not isinstance(mode, string_types):\n raise TypeError(\"invalid mode: %r\" % mode)\n if driver and not isinstance(driver, string_types):\n raise TypeError(\"invalid driver: %r\" % driver)\n\n if transform:\n transform = guard_transform(transform)\n elif 'affine' in kwargs:\n affine = kwargs.pop('affine')\n transform = guard_transform(affine)\n\n if mode == 'r':\n from rasterio._io import RasterReader\n s = RasterReader(path)\n elif mode == 'r+':\n from rasterio._io import writer\n s = writer(path, mode)\n elif mode == 'r-':\n from rasterio._base import DatasetReader\n s = DatasetReader(path)\n elif mode == 'w':\n from rasterio._io import writer\n s = writer(path, mode, driver=driver,\n width=width, height=height, count=count,\n crs=crs, transform=transform, dtype=dtype,\n nodata=nodata,\n **kwargs)\n else:\n raise ValueError(\n \"mode string must be one of 'r', 'r+', or 'w', not %s\" % mode)\n s.start()\n return s\n\n\ndef copy(src, dst, **kw):\n \"\"\"Copy a source dataset to a new destination with driver specific\n creation options.\n\n ``src`` must be an existing file and ``dst`` a valid output file.\n\n A ``driver`` keyword argument with value like 'GTiff' or 'JPEG' is\n used to control the output format.\n \n This is the one way to create write-once files like JPEGs.\n \"\"\"\n from rasterio._copy import RasterCopier\n with drivers():\n return RasterCopier()(src, dst, **kw)\n\n\ndef drivers(**kwargs):\n \"\"\"Returns a gdal environment with registered drivers.\"\"\"\n if driver_count() == 0:\n log.debug(\"Creating a chief GDALEnv in drivers()\")\n return GDALEnv(True, **kwargs)\n else:\n log.debug(\"Creating a not-responsible GDALEnv in drivers()\")\n return GDALEnv(False, **kwargs)\n\n\nBand = namedtuple('Band', ['ds', 'bidx', 'dtype', 'shape'])\n\ndef band(ds, bidx):\n \"\"\"Wraps a dataset and a band index up as a 'Band'\"\"\"\n return Band(\n ds, \n bidx, \n set(ds.dtypes).pop(),\n ds.shape)\n\n\ndef pad(array, transform, pad_width, mode=None, **kwargs):\n \"\"\"Returns a padded array and shifted affine transform matrix.\n \n Array is padded using `numpy.pad()`.\"\"\"\n import numpy\n transform = guard_transform(transform)\n padded_array = numpy.pad(array, pad_width, mode, **kwargs)\n padded_trans = list(transform)\n padded_trans[2] -= pad_width*padded_trans[0]\n padded_trans[5] -= pad_width*padded_trans[4]\n return padded_array, Affine(*padded_trans[:6])\n", "path": "rasterio/__init__.py"}]} | 2,575 | 590 |
gh_patches_debug_2172 | rasdani/github-patches | git_diff | liqd__a4-opin-1799 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Changing the Organisation Details is not possible
**URL:**
https://opin.me/en/dashboard/organisations/liquid-democracy/settings/
**user:**
Initiators who try to fill in the organisation details, and admins as well.
**expected behaviour:**
If I fill in the organisation details and save them, they are stored.
**behaviour:**
I fill in the organisation details and press save; the page reloads, but the changes are not saved.
**important screensize:**
**device & browser:**
Firefox 73.0.1 (64-Bit)
**Comment/Question:**
Screenshot?
</issue>
<code>
[start of euth/dashboard/forms.py]
1
2 import parler
3 from django import forms
4 from django.conf import settings
5 from django.core.exceptions import ValidationError
6 from django.utils.translation import ugettext_lazy as _
7
8 from euth.organisations.models import Organisation
9
10
11 class OrganisationForm(forms.ModelForm):
12 translated_fields = [
13 ('description_why', forms.CharField, {
14 'label': _('description why'),
15 'widget': forms.Textarea,
16 }),
17 ('description_how', forms.CharField, {
18 'widget': forms.Textarea,
19 'label': _('description how')
20 }),
21 ('description', forms.CharField, {
22 'label': _('description'),
23 'help_text': _(
24 'More info about the organisation / '
25 'Short text for organisation overview'),
26 'widget': forms.Textarea,
27 })
28 ]
29 languages = [lang_code for lang_code, lang in settings.LANGUAGES]
30
31 class Meta:
32 model = Organisation
33 fields = [
34 'name', 'image', 'logo', 'twitter_handle', 'facebook_handle',
35 'instagram_handle', 'webpage', 'country', 'place'
36 ]
37 help_texts = {
38 'name': _('The title of your organisation'),
39 }
40
41 def _get_identifier(self, language, fieldname):
42 return '{}__{}'.format(language, fieldname)
43
44 def __init__(self, *args, **kwargs):
45 super().__init__(*args, **kwargs)
46
47 # inject additional form fields for translated model fields
48 for lang_code in self.languages:
49 for name, field_cls, kwargs in self.translated_fields:
50 self.instance.set_current_language(lang_code)
51 field = field_cls(**kwargs)
52 identifier = self._get_identifier(
53 lang_code, name)
54 field.required = False
55
56 try:
57 translation = self.instance.get_translation(lang_code)
58 initial = getattr(translation, name)
59 except parler.models.TranslationDoesNotExist:
60 initial = ''
61
62 field.initial = initial
63 self.fields[identifier] = field
64
65 def translated(self):
66 """
67 Return translated fields as list of tuples (language code, fields).
68 """
69
70 from itertools import groupby
71 fields = [(field.html_name.split('__')[0], field) for field in self
72 if '__' in field.html_name]
73 groups = groupby(fields, lambda x: x[0])
74 values = [(lang, list(map(lambda x: x[1], group)))
75 for lang, group in groups]
76 return values
77
78 def untranslated(self):
79 """
80 Return untranslated fields as flat list.
81 """
82 return [field for field in self if '__' not in field.html_name]
83
84 def prefiled_languages(self):
85 """
86 Return languages tabs that need to be displayed.
87 """
88 languages = [lang for lang in self.languages
89 if lang in self.data
90 or self.instance.has_translation(lang)]
91 # always provide english
92 if 'en' not in languages:
93 languages.insert(0, 'en')
94 return languages
95
96 def save(self, commit=True):
97 instance = super().save(commit=commit)
98 if commit is True:
99 for lang_code in self.languages:
100 if lang_code in self.data:
101 instance.set_current_language(lang_code)
102 for fieldname, _cls, _kwargs in self.translated_fields:
103 identifier = '{}__{}'.format(lang_code, fieldname)
104 setattr(instance, fieldname,
105 self.cleaned_data.get(identifier))
106 instance.save()
107 elif instance.has_translation(lang_code):
108 instance.delete_translation(lang_code)
109 return instance
110
111 def clean(self):
112 for lang_code in self.languages:
113 if lang_code in self.data:
114 for fieldname in self.translated_fields:
115 identifier = self._get_identifier(lang_code, fieldname[0])
116 data = self.cleaned_data
117 if identifier not in data or not data[identifier]:
118 msg = 'This field is required'
119 raise ValidationError((identifier, msg))
120
121 return self.cleaned_data
122
[end of euth/dashboard/forms.py]
</code>
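For orientation: the form above injects one extra field per language/translated-field pair, named `<lang>__<field>`. A brief sketch of what that looks like at runtime (assuming a two-language `LANGUAGES` setting; `organisation` stands in for any existing instance):

```python
# With settings.LANGUAGES = [('en', 'English'), ('de', 'German')] the form gains
# en__description_why, en__description_how, en__description,
# de__description_why, de__description_how, de__description
# 'organisation' is an existing Organisation instance (hypothetical).
form = OrganisationForm(instance=organisation)
for lang, fields in form.translated():
    print(lang, [field.html_name for field in fields])
```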
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/euth/dashboard/forms.py b/euth/dashboard/forms.py
--- a/euth/dashboard/forms.py
+++ b/euth/dashboard/forms.py
@@ -81,7 +81,7 @@
"""
return [field for field in self if '__' not in field.html_name]
- def prefiled_languages(self):
+ def prefilled_languages(self):
"""
Return languages tabs that need to be displayed.
"""
| {"golden_diff": "diff --git a/euth/dashboard/forms.py b/euth/dashboard/forms.py\n--- a/euth/dashboard/forms.py\n+++ b/euth/dashboard/forms.py\n@@ -81,7 +81,7 @@\n \"\"\"\n return [field for field in self if '__' not in field.html_name]\n \n- def prefiled_languages(self):\n+ def prefilled_languages(self):\n \"\"\"\n Return languages tabs that need to be displayed.\n \"\"\"\n", "issue": "Changing the Organisation Details is not possible\n**URL:** \r\nhttps://opin.me/en/dashboard/organisations/liquid-democracy/settings/\r\n**user:** \r\nInitiators, who try to fill in the Organisations details & as an admin too.\r\n**expected behaviour:** \r\nIf I fill in Organisation details, save them and it is there\r\n**behaviour:** \r\nI fill in the Organisation details, press save and it reloads, but do not save.\r\n**important screensize:**\r\n\r\n**device & browser:** \r\nFirefox 73.0.1 (64-Bit)\r\n**Comment/Question:** \r\n\r\nScreenshot?\r\n\n", "before_files": [{"content": "\nimport parler\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom euth.organisations.models import Organisation\n\n\nclass OrganisationForm(forms.ModelForm):\n translated_fields = [\n ('description_why', forms.CharField, {\n 'label': _('description why'),\n 'widget': forms.Textarea,\n }),\n ('description_how', forms.CharField, {\n 'widget': forms.Textarea,\n 'label': _('description how')\n }),\n ('description', forms.CharField, {\n 'label': _('description'),\n 'help_text': _(\n 'More info about the organisation / '\n 'Short text for organisation overview'),\n 'widget': forms.Textarea,\n })\n ]\n languages = [lang_code for lang_code, lang in settings.LANGUAGES]\n\n class Meta:\n model = Organisation\n fields = [\n 'name', 'image', 'logo', 'twitter_handle', 'facebook_handle',\n 'instagram_handle', 'webpage', 'country', 'place'\n ]\n help_texts = {\n 'name': _('The title of your organisation'),\n }\n\n def _get_identifier(self, language, fieldname):\n return '{}__{}'.format(language, fieldname)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # inject additional form fields for translated model fields\n for lang_code in self.languages:\n for name, field_cls, kwargs in self.translated_fields:\n self.instance.set_current_language(lang_code)\n field = field_cls(**kwargs)\n identifier = self._get_identifier(\n lang_code, name)\n field.required = False\n\n try:\n translation = self.instance.get_translation(lang_code)\n initial = getattr(translation, name)\n except parler.models.TranslationDoesNotExist:\n initial = ''\n\n field.initial = initial\n self.fields[identifier] = field\n\n def translated(self):\n \"\"\"\n Return translated fields as list of tuples (language code, fields).\n \"\"\"\n\n from itertools import groupby\n fields = [(field.html_name.split('__')[0], field) for field in self\n if '__' in field.html_name]\n groups = groupby(fields, lambda x: x[0])\n values = [(lang, list(map(lambda x: x[1], group)))\n for lang, group in groups]\n return values\n\n def untranslated(self):\n \"\"\"\n Return untranslated fields as flat list.\n \"\"\"\n return [field for field in self if '__' not in field.html_name]\n\n def prefiled_languages(self):\n \"\"\"\n Return languages tabs that need to be displayed.\n \"\"\"\n languages = [lang for lang in self.languages\n if lang in self.data\n or self.instance.has_translation(lang)]\n # always provide english\n if 'en' not in languages:\n 
languages.insert(0, 'en')\n return languages\n\n def save(self, commit=True):\n instance = super().save(commit=commit)\n if commit is True:\n for lang_code in self.languages:\n if lang_code in self.data:\n instance.set_current_language(lang_code)\n for fieldname, _cls, _kwargs in self.translated_fields:\n identifier = '{}__{}'.format(lang_code, fieldname)\n setattr(instance, fieldname,\n self.cleaned_data.get(identifier))\n instance.save()\n elif instance.has_translation(lang_code):\n instance.delete_translation(lang_code)\n return instance\n\n def clean(self):\n for lang_code in self.languages:\n if lang_code in self.data:\n for fieldname in self.translated_fields:\n identifier = self._get_identifier(lang_code, fieldname[0])\n data = self.cleaned_data\n if identifier not in data or not data[identifier]:\n msg = 'This field is required'\n raise ValidationError((identifier, msg))\n\n return self.cleaned_data\n", "path": "euth/dashboard/forms.py"}]} | 1,757 | 95 |
gh_patches_debug_16640 | rasdani/github-patches | git_diff | PrefectHQ__prefect-9724 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PrefectHTTPStatusError: Client error '429 Too Many Requests' for url
### First check
- [X] I added a descriptive title to this issue.
- [X] I used the GitHub search to find a similar issue and didn't find it.
- [X] I searched the Prefect documentation for this issue.
- [X] I checked that this issue is related to Prefect and not one of its dependencies.
### Bug summary
While using `prefect` with `prefect-dask` I encountered a rate limit error. This shouldn't be happening, as the Prefect client base should retry on those responses. I'm not sure why it happens, but it first appeared in `2.10.10` and did not exist before.
### Reproduction
```python3
Any Flow with prefect-dask
```
### Error
```python3
Traceback (most recent call last):
File "/usr/local/lib/python3.11/dist-packages/distributed/client.py", line 1697, in _close
await self.scheduler_comm.close()
asyncio.exceptions.CancelledError
01:00:08.452 | ERROR | Flow run 'psi5-alastria-x' - Crash detected! Execution was interrupted by an unexpected exception: PrefectHTTPStatusError: Client error '429 Too Many Requests' for url 'https://cloud-url/task_runs/'
Response: {'detail': 'Orchestration API rate limit reached'}
For more information check: https://httpstatuses.com/429
```
### Versions
```Text
Version: 2.10.10
API version: 0.8.4
Python version: 3.11.2
Git commit: 8159450b
Built: Thu, May 18, 2023 3:43 PM
OS/Arch: linux/x86_64
Profile: default
Server type: server
```
### Additional context
_No response_
</issue>
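The report boils down to the Cloud client not retrying HTTP 429 responses. A minimal sketch of the kind of retrying `httpx` client the reporter expects (an illustration only, not Prefect's actual implementation):

```python
import asyncio
import httpx


class RetryOn429Client(httpx.AsyncClient):
    async def send(self, request, **kwargs):
        response = await super().send(request, **kwargs)
        for attempt in range(5):
            if response.status_code != 429:
                break
            # Honour Retry-After when present, otherwise back off exponentially.
            wait = float(response.headers.get("Retry-After", 2 ** attempt))
            await asyncio.sleep(wait)
            response = await super().send(request, **kwargs)
        return response
```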
<code>
[start of src/prefect/client/cloud.py]
1 import re
2 from typing import Any, Dict, List, Optional
3
4 import anyio
5 import httpx
6 import pydantic
7 from fastapi import status
8
9 import prefect.context
10 import prefect.settings
11 from prefect.client.schemas import Workspace
12 from prefect.exceptions import PrefectException
13 from prefect.settings import PREFECT_API_KEY, PREFECT_CLOUD_API_URL
14
15
16 def get_cloud_client(
17 host: Optional[str] = None,
18 api_key: Optional[str] = None,
19 httpx_settings: Optional[dict] = None,
20 infer_cloud_url: bool = False,
21 ) -> "CloudClient":
22 """
23 Needs a docstring.
24 """
25 if httpx_settings is not None:
26 httpx_settings = httpx_settings.copy()
27
28 if infer_cloud_url is False:
29 host = host or PREFECT_CLOUD_API_URL.value()
30 else:
31 configured_url = prefect.settings.PREFECT_API_URL.value()
32 host = re.sub(r"accounts/.{36}/workspaces/.{36}\Z", "", configured_url)
33
34 return CloudClient(
35 host=host,
36 api_key=api_key or PREFECT_API_KEY.value(),
37 httpx_settings=httpx_settings,
38 )
39
40
41 class CloudUnauthorizedError(PrefectException):
42 """
43 Raised when the CloudClient receives a 401 or 403 from the Cloud API.
44 """
45
46
47 class CloudClient:
48 def __init__(
49 self,
50 host: str,
51 api_key: str,
52 httpx_settings: dict = None,
53 ) -> None:
54 httpx_settings = httpx_settings or dict()
55 httpx_settings.setdefault("headers", dict())
56 httpx_settings["headers"].setdefault("Authorization", f"Bearer {api_key}")
57
58 httpx_settings.setdefault("base_url", host)
59 self._client = httpx.AsyncClient(**httpx_settings)
60
61 async def api_healthcheck(self):
62 """
63 Attempts to connect to the Cloud API and raises the encountered exception if not
64 successful.
65
66 If successful, returns `None`.
67 """
68 with anyio.fail_after(10):
69 await self.read_workspaces()
70
71 async def read_workspaces(self) -> List[Workspace]:
72 return pydantic.parse_obj_as(List[Workspace], await self.get("/me/workspaces"))
73
74 async def read_worker_metadata(self) -> Dict[str, Any]:
75 return await self.get("collections/views/aggregate-worker-metadata")
76
77 async def __aenter__(self):
78 await self._client.__aenter__()
79 return self
80
81 async def __aexit__(self, *exc_info):
82 return await self._client.__aexit__(*exc_info)
83
84 def __enter__(self):
85 raise RuntimeError(
86 "The `CloudClient` must be entered with an async context. Use 'async "
87 "with CloudClient(...)' not 'with CloudClient(...)'"
88 )
89
90 def __exit__(self, *_):
91 assert False, "This should never be called but must be defined for __enter__"
92
93 async def get(self, route, **kwargs):
94 try:
95 res = await self._client.get(route, **kwargs)
96 res.raise_for_status()
97 except httpx.HTTPStatusError as exc:
98 if exc.response.status_code in (
99 status.HTTP_401_UNAUTHORIZED,
100 status.HTTP_403_FORBIDDEN,
101 ):
102 raise CloudUnauthorizedError
103 else:
104 raise exc
105
106 return res.json()
107
[end of src/prefect/client/cloud.py]
</code>
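For context, `CloudClient` is meant to be used as an async context manager; a typical call path looks roughly like this (a sketch based only on the code shown above):

```python
import asyncio

from prefect.client.cloud import get_cloud_client


async def main():
    async with get_cloud_client() as client:
        await client.api_healthcheck()
        print(await client.read_workspaces())


asyncio.run(main())
```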
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/src/prefect/client/cloud.py b/src/prefect/client/cloud.py
--- a/src/prefect/client/cloud.py
+++ b/src/prefect/client/cloud.py
@@ -8,6 +8,7 @@
import prefect.context
import prefect.settings
+from prefect.client.base import PrefectHttpxClient
from prefect.client.schemas import Workspace
from prefect.exceptions import PrefectException
from prefect.settings import PREFECT_API_KEY, PREFECT_CLOUD_API_URL
@@ -56,7 +57,7 @@
httpx_settings["headers"].setdefault("Authorization", f"Bearer {api_key}")
httpx_settings.setdefault("base_url", host)
- self._client = httpx.AsyncClient(**httpx_settings)
+ self._client = PrefectHttpxClient(**httpx_settings)
async def api_healthcheck(self):
"""
| {"golden_diff": "diff --git a/src/prefect/client/cloud.py b/src/prefect/client/cloud.py\n--- a/src/prefect/client/cloud.py\n+++ b/src/prefect/client/cloud.py\n@@ -8,6 +8,7 @@\n \n import prefect.context\n import prefect.settings\n+from prefect.client.base import PrefectHttpxClient\n from prefect.client.schemas import Workspace\n from prefect.exceptions import PrefectException\n from prefect.settings import PREFECT_API_KEY, PREFECT_CLOUD_API_URL\n@@ -56,7 +57,7 @@\n httpx_settings[\"headers\"].setdefault(\"Authorization\", f\"Bearer {api_key}\")\n \n httpx_settings.setdefault(\"base_url\", host)\n- self._client = httpx.AsyncClient(**httpx_settings)\n+ self._client = PrefectHttpxClient(**httpx_settings)\n \n async def api_healthcheck(self):\n \"\"\"\n", "issue": "PrefectHTTPStatusError: Client error '429 Too Many Requests' for url\n### First check\r\n\r\n- [X] I added a descriptive title to this issue.\r\n- [X] I used the GitHub search to find a similar issue and didn't find it.\r\n- [X] I searched the Prefect documentation for this issue.\r\n- [X] I checked that this issue is related to Prefect and not one of its dependencies.\r\n\r\n### Bug summary\r\n\r\nWhile using `prefect` with `prefect-dask` I encountered a rate limit error. this shouldn't be happening as prefect client base should retry on those. I'm not sure why this is happening but this has risen at `2.10.10` and did not exist before\r\n\r\n### Reproduction\r\n\r\n```python3\r\nAny Flow with prefect-dask\r\n```\r\n\r\n\r\n### Error\r\n\r\n```python3\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.11/dist-packages/distributed/client.py\", line 1697, in _close\r\n await self.scheduler_comm.close()\r\nasyncio.exceptions.CancelledError\r\n01:00:08.452 | ERROR | Flow run 'psi5-alastria-x' - Crash detected! 
Execution was interrupted by an unexpected exception: PrefectHTTPStatusError: Client error '429 Too Many Requests' for url 'https://cloud-url/task_runs/'\r\nResponse: {'detail': 'Orchestration API rate limit reached'}\r\nFor more information check: https://httpstatuses.com/429\r\n```\r\n\r\n\r\n### Versions\r\n\r\n```Text\r\nVersion: 2.10.10\r\nAPI version: 0.8.4\r\nPython version: 3.11.2\r\nGit commit: 8159450b\r\nBuilt: Thu, May 18, 2023 3:43 PM\r\nOS/Arch: linux/x86_64\r\nProfile: default\r\nServer type: server\r\n```\r\n\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "import re\nfrom typing import Any, Dict, List, Optional\n\nimport anyio\nimport httpx\nimport pydantic\nfrom fastapi import status\n\nimport prefect.context\nimport prefect.settings\nfrom prefect.client.schemas import Workspace\nfrom prefect.exceptions import PrefectException\nfrom prefect.settings import PREFECT_API_KEY, PREFECT_CLOUD_API_URL\n\n\ndef get_cloud_client(\n host: Optional[str] = None,\n api_key: Optional[str] = None,\n httpx_settings: Optional[dict] = None,\n infer_cloud_url: bool = False,\n) -> \"CloudClient\":\n \"\"\"\n Needs a docstring.\n \"\"\"\n if httpx_settings is not None:\n httpx_settings = httpx_settings.copy()\n\n if infer_cloud_url is False:\n host = host or PREFECT_CLOUD_API_URL.value()\n else:\n configured_url = prefect.settings.PREFECT_API_URL.value()\n host = re.sub(r\"accounts/.{36}/workspaces/.{36}\\Z\", \"\", configured_url)\n\n return CloudClient(\n host=host,\n api_key=api_key or PREFECT_API_KEY.value(),\n httpx_settings=httpx_settings,\n )\n\n\nclass CloudUnauthorizedError(PrefectException):\n \"\"\"\n Raised when the CloudClient receives a 401 or 403 from the Cloud API.\n \"\"\"\n\n\nclass CloudClient:\n def __init__(\n self,\n host: str,\n api_key: str,\n httpx_settings: dict = None,\n ) -> None:\n httpx_settings = httpx_settings or dict()\n httpx_settings.setdefault(\"headers\", dict())\n httpx_settings[\"headers\"].setdefault(\"Authorization\", f\"Bearer {api_key}\")\n\n httpx_settings.setdefault(\"base_url\", host)\n self._client = httpx.AsyncClient(**httpx_settings)\n\n async def api_healthcheck(self):\n \"\"\"\n Attempts to connect to the Cloud API and raises the encountered exception if not\n successful.\n\n If successful, returns `None`.\n \"\"\"\n with anyio.fail_after(10):\n await self.read_workspaces()\n\n async def read_workspaces(self) -> List[Workspace]:\n return pydantic.parse_obj_as(List[Workspace], await self.get(\"/me/workspaces\"))\n\n async def read_worker_metadata(self) -> Dict[str, Any]:\n return await self.get(\"collections/views/aggregate-worker-metadata\")\n\n async def __aenter__(self):\n await self._client.__aenter__()\n return self\n\n async def __aexit__(self, *exc_info):\n return await self._client.__aexit__(*exc_info)\n\n def __enter__(self):\n raise RuntimeError(\n \"The `CloudClient` must be entered with an async context. Use 'async \"\n \"with CloudClient(...)' not 'with CloudClient(...)'\"\n )\n\n def __exit__(self, *_):\n assert False, \"This should never be called but must be defined for __enter__\"\n\n async def get(self, route, **kwargs):\n try:\n res = await self._client.get(route, **kwargs)\n res.raise_for_status()\n except httpx.HTTPStatusError as exc:\n if exc.response.status_code in (\n status.HTTP_401_UNAUTHORIZED,\n status.HTTP_403_FORBIDDEN,\n ):\n raise CloudUnauthorizedError\n else:\n raise exc\n\n return res.json()\n", "path": "src/prefect/client/cloud.py"}]} | 1,929 | 188 |
gh_patches_debug_11073 | rasdani/github-patches | git_diff | mozmeao__snippets-service-1398 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Re-generate all bundles with Distribution changes after Timestamp
</issue>
<code>
[start of snippets/base/management/commands/generate_bundles.py]
1 import os
2 import json
3 import itertools
4 from datetime import datetime
5
6 import brotli
7 from product_details import product_details
8
9 from django.conf import settings
10 from django.core.files.base import ContentFile
11 from django.core.management.base import BaseCommand
12 from django.db.models import Q
13 from django.core.files.storage import default_storage
14
15 from snippets.base.models import DistributionBundle, Job
16
17
18 class Command(BaseCommand):
19 args = '(no args)'
20 help = 'Generate bundles'
21
22 def add_arguments(self, parser):
23 # Named (optional) arguments
24 parser.add_argument(
25 '--timestamp',
26 help='Parse Jobs last modified after <timestamp>',
27 )
28
29 def handle(self, *args, **options):
30 if not options['timestamp']:
31 self.stdout.write('Generating all bundles.')
32 total_jobs = Job.objects.all()
33 else:
34 self.stdout.write(
35 'Generating bundles with Jobs modified on or after {}'.format(options['timestamp'])
36 )
37 total_jobs = Job.objects.filter(snippet__modified__gte=options['timestamp'])
38
39 if not total_jobs:
40 self.stdout.write('Nothing to do…')
41 return
42
43 self.stdout.write('Processing bundles…')
44
45 combinations_to_process = set(
46 itertools.chain.from_iterable(
47 itertools.product(
48 job.channels,
49 job.snippet.locale.code.strip(',').split(',')
50 )
51 for job in total_jobs
52 )
53 )
54 distribution_bundles_to_process = DistributionBundle.objects.filter(
55 distributions__jobs__in=total_jobs
56 ).distinct().order_by('id')
57
58 for distribution_bundle in distribution_bundles_to_process:
59 distributions = distribution_bundle.distributions.all()
60
61 for channel, locale in combinations_to_process:
62 additional_jobs = []
63 if channel == 'nightly' and settings.NIGHTLY_INCLUDES_RELEASE:
64 additional_jobs = Job.objects.filter(
65 status=Job.PUBLISHED).filter(**{
66 'targets__on_release': True,
67 'distribution__in': distributions,
68 })
69
70 channel_jobs = Job.objects.filter(
71 status=Job.PUBLISHED).filter(
72 Q(**{
73 'targets__on_{}'.format(channel): True,
74 'distribution__in': distributions,
75 }))
76
77 all_jobs = Job.objects.filter(
78 Q(id__in=additional_jobs) | Q(id__in=channel_jobs)
79 )
80
81 locales_to_process = [
82 key.lower() for key in product_details.languages.keys()
83 if key.lower().startswith(locale)
84 ]
85
86 for locale_to_process in locales_to_process:
87 filename = 'Firefox/{channel}/{locale}/{distribution}.json'.format(
88 channel=channel,
89 locale=locale_to_process,
90 distribution=distribution_bundle.code_name,
91 )
92 filename = os.path.join(settings.MEDIA_BUNDLES_PREGEN_ROOT, filename)
93 full_locale = ',{},'.format(locale_to_process.lower())
94 splitted_locale = ',{},'.format(locale_to_process.lower().split('-', 1)[0])
95 bundle_jobs = all_jobs.filter(
96 Q(snippet__locale__code__contains=splitted_locale) |
97 Q(snippet__locale__code__contains=full_locale)).distinct()
98
99 # If DistributionBundle is not enabled, or if there are no
100 # Published Jobs for the channel / locale / distribution
101 # combination, delete the current bundle file if it exists.
102 if not distribution_bundle.enabled or not bundle_jobs.exists():
103 if default_storage.exists(filename):
104 self.stdout.write('Removing {}'.format(filename))
105 default_storage.delete(filename)
106 continue
107
108 data = []
109 channel_job_ids = list(channel_jobs.values_list('id', flat=True))
110 for job in bundle_jobs:
111 if job.id in channel_job_ids:
112 render = job.render()
113 else:
114 render = job.render(always_eval_to_false=True)
115 data.append(render)
116
117 bundle_content = json.dumps({
118 'messages': data,
119 'metadata': {
120 'generated_at': datetime.utcnow().isoformat(),
121 'number_of_snippets': len(data),
122 'channel': channel,
123 }
124 })
125
126 # Convert str to bytes.
127 if isinstance(bundle_content, str):
128 bundle_content = bundle_content.encode('utf-8')
129
130 if settings.BUNDLE_BROTLI_COMPRESS:
131 content_file = ContentFile(brotli.compress(bundle_content))
132 content_file.content_encoding = 'br'
133 else:
134 content_file = ContentFile(bundle_content)
135
136 default_storage.save(filename, content_file)
137 self.stdout.write(self.style.SUCCESS('Writing bundle {}'.format(filename)))
138
[end of snippets/base/management/commands/generate_bundles.py]
</code>
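The command above writes one bundle per channel/locale/distribution combination; the target path comes from a fixed template (the values below are hypothetical):

```python
filename = 'Firefox/{channel}/{locale}/{distribution}.json'.format(
    channel='release',
    locale='en-us',
    distribution='default',
)
# -> 'Firefox/release/en-us/default.json', stored under MEDIA_BUNDLES_PREGEN_ROOT
```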
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/snippets/base/management/commands/generate_bundles.py b/snippets/base/management/commands/generate_bundles.py
--- a/snippets/base/management/commands/generate_bundles.py
+++ b/snippets/base/management/commands/generate_bundles.py
@@ -34,7 +34,10 @@
self.stdout.write(
'Generating bundles with Jobs modified on or after {}'.format(options['timestamp'])
)
- total_jobs = Job.objects.filter(snippet__modified__gte=options['timestamp'])
+ total_jobs = Job.objects.filter(
+ Q(snippet__modified__gte=options['timestamp']) |
+ Q(distribution__distributionbundle__modified__gte=options['timestamp'])
+ ).distinct()
if not total_jobs:
self.stdout.write('Nothing to do…')
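With this change a run like the sketch below (hypothetical timestamp) also picks up Jobs whose `DistributionBundle` changed after the given point in time, and `.distinct()` keeps a Job from being processed twice when both conditions match:

```python
from django.core.management import call_command

# Equivalent to: ./manage.py generate_bundles --timestamp "2020-03-01T00:00:00"
call_command("generate_bundles", timestamp="2020-03-01T00:00:00")
```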
| {"golden_diff": "diff --git a/snippets/base/management/commands/generate_bundles.py b/snippets/base/management/commands/generate_bundles.py\n--- a/snippets/base/management/commands/generate_bundles.py\n+++ b/snippets/base/management/commands/generate_bundles.py\n@@ -34,7 +34,10 @@\n self.stdout.write(\n 'Generating bundles with Jobs modified on or after {}'.format(options['timestamp'])\n )\n- total_jobs = Job.objects.filter(snippet__modified__gte=options['timestamp'])\n+ total_jobs = Job.objects.filter(\n+ Q(snippet__modified__gte=options['timestamp']) |\n+ Q(distribution__distributionbundle__modified__gte=options['timestamp'])\n+ ).distinct()\n \n if not total_jobs:\n self.stdout.write('Nothing to do\u2026')\n", "issue": "Re-generate all bundles with Distribution changes after Timestamp\n\n", "before_files": [{"content": "import os\nimport json\nimport itertools\nfrom datetime import datetime\n\nimport brotli\nfrom product_details import product_details\n\nfrom django.conf import settings\nfrom django.core.files.base import ContentFile\nfrom django.core.management.base import BaseCommand\nfrom django.db.models import Q\nfrom django.core.files.storage import default_storage\n\nfrom snippets.base.models import DistributionBundle, Job\n\n\nclass Command(BaseCommand):\n args = '(no args)'\n help = 'Generate bundles'\n\n def add_arguments(self, parser):\n # Named (optional) arguments\n parser.add_argument(\n '--timestamp',\n help='Parse Jobs last modified after <timestamp>',\n )\n\n def handle(self, *args, **options):\n if not options['timestamp']:\n self.stdout.write('Generating all bundles.')\n total_jobs = Job.objects.all()\n else:\n self.stdout.write(\n 'Generating bundles with Jobs modified on or after {}'.format(options['timestamp'])\n )\n total_jobs = Job.objects.filter(snippet__modified__gte=options['timestamp'])\n\n if not total_jobs:\n self.stdout.write('Nothing to do\u2026')\n return\n\n self.stdout.write('Processing bundles\u2026')\n\n combinations_to_process = set(\n itertools.chain.from_iterable(\n itertools.product(\n job.channels,\n job.snippet.locale.code.strip(',').split(',')\n )\n for job in total_jobs\n )\n )\n distribution_bundles_to_process = DistributionBundle.objects.filter(\n distributions__jobs__in=total_jobs\n ).distinct().order_by('id')\n\n for distribution_bundle in distribution_bundles_to_process:\n distributions = distribution_bundle.distributions.all()\n\n for channel, locale in combinations_to_process:\n additional_jobs = []\n if channel == 'nightly' and settings.NIGHTLY_INCLUDES_RELEASE:\n additional_jobs = Job.objects.filter(\n status=Job.PUBLISHED).filter(**{\n 'targets__on_release': True,\n 'distribution__in': distributions,\n })\n\n channel_jobs = Job.objects.filter(\n status=Job.PUBLISHED).filter(\n Q(**{\n 'targets__on_{}'.format(channel): True,\n 'distribution__in': distributions,\n }))\n\n all_jobs = Job.objects.filter(\n Q(id__in=additional_jobs) | Q(id__in=channel_jobs)\n )\n\n locales_to_process = [\n key.lower() for key in product_details.languages.keys()\n if key.lower().startswith(locale)\n ]\n\n for locale_to_process in locales_to_process:\n filename = 'Firefox/{channel}/{locale}/{distribution}.json'.format(\n channel=channel,\n locale=locale_to_process,\n distribution=distribution_bundle.code_name,\n )\n filename = os.path.join(settings.MEDIA_BUNDLES_PREGEN_ROOT, filename)\n full_locale = ',{},'.format(locale_to_process.lower())\n splitted_locale = ',{},'.format(locale_to_process.lower().split('-', 1)[0])\n bundle_jobs = all_jobs.filter(\n 
Q(snippet__locale__code__contains=splitted_locale) |\n Q(snippet__locale__code__contains=full_locale)).distinct()\n\n # If DistributionBundle is not enabled, or if there are no\n # Published Jobs for the channel / locale / distribution\n # combination, delete the current bundle file if it exists.\n if not distribution_bundle.enabled or not bundle_jobs.exists():\n if default_storage.exists(filename):\n self.stdout.write('Removing {}'.format(filename))\n default_storage.delete(filename)\n continue\n\n data = []\n channel_job_ids = list(channel_jobs.values_list('id', flat=True))\n for job in bundle_jobs:\n if job.id in channel_job_ids:\n render = job.render()\n else:\n render = job.render(always_eval_to_false=True)\n data.append(render)\n\n bundle_content = json.dumps({\n 'messages': data,\n 'metadata': {\n 'generated_at': datetime.utcnow().isoformat(),\n 'number_of_snippets': len(data),\n 'channel': channel,\n }\n })\n\n # Convert str to bytes.\n if isinstance(bundle_content, str):\n bundle_content = bundle_content.encode('utf-8')\n\n if settings.BUNDLE_BROTLI_COMPRESS:\n content_file = ContentFile(brotli.compress(bundle_content))\n content_file.content_encoding = 'br'\n else:\n content_file = ContentFile(bundle_content)\n\n default_storage.save(filename, content_file)\n self.stdout.write(self.style.SUCCESS('Writing bundle {}'.format(filename)))\n", "path": "snippets/base/management/commands/generate_bundles.py"}]} | 1,814 | 175 |
gh_patches_debug_147 | rasdani/github-patches | git_diff | encode__httpx-868 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
0.12.0 PyPI wheel contains both public- and private-name modules
The following works in httpx 0.11.1:
```python
In [1]: import httpx
...: from httpx.exceptions import InvalidURL
In [2]: try:
...: httpx.get("foo.bar")
...: except InvalidURL:
...: pass
...:
```
In 0.12.0 the exception isn't caught:
```python
In [1]: import httpx
...: from httpx.exceptions import InvalidURL
In [2]: try:
...: httpx.get("foo.bar")
...: except InvalidURL:
...: pass
...:
---------------------------------------------------------------------------
InvalidURL Traceback (most recent call last)
<ipython-input-2-87135a63c42c> in <module>
1 try:
----> 2 httpx.get("foo.bar")
3 except InvalidURL:
4 pass
5
~/.venv/lib/python3.7/site-packages/httpx/_api.py in get(url, params, headers, cookies, auth, allow_redirects, cert, verify, timeout, trust_env)
166 verify=verify,
167 timeout=timeout,
--> 168 trust_env=trust_env,
169 )
170
~/.venv/lib/python3.7/site-packages/httpx/_api.py in request(method, url, params, data, files, json, headers, cookies, auth, timeout, allow_redirects, verify, cert, trust_env)
92 cookies=cookies,
93 auth=auth,
---> 94 allow_redirects=allow_redirects,
95 )
96
~/.venv/lib/python3.7/site-packages/httpx/_client.py in request(self, method, url, data, files, json, params, headers, cookies, auth, allow_redirects, timeout)
566 params=params,
567 headers=headers,
--> 568 cookies=cookies,
569 )
570 return self.send(
~/.venv/lib/python3.7/site-packages/httpx/_client.py in build_request(self, method, url, data, files, json, params, headers, cookies)
196 Build and return a request instance.
197 """
--> 198 url = self.merge_url(url)
199 headers = self.merge_headers(headers)
200 cookies = self.merge_cookies(cookies)
~/.venv/lib/python3.7/site-packages/httpx/_client.py in merge_url(self, url)
216 to create the URL used for the outgoing request.
217 """
--> 218 url = self.base_url.join(relative_url=url)
219 if url.scheme == "http" and hstspreload.in_hsts_preload(url.host):
220 port = None if url.port == 80 else url.port
~/.venv/lib/python3.7/site-packages/httpx/_models.py in join(self, relative_url)
227 """
228 if self.is_relative_url:
--> 229 return URL(relative_url)
230
231 # We drop any fragment portion, because RFC 3986 strictly
~/.venv/lib/python3.7/site-packages/httpx/_models.py in __init__(self, url, allow_relative, params)
104 if not allow_relative:
105 if not self.scheme:
--> 106 raise InvalidURL("No scheme included in URL.")
107 if not self.host:
108 raise InvalidURL("No host included in URL.")
InvalidURL: No scheme included in URL.
```
This works though:
```python
In [3]: import httpx
...: from httpx._exceptions import InvalidURL
In [4]: try:
...: httpx.get("foo.bar")
...: except InvalidURL:
...: pass
...:
```
</issue>
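What the traceback boils down to is two parallel module trees inside the 0.12.0 wheel: the library raises `httpx._exceptions.InvalidURL`, while the stale `httpx.exceptions` module exposes a second, unrelated class of the same name, so the `except` clause never matches. A condensed illustration (only meaningful with the broken wheel installed):

```python
import httpx._exceptions as private
import httpx.exceptions as public   # leftover module shipped in the 0.12.0 wheel

# Same class name, different class objects -> except/isinstance checks fail.
print(public.InvalidURL is private.InvalidURL)   # False on the broken wheel
```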
<code>
[start of httpx/__version__.py]
1 __title__ = "httpx"
2 __description__ = "A next generation HTTP client, for Python 3."
3 __version__ = "0.12.0"
4
[end of httpx/__version__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/httpx/__version__.py b/httpx/__version__.py
--- a/httpx/__version__.py
+++ b/httpx/__version__.py
@@ -1,3 +1,3 @@
__title__ = "httpx"
__description__ = "A next generation HTTP client, for Python 3."
-__version__ = "0.12.0"
+__version__ = "0.12.1"
| {"golden_diff": "diff --git a/httpx/__version__.py b/httpx/__version__.py\n--- a/httpx/__version__.py\n+++ b/httpx/__version__.py\n@@ -1,3 +1,3 @@\n __title__ = \"httpx\"\n __description__ = \"A next generation HTTP client, for Python 3.\"\n-__version__ = \"0.12.0\"\n+__version__ = \"0.12.1\"\n", "issue": "0.12.0 PyPI wheel contains both public- and private-name modules\nThe following works in httpx 0.11.1:\r\n\r\n```python\r\nIn [1]: import httpx \r\n ...: from httpx.exceptions import InvalidURL \r\n\r\nIn [2]: try: \r\n ...: httpx.get(\"foo.bar\") \r\n ...: except InvalidURL: \r\n ...: pass \r\n ...: \r\n```\r\n\r\nIn 0.12.0 the exception isn't caught:\r\n\r\n```python\r\nIn [1]: import httpx \r\n ...: from httpx.exceptions import InvalidURL \r\n\r\nIn [2]: try: \r\n ...: httpx.get(\"foo.bar\") \r\n ...: except InvalidURL: \r\n ...: pass \r\n ...: \r\n---------------------------------------------------------------------------\r\nInvalidURL Traceback (most recent call last)\r\n<ipython-input-2-87135a63c42c> in <module>\r\n 1 try:\r\n----> 2 httpx.get(\"foo.bar\")\r\n 3 except InvalidURL:\r\n 4 pass\r\n 5 \r\n\r\n~/.venv/lib/python3.7/site-packages/httpx/_api.py in get(url, params, headers, cookies, auth, allow_redirects, cert, verify, timeout, trust_env)\r\n 166 verify=verify,\r\n 167 timeout=timeout,\r\n--> 168 trust_env=trust_env,\r\n 169 )\r\n 170 \r\n\r\n~/.venv/lib/python3.7/site-packages/httpx/_api.py in request(method, url, params, data, files, json, headers, cookies, auth, timeout, allow_redirects, verify, cert, trust_env)\r\n 92 cookies=cookies,\r\n 93 auth=auth,\r\n---> 94 allow_redirects=allow_redirects,\r\n 95 )\r\n 96 \r\n\r\n~/.venv/lib/python3.7/site-packages/httpx/_client.py in request(self, method, url, data, files, json, params, headers, cookies, auth, allow_redirects, timeout)\r\n 566 params=params,\r\n 567 headers=headers,\r\n--> 568 cookies=cookies,\r\n 569 )\r\n 570 return self.send(\r\n\r\n~/.venv/lib/python3.7/site-packages/httpx/_client.py in build_request(self, method, url, data, files, json, params, headers, cookies)\r\n 196 Build and return a request instance.\r\n 197 \"\"\"\r\n--> 198 url = self.merge_url(url)\r\n 199 headers = self.merge_headers(headers)\r\n 200 cookies = self.merge_cookies(cookies)\r\n\r\n~/.venv/lib/python3.7/site-packages/httpx/_client.py in merge_url(self, url)\r\n 216 to create the URL used for the outgoing request.\r\n 217 \"\"\"\r\n--> 218 url = self.base_url.join(relative_url=url)\r\n 219 if url.scheme == \"http\" and hstspreload.in_hsts_preload(url.host):\r\n 220 port = None if url.port == 80 else url.port\r\n\r\n~/.venv/lib/python3.7/site-packages/httpx/_models.py in join(self, relative_url)\r\n 227 \"\"\"\r\n 228 if self.is_relative_url:\r\n--> 229 return URL(relative_url)\r\n 230 \r\n 231 # We drop any fragment portion, because RFC 3986 strictly\r\n\r\n~/.venv/lib/python3.7/site-packages/httpx/_models.py in __init__(self, url, allow_relative, params)\r\n 104 if not allow_relative:\r\n 105 if not self.scheme:\r\n--> 106 raise InvalidURL(\"No scheme included in URL.\")\r\n 107 if not self.host:\r\n 108 raise InvalidURL(\"No host included in URL.\")\r\n\r\nInvalidURL: No scheme included in URL.\r\n```\r\n\r\nThis works though:\r\n\r\n```python\r\nIn [3]: import httpx \r\n ...: from httpx._exceptions import InvalidURL \r\n\r\nIn [4]: try: \r\n ...: httpx.get(\"foo.bar\") \r\n ...: except InvalidURL: \r\n ...: pass \r\n ...: \r\n```\n", "before_files": [{"content": "__title__ = \"httpx\"\n__description__ = \"A next generation HTTP 
client, for Python 3.\"\n__version__ = \"0.12.0\"\n", "path": "httpx/__version__.py"}]} | 1,549 | 96 |
gh_patches_debug_6543 | rasdani/github-patches | git_diff | saleor__saleor-10987 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to filter customers with 0 orders
### **Steps to reproduce the problem:**
```graphql
query Customers{
customers(filter: {numberOfOrders: {lte: 0, gte: 0}}, first: 10){
edges{
node{
id
email
orders{
totalCount
}
}
}
totalCount
}
}
```
### **Current result:**
Backend returns all customers instead of those with 0 orders
### **Expected result:**
Return all customers with 0 orders
### **Screenshots:**
### **System information:**
### **Environment:**
master.staging core v3.8.0-a
### **Additional info/links:**
https://master.staging.saleor.cloud/dashboard/customers/?asc=true&sort=name&numberOfOrdersFrom=0&numberOfOrdersTo=0
</issue>
<code>
[start of saleor/graphql/utils/filters.py]
1 from django.utils import timezone
2
3 from ..core.enums import ReportingPeriod
4
5
6 def reporting_period_to_date(period):
7 now = timezone.now()
8 if period == ReportingPeriod.TODAY:
9 start_date = now.replace(hour=0, minute=0, second=0, microsecond=0)
10 elif period == ReportingPeriod.THIS_MONTH:
11 start_date = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
12 else:
13 raise ValueError("Unknown period: %s" % period)
14 return start_date
15
16
17 def filter_by_period(queryset, period, field_name):
18 start_date = reporting_period_to_date(period)
19 return queryset.filter(**{"%s__gte" % field_name: start_date})
20
21
22 def filter_range_field(qs, field, value):
23 gte, lte = value.get("gte"), value.get("lte")
24 if gte:
25 lookup = {f"{field}__gte": gte}
26 qs = qs.filter(**lookup)
27 if lte:
28 lookup = {f"{field}__lte": lte}
29 qs = qs.filter(**lookup)
30 return qs
31
32
33 def filter_by_id(object_type):
34 from . import resolve_global_ids_to_primary_keys
35
36 def inner(qs, _, value):
37 if not value:
38 return qs
39 _, obj_pks = resolve_global_ids_to_primary_keys(value, object_type)
40 return qs.filter(id__in=obj_pks)
41
42 return inner
43
[end of saleor/graphql/utils/filters.py]
</code>
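One detail worth spelling out in `filter_range_field` above: the truthiness checks treat `0` like "no value", so a range such as `{gte: 0, lte: 0}` silently skips both lookups. A minimal sketch of the difference:

```python
value = {"gte": 0, "lte": 0}
gte = value.get("gte")

if gte:                  # 0 is falsy -> the filter is silently skipped
    print("would filter with", gte)

if gte is not None:      # 0 is a legitimate bound -> the filter is applied
    print("filters with", gte)
```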
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/saleor/graphql/utils/filters.py b/saleor/graphql/utils/filters.py
--- a/saleor/graphql/utils/filters.py
+++ b/saleor/graphql/utils/filters.py
@@ -21,10 +21,10 @@
def filter_range_field(qs, field, value):
gte, lte = value.get("gte"), value.get("lte")
- if gte:
+ if gte is not None:
lookup = {f"{field}__gte": gte}
qs = qs.filter(**lookup)
- if lte:
+ if lte is not None:
lookup = {f"{field}__lte": lte}
qs = qs.filter(**lookup)
return qs
| {"golden_diff": "diff --git a/saleor/graphql/utils/filters.py b/saleor/graphql/utils/filters.py\n--- a/saleor/graphql/utils/filters.py\n+++ b/saleor/graphql/utils/filters.py\n@@ -21,10 +21,10 @@\n \n def filter_range_field(qs, field, value):\n gte, lte = value.get(\"gte\"), value.get(\"lte\")\n- if gte:\n+ if gte is not None:\n lookup = {f\"{field}__gte\": gte}\n qs = qs.filter(**lookup)\n- if lte:\n+ if lte is not None:\n lookup = {f\"{field}__lte\": lte}\n qs = qs.filter(**lookup)\n return qs\n", "issue": "Unable to filter customers with 0 orders\n### **Steps to reproduce the problem:**\n```graphql\nquery Customers{\n customers(filter: {numberOfOrders: {lte: 0, gte: 0}}, first: 10){\n edges{\n node{\n id\n email\n orders{\n totalCount\n }\n }\n }\n totalCount\n }\n}\n```\n\n### **Current result:**\nBackend returns all customers instead of those with 0 orders\n\n### **Expected result:**\nReturn all customers with 0 orders\n\n### **Screenshots:**\n\n### **System information:**\n\n### **Environment:**\nmaster.staging core v3.8.0-a\n\n### **Additional info/links:**\nhttps://master.staging.saleor.cloud/dashboard/customers/?asc=true&sort=name&numberOfOrdersFrom=0&numberOfOrdersTo=0\n", "before_files": [{"content": "from django.utils import timezone\n\nfrom ..core.enums import ReportingPeriod\n\n\ndef reporting_period_to_date(period):\n now = timezone.now()\n if period == ReportingPeriod.TODAY:\n start_date = now.replace(hour=0, minute=0, second=0, microsecond=0)\n elif period == ReportingPeriod.THIS_MONTH:\n start_date = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)\n else:\n raise ValueError(\"Unknown period: %s\" % period)\n return start_date\n\n\ndef filter_by_period(queryset, period, field_name):\n start_date = reporting_period_to_date(period)\n return queryset.filter(**{\"%s__gte\" % field_name: start_date})\n\n\ndef filter_range_field(qs, field, value):\n gte, lte = value.get(\"gte\"), value.get(\"lte\")\n if gte:\n lookup = {f\"{field}__gte\": gte}\n qs = qs.filter(**lookup)\n if lte:\n lookup = {f\"{field}__lte\": lte}\n qs = qs.filter(**lookup)\n return qs\n\n\ndef filter_by_id(object_type):\n from . import resolve_global_ids_to_primary_keys\n\n def inner(qs, _, value):\n if not value:\n return qs\n _, obj_pks = resolve_global_ids_to_primary_keys(value, object_type)\n return qs.filter(id__in=obj_pks)\n\n return inner\n", "path": "saleor/graphql/utils/filters.py"}]} | 1,121 | 165 |
gh_patches_debug_20573 | rasdani/github-patches | git_diff | python__mypy-14737 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Stub suggestions for non-typeshed stubs
Currently mypy only suggests installing stub packages if they're from typeshed. This restriction is important because package installation can result in arbitrary code execution, potentially automatically when using `--install-types`.
However, we can loosen this a little bit. A concrete suggestion is to allow suggestions of stub packages that are under the same GitHub org as the actual package or maintained by the actual package maintainer. In particular, we've had a couple of requests for lxml-stubs and pandas-stubs.
cc @JukkaL
</issue>
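The suggestion above maps naturally onto the `non_bundled_packages` table in `mypy/stubinfo.py` (listed below): the only data needed per package is the importable prefix and the PyPI distribution that ships its stubs. A minimal sketch of what an upstream-maintained allow-list could look like follows; the `pandas`/`lxml` entries come from the issue, while the helper name and structure are illustrative only, not the actual patch:

```python
from typing import Optional

# Stub distributions maintained by the upstream projects themselves.
# Because --install-types can install these automatically, only packages
# whose stubs live under the upstream org belong here (higher trust bar).
upstream_maintained_stubs = {
    "pandas": "pandas-stubs",  # https://github.com/pandas-dev/pandas-stubs
    "lxml": "lxml-stubs",      # https://github.com/lxml/lxml-stubs
}


def suggested_stub_distribution(prefix: str) -> Optional[str]:
    """Return the PyPI distribution to suggest for a package missing stubs."""
    return upstream_maintained_stubs.get(prefix)


assert suggested_stub_distribution("pandas") == "pandas-stubs"
assert suggested_stub_distribution("numpy") is None
```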
<code>
[start of mypy/stubinfo.py]
1 from __future__ import annotations
2
3
4 def is_legacy_bundled_package(prefix: str) -> bool:
5 return prefix in legacy_bundled_packages
6
7
8 def approved_stub_package_exists(prefix: str) -> bool:
9 return is_legacy_bundled_package(prefix) or prefix in non_bundled_packages
10
11
12 def stub_package_name(prefix: str) -> str:
13 return legacy_bundled_packages.get(prefix) or non_bundled_packages[prefix]
14
15
16 # Stubs for these third-party packages used to be shipped with mypy.
17 #
18 # Map package name to PyPI stub distribution name.
19 #
20 # Package name can have one or two components ('a' or 'a.b').
21 legacy_bundled_packages = {
22 "aiofiles": "types-aiofiles",
23 "backports": "types-backports",
24 "backports_abc": "types-backports_abc",
25 "bleach": "types-bleach",
26 "boto": "types-boto",
27 "cachetools": "types-cachetools",
28 "click_spinner": "types-click-spinner",
29 "contextvars": "types-contextvars",
30 "croniter": "types-croniter",
31 "dataclasses": "types-dataclasses",
32 "dateparser": "types-dateparser",
33 "datetimerange": "types-DateTimeRange",
34 "dateutil": "types-python-dateutil",
35 "decorator": "types-decorator",
36 "deprecated": "types-Deprecated",
37 "docutils": "types-docutils",
38 "first": "types-first",
39 "geoip2": "types-geoip2",
40 "gflags": "types-python-gflags",
41 "google.protobuf": "types-protobuf",
42 "markdown": "types-Markdown",
43 "maxminddb": "types-maxminddb",
44 "mock": "types-mock",
45 "OpenSSL": "types-pyOpenSSL",
46 "paramiko": "types-paramiko",
47 "pkg_resources": "types-setuptools",
48 "polib": "types-polib",
49 "pycurl": "types-pycurl",
50 "pymysql": "types-PyMySQL",
51 "pyrfc3339": "types-pyRFC3339",
52 "python2": "types-six",
53 "pytz": "types-pytz",
54 "pyVmomi": "types-pyvmomi",
55 "redis": "types-redis",
56 "requests": "types-requests",
57 "retry": "types-retry",
58 "simplejson": "types-simplejson",
59 "singledispatch": "types-singledispatch",
60 "six": "types-six",
61 "slugify": "types-python-slugify",
62 "tabulate": "types-tabulate",
63 "toml": "types-toml",
64 "typed_ast": "types-typed-ast",
65 "tzlocal": "types-tzlocal",
66 "ujson": "types-ujson",
67 "waitress": "types-waitress",
68 "yaml": "types-PyYAML",
69 }
70
71 # Map package name to PyPI stub distribution name from typeshed.
72 # Stubs for these packages were never bundled with mypy. Don't
73 # include packages that have a release that includes PEP 561 type
74 # information.
75 #
76 # Package name can have one or two components ('a' or 'a.b').
77 #
78 # Note that these packages are omitted for now:
79 # sqlalchemy: It's unclear which stub package to suggest. There's also
80 # a mypy plugin available.
81 # pika: typeshed's stubs are on PyPI as types-pika-ts.
82 # types-pika already exists on PyPI, and is more complete in many ways,
83 # but is a non-typeshed stubs package.
84 non_bundled_packages = {
85 "MySQLdb": "types-mysqlclient",
86 "PIL": "types-Pillow",
87 "PyInstaller": "types-pyinstaller",
88 "Xlib": "types-python-xlib",
89 "annoy": "types-annoy",
90 "appdirs": "types-appdirs",
91 "aws_xray_sdk": "types-aws-xray-sdk",
92 "babel": "types-babel",
93 "backports.ssl_match_hostname": "types-backports.ssl_match_hostname",
94 "braintree": "types-braintree",
95 "bs4": "types-beautifulsoup4",
96 "bugbear": "types-flake8-bugbear",
97 "caldav": "types-caldav",
98 "cffi": "types-cffi",
99 "chevron": "types-chevron",
100 "colorama": "types-colorama",
101 "commonmark": "types-commonmark",
102 "consolemenu": "types-console-menu",
103 "crontab": "types-python-crontab",
104 "d3dshot": "types-D3DShot",
105 "dj_database_url": "types-dj-database-url",
106 "dockerfile_parse": "types-dockerfile-parse",
107 "docopt": "types-docopt",
108 "editdistance": "types-editdistance",
109 "entrypoints": "types-entrypoints",
110 "farmhash": "types-pyfarmhash",
111 "flake8_2020": "types-flake8-2020",
112 "flake8_builtins": "types-flake8-builtins",
113 "flake8_docstrings": "types-flake8-docstrings",
114 "flake8_plugin_utils": "types-flake8-plugin-utils",
115 "flake8_rst_docstrings": "types-flake8-rst-docstrings",
116 "flake8_simplify": "types-flake8-simplify",
117 "flake8_typing_imports": "types-flake8-typing-imports",
118 "flask_cors": "types-Flask-Cors",
119 "flask_migrate": "types-Flask-Migrate",
120 "flask_sqlalchemy": "types-Flask-SQLAlchemy",
121 "fpdf": "types-fpdf2",
122 "gdb": "types-gdb",
123 "google.cloud": "types-google-cloud-ndb",
124 "hdbcli": "types-hdbcli",
125 "html5lib": "types-html5lib",
126 "httplib2": "types-httplib2",
127 "humanfriendly": "types-humanfriendly",
128 "invoke": "types-invoke",
129 "jack": "types-JACK-Client",
130 "jmespath": "types-jmespath",
131 "jose": "types-python-jose",
132 "jsonschema": "types-jsonschema",
133 "keyboard": "types-keyboard",
134 "ldap3": "types-ldap3",
135 "nmap": "types-python-nmap",
136 "oauthlib": "types-oauthlib",
137 "openpyxl": "types-openpyxl",
138 "opentracing": "types-opentracing",
139 "paho.mqtt": "types-paho-mqtt",
140 "parsimonious": "types-parsimonious",
141 "passlib": "types-passlib",
142 "passpy": "types-passpy",
143 "peewee": "types-peewee",
144 "pep8ext_naming": "types-pep8-naming",
145 "playsound": "types-playsound",
146 "psutil": "types-psutil",
147 "psycopg2": "types-psycopg2",
148 "pyaudio": "types-pyaudio",
149 "pyautogui": "types-PyAutoGUI",
150 "pycocotools": "types-pycocotools",
151 "pyflakes": "types-pyflakes",
152 "pygments": "types-Pygments",
153 "pyi_splash": "types-pyinstaller",
154 "pynput": "types-pynput",
155 "pythoncom": "types-pywin32",
156 "pythonwin": "types-pywin32",
157 "pyscreeze": "types-PyScreeze",
158 "pysftp": "types-pysftp",
159 "pytest_lazyfixture": "types-pytest-lazy-fixture",
160 "pywintypes": "types-pywin32",
161 "regex": "types-regex",
162 "send2trash": "types-Send2Trash",
163 "slumber": "types-slumber",
164 "stdlib_list": "types-stdlib-list",
165 "stripe": "types-stripe",
166 "toposort": "types-toposort",
167 "tqdm": "types-tqdm",
168 "tree_sitter": "types-tree-sitter",
169 "tree_sitter_languages": "types-tree-sitter-languages",
170 "ttkthemes": "types-ttkthemes",
171 "urllib3": "types-urllib3",
172 "vobject": "types-vobject",
173 "whatthepatch": "types-whatthepatch",
174 "win32": "types-pywin32",
175 "win32api": "types-pywin32",
176 "win32con": "types-pywin32",
177 "win32com": "types-pywin32",
178 "win32comext": "types-pywin32",
179 "win32gui": "types-pywin32",
180 "xmltodict": "types-xmltodict",
181 "xxhash": "types-xxhash",
182 "zxcvbn": "types-zxcvbn",
183 }
184
[end of mypy/stubinfo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mypy/stubinfo.py b/mypy/stubinfo.py
--- a/mypy/stubinfo.py
+++ b/mypy/stubinfo.py
@@ -76,8 +76,6 @@
# Package name can have one or two components ('a' or 'a.b').
#
# Note that these packages are omitted for now:
-# sqlalchemy: It's unclear which stub package to suggest. There's also
-# a mypy plugin available.
# pika: typeshed's stubs are on PyPI as types-pika-ts.
# types-pika already exists on PyPI, and is more complete in many ways,
# but is a non-typeshed stubs package.
@@ -180,4 +178,9 @@
"xmltodict": "types-xmltodict",
"xxhash": "types-xxhash",
"zxcvbn": "types-zxcvbn",
+ # Stub packages that are not from typeshed
+ # Since these can be installed automatically via --install-types, we have a high trust bar
+ # for additions here
+ "pandas": "pandas-stubs", # https://github.com/pandas-dev/pandas-stubs
+ "lxml": "lxml-stubs", # https://github.com/lxml/lxml-stubs
}
| {"golden_diff": "diff --git a/mypy/stubinfo.py b/mypy/stubinfo.py\n--- a/mypy/stubinfo.py\n+++ b/mypy/stubinfo.py\n@@ -76,8 +76,6 @@\n # Package name can have one or two components ('a' or 'a.b').\n #\n # Note that these packages are omitted for now:\n-# sqlalchemy: It's unclear which stub package to suggest. There's also\n-# a mypy plugin available.\n # pika: typeshed's stubs are on PyPI as types-pika-ts.\n # types-pika already exists on PyPI, and is more complete in many ways,\n # but is a non-typeshed stubs package.\n@@ -180,4 +178,9 @@\n \"xmltodict\": \"types-xmltodict\",\n \"xxhash\": \"types-xxhash\",\n \"zxcvbn\": \"types-zxcvbn\",\n+ # Stub packages that are not from typeshed\n+ # Since these can be installed automatically via --install-types, we have a high trust bar\n+ # for additions here\n+ \"pandas\": \"pandas-stubs\", # https://github.com/pandas-dev/pandas-stubs\n+ \"lxml\": \"lxml-stubs\", # https://github.com/lxml/lxml-stubs\n }\n", "issue": "Stub suggestions for non-typeshed stubs\nCurrently mypy only suggests installing stub packages if they're from typeshed. This restriction is important, because package installation can result in arbitrary code execution; potentially automatically when using `--install-types`.\r\n\r\nHowever, we can loosen this a little bit. A concrete suggestion is to allow suggestions of stub packages that are under the same Github org as the actual package or maintained by the actual package maintainer. In particular, we've had a couple requests for lxml-stubs and pandas-stubs.\r\n\r\ncc @JukkaL \n", "before_files": [{"content": "from __future__ import annotations\n\n\ndef is_legacy_bundled_package(prefix: str) -> bool:\n return prefix in legacy_bundled_packages\n\n\ndef approved_stub_package_exists(prefix: str) -> bool:\n return is_legacy_bundled_package(prefix) or prefix in non_bundled_packages\n\n\ndef stub_package_name(prefix: str) -> str:\n return legacy_bundled_packages.get(prefix) or non_bundled_packages[prefix]\n\n\n# Stubs for these third-party packages used to be shipped with mypy.\n#\n# Map package name to PyPI stub distribution name.\n#\n# Package name can have one or two components ('a' or 'a.b').\nlegacy_bundled_packages = {\n \"aiofiles\": \"types-aiofiles\",\n \"backports\": \"types-backports\",\n \"backports_abc\": \"types-backports_abc\",\n \"bleach\": \"types-bleach\",\n \"boto\": \"types-boto\",\n \"cachetools\": \"types-cachetools\",\n \"click_spinner\": \"types-click-spinner\",\n \"contextvars\": \"types-contextvars\",\n \"croniter\": \"types-croniter\",\n \"dataclasses\": \"types-dataclasses\",\n \"dateparser\": \"types-dateparser\",\n \"datetimerange\": \"types-DateTimeRange\",\n \"dateutil\": \"types-python-dateutil\",\n \"decorator\": \"types-decorator\",\n \"deprecated\": \"types-Deprecated\",\n \"docutils\": \"types-docutils\",\n \"first\": \"types-first\",\n \"geoip2\": \"types-geoip2\",\n \"gflags\": \"types-python-gflags\",\n \"google.protobuf\": \"types-protobuf\",\n \"markdown\": \"types-Markdown\",\n \"maxminddb\": \"types-maxminddb\",\n \"mock\": \"types-mock\",\n \"OpenSSL\": \"types-pyOpenSSL\",\n \"paramiko\": \"types-paramiko\",\n \"pkg_resources\": \"types-setuptools\",\n \"polib\": \"types-polib\",\n \"pycurl\": \"types-pycurl\",\n \"pymysql\": \"types-PyMySQL\",\n \"pyrfc3339\": \"types-pyRFC3339\",\n \"python2\": \"types-six\",\n \"pytz\": \"types-pytz\",\n \"pyVmomi\": \"types-pyvmomi\",\n \"redis\": \"types-redis\",\n \"requests\": \"types-requests\",\n \"retry\": \"types-retry\",\n \"simplejson\": 
\"types-simplejson\",\n \"singledispatch\": \"types-singledispatch\",\n \"six\": \"types-six\",\n \"slugify\": \"types-python-slugify\",\n \"tabulate\": \"types-tabulate\",\n \"toml\": \"types-toml\",\n \"typed_ast\": \"types-typed-ast\",\n \"tzlocal\": \"types-tzlocal\",\n \"ujson\": \"types-ujson\",\n \"waitress\": \"types-waitress\",\n \"yaml\": \"types-PyYAML\",\n}\n\n# Map package name to PyPI stub distribution name from typeshed.\n# Stubs for these packages were never bundled with mypy. Don't\n# include packages that have a release that includes PEP 561 type\n# information.\n#\n# Package name can have one or two components ('a' or 'a.b').\n#\n# Note that these packages are omitted for now:\n# sqlalchemy: It's unclear which stub package to suggest. There's also\n# a mypy plugin available.\n# pika: typeshed's stubs are on PyPI as types-pika-ts.\n# types-pika already exists on PyPI, and is more complete in many ways,\n# but is a non-typeshed stubs package.\nnon_bundled_packages = {\n \"MySQLdb\": \"types-mysqlclient\",\n \"PIL\": \"types-Pillow\",\n \"PyInstaller\": \"types-pyinstaller\",\n \"Xlib\": \"types-python-xlib\",\n \"annoy\": \"types-annoy\",\n \"appdirs\": \"types-appdirs\",\n \"aws_xray_sdk\": \"types-aws-xray-sdk\",\n \"babel\": \"types-babel\",\n \"backports.ssl_match_hostname\": \"types-backports.ssl_match_hostname\",\n \"braintree\": \"types-braintree\",\n \"bs4\": \"types-beautifulsoup4\",\n \"bugbear\": \"types-flake8-bugbear\",\n \"caldav\": \"types-caldav\",\n \"cffi\": \"types-cffi\",\n \"chevron\": \"types-chevron\",\n \"colorama\": \"types-colorama\",\n \"commonmark\": \"types-commonmark\",\n \"consolemenu\": \"types-console-menu\",\n \"crontab\": \"types-python-crontab\",\n \"d3dshot\": \"types-D3DShot\",\n \"dj_database_url\": \"types-dj-database-url\",\n \"dockerfile_parse\": \"types-dockerfile-parse\",\n \"docopt\": \"types-docopt\",\n \"editdistance\": \"types-editdistance\",\n \"entrypoints\": \"types-entrypoints\",\n \"farmhash\": \"types-pyfarmhash\",\n \"flake8_2020\": \"types-flake8-2020\",\n \"flake8_builtins\": \"types-flake8-builtins\",\n \"flake8_docstrings\": \"types-flake8-docstrings\",\n \"flake8_plugin_utils\": \"types-flake8-plugin-utils\",\n \"flake8_rst_docstrings\": \"types-flake8-rst-docstrings\",\n \"flake8_simplify\": \"types-flake8-simplify\",\n \"flake8_typing_imports\": \"types-flake8-typing-imports\",\n \"flask_cors\": \"types-Flask-Cors\",\n \"flask_migrate\": \"types-Flask-Migrate\",\n \"flask_sqlalchemy\": \"types-Flask-SQLAlchemy\",\n \"fpdf\": \"types-fpdf2\",\n \"gdb\": \"types-gdb\",\n \"google.cloud\": \"types-google-cloud-ndb\",\n \"hdbcli\": \"types-hdbcli\",\n \"html5lib\": \"types-html5lib\",\n \"httplib2\": \"types-httplib2\",\n \"humanfriendly\": \"types-humanfriendly\",\n \"invoke\": \"types-invoke\",\n \"jack\": \"types-JACK-Client\",\n \"jmespath\": \"types-jmespath\",\n \"jose\": \"types-python-jose\",\n \"jsonschema\": \"types-jsonschema\",\n \"keyboard\": \"types-keyboard\",\n \"ldap3\": \"types-ldap3\",\n \"nmap\": \"types-python-nmap\",\n \"oauthlib\": \"types-oauthlib\",\n \"openpyxl\": \"types-openpyxl\",\n \"opentracing\": \"types-opentracing\",\n \"paho.mqtt\": \"types-paho-mqtt\",\n \"parsimonious\": \"types-parsimonious\",\n \"passlib\": \"types-passlib\",\n \"passpy\": \"types-passpy\",\n \"peewee\": \"types-peewee\",\n \"pep8ext_naming\": \"types-pep8-naming\",\n \"playsound\": \"types-playsound\",\n \"psutil\": \"types-psutil\",\n \"psycopg2\": \"types-psycopg2\",\n \"pyaudio\": \"types-pyaudio\",\n 
\"pyautogui\": \"types-PyAutoGUI\",\n \"pycocotools\": \"types-pycocotools\",\n \"pyflakes\": \"types-pyflakes\",\n \"pygments\": \"types-Pygments\",\n \"pyi_splash\": \"types-pyinstaller\",\n \"pynput\": \"types-pynput\",\n \"pythoncom\": \"types-pywin32\",\n \"pythonwin\": \"types-pywin32\",\n \"pyscreeze\": \"types-PyScreeze\",\n \"pysftp\": \"types-pysftp\",\n \"pytest_lazyfixture\": \"types-pytest-lazy-fixture\",\n \"pywintypes\": \"types-pywin32\",\n \"regex\": \"types-regex\",\n \"send2trash\": \"types-Send2Trash\",\n \"slumber\": \"types-slumber\",\n \"stdlib_list\": \"types-stdlib-list\",\n \"stripe\": \"types-stripe\",\n \"toposort\": \"types-toposort\",\n \"tqdm\": \"types-tqdm\",\n \"tree_sitter\": \"types-tree-sitter\",\n \"tree_sitter_languages\": \"types-tree-sitter-languages\",\n \"ttkthemes\": \"types-ttkthemes\",\n \"urllib3\": \"types-urllib3\",\n \"vobject\": \"types-vobject\",\n \"whatthepatch\": \"types-whatthepatch\",\n \"win32\": \"types-pywin32\",\n \"win32api\": \"types-pywin32\",\n \"win32con\": \"types-pywin32\",\n \"win32com\": \"types-pywin32\",\n \"win32comext\": \"types-pywin32\",\n \"win32gui\": \"types-pywin32\",\n \"xmltodict\": \"types-xmltodict\",\n \"xxhash\": \"types-xxhash\",\n \"zxcvbn\": \"types-zxcvbn\",\n}\n", "path": "mypy/stubinfo.py"}]} | 3,155 | 299 |
gh_patches_debug_36303 | rasdani/github-patches | git_diff | pydantic__pydantic-2055 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
mypy "error: <nothing> not callable" when passing config to validate_arguments
# Bug
Output of `python -c "import pydantic.utils; print(pydantic.utils.version_info())"`:
```
pydantic version: 1.7
pydantic compiled: True
install path: /home/brian/repos/medigator/venv/lib/python3.6/site-packages/pydantic
python version: 3.6.9 (default, Oct 8 2020, 12:12:24) [GCC 8.4.0]
platform: Linux-4.15.0-121-generic-x86_64-with-Ubuntu-18.04-bionic
optional deps. installed: ['typing-extensions', 'email-validator']
```
The example code from the docs demonstrating the use of passing a config to `validate_arguments` causes an error when running mypy (version 0.790):
```py
from pydantic import ValidationError, validate_arguments
class Foobar:
def __init__(self, v: str):
self.v = v
def __add__(self, other: 'Foobar') -> str:
return f'{self} + {other}'
def __str__(self) -> str:
return f'Foobar({self.v})'
@validate_arguments(config=dict(arbitrary_types_allowed=True))
def add_foobars(a: Foobar, b: Foobar):
return a + b
```
```
test.py:15: error: <nothing> not callable
```
If I remove the `config` parameter but still call `validate_arguments` it still occurs.
If I remove the call and just decorate with `@validate_arguments` then mypy is happy.
I assume it has something to do with the type annotation of `validate_arguments`.
mypy "error: <nothing> not callable" when passing config to validate_arguments
# Bug
Output of `python -c "import pydantic.utils; print(pydantic.utils.version_info())"`:
```
pydantic version: 1.7
pydantic compiled: True
install path: /home/brian/repos/medigator/venv/lib/python3.6/site-packages/pydantic
python version: 3.6.9 (default, Oct 8 2020, 12:12:24) [GCC 8.4.0]
platform: Linux-4.15.0-121-generic-x86_64-with-Ubuntu-18.04-bionic
optional deps. installed: ['typing-extensions', 'email-validator']
```
The example code from the docs demonstrating the use of passing a config to `validate_arguments` causes an error when running mypy (version 0.790):
```py
from pydantic import ValidationError, validate_arguments
class Foobar:
def __init__(self, v: str):
self.v = v
def __add__(self, other: 'Foobar') -> str:
return f'{self} + {other}'
def __str__(self) -> str:
return f'Foobar({self.v})'
@validate_arguments(config=dict(arbitrary_types_allowed=True))
def add_foobars(a: Foobar, b: Foobar):
return a + b
```
```
test.py:15: error: <nothing> not callable
```
If I remove the `config` parameter but still call `validate_arguments` it still occurs.
If I remove the call and just decorate with `@validate_arguments` then mypy is happy.
I assume it has something to do with the type annotation of `validate_arguments`.
</issue>
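The closing guess in the report is plausible: `validate_arguments` is annotated as taking `func: 'Callable' = None` and returning that same TypeVar, so when it is called with only `config=...` mypy has no argument from which to infer the TypeVar and falls back to `<nothing>`, hence "<nothing> not callable" at the decoration site. The usual remedy is a pair of `typing.overload`s, one for bare use and one for the configured call. The sketch below shows that pattern on a toy decorator; it is not pydantic's implementation, and the validation body is stubbed out:

```python
from typing import Any, Callable, Optional, TypeVar, overload

F = TypeVar("F", bound=Callable[..., Any])


@overload
def validate_arguments(func: F) -> F:
    ...


@overload
def validate_arguments(*, config: Optional[dict] = None) -> Callable[[F], F]:
    ...


def validate_arguments(func: Optional[F] = None, *, config: Optional[dict] = None) -> Any:
    """Toy decorator: validation is stubbed out, only the typing pattern matters."""
    def wrap(f: F) -> F:
        return f  # a real implementation would wrap f with argument validation
    return wrap(func) if func is not None else wrap


@validate_arguments(config=dict(arbitrary_types_allowed=True))
def add_foobars(a: int, b: int) -> int:
    return a + b  # mypy now keeps the original signature for add_foobars


print(add_foobars(1, 2))
```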
<code>
[start of pydantic/decorator.py]
1 from functools import wraps
2 from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Tuple, Type, TypeVar, Union, cast, get_type_hints
3
4 from . import validator
5 from .errors import ConfigError
6 from .main import BaseModel, Extra, create_model
7 from .utils import to_camel
8
9 __all__ = ('validate_arguments',)
10
11 if TYPE_CHECKING:
12 from .typing import AnyCallable
13
14 Callable = TypeVar('Callable', bound=AnyCallable)
15 ConfigType = Union[None, Type[Any], Dict[str, Any]]
16
17
18 def validate_arguments(func: 'Callable' = None, *, config: 'ConfigType' = None) -> 'Callable':
19 """
20 Decorator to validate the arguments passed to a function.
21 """
22
23 def validate(_func: 'Callable') -> 'Callable':
24 vd = ValidatedFunction(_func, config)
25
26 @wraps(_func)
27 def wrapper_function(*args: Any, **kwargs: Any) -> Any:
28 return vd.call(*args, **kwargs)
29
30 wrapper_function.vd = vd # type: ignore
31 wrapper_function.raw_function = vd.raw_function # type: ignore
32 wrapper_function.model = vd.model # type: ignore
33 return cast('Callable', wrapper_function)
34
35 if func:
36 return validate(func)
37 else:
38 return cast('Callable', validate)
39
40
41 ALT_V_ARGS = 'v__args'
42 ALT_V_KWARGS = 'v__kwargs'
43 V_POSITIONAL_ONLY_NAME = 'v__positional_only'
44
45
46 class ValidatedFunction:
47 def __init__(self, function: 'Callable', config: 'ConfigType'): # noqa C901
48 from inspect import Parameter, signature
49
50 parameters: Mapping[str, Parameter] = signature(function).parameters
51
52 if parameters.keys() & {ALT_V_ARGS, ALT_V_KWARGS, V_POSITIONAL_ONLY_NAME}:
53 raise ConfigError(
54 f'"{ALT_V_ARGS}", "{ALT_V_KWARGS}" and "{V_POSITIONAL_ONLY_NAME}" are not permitted as argument '
55 f'names when using the "{validate_arguments.__name__}" decorator'
56 )
57
58 self.raw_function = function
59 self.arg_mapping: Dict[int, str] = {}
60 self.positional_only_args = set()
61 self.v_args_name = 'args'
62 self.v_kwargs_name = 'kwargs'
63
64 type_hints = get_type_hints(function)
65 takes_args = False
66 takes_kwargs = False
67 fields: Dict[str, Tuple[Any, Any]] = {}
68 for i, (name, p) in enumerate(parameters.items()):
69 if p.annotation == p.empty:
70 annotation = Any
71 else:
72 annotation = type_hints[name]
73
74 default = ... if p.default == p.empty else p.default
75 if p.kind == Parameter.POSITIONAL_ONLY:
76 self.arg_mapping[i] = name
77 fields[name] = annotation, default
78 fields[V_POSITIONAL_ONLY_NAME] = List[str], None
79 self.positional_only_args.add(name)
80 elif p.kind == Parameter.POSITIONAL_OR_KEYWORD:
81 self.arg_mapping[i] = name
82 fields[name] = annotation, default
83 elif p.kind == Parameter.KEYWORD_ONLY:
84 fields[name] = annotation, default
85 elif p.kind == Parameter.VAR_POSITIONAL:
86 self.v_args_name = name
87 fields[name] = Tuple[annotation, ...], None
88 takes_args = True
89 else:
90 assert p.kind == Parameter.VAR_KEYWORD, p.kind
91 self.v_kwargs_name = name
92 fields[name] = Dict[str, annotation], None # type: ignore
93 takes_kwargs = True
94
95 # these checks avoid a clash between "args" and a field with that name
96 if not takes_args and self.v_args_name in fields:
97 self.v_args_name = ALT_V_ARGS
98
99 # same with "kwargs"
100 if not takes_kwargs and self.v_kwargs_name in fields:
101 self.v_kwargs_name = ALT_V_KWARGS
102
103 if not takes_args:
104 # we add the field so validation below can raise the correct exception
105 fields[self.v_args_name] = List[Any], None
106
107 if not takes_kwargs:
108 # same with kwargs
109 fields[self.v_kwargs_name] = Dict[Any, Any], None
110
111 self.create_model(fields, takes_args, takes_kwargs, config)
112
113 def call(self, *args: Any, **kwargs: Any) -> Any:
114 values = self.build_values(args, kwargs)
115 m = self.model(**values)
116 return self.execute(m)
117
118 def build_values(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Dict[str, Any]:
119 values: Dict[str, Any] = {}
120 if args:
121 arg_iter = enumerate(args)
122 while True:
123 try:
124 i, a = next(arg_iter)
125 except StopIteration:
126 break
127 arg_name = self.arg_mapping.get(i)
128 if arg_name is not None:
129 values[arg_name] = a
130 else:
131 values[self.v_args_name] = [a] + [a for _, a in arg_iter]
132 break
133
134 var_kwargs = {}
135 wrong_positional_args = []
136 for k, v in kwargs.items():
137 if k in self.model.__fields__:
138 if k in self.positional_only_args:
139 wrong_positional_args.append(k)
140 values[k] = v
141 else:
142 var_kwargs[k] = v
143
144 if var_kwargs:
145 values[self.v_kwargs_name] = var_kwargs
146 if wrong_positional_args:
147 values[V_POSITIONAL_ONLY_NAME] = wrong_positional_args
148 return values
149
150 def execute(self, m: BaseModel) -> Any:
151 d = {k: v for k, v in m._iter() if k in m.__fields_set__}
152 kwargs = d.pop(self.v_kwargs_name, None)
153 if kwargs:
154 d.update(kwargs)
155
156 if self.v_args_name in d:
157 args_: List[Any] = []
158 in_kwargs = False
159 kwargs = {}
160 for name, value in d.items():
161 if in_kwargs:
162 kwargs[name] = value
163 elif name == self.v_args_name:
164 args_ += value
165 in_kwargs = True
166 else:
167 args_.append(value)
168 return self.raw_function(*args_, **kwargs)
169 elif self.positional_only_args:
170 args_ = []
171 kwargs = {}
172 for name, value in d.items():
173 if name in self.positional_only_args:
174 args_.append(value)
175 else:
176 kwargs[name] = value
177 return self.raw_function(*args_, **kwargs)
178 else:
179 return self.raw_function(**d)
180
181 def create_model(self, fields: Dict[str, Any], takes_args: bool, takes_kwargs: bool, config: 'ConfigType') -> None:
182 pos_args = len(self.arg_mapping)
183
184 class CustomConfig:
185 pass
186
187 if not TYPE_CHECKING: # pragma: no branch
188 if isinstance(config, dict):
189 CustomConfig = type('Config', (), config) # noqa: F811
190 elif config is not None:
191 CustomConfig = config # noqa: F811
192
193 if hasattr(CustomConfig, 'fields') or hasattr(CustomConfig, 'alias_generator'):
194 raise ConfigError(
195 'Setting the "fields" and "alias_generator" property on custom Config for '
196 '@validate_arguments is not yet supported, please remove.'
197 )
198
199 class DecoratorBaseModel(BaseModel):
200 @validator(self.v_args_name, check_fields=False, allow_reuse=True)
201 def check_args(cls, v: List[Any]) -> List[Any]:
202 if takes_args:
203 return v
204
205 raise TypeError(f'{pos_args} positional arguments expected but {pos_args + len(v)} given')
206
207 @validator(self.v_kwargs_name, check_fields=False, allow_reuse=True)
208 def check_kwargs(cls, v: Dict[str, Any]) -> Dict[str, Any]:
209 if takes_kwargs:
210 return v
211
212 plural = '' if len(v) == 1 else 's'
213 keys = ', '.join(map(repr, v.keys()))
214 raise TypeError(f'unexpected keyword argument{plural}: {keys}')
215
216 @validator(V_POSITIONAL_ONLY_NAME, check_fields=False, allow_reuse=True)
217 def check_positional_only(cls, v: List[str]) -> None:
218 plural = '' if len(v) == 1 else 's'
219 keys = ', '.join(map(repr, v))
220 raise TypeError(f'positional-only argument{plural} passed as keyword argument{plural}: {keys}')
221
222 class Config(CustomConfig):
223 extra = Extra.forbid
224
225 self.model = create_model(to_camel(self.raw_function.__name__), __base__=DecoratorBaseModel, **fields)
226
[end of pydantic/decorator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pydantic/decorator.py b/pydantic/decorator.py
--- a/pydantic/decorator.py
+++ b/pydantic/decorator.py
@@ -1,5 +1,19 @@
from functools import wraps
-from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Tuple, Type, TypeVar, Union, cast, get_type_hints
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Dict,
+ List,
+ Mapping,
+ Optional,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+ get_type_hints,
+ overload,
+)
from . import validator
from .errors import ConfigError
@@ -11,16 +25,26 @@
if TYPE_CHECKING:
from .typing import AnyCallable
- Callable = TypeVar('Callable', bound=AnyCallable)
+ AnyCallableT = TypeVar('AnyCallableT', bound=AnyCallable)
ConfigType = Union[None, Type[Any], Dict[str, Any]]
-def validate_arguments(func: 'Callable' = None, *, config: 'ConfigType' = None) -> 'Callable':
+@overload
+def validate_arguments(func: None = None, *, config: 'ConfigType' = None) -> Callable[['AnyCallableT'], 'AnyCallableT']:
+ ...
+
+
+@overload
+def validate_arguments(func: 'AnyCallableT') -> 'AnyCallableT':
+ ...
+
+
+def validate_arguments(func: Optional['AnyCallableT'] = None, *, config: 'ConfigType' = None) -> Any:
"""
Decorator to validate the arguments passed to a function.
"""
- def validate(_func: 'Callable') -> 'Callable':
+ def validate(_func: 'AnyCallable') -> 'AnyCallable':
vd = ValidatedFunction(_func, config)
@wraps(_func)
@@ -30,12 +54,12 @@
wrapper_function.vd = vd # type: ignore
wrapper_function.raw_function = vd.raw_function # type: ignore
wrapper_function.model = vd.model # type: ignore
- return cast('Callable', wrapper_function)
+ return wrapper_function
if func:
return validate(func)
else:
- return cast('Callable', validate)
+ return validate
ALT_V_ARGS = 'v__args'
@@ -44,7 +68,7 @@
class ValidatedFunction:
- def __init__(self, function: 'Callable', config: 'ConfigType'): # noqa C901
+ def __init__(self, function: 'AnyCallableT', config: 'ConfigType'): # noqa C901
from inspect import Parameter, signature
parameters: Mapping[str, Parameter] = signature(function).parameters
| {"golden_diff": "diff --git a/pydantic/decorator.py b/pydantic/decorator.py\n--- a/pydantic/decorator.py\n+++ b/pydantic/decorator.py\n@@ -1,5 +1,19 @@\n from functools import wraps\n-from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Tuple, Type, TypeVar, Union, cast, get_type_hints\n+from typing import (\n+ TYPE_CHECKING,\n+ Any,\n+ Callable,\n+ Dict,\n+ List,\n+ Mapping,\n+ Optional,\n+ Tuple,\n+ Type,\n+ TypeVar,\n+ Union,\n+ get_type_hints,\n+ overload,\n+)\n \n from . import validator\n from .errors import ConfigError\n@@ -11,16 +25,26 @@\n if TYPE_CHECKING:\n from .typing import AnyCallable\n \n- Callable = TypeVar('Callable', bound=AnyCallable)\n+ AnyCallableT = TypeVar('AnyCallableT', bound=AnyCallable)\n ConfigType = Union[None, Type[Any], Dict[str, Any]]\n \n \n-def validate_arguments(func: 'Callable' = None, *, config: 'ConfigType' = None) -> 'Callable':\n+@overload\n+def validate_arguments(func: None = None, *, config: 'ConfigType' = None) -> Callable[['AnyCallableT'], 'AnyCallableT']:\n+ ...\n+\n+\n+@overload\n+def validate_arguments(func: 'AnyCallableT') -> 'AnyCallableT':\n+ ...\n+\n+\n+def validate_arguments(func: Optional['AnyCallableT'] = None, *, config: 'ConfigType' = None) -> Any:\n \"\"\"\n Decorator to validate the arguments passed to a function.\n \"\"\"\n \n- def validate(_func: 'Callable') -> 'Callable':\n+ def validate(_func: 'AnyCallable') -> 'AnyCallable':\n vd = ValidatedFunction(_func, config)\n \n @wraps(_func)\n@@ -30,12 +54,12 @@\n wrapper_function.vd = vd # type: ignore\n wrapper_function.raw_function = vd.raw_function # type: ignore\n wrapper_function.model = vd.model # type: ignore\n- return cast('Callable', wrapper_function)\n+ return wrapper_function\n \n if func:\n return validate(func)\n else:\n- return cast('Callable', validate)\n+ return validate\n \n \n ALT_V_ARGS = 'v__args'\n@@ -44,7 +68,7 @@\n \n \n class ValidatedFunction:\n- def __init__(self, function: 'Callable', config: 'ConfigType'): # noqa C901\n+ def __init__(self, function: 'AnyCallableT', config: 'ConfigType'): # noqa C901\n from inspect import Parameter, signature\n \n parameters: Mapping[str, Parameter] = signature(function).parameters\n", "issue": "mypy \"error: <nothing> not callable\" when passing config to validate_arguments\n# Bug\r\n\r\nOutput of `python -c \"import pydantic.utils; print(pydantic.utils.version_info())\"`:\r\n```\r\n pydantic version: 1.7\r\n pydantic compiled: True\r\n install path: /home/brian/repos/medigator/venv/lib/python3.6/site-packages/pydantic\r\n python version: 3.6.9 (default, Oct 8 2020, 12:12:24) [GCC 8.4.0]\r\n platform: Linux-4.15.0-121-generic-x86_64-with-Ubuntu-18.04-bionic\r\n optional deps. 
installed: ['typing-extensions', 'email-validator']\r\n```\r\n\r\nThe example code from the docs demonstrating the use of passing a config to `validate_arguments` causes an error when running mypy (version 0.790):\r\n\r\n```py\r\nfrom pydantic import ValidationError, validate_arguments\r\n\r\n\r\nclass Foobar:\r\n def __init__(self, v: str):\r\n self.v = v\r\n\r\n def __add__(self, other: 'Foobar') -> str:\r\n return f'{self} + {other}'\r\n\r\n def __str__(self) -> str:\r\n return f'Foobar({self.v})'\r\n\r\n\r\n@validate_arguments(config=dict(arbitrary_types_allowed=True))\r\ndef add_foobars(a: Foobar, b: Foobar):\r\n return a + b\r\n```\r\n\r\n```\r\ntest.py:15: error: <nothing> not callable\r\n```\r\n\r\nIf I remove the `config` parameter but still call `validate_arguments` it still occurs.\r\nIf I remove the call and just decorate with `@validate_arguments` then mypy is happy.\r\n\r\nI assume it has something to do with the type annotation of `validate_arguments`.\nmypy \"error: <nothing> not callable\" when passing config to validate_arguments\n# Bug\r\n\r\nOutput of `python -c \"import pydantic.utils; print(pydantic.utils.version_info())\"`:\r\n```\r\n pydantic version: 1.7\r\n pydantic compiled: True\r\n install path: /home/brian/repos/medigator/venv/lib/python3.6/site-packages/pydantic\r\n python version: 3.6.9 (default, Oct 8 2020, 12:12:24) [GCC 8.4.0]\r\n platform: Linux-4.15.0-121-generic-x86_64-with-Ubuntu-18.04-bionic\r\n optional deps. installed: ['typing-extensions', 'email-validator']\r\n```\r\n\r\nThe example code from the docs demonstrating the use of passing a config to `validate_arguments` causes an error when running mypy (version 0.790):\r\n\r\n```py\r\nfrom pydantic import ValidationError, validate_arguments\r\n\r\n\r\nclass Foobar:\r\n def __init__(self, v: str):\r\n self.v = v\r\n\r\n def __add__(self, other: 'Foobar') -> str:\r\n return f'{self} + {other}'\r\n\r\n def __str__(self) -> str:\r\n return f'Foobar({self.v})'\r\n\r\n\r\n@validate_arguments(config=dict(arbitrary_types_allowed=True))\r\ndef add_foobars(a: Foobar, b: Foobar):\r\n return a + b\r\n```\r\n\r\n```\r\ntest.py:15: error: <nothing> not callable\r\n```\r\n\r\nIf I remove the `config` parameter but still call `validate_arguments` it still occurs.\r\nIf I remove the call and just decorate with `@validate_arguments` then mypy is happy.\r\n\r\nI assume it has something to do with the type annotation of `validate_arguments`.\n", "before_files": [{"content": "from functools import wraps\nfrom typing import TYPE_CHECKING, Any, Dict, List, Mapping, Tuple, Type, TypeVar, Union, cast, get_type_hints\n\nfrom . 
import validator\nfrom .errors import ConfigError\nfrom .main import BaseModel, Extra, create_model\nfrom .utils import to_camel\n\n__all__ = ('validate_arguments',)\n\nif TYPE_CHECKING:\n from .typing import AnyCallable\n\n Callable = TypeVar('Callable', bound=AnyCallable)\n ConfigType = Union[None, Type[Any], Dict[str, Any]]\n\n\ndef validate_arguments(func: 'Callable' = None, *, config: 'ConfigType' = None) -> 'Callable':\n \"\"\"\n Decorator to validate the arguments passed to a function.\n \"\"\"\n\n def validate(_func: 'Callable') -> 'Callable':\n vd = ValidatedFunction(_func, config)\n\n @wraps(_func)\n def wrapper_function(*args: Any, **kwargs: Any) -> Any:\n return vd.call(*args, **kwargs)\n\n wrapper_function.vd = vd # type: ignore\n wrapper_function.raw_function = vd.raw_function # type: ignore\n wrapper_function.model = vd.model # type: ignore\n return cast('Callable', wrapper_function)\n\n if func:\n return validate(func)\n else:\n return cast('Callable', validate)\n\n\nALT_V_ARGS = 'v__args'\nALT_V_KWARGS = 'v__kwargs'\nV_POSITIONAL_ONLY_NAME = 'v__positional_only'\n\n\nclass ValidatedFunction:\n def __init__(self, function: 'Callable', config: 'ConfigType'): # noqa C901\n from inspect import Parameter, signature\n\n parameters: Mapping[str, Parameter] = signature(function).parameters\n\n if parameters.keys() & {ALT_V_ARGS, ALT_V_KWARGS, V_POSITIONAL_ONLY_NAME}:\n raise ConfigError(\n f'\"{ALT_V_ARGS}\", \"{ALT_V_KWARGS}\" and \"{V_POSITIONAL_ONLY_NAME}\" are not permitted as argument '\n f'names when using the \"{validate_arguments.__name__}\" decorator'\n )\n\n self.raw_function = function\n self.arg_mapping: Dict[int, str] = {}\n self.positional_only_args = set()\n self.v_args_name = 'args'\n self.v_kwargs_name = 'kwargs'\n\n type_hints = get_type_hints(function)\n takes_args = False\n takes_kwargs = False\n fields: Dict[str, Tuple[Any, Any]] = {}\n for i, (name, p) in enumerate(parameters.items()):\n if p.annotation == p.empty:\n annotation = Any\n else:\n annotation = type_hints[name]\n\n default = ... 
if p.default == p.empty else p.default\n if p.kind == Parameter.POSITIONAL_ONLY:\n self.arg_mapping[i] = name\n fields[name] = annotation, default\n fields[V_POSITIONAL_ONLY_NAME] = List[str], None\n self.positional_only_args.add(name)\n elif p.kind == Parameter.POSITIONAL_OR_KEYWORD:\n self.arg_mapping[i] = name\n fields[name] = annotation, default\n elif p.kind == Parameter.KEYWORD_ONLY:\n fields[name] = annotation, default\n elif p.kind == Parameter.VAR_POSITIONAL:\n self.v_args_name = name\n fields[name] = Tuple[annotation, ...], None\n takes_args = True\n else:\n assert p.kind == Parameter.VAR_KEYWORD, p.kind\n self.v_kwargs_name = name\n fields[name] = Dict[str, annotation], None # type: ignore\n takes_kwargs = True\n\n # these checks avoid a clash between \"args\" and a field with that name\n if not takes_args and self.v_args_name in fields:\n self.v_args_name = ALT_V_ARGS\n\n # same with \"kwargs\"\n if not takes_kwargs and self.v_kwargs_name in fields:\n self.v_kwargs_name = ALT_V_KWARGS\n\n if not takes_args:\n # we add the field so validation below can raise the correct exception\n fields[self.v_args_name] = List[Any], None\n\n if not takes_kwargs:\n # same with kwargs\n fields[self.v_kwargs_name] = Dict[Any, Any], None\n\n self.create_model(fields, takes_args, takes_kwargs, config)\n\n def call(self, *args: Any, **kwargs: Any) -> Any:\n values = self.build_values(args, kwargs)\n m = self.model(**values)\n return self.execute(m)\n\n def build_values(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Dict[str, Any]:\n values: Dict[str, Any] = {}\n if args:\n arg_iter = enumerate(args)\n while True:\n try:\n i, a = next(arg_iter)\n except StopIteration:\n break\n arg_name = self.arg_mapping.get(i)\n if arg_name is not None:\n values[arg_name] = a\n else:\n values[self.v_args_name] = [a] + [a for _, a in arg_iter]\n break\n\n var_kwargs = {}\n wrong_positional_args = []\n for k, v in kwargs.items():\n if k in self.model.__fields__:\n if k in self.positional_only_args:\n wrong_positional_args.append(k)\n values[k] = v\n else:\n var_kwargs[k] = v\n\n if var_kwargs:\n values[self.v_kwargs_name] = var_kwargs\n if wrong_positional_args:\n values[V_POSITIONAL_ONLY_NAME] = wrong_positional_args\n return values\n\n def execute(self, m: BaseModel) -> Any:\n d = {k: v for k, v in m._iter() if k in m.__fields_set__}\n kwargs = d.pop(self.v_kwargs_name, None)\n if kwargs:\n d.update(kwargs)\n\n if self.v_args_name in d:\n args_: List[Any] = []\n in_kwargs = False\n kwargs = {}\n for name, value in d.items():\n if in_kwargs:\n kwargs[name] = value\n elif name == self.v_args_name:\n args_ += value\n in_kwargs = True\n else:\n args_.append(value)\n return self.raw_function(*args_, **kwargs)\n elif self.positional_only_args:\n args_ = []\n kwargs = {}\n for name, value in d.items():\n if name in self.positional_only_args:\n args_.append(value)\n else:\n kwargs[name] = value\n return self.raw_function(*args_, **kwargs)\n else:\n return self.raw_function(**d)\n\n def create_model(self, fields: Dict[str, Any], takes_args: bool, takes_kwargs: bool, config: 'ConfigType') -> None:\n pos_args = len(self.arg_mapping)\n\n class CustomConfig:\n pass\n\n if not TYPE_CHECKING: # pragma: no branch\n if isinstance(config, dict):\n CustomConfig = type('Config', (), config) # noqa: F811\n elif config is not None:\n CustomConfig = config # noqa: F811\n\n if hasattr(CustomConfig, 'fields') or hasattr(CustomConfig, 'alias_generator'):\n raise ConfigError(\n 'Setting the \"fields\" and \"alias_generator\" 
property on custom Config for '\n '@validate_arguments is not yet supported, please remove.'\n )\n\n class DecoratorBaseModel(BaseModel):\n @validator(self.v_args_name, check_fields=False, allow_reuse=True)\n def check_args(cls, v: List[Any]) -> List[Any]:\n if takes_args:\n return v\n\n raise TypeError(f'{pos_args} positional arguments expected but {pos_args + len(v)} given')\n\n @validator(self.v_kwargs_name, check_fields=False, allow_reuse=True)\n def check_kwargs(cls, v: Dict[str, Any]) -> Dict[str, Any]:\n if takes_kwargs:\n return v\n\n plural = '' if len(v) == 1 else 's'\n keys = ', '.join(map(repr, v.keys()))\n raise TypeError(f'unexpected keyword argument{plural}: {keys}')\n\n @validator(V_POSITIONAL_ONLY_NAME, check_fields=False, allow_reuse=True)\n def check_positional_only(cls, v: List[str]) -> None:\n plural = '' if len(v) == 1 else 's'\n keys = ', '.join(map(repr, v))\n raise TypeError(f'positional-only argument{plural} passed as keyword argument{plural}: {keys}')\n\n class Config(CustomConfig):\n extra = Extra.forbid\n\n self.model = create_model(to_camel(self.raw_function.__name__), __base__=DecoratorBaseModel, **fields)\n", "path": "pydantic/decorator.py"}]} | 3,830 | 632 |
gh_patches_debug_28741 | rasdani/github-patches | git_diff | pymedusa__Medusa-4925 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Problem with YGGTORRENT because of url change : ww[3].yggtorrent.is
**Describe the bug**
Problem when trying to download episodes with YGGTORRENT provider.
I see that the URL of yggtorrent has changed since the last version of Medusa, from "ww2.yggtorrent.is" to "ww3.yggtorrent.is".
**To Reproduce**
Steps to reproduce the behavior:
1. Snatch episode with YGGTORRENT provider.
2. The torrent file isn't downloaded; only an empty file is retrieved.
3. Warning in Medusa and no file downloaded by the torrent client.
**Expected behavior**
Dev: please find a way to make the provider URL configurable from the Medusa interface, to avoid multiple problems like this, because many providers change their URL frequently.
**Medusa (please complete the following information):**
- OS: Windows 10, Medusa is installed in DSM 6 on Synology
- Branch: master (up to date)
</issue>
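Until the provider URL is exposed as a setting, one generic way to survive these domain rotations is to let the HTTP session follow whatever redirect the stale hostname returns and keep the final URL for later requests; the project's own fix further down resolves a redirect for the login URL in a similar spirit. The sketch below uses only plain `requests` and `urljoin`; the hard-coded base URL and the surrounding structure are placeholders for illustration, not Medusa's actual configuration mechanism:

```python
from urllib.parse import urljoin

import requests


def resolve_provider_base(configured_url: str, session: requests.Session) -> str:
    """Follow redirects from a possibly stale mirror and return the current base URL."""
    response = session.get(configured_url, timeout=30)
    response.raise_for_status()
    # response.url is the final URL after any redirect chain (e.g. ww2 -> ww3 -> ww4).
    return response.url.rstrip('/')


if __name__ == '__main__':
    session = requests.Session()
    base = resolve_provider_base('https://ww3.yggtorrent.is', session)
    # Assumes the mirror root has an empty path, as in the provider code above.
    urls = {
        'login': urljoin(base, 'user/login'),
        'search': urljoin(base, 'engine/search'),
        'download': urljoin(base, 'engine/download_torrent?id={0}'),
    }
    print(base, urls['login'])
```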
<code>
[start of medusa/providers/torrent/html/yggtorrent.py]
1 # coding=utf-8
2
3 """Provider code for Yggtorrent."""
4
5 from __future__ import unicode_literals
6
7 import logging
8 import re
9
10 from medusa import tv
11 from medusa.bs4_parser import BS4Parser
12 from medusa.helper.common import (
13 convert_size,
14 try_int,
15 )
16 from medusa.logger.adapters.style import BraceAdapter
17 from medusa.providers.torrent.torrent_provider import TorrentProvider
18
19 from requests.compat import urljoin
20
21 log = BraceAdapter(logging.getLogger(__name__))
22 log.logger.addHandler(logging.NullHandler())
23
24
25 class YggtorrentProvider(TorrentProvider):
26 """Yggtorrent Torrent provider."""
27
28 torrent_id_pattern = re.compile(r'\/(\d+)-')
29
30 def __init__(self):
31 """Initialize the class."""
32 super(YggtorrentProvider, self).__init__('Yggtorrent')
33
34 # Credentials
35 self.username = None
36 self.password = None
37
38 # URLs
39 self.url = 'https://ww3.yggtorrent.is'
40 self.urls = {
41 'login': urljoin(self.url, 'user/login'),
42 'search': urljoin(self.url, 'engine/search'),
43 'download': urljoin(self.url, 'engine/download_torrent?id={0}')
44 }
45
46 # Proper Strings
47 self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP']
48
49 # Torrent Stats
50 self.minseed = None
51 self.minleech = None
52
53 # Cache
54 self.cache = tv.Cache(self, min_time=20)
55
56 def search(self, search_strings, age=0, ep_obj=None, **kwargs):
57 """
58 Search a provider and parse the results.
59
60 :param search_strings: A dict with mode (key) and the search value (value)
61 :param age: Not used
62 :param ep_obj: Not used
63 :returns: A list of search results (structure)
64 """
65 results = []
66 if not self.login():
67 return results
68
69 # Search Params
70 search_params = {
71 'category': 2145,
72 'do': 'search'
73 }
74
75 for mode in search_strings:
76 log.debug('Search mode: {0}', mode)
77
78 for search_string in search_strings[mode]:
79
80 if mode != 'RSS':
81 log.debug('Search string: {search}',
82 {'search': search_string})
83
84 search_params['name'] = re.sub(r'[()]', '', search_string)
85
86 response = self.session.get(self.urls['search'], params=search_params)
87 if not response or not response.text:
88 log.debug('No data returned from provider')
89 continue
90
91 results += self.parse(response.text, mode)
92
93 return results
94
95 def parse(self, data, mode):
96 """
97 Parse search results for items.
98
99 :param data: The raw response from a search
100 :param mode: The current mode used to search, e.g. RSS
101
102 :return: A list of items found
103 """
104 # Units
105 units = ['O', 'KO', 'MO', 'GO', 'TO', 'PO']
106
107 items = []
108
109 with BS4Parser(data, 'html5lib') as html:
110 torrent_table = html.find(class_='table-responsive results')
111 torrent_rows = torrent_table('tr') if torrent_table else []
112
113 # Continue only if at least one Release is found
114 if len(torrent_rows) < 2:
115 log.debug('Data returned from provider does not contain any torrents')
116 return items
117
118 # Skip column headers
119 for result in torrent_rows[1:]:
120 cells = result('td')
121 if len(cells) < 9:
122 continue
123
124 try:
125 info = cells[1].find('a')
126 title = info.get_text(strip=True)
127 download_url = info.get('href')
128 if not (title and download_url):
129 continue
130
131 torrent_id = self.torrent_id_pattern.search(download_url)
132 download_url = self.urls['download'].format(torrent_id.group(1))
133
134 seeders = try_int(cells[7].get_text(strip=True), 0)
135 leechers = try_int(cells[8].get_text(strip=True), 0)
136
137 # Filter unseeded torrent
138 if seeders < min(self.minseed, 1):
139 if mode != 'RSS':
140 log.debug("Discarding torrent because it doesn't meet the"
141 ' minimum seeders: {0}. Seeders: {1}',
142 title, seeders)
143 continue
144
145 torrent_size = cells[5].get_text()
146 size = convert_size(torrent_size, sep='', units=units, default=-1)
147
148 pubdate_raw = cells[4].find('div', class_='hidden').get_text(strip=True)
149 pubdate = self.parse_pubdate(pubdate_raw, fromtimestamp=True)
150
151 item = {
152 'title': title,
153 'link': download_url,
154 'size': size,
155 'seeders': seeders,
156 'leechers': leechers,
157 'pubdate': pubdate,
158 }
159 if mode != 'RSS':
160 log.debug('Found result: {0} with {1} seeders and {2} leechers',
161 title, seeders, leechers)
162
163 items.append(item)
164 except (AttributeError, TypeError, KeyError, ValueError, IndexError):
165 log.exception('Failed parsing provider.')
166
167 return items
168
169 def login(self):
170 """Login method used for logging in before doing search and torrent downloads."""
171 login_params = {
172 'id': self.username,
173 'pass': self.password
174 }
175
176 login_resp = self.session.post(self.urls['login'], data=login_params)
177 if not login_resp:
178 log.warning('Invalid username or password. Check your settings')
179 return False
180
181 response = self.session.get(self.url)
182 if not response:
183 log.warning('Unable to connect to provider')
184 return False
185
186 if 'Bienvenue' not in response.text:
187 log.warning('Unable to login to provider')
188 return False
189
190 return True
191
192
193 provider = YggtorrentProvider()
194
[end of medusa/providers/torrent/html/yggtorrent.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/medusa/providers/torrent/html/yggtorrent.py b/medusa/providers/torrent/html/yggtorrent.py
--- a/medusa/providers/torrent/html/yggtorrent.py
+++ b/medusa/providers/torrent/html/yggtorrent.py
@@ -36,8 +36,9 @@
self.password = None
# URLs
- self.url = 'https://ww3.yggtorrent.is'
+ self.url = 'https://ww4.yggtorrent.is'
self.urls = {
+ 'auth': urljoin(self.url, 'user/ajax_usermenu'),
'login': urljoin(self.url, 'user/login'),
'search': urljoin(self.url, 'engine/search'),
'download': urljoin(self.url, 'engine/download_torrent?id={0}')
@@ -173,18 +174,19 @@
'pass': self.password
}
- login_resp = self.session.post(self.urls['login'], data=login_params)
- if not login_resp:
- log.warning('Invalid username or password. Check your settings')
- return False
+ if not self._is_authenticated():
+ login_url = self.get_redirect_url(self.urls['login'])
+ login_resp = self.session.post(login_url, data=login_params)
+ if not login_resp or not self._is_authenticated():
+ log.warning('Invalid username or password. Check your settings')
+ return False
- response = self.session.get(self.url)
- if not response:
- log.warning('Unable to connect to provider')
- return False
+ return True
- if 'Bienvenue' not in response.text:
- log.warning('Unable to login to provider')
+ def _is_authenticated(self):
+ response = self.session.get(self.urls['auth'])
+ if not response:
+ log.warning('Unable to connect or login to provider')
return False
return True
| {"golden_diff": "diff --git a/medusa/providers/torrent/html/yggtorrent.py b/medusa/providers/torrent/html/yggtorrent.py\n--- a/medusa/providers/torrent/html/yggtorrent.py\n+++ b/medusa/providers/torrent/html/yggtorrent.py\n@@ -36,8 +36,9 @@\n self.password = None\n \n # URLs\n- self.url = 'https://ww3.yggtorrent.is'\n+ self.url = 'https://ww4.yggtorrent.is'\n self.urls = {\n+ 'auth': urljoin(self.url, 'user/ajax_usermenu'),\n 'login': urljoin(self.url, 'user/login'),\n 'search': urljoin(self.url, 'engine/search'),\n 'download': urljoin(self.url, 'engine/download_torrent?id={0}')\n@@ -173,18 +174,19 @@\n 'pass': self.password\n }\n \n- login_resp = self.session.post(self.urls['login'], data=login_params)\n- if not login_resp:\n- log.warning('Invalid username or password. Check your settings')\n- return False\n+ if not self._is_authenticated():\n+ login_url = self.get_redirect_url(self.urls['login'])\n+ login_resp = self.session.post(login_url, data=login_params)\n+ if not login_resp or not self._is_authenticated():\n+ log.warning('Invalid username or password. Check your settings')\n+ return False\n \n- response = self.session.get(self.url)\n- if not response:\n- log.warning('Unable to connect to provider')\n- return False\n+ return True\n \n- if 'Bienvenue' not in response.text:\n- log.warning('Unable to login to provider')\n+ def _is_authenticated(self):\n+ response = self.session.get(self.urls['auth'])\n+ if not response:\n+ log.warning('Unable to connect or login to provider')\n return False\n \n return True\n", "issue": "Problem with YGGTORRENT because of url change : ww[3].yggtorrent.is\n**Describe the bug**\r\nProblem when trying to download episodes with YGGTORRENT provider. \r\nI see that the URL of yggtorrent has changed since the last version of Medusa, from \"ww2.yggtorrent.is\" to \"ww3.yggtorrent.is\". \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Snatch episode with YGGTORRENT provider.\r\n2. The torrent file won't be downloaded, just downloading an empty file\r\n3. Warning in Medusa and no file downloaded by the torrent client.\r\n\r\n**Expected behavior**\r\nDev : Find a way please to configure the provider URL with the Medusa interface... 
please, to avoid multiples problems like this because many providers change their url frequently.\r\n\r\n**Medusa (please complete the following information):**\r\n - OS: Windows 10, Medusa is installed in DSM 6 on Synology\r\n - Branch: master (up to date)\r\n\r\n\n", "before_files": [{"content": "# coding=utf-8\n\n\"\"\"Provider code for Yggtorrent.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport re\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import (\n convert_size,\n try_int,\n)\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.torrent.torrent_provider import TorrentProvider\n\nfrom requests.compat import urljoin\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass YggtorrentProvider(TorrentProvider):\n \"\"\"Yggtorrent Torrent provider.\"\"\"\n\n torrent_id_pattern = re.compile(r'\\/(\\d+)-')\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(YggtorrentProvider, self).__init__('Yggtorrent')\n\n # Credentials\n self.username = None\n self.password = None\n\n # URLs\n self.url = 'https://ww3.yggtorrent.is'\n self.urls = {\n 'login': urljoin(self.url, 'user/login'),\n 'search': urljoin(self.url, 'engine/search'),\n 'download': urljoin(self.url, 'engine/download_torrent?id={0}')\n }\n\n # Proper Strings\n self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP']\n\n # Torrent Stats\n self.minseed = None\n self.minleech = None\n\n # Cache\n self.cache = tv.Cache(self, min_time=20)\n\n def search(self, search_strings, age=0, ep_obj=None, **kwargs):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :param age: Not used\n :param ep_obj: Not used\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n if not self.login():\n return results\n\n # Search Params\n search_params = {\n 'category': 2145,\n 'do': 'search'\n }\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n\n for search_string in search_strings[mode]:\n\n if mode != 'RSS':\n log.debug('Search string: {search}',\n {'search': search_string})\n\n search_params['name'] = re.sub(r'[()]', '', search_string)\n\n response = self.session.get(self.urls['search'], params=search_params)\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n results += self.parse(response.text, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. 
RSS\n\n :return: A list of items found\n \"\"\"\n # Units\n units = ['O', 'KO', 'MO', 'GO', 'TO', 'PO']\n\n items = []\n\n with BS4Parser(data, 'html5lib') as html:\n torrent_table = html.find(class_='table-responsive results')\n torrent_rows = torrent_table('tr') if torrent_table else []\n\n # Continue only if at least one Release is found\n if len(torrent_rows) < 2:\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n # Skip column headers\n for result in torrent_rows[1:]:\n cells = result('td')\n if len(cells) < 9:\n continue\n\n try:\n info = cells[1].find('a')\n title = info.get_text(strip=True)\n download_url = info.get('href')\n if not (title and download_url):\n continue\n\n torrent_id = self.torrent_id_pattern.search(download_url)\n download_url = self.urls['download'].format(torrent_id.group(1))\n\n seeders = try_int(cells[7].get_text(strip=True), 0)\n leechers = try_int(cells[8].get_text(strip=True), 0)\n\n # Filter unseeded torrent\n if seeders < min(self.minseed, 1):\n if mode != 'RSS':\n log.debug(\"Discarding torrent because it doesn't meet the\"\n ' minimum seeders: {0}. Seeders: {1}',\n title, seeders)\n continue\n\n torrent_size = cells[5].get_text()\n size = convert_size(torrent_size, sep='', units=units, default=-1)\n\n pubdate_raw = cells[4].find('div', class_='hidden').get_text(strip=True)\n pubdate = self.parse_pubdate(pubdate_raw, fromtimestamp=True)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': pubdate,\n }\n if mode != 'RSS':\n log.debug('Found result: {0} with {1} seeders and {2} leechers',\n title, seeders, leechers)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n log.exception('Failed parsing provider.')\n\n return items\n\n def login(self):\n \"\"\"Login method used for logging in before doing search and torrent downloads.\"\"\"\n login_params = {\n 'id': self.username,\n 'pass': self.password\n }\n\n login_resp = self.session.post(self.urls['login'], data=login_params)\n if not login_resp:\n log.warning('Invalid username or password. Check your settings')\n return False\n\n response = self.session.get(self.url)\n if not response:\n log.warning('Unable to connect to provider')\n return False\n\n if 'Bienvenue' not in response.text:\n log.warning('Unable to login to provider')\n return False\n\n return True\n\n\nprovider = YggtorrentProvider()\n", "path": "medusa/providers/torrent/html/yggtorrent.py"}]} | 2,570 | 427 |