| problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-10.2k) | golden_diff (stringlengths 151-4.94k) | verification_info (stringlengths 582-21k) | num_tokens (int64 271-2.05k) | num_tokens_diff (int64 47-1.02k) |
|---|---|---|---|---|---|---|---|---|
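
The schema above is easiest to work with programmatically. The sketch below uses the Hugging Face `datasets` library to load and inspect records; the hub id `rasdani/github-patches` is taken from the `source` column and the `train` split name is an assumption, so adjust both if the actual dataset id or split differs.

```python
# Minimal sketch: load the dataset and inspect one record.
# Assumptions: the hub id matches the `source` column and a "train" split exists.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

example = ds[0]  # each record mirrors the columns in the table above
print(example["problem_id"])      # e.g. "gh_patches_debug_8326"
print(example["in_source_id"])    # upstream issue id, e.g. "google__clusterfuzz-1163"
print(example["num_tokens"], example["num_tokens_diff"])

# `prompt` holds the issue text plus the relevant source files;
# `golden_diff` is the reference patch in git diff format.
print(example["prompt"][:500])
print(example["golden_diff"])
```

In the preview rows, `verification_info` is a JSON string containing the issue text, the golden diff, and the before/after file contents, so it can be parsed with `json.loads` when needed.
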
| gh_patches_debug_8326 | rasdani/github-patches | git_diff | google__clusterfuzz-1163 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Command field empty in OSS-Fuzz testcases
See https://oss-fuzz.com/testcase-detail/5204819744915456 for example.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/bot/untrusted_runner/tasks_impl.py`
Content:
```
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Tasks RPC implementations."""
15 from __future__ import absolute_import
16
17 from google.protobuf import wrappers_pb2
18 from google.protobuf.any_pb2 import Any
19 import six
20
21 from . import protobuf_utils
22
23 from bot import testcase_manager
24 from bot.fuzzers import engine
25 from bot.tasks import corpus_pruning_task
26 from bot.tasks import fuzz_task
27 from bot.tasks import minimize_task
28 from datastore import data_types
29 from protos import untrusted_runner_pb2
30 from system import environment
31
32
33 def _proto_to_fuzz_target(proto):
34 """Convert protobuf to FuzzTarget."""
35 return data_types.FuzzTarget(
36 engine=proto.engine, project=proto.project, binary=proto.binary)
37
38
39 def _proto_to_cross_pollinate_fuzzer(proto):
40 """Convert protobuf to CrossPollinateFuzzer."""
41 return corpus_pruning_task.CrossPollinateFuzzer(
42 fuzz_target=_proto_to_fuzz_target(proto.fuzz_target),
43 backup_bucket_name=proto.backup_bucket_name,
44 corpus_engine_name=proto.corpus_engine_name)
45
46
47 def prune_corpus(request, _):
48 """Prune corpus."""
49 context = corpus_pruning_task.Context(
50 _proto_to_fuzz_target(request.fuzz_target), [
51 _proto_to_cross_pollinate_fuzzer(proto)
52 for proto in request.cross_pollinate_fuzzers
53 ], environment.get_value('USE_MINIJAIL'))
54
55 result = corpus_pruning_task.do_corpus_pruning(
56 context, request.last_execution_failed, request.revision)
57
58 # Intentionally skip edge and function coverage values as those would come
59 # from fuzzer coverage cron task (see src/go/server/cron/coverage.go).
60 coverage_info = untrusted_runner_pb2.CoverageInfo(
61 corpus_size_units=result.coverage_info.corpus_size_units,
62 corpus_size_bytes=result.coverage_info.corpus_size_bytes,
63 corpus_location=result.coverage_info.corpus_location,
64 corpus_backup_location=result.coverage_info.corpus_backup_location,
65 quarantine_size_units=result.coverage_info.quarantine_size_units,
66 quarantine_size_bytes=result.coverage_info.quarantine_size_bytes,
67 quarantine_location=result.coverage_info.quarantine_location)
68
69 crashes = [
70 untrusted_runner_pb2.CorpusCrash(
71 crash_state=crash.crash_state,
72 crash_type=crash.crash_type,
73 crash_address=crash.crash_address,
74 crash_stacktrace=protobuf_utils.encode_utf8_if_unicode(
75 crash.crash_stacktrace),
76 unit_path=crash.unit_path,
77 security_flag=crash.security_flag,
78 ) for crash in result.crashes
79 ]
80
81 return untrusted_runner_pb2.PruneCorpusResponse(
82 coverage_info=coverage_info,
83 crashes=crashes,
84 fuzzer_binary_name=result.fuzzer_binary_name,
85 revision=result.revision)
86
87
88 def process_testcase(request, _):
89 """Process testcase."""
90 tool_name_map = {
91 untrusted_runner_pb2.ProcessTestcaseRequest.MINIMIZE: 'minimize',
92 untrusted_runner_pb2.ProcessTestcaseRequest.CLEANSE: 'cleanse',
93 }
94
95 # TODO(ochang): Support other engines.
96 assert request.engine == 'libFuzzer'
97 assert request.operation in tool_name_map
98
99 result = minimize_task.run_libfuzzer_engine(
100 tool_name_map[request.operation], request.target_name, request.arguments,
101 request.testcase_path, request.output_path, request.timeout)
102
103 return untrusted_runner_pb2.EngineReproduceResult(
104 return_code=result.return_code,
105 time_executed=result.time_executed,
106 output=result.output)
107
108
109 def engine_fuzz(request, _):
110 """Run engine fuzzer."""
111 engine_impl = engine.get(request.engine)
112 result, fuzzer_metadata = fuzz_task.run_engine_fuzzer(
113 engine_impl, request.target_name, request.sync_corpus_directory,
114 request.testcase_directory)
115
116 crashes = [
117 untrusted_runner_pb2.EngineCrash(
118 input_path=crash.input_path,
119 stacktrace=protobuf_utils.encode_utf8_if_unicode(crash.stacktrace),
120 reproduce_args=crash.reproduce_args,
121 crash_time=crash.crash_time) for crash in result.crashes
122 ]
123
124 packed_stats = {}
125 for key, value in six.iteritems(result.stats):
126 packed_value = Any()
127 if isinstance(value, float):
128 packed_value.Pack(wrappers_pb2.DoubleValue(value=value))
129 elif isinstance(value, int):
130 packed_value.Pack(wrappers_pb2.Int32Value(value=value))
131 elif isinstance(value, six.string_types):
132 packed_value.Pack(wrappers_pb2.StringValue(value=value))
133 else:
134 raise ValueError('Unknown stat type for ' + key)
135
136 packed_stats[key] = packed_value
137
138 return untrusted_runner_pb2.EngineFuzzResponse(
139 logs=protobuf_utils.encode_utf8_if_unicode(result.logs),
140 command=result.command,
141 crashes=crashes,
142 stats=packed_stats,
143 time_executed=result.time_executed,
144 fuzzer_metadata=fuzzer_metadata)
145
146
147 def engine_reproduce(request, _):
148 """Run engine reproduce."""
149 engine_impl = engine.get(request.engine)
150 result = testcase_manager.engine_reproduce(engine_impl, request.target_name,
151 request.testcase_path,
152 request.arguments, request.timeout)
153 return untrusted_runner_pb2.EngineReproduceResult(
154 return_code=result.return_code,
155 time_executed=result.time_executed,
156 output=result.output)
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/python/bot/untrusted_runner/tasks_impl.py b/src/python/bot/untrusted_runner/tasks_impl.py
--- a/src/python/bot/untrusted_runner/tasks_impl.py
+++ b/src/python/bot/untrusted_runner/tasks_impl.py
@@ -151,6 +151,7 @@
request.testcase_path,
request.arguments, request.timeout)
return untrusted_runner_pb2.EngineReproduceResult(
+ command=result.command,
return_code=result.return_code,
time_executed=result.time_executed,
output=result.output)
|
{"golden_diff": "diff --git a/src/python/bot/untrusted_runner/tasks_impl.py b/src/python/bot/untrusted_runner/tasks_impl.py\n--- a/src/python/bot/untrusted_runner/tasks_impl.py\n+++ b/src/python/bot/untrusted_runner/tasks_impl.py\n@@ -151,6 +151,7 @@\n request.testcase_path,\n request.arguments, request.timeout)\n return untrusted_runner_pb2.EngineReproduceResult(\n+ command=result.command,\n return_code=result.return_code,\n time_executed=result.time_executed,\n output=result.output)\n", "issue": "Command field empty in OSS-Fuzz testcases\nSee https://oss-fuzz.com/testcase-detail/5204819744915456 for example.\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tasks RPC implementations.\"\"\"\nfrom __future__ import absolute_import\n\nfrom google.protobuf import wrappers_pb2\nfrom google.protobuf.any_pb2 import Any\nimport six\n\nfrom . import protobuf_utils\n\nfrom bot import testcase_manager\nfrom bot.fuzzers import engine\nfrom bot.tasks import corpus_pruning_task\nfrom bot.tasks import fuzz_task\nfrom bot.tasks import minimize_task\nfrom datastore import data_types\nfrom protos import untrusted_runner_pb2\nfrom system import environment\n\n\ndef _proto_to_fuzz_target(proto):\n \"\"\"Convert protobuf to FuzzTarget.\"\"\"\n return data_types.FuzzTarget(\n engine=proto.engine, project=proto.project, binary=proto.binary)\n\n\ndef _proto_to_cross_pollinate_fuzzer(proto):\n \"\"\"Convert protobuf to CrossPollinateFuzzer.\"\"\"\n return corpus_pruning_task.CrossPollinateFuzzer(\n fuzz_target=_proto_to_fuzz_target(proto.fuzz_target),\n backup_bucket_name=proto.backup_bucket_name,\n corpus_engine_name=proto.corpus_engine_name)\n\n\ndef prune_corpus(request, _):\n \"\"\"Prune corpus.\"\"\"\n context = corpus_pruning_task.Context(\n _proto_to_fuzz_target(request.fuzz_target), [\n _proto_to_cross_pollinate_fuzzer(proto)\n for proto in request.cross_pollinate_fuzzers\n ], environment.get_value('USE_MINIJAIL'))\n\n result = corpus_pruning_task.do_corpus_pruning(\n context, request.last_execution_failed, request.revision)\n\n # Intentionally skip edge and function coverage values as those would come\n # from fuzzer coverage cron task (see src/go/server/cron/coverage.go).\n coverage_info = untrusted_runner_pb2.CoverageInfo(\n corpus_size_units=result.coverage_info.corpus_size_units,\n corpus_size_bytes=result.coverage_info.corpus_size_bytes,\n corpus_location=result.coverage_info.corpus_location,\n corpus_backup_location=result.coverage_info.corpus_backup_location,\n quarantine_size_units=result.coverage_info.quarantine_size_units,\n quarantine_size_bytes=result.coverage_info.quarantine_size_bytes,\n quarantine_location=result.coverage_info.quarantine_location)\n\n crashes = [\n untrusted_runner_pb2.CorpusCrash(\n crash_state=crash.crash_state,\n crash_type=crash.crash_type,\n crash_address=crash.crash_address,\n crash_stacktrace=protobuf_utils.encode_utf8_if_unicode(\n crash.crash_stacktrace),\n unit_path=crash.unit_path,\n 
security_flag=crash.security_flag,\n ) for crash in result.crashes\n ]\n\n return untrusted_runner_pb2.PruneCorpusResponse(\n coverage_info=coverage_info,\n crashes=crashes,\n fuzzer_binary_name=result.fuzzer_binary_name,\n revision=result.revision)\n\n\ndef process_testcase(request, _):\n \"\"\"Process testcase.\"\"\"\n tool_name_map = {\n untrusted_runner_pb2.ProcessTestcaseRequest.MINIMIZE: 'minimize',\n untrusted_runner_pb2.ProcessTestcaseRequest.CLEANSE: 'cleanse',\n }\n\n # TODO(ochang): Support other engines.\n assert request.engine == 'libFuzzer'\n assert request.operation in tool_name_map\n\n result = minimize_task.run_libfuzzer_engine(\n tool_name_map[request.operation], request.target_name, request.arguments,\n request.testcase_path, request.output_path, request.timeout)\n\n return untrusted_runner_pb2.EngineReproduceResult(\n return_code=result.return_code,\n time_executed=result.time_executed,\n output=result.output)\n\n\ndef engine_fuzz(request, _):\n \"\"\"Run engine fuzzer.\"\"\"\n engine_impl = engine.get(request.engine)\n result, fuzzer_metadata = fuzz_task.run_engine_fuzzer(\n engine_impl, request.target_name, request.sync_corpus_directory,\n request.testcase_directory)\n\n crashes = [\n untrusted_runner_pb2.EngineCrash(\n input_path=crash.input_path,\n stacktrace=protobuf_utils.encode_utf8_if_unicode(crash.stacktrace),\n reproduce_args=crash.reproduce_args,\n crash_time=crash.crash_time) for crash in result.crashes\n ]\n\n packed_stats = {}\n for key, value in six.iteritems(result.stats):\n packed_value = Any()\n if isinstance(value, float):\n packed_value.Pack(wrappers_pb2.DoubleValue(value=value))\n elif isinstance(value, int):\n packed_value.Pack(wrappers_pb2.Int32Value(value=value))\n elif isinstance(value, six.string_types):\n packed_value.Pack(wrappers_pb2.StringValue(value=value))\n else:\n raise ValueError('Unknown stat type for ' + key)\n\n packed_stats[key] = packed_value\n\n return untrusted_runner_pb2.EngineFuzzResponse(\n logs=protobuf_utils.encode_utf8_if_unicode(result.logs),\n command=result.command,\n crashes=crashes,\n stats=packed_stats,\n time_executed=result.time_executed,\n fuzzer_metadata=fuzzer_metadata)\n\n\ndef engine_reproduce(request, _):\n \"\"\"Run engine reproduce.\"\"\"\n engine_impl = engine.get(request.engine)\n result = testcase_manager.engine_reproduce(engine_impl, request.target_name,\n request.testcase_path,\n request.arguments, request.timeout)\n return untrusted_runner_pb2.EngineReproduceResult(\n return_code=result.return_code,\n time_executed=result.time_executed,\n output=result.output)\n", "path": "src/python/bot/untrusted_runner/tasks_impl.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tasks RPC implementations.\"\"\"\nfrom __future__ import absolute_import\n\nfrom google.protobuf import wrappers_pb2\nfrom google.protobuf.any_pb2 import Any\nimport six\n\nfrom . 
import protobuf_utils\n\nfrom bot import testcase_manager\nfrom bot.fuzzers import engine\nfrom bot.tasks import corpus_pruning_task\nfrom bot.tasks import fuzz_task\nfrom bot.tasks import minimize_task\nfrom datastore import data_types\nfrom protos import untrusted_runner_pb2\nfrom system import environment\n\n\ndef _proto_to_fuzz_target(proto):\n \"\"\"Convert protobuf to FuzzTarget.\"\"\"\n return data_types.FuzzTarget(\n engine=proto.engine, project=proto.project, binary=proto.binary)\n\n\ndef _proto_to_cross_pollinate_fuzzer(proto):\n \"\"\"Convert protobuf to CrossPollinateFuzzer.\"\"\"\n return corpus_pruning_task.CrossPollinateFuzzer(\n fuzz_target=_proto_to_fuzz_target(proto.fuzz_target),\n backup_bucket_name=proto.backup_bucket_name,\n corpus_engine_name=proto.corpus_engine_name)\n\n\ndef prune_corpus(request, _):\n \"\"\"Prune corpus.\"\"\"\n context = corpus_pruning_task.Context(\n _proto_to_fuzz_target(request.fuzz_target), [\n _proto_to_cross_pollinate_fuzzer(proto)\n for proto in request.cross_pollinate_fuzzers\n ], environment.get_value('USE_MINIJAIL'))\n\n result = corpus_pruning_task.do_corpus_pruning(\n context, request.last_execution_failed, request.revision)\n\n # Intentionally skip edge and function coverage values as those would come\n # from fuzzer coverage cron task (see src/go/server/cron/coverage.go).\n coverage_info = untrusted_runner_pb2.CoverageInfo(\n corpus_size_units=result.coverage_info.corpus_size_units,\n corpus_size_bytes=result.coverage_info.corpus_size_bytes,\n corpus_location=result.coverage_info.corpus_location,\n corpus_backup_location=result.coverage_info.corpus_backup_location,\n quarantine_size_units=result.coverage_info.quarantine_size_units,\n quarantine_size_bytes=result.coverage_info.quarantine_size_bytes,\n quarantine_location=result.coverage_info.quarantine_location)\n\n crashes = [\n untrusted_runner_pb2.CorpusCrash(\n crash_state=crash.crash_state,\n crash_type=crash.crash_type,\n crash_address=crash.crash_address,\n crash_stacktrace=protobuf_utils.encode_utf8_if_unicode(\n crash.crash_stacktrace),\n unit_path=crash.unit_path,\n security_flag=crash.security_flag,\n ) for crash in result.crashes\n ]\n\n return untrusted_runner_pb2.PruneCorpusResponse(\n coverage_info=coverage_info,\n crashes=crashes,\n fuzzer_binary_name=result.fuzzer_binary_name,\n revision=result.revision)\n\n\ndef process_testcase(request, _):\n \"\"\"Process testcase.\"\"\"\n tool_name_map = {\n untrusted_runner_pb2.ProcessTestcaseRequest.MINIMIZE: 'minimize',\n untrusted_runner_pb2.ProcessTestcaseRequest.CLEANSE: 'cleanse',\n }\n\n # TODO(ochang): Support other engines.\n assert request.engine == 'libFuzzer'\n assert request.operation in tool_name_map\n\n result = minimize_task.run_libfuzzer_engine(\n tool_name_map[request.operation], request.target_name, request.arguments,\n request.testcase_path, request.output_path, request.timeout)\n\n return untrusted_runner_pb2.EngineReproduceResult(\n return_code=result.return_code,\n time_executed=result.time_executed,\n output=result.output)\n\n\ndef engine_fuzz(request, _):\n \"\"\"Run engine fuzzer.\"\"\"\n engine_impl = engine.get(request.engine)\n result, fuzzer_metadata = fuzz_task.run_engine_fuzzer(\n engine_impl, request.target_name, request.sync_corpus_directory,\n request.testcase_directory)\n\n crashes = [\n untrusted_runner_pb2.EngineCrash(\n input_path=crash.input_path,\n stacktrace=protobuf_utils.encode_utf8_if_unicode(crash.stacktrace),\n reproduce_args=crash.reproduce_args,\n crash_time=crash.crash_time) for 
crash in result.crashes\n ]\n\n packed_stats = {}\n for key, value in six.iteritems(result.stats):\n packed_value = Any()\n if isinstance(value, float):\n packed_value.Pack(wrappers_pb2.DoubleValue(value=value))\n elif isinstance(value, int):\n packed_value.Pack(wrappers_pb2.Int32Value(value=value))\n elif isinstance(value, six.string_types):\n packed_value.Pack(wrappers_pb2.StringValue(value=value))\n else:\n raise ValueError('Unknown stat type for ' + key)\n\n packed_stats[key] = packed_value\n\n return untrusted_runner_pb2.EngineFuzzResponse(\n logs=protobuf_utils.encode_utf8_if_unicode(result.logs),\n command=result.command,\n crashes=crashes,\n stats=packed_stats,\n time_executed=result.time_executed,\n fuzzer_metadata=fuzzer_metadata)\n\n\ndef engine_reproduce(request, _):\n \"\"\"Run engine reproduce.\"\"\"\n engine_impl = engine.get(request.engine)\n result = testcase_manager.engine_reproduce(engine_impl, request.target_name,\n request.testcase_path,\n request.arguments, request.timeout)\n return untrusted_runner_pb2.EngineReproduceResult(\n command=result.command,\n return_code=result.return_code,\n time_executed=result.time_executed,\n output=result.output)\n", "path": "src/python/bot/untrusted_runner/tasks_impl.py"}]}
| 1,942 | 118 |
| gh_patches_debug_3051 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-2533 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pubsub message getting wrong attribute for publishTime
According the [REST docs](https://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage), a `PubsubMessage` has the field `publishTime`
In [message.py](https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/pubsub/google/cloud/pubsub/message.py), `from_api_repr` is getting the field `publishTimestamp` below:
```
instance._service_timestamp = api_repr.get('publishTimestamp')
```
The current tests are self-confirming of this issue as they simply set up the api_repr with `publishTimestamp`
A quick fix seems to adjust the following:
**message.py**
``` python
@classmethod
def from_api_repr(cls, api_repr):
"""Factory: construct message from API representation.
:type api_repr: dict or None
:param api_repr: The API representation of the message
:rtype: :class:`Message`
:returns: The message created from the response.
"""
data = base64.b64decode(api_repr.get('data', b''))
instance = cls(
data=data, message_id=api_repr['messageId'],
attributes=api_repr.get('attributes'))
instance._service_timestamp = api_repr.get('publishTime')
return instance
```
**test_message.py**
``` python
def test_from_api_repr_no_attributes(self):
from base64 import b64encode as b64
DATA = b'DEADBEEF'
B64_DATA = b64(DATA)
MESSAGE_ID = '12345'
TIMESTAMP = '2016-03-18-19:38:22.001393427Z'
api_repr = {
'data': B64_DATA,
'messageId': MESSAGE_ID,
'publishTime': TIMESTAMP,
}
message = self._getTargetClass().from_api_repr(api_repr)
self.assertEqual(message.data, DATA)
self.assertEqual(message.message_id, MESSAGE_ID)
self.assertEqual(message.attributes, {})
self.assertEqual(message.service_timestamp, TIMESTAMP)
def test_from_api_repr_w_attributes(self):
from base64 import b64encode as b64
DATA = b'DEADBEEF'
B64_DATA = b64(DATA)
MESSAGE_ID = '12345'
ATTRS = {'a': 'b'}
TIMESTAMP = '2016-03-18-19:38:22.001393427Z'
api_repr = {
'data': B64_DATA,
'messageId': MESSAGE_ID,
'publishTime': TIMESTAMP,
'attributes': ATTRS,
}
message = self._getTargetClass().from_api_repr(api_repr)
self.assertEqual(message.data, DATA)
self.assertEqual(message.message_id, MESSAGE_ID)
self.assertEqual(message.service_timestamp, TIMESTAMP)
self.assertEqual(message.attributes, ATTRS)
```
I don't currently have a contributor license signed, but will work on that. In the meantime, hoping that someone can pick this up.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pubsub/google/cloud/pubsub/message.py`
Content:
```
1 # Copyright 2015 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Define API Topics."""
16
17 import base64
18
19 from google.cloud._helpers import _rfc3339_to_datetime
20
21
22 class Message(object):
23 """Messages can be published to a topic and received by subscribers.
24
25 See:
26 https://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage
27
28 :type data: bytes
29 :param data: the payload of the message.
30
31 :type message_id: string
32 :param message_id: An ID assigned to the message by the API.
33
34 :type attributes: dict or None
35 :param attributes: Extra metadata associated by the publisher with the
36 message.
37 """
38 _service_timestamp = None
39
40 def __init__(self, data, message_id, attributes=None):
41 self.data = data
42 self.message_id = message_id
43 self._attributes = attributes
44
45 @property
46 def attributes(self):
47 """Lazily-constructed attribute dictionary."""
48 if self._attributes is None:
49 self._attributes = {}
50 return self._attributes
51
52 @property
53 def timestamp(self):
54 """Return sortable timestamp from attributes, if passed.
55
56 Allows sorting messages in publication order (assuming consistent
57 clocks across all publishers).
58
59 :rtype: :class:`datetime.datetime`
60 :returns: timestamp (in UTC timezone) parsed from RFC 3339 timestamp
61 :raises: ValueError if timestamp not in ``attributes``, or if it does
62 not match the RFC 3339 format.
63 """
64 stamp = self.attributes.get('timestamp')
65 if stamp is None:
66 raise ValueError('No timestamp')
67 return _rfc3339_to_datetime(stamp)
68
69 @property
70 def service_timestamp(self):
71 """Return server-set timestamp.
72
73 :rtype: string
74 :returns: timestamp (in UTC timezone) in RFC 3339 format
75 """
76 return self._service_timestamp
77
78 @classmethod
79 def from_api_repr(cls, api_repr):
80 """Factory: construct message from API representation.
81
82 :type api_repr: dict or None
83 :param api_repr: The API representation of the message
84
85 :rtype: :class:`Message`
86 :returns: The message created from the response.
87 """
88 data = base64.b64decode(api_repr.get('data', b''))
89 instance = cls(
90 data=data, message_id=api_repr['messageId'],
91 attributes=api_repr.get('attributes'))
92 instance._service_timestamp = api_repr.get('publishTimestamp')
93 return instance
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pubsub/google/cloud/pubsub/message.py b/pubsub/google/cloud/pubsub/message.py
--- a/pubsub/google/cloud/pubsub/message.py
+++ b/pubsub/google/cloud/pubsub/message.py
@@ -89,5 +89,5 @@
instance = cls(
data=data, message_id=api_repr['messageId'],
attributes=api_repr.get('attributes'))
- instance._service_timestamp = api_repr.get('publishTimestamp')
+ instance._service_timestamp = api_repr.get('publishTime')
return instance
|
{"golden_diff": "diff --git a/pubsub/google/cloud/pubsub/message.py b/pubsub/google/cloud/pubsub/message.py\n--- a/pubsub/google/cloud/pubsub/message.py\n+++ b/pubsub/google/cloud/pubsub/message.py\n@@ -89,5 +89,5 @@\n instance = cls(\n data=data, message_id=api_repr['messageId'],\n attributes=api_repr.get('attributes'))\n- instance._service_timestamp = api_repr.get('publishTimestamp')\n+ instance._service_timestamp = api_repr.get('publishTime')\n return instance\n", "issue": "Pubsub message getting wrong attribute for publishTime\nAccording the [REST docs](https://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage), a `PubsubMessage` has the field `publishTime`\n\nIn [message.py](https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/pubsub/google/cloud/pubsub/message.py), `from_api_repr` is getting the field `publishTimestamp` below:\n\n```\ninstance._service_timestamp = api_repr.get('publishTimestamp')\n```\n\nThe current tests are self-confirming of this issue as they simply set up the api_repr with `publishTimestamp`\n\nA quick fix seems to adjust the following:\n**message.py**\n\n``` python\n @classmethod\n def from_api_repr(cls, api_repr):\n \"\"\"Factory: construct message from API representation.\n\n :type api_repr: dict or None\n :param api_repr: The API representation of the message\n\n :rtype: :class:`Message`\n :returns: The message created from the response.\n \"\"\"\n data = base64.b64decode(api_repr.get('data', b''))\n instance = cls(\n data=data, message_id=api_repr['messageId'],\n attributes=api_repr.get('attributes'))\n instance._service_timestamp = api_repr.get('publishTime')\n return instance\n```\n\n**test_message.py**\n\n``` python\n def test_from_api_repr_no_attributes(self):\n from base64 import b64encode as b64\n DATA = b'DEADBEEF'\n B64_DATA = b64(DATA)\n MESSAGE_ID = '12345'\n TIMESTAMP = '2016-03-18-19:38:22.001393427Z'\n api_repr = {\n 'data': B64_DATA,\n 'messageId': MESSAGE_ID,\n 'publishTime': TIMESTAMP,\n }\n message = self._getTargetClass().from_api_repr(api_repr)\n self.assertEqual(message.data, DATA)\n self.assertEqual(message.message_id, MESSAGE_ID)\n self.assertEqual(message.attributes, {})\n self.assertEqual(message.service_timestamp, TIMESTAMP)\n\n def test_from_api_repr_w_attributes(self):\n from base64 import b64encode as b64\n DATA = b'DEADBEEF'\n B64_DATA = b64(DATA)\n MESSAGE_ID = '12345'\n ATTRS = {'a': 'b'}\n TIMESTAMP = '2016-03-18-19:38:22.001393427Z'\n api_repr = {\n 'data': B64_DATA,\n 'messageId': MESSAGE_ID,\n 'publishTime': TIMESTAMP,\n 'attributes': ATTRS,\n }\n message = self._getTargetClass().from_api_repr(api_repr)\n self.assertEqual(message.data, DATA)\n self.assertEqual(message.message_id, MESSAGE_ID)\n self.assertEqual(message.service_timestamp, TIMESTAMP)\n self.assertEqual(message.attributes, ATTRS)\n```\n\nI don't currently have a contributor license signed, but will work on that. 
In the meantime, hoping that someone can pick this up.\n\n", "before_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Define API Topics.\"\"\"\n\nimport base64\n\nfrom google.cloud._helpers import _rfc3339_to_datetime\n\n\nclass Message(object):\n \"\"\"Messages can be published to a topic and received by subscribers.\n\n See:\n https://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage\n\n :type data: bytes\n :param data: the payload of the message.\n\n :type message_id: string\n :param message_id: An ID assigned to the message by the API.\n\n :type attributes: dict or None\n :param attributes: Extra metadata associated by the publisher with the\n message.\n \"\"\"\n _service_timestamp = None\n\n def __init__(self, data, message_id, attributes=None):\n self.data = data\n self.message_id = message_id\n self._attributes = attributes\n\n @property\n def attributes(self):\n \"\"\"Lazily-constructed attribute dictionary.\"\"\"\n if self._attributes is None:\n self._attributes = {}\n return self._attributes\n\n @property\n def timestamp(self):\n \"\"\"Return sortable timestamp from attributes, if passed.\n\n Allows sorting messages in publication order (assuming consistent\n clocks across all publishers).\n\n :rtype: :class:`datetime.datetime`\n :returns: timestamp (in UTC timezone) parsed from RFC 3339 timestamp\n :raises: ValueError if timestamp not in ``attributes``, or if it does\n not match the RFC 3339 format.\n \"\"\"\n stamp = self.attributes.get('timestamp')\n if stamp is None:\n raise ValueError('No timestamp')\n return _rfc3339_to_datetime(stamp)\n\n @property\n def service_timestamp(self):\n \"\"\"Return server-set timestamp.\n\n :rtype: string\n :returns: timestamp (in UTC timezone) in RFC 3339 format\n \"\"\"\n return self._service_timestamp\n\n @classmethod\n def from_api_repr(cls, api_repr):\n \"\"\"Factory: construct message from API representation.\n\n :type api_repr: dict or None\n :param api_repr: The API representation of the message\n\n :rtype: :class:`Message`\n :returns: The message created from the response.\n \"\"\"\n data = base64.b64decode(api_repr.get('data', b''))\n instance = cls(\n data=data, message_id=api_repr['messageId'],\n attributes=api_repr.get('attributes'))\n instance._service_timestamp = api_repr.get('publishTimestamp')\n return instance\n", "path": "pubsub/google/cloud/pubsub/message.py"}], "after_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under 
the License.\n\n\"\"\"Define API Topics.\"\"\"\n\nimport base64\n\nfrom google.cloud._helpers import _rfc3339_to_datetime\n\n\nclass Message(object):\n \"\"\"Messages can be published to a topic and received by subscribers.\n\n See:\n https://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage\n\n :type data: bytes\n :param data: the payload of the message.\n\n :type message_id: string\n :param message_id: An ID assigned to the message by the API.\n\n :type attributes: dict or None\n :param attributes: Extra metadata associated by the publisher with the\n message.\n \"\"\"\n _service_timestamp = None\n\n def __init__(self, data, message_id, attributes=None):\n self.data = data\n self.message_id = message_id\n self._attributes = attributes\n\n @property\n def attributes(self):\n \"\"\"Lazily-constructed attribute dictionary.\"\"\"\n if self._attributes is None:\n self._attributes = {}\n return self._attributes\n\n @property\n def timestamp(self):\n \"\"\"Return sortable timestamp from attributes, if passed.\n\n Allows sorting messages in publication order (assuming consistent\n clocks across all publishers).\n\n :rtype: :class:`datetime.datetime`\n :returns: timestamp (in UTC timezone) parsed from RFC 3339 timestamp\n :raises: ValueError if timestamp not in ``attributes``, or if it does\n not match the RFC 3339 format.\n \"\"\"\n stamp = self.attributes.get('timestamp')\n if stamp is None:\n raise ValueError('No timestamp')\n return _rfc3339_to_datetime(stamp)\n\n @property\n def service_timestamp(self):\n \"\"\"Return server-set timestamp.\n\n :rtype: string\n :returns: timestamp (in UTC timezone) in RFC 3339 format\n \"\"\"\n return self._service_timestamp\n\n @classmethod\n def from_api_repr(cls, api_repr):\n \"\"\"Factory: construct message from API representation.\n\n :type api_repr: dict or None\n :param api_repr: The API representation of the message\n\n :rtype: :class:`Message`\n :returns: The message created from the response.\n \"\"\"\n data = base64.b64decode(api_repr.get('data', b''))\n instance = cls(\n data=data, message_id=api_repr['messageId'],\n attributes=api_repr.get('attributes'))\n instance._service_timestamp = api_repr.get('publishTime')\n return instance\n", "path": "pubsub/google/cloud/pubsub/message.py"}]}
| 1,805 | 115 |
| gh_patches_debug_13455 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-3811 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
gcp serverless runtime error on implicit boto dependency
reported in gitter, gcp functions should not need to depend on boto3, looks like some of the securityhub work caused an implicit dependency on boto3.
```
textPayload: "ModuleNotFoundError: No module named 'boto3'" - Getting this error for the cloud function to stop a instance in GCP
instance-off
qte7iow5dhzi
Traceback (most recent call last): File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 346, in run_http_function result = _function_handler.invoke_user_function(flask.request) File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 217, in invoke_user_function return call_user_function(request_or_event) File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 210, in call_user_function return self._user_function(request_or_event) File "/user_code/main.py", line 21, in run from c7n_gcp.handler import run File "/user_code/c7n_gcp/handler.py", line 24, in <module> from c7n_gcp.entry import initialize_gcp File "/user_code/c7n_gcp/entry.py", line 18, in <module> import c7n_gcp.resources.bigquery File "/user_code/c7n_gcp/resources/bigquery.py", line 16, in <module> from c7n_gcp.query import QueryResourceManager, TypeInfo File "/user_code/c7n_gcp/query.py", line 23, in <module> from c7n.filters import FilterRegistry File "/user_code/c7n/filters/init.py", line 32, in <module> from .securityhub import SecurityHubFindingFilter File "/user_code/c7n/filters/securityhub.py", line 19, in <module> from c7n.resources import aws File "/user_code/c7n/resources/aws.py", line 31, in <module> import boto3 ModuleNotFoundError: No module named 'boto3
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `c7n/filters/securityhub.py`
Content:
```
1 # Copyright 2019 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from __future__ import absolute_import, division, print_function, unicode_literals
15
16 from c7n.utils import local_session, type_schema
17 from .core import Filter
18 from c7n.manager import resources
19 from c7n.resources import aws
20
21
22 class SecurityHubFindingFilter(Filter):
23 """Check if there are Security Hub Findings related to the resources
24 """
25 schema = type_schema(
26 'finding',
27 # Many folks do an aggregator region, allow them to use that
28 # for filtering.
29 region={'type': 'string'},
30 query={'type': 'object'})
31
32 permissions = ('securityhub:GetFindings',)
33 annotation_key = 'c7n:finding-filter'
34 query_shape = 'AwsSecurityFindingFilters'
35
36 def validate(self):
37 query = self.data.get('query')
38 if query:
39 aws.shape_validate(query, self.query_shape, 'securityhub')
40
41 def process(self, resources, event=None):
42 client = local_session(
43 self.manager.session_factory).client(
44 'securityhub', region_name=self.data.get('region'))
45 found = []
46 params = dict(self.data.get('query', {}))
47
48 for r_arn, resource in zip(self.manager.get_arns(resources), resources):
49 params['ResourceId'] = [{"Value": r_arn, "Comparison": "EQUALS"}]
50 findings = client.get_findings(Filters=params).get("Findings")
51 if len(findings) > 0:
52 resource[self.annotation_key] = findings
53 found.append(resource)
54 return found
55
56 @classmethod
57 def register_resources(klass, registry, resource_class):
58 """ meta model subscriber on resource registration.
59
60 SecurityHub Findings Filter
61 """
62 for rtype, resource_manager in registry.items():
63 if not resource_manager.has_arn():
64 continue
65 if 'post-finding' in resource_manager.action_registry:
66 continue
67 resource_class.filter_registry.register('finding', klass)
68
69
70 resources.subscribe(resources.EVENT_REGISTER, SecurityHubFindingFilter.register_resources)
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/c7n/filters/securityhub.py b/c7n/filters/securityhub.py
--- a/c7n/filters/securityhub.py
+++ b/c7n/filters/securityhub.py
@@ -16,7 +16,6 @@
from c7n.utils import local_session, type_schema
from .core import Filter
from c7n.manager import resources
-from c7n.resources import aws
class SecurityHubFindingFilter(Filter):
@@ -36,6 +35,7 @@
def validate(self):
query = self.data.get('query')
if query:
+ from c7n.resources import aws
aws.shape_validate(query, self.query_shape, 'securityhub')
def process(self, resources, event=None):
|
{"golden_diff": "diff --git a/c7n/filters/securityhub.py b/c7n/filters/securityhub.py\n--- a/c7n/filters/securityhub.py\n+++ b/c7n/filters/securityhub.py\n@@ -16,7 +16,6 @@\n from c7n.utils import local_session, type_schema\n from .core import Filter\n from c7n.manager import resources\n-from c7n.resources import aws\n \n \n class SecurityHubFindingFilter(Filter):\n@@ -36,6 +35,7 @@\n def validate(self):\n query = self.data.get('query')\n if query:\n+ from c7n.resources import aws\n aws.shape_validate(query, self.query_shape, 'securityhub')\n \n def process(self, resources, event=None):\n", "issue": "gcp serverless runtime error on implicit boto dependency\nreported in gitter, gcp functions should not need to depend on boto3, looks like some of the securityhub work caused an implicit dependency on boto3.\r\n\r\n```\r\ntextPayload: \"ModuleNotFoundError: No module named 'boto3'\" - Getting this error for the cloud function to stop a instance in GCP\r\ninstance-off\r\nqte7iow5dhzi\r\nTraceback (most recent call last): File \"/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py\", line 346, in run_http_function result = _function_handler.invoke_user_function(flask.request) File \"/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py\", line 217, in invoke_user_function return call_user_function(request_or_event) File \"/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py\", line 210, in call_user_function return self._user_function(request_or_event) File \"/user_code/main.py\", line 21, in run from c7n_gcp.handler import run File \"/user_code/c7n_gcp/handler.py\", line 24, in <module> from c7n_gcp.entry import initialize_gcp File \"/user_code/c7n_gcp/entry.py\", line 18, in <module> import c7n_gcp.resources.bigquery File \"/user_code/c7n_gcp/resources/bigquery.py\", line 16, in <module> from c7n_gcp.query import QueryResourceManager, TypeInfo File \"/user_code/c7n_gcp/query.py\", line 23, in <module> from c7n.filters import FilterRegistry File \"/user_code/c7n/filters/init.py\", line 32, in <module> from .securityhub import SecurityHubFindingFilter File \"/user_code/c7n/filters/securityhub.py\", line 19, in <module> from c7n.resources import aws File \"/user_code/c7n/resources/aws.py\", line 31, in <module> import boto3 ModuleNotFoundError: No module named 'boto3\r\n```\n", "before_files": [{"content": "# Copyright 2019 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom c7n.utils import local_session, type_schema\nfrom .core import Filter\nfrom c7n.manager import resources\nfrom c7n.resources import aws\n\n\nclass SecurityHubFindingFilter(Filter):\n \"\"\"Check if there are Security Hub Findings related to the resources\n \"\"\"\n schema = type_schema(\n 'finding',\n # Many folks do an aggregator region, allow them to use that\n # for filtering.\n region={'type': 'string'},\n query={'type': 'object'})\n\n permissions = 
('securityhub:GetFindings',)\n annotation_key = 'c7n:finding-filter'\n query_shape = 'AwsSecurityFindingFilters'\n\n def validate(self):\n query = self.data.get('query')\n if query:\n aws.shape_validate(query, self.query_shape, 'securityhub')\n\n def process(self, resources, event=None):\n client = local_session(\n self.manager.session_factory).client(\n 'securityhub', region_name=self.data.get('region'))\n found = []\n params = dict(self.data.get('query', {}))\n\n for r_arn, resource in zip(self.manager.get_arns(resources), resources):\n params['ResourceId'] = [{\"Value\": r_arn, \"Comparison\": \"EQUALS\"}]\n findings = client.get_findings(Filters=params).get(\"Findings\")\n if len(findings) > 0:\n resource[self.annotation_key] = findings\n found.append(resource)\n return found\n\n @classmethod\n def register_resources(klass, registry, resource_class):\n \"\"\" meta model subscriber on resource registration.\n\n SecurityHub Findings Filter\n \"\"\"\n for rtype, resource_manager in registry.items():\n if not resource_manager.has_arn():\n continue\n if 'post-finding' in resource_manager.action_registry:\n continue\n resource_class.filter_registry.register('finding', klass)\n\n\nresources.subscribe(resources.EVENT_REGISTER, SecurityHubFindingFilter.register_resources)\n", "path": "c7n/filters/securityhub.py"}], "after_files": [{"content": "# Copyright 2019 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom c7n.utils import local_session, type_schema\nfrom .core import Filter\nfrom c7n.manager import resources\n\n\nclass SecurityHubFindingFilter(Filter):\n \"\"\"Check if there are Security Hub Findings related to the resources\n \"\"\"\n schema = type_schema(\n 'finding',\n # Many folks do an aggregator region, allow them to use that\n # for filtering.\n region={'type': 'string'},\n query={'type': 'object'})\n\n permissions = ('securityhub:GetFindings',)\n annotation_key = 'c7n:finding-filter'\n query_shape = 'AwsSecurityFindingFilters'\n\n def validate(self):\n query = self.data.get('query')\n if query:\n from c7n.resources import aws\n aws.shape_validate(query, self.query_shape, 'securityhub')\n\n def process(self, resources, event=None):\n client = local_session(\n self.manager.session_factory).client(\n 'securityhub', region_name=self.data.get('region'))\n found = []\n params = dict(self.data.get('query', {}))\n\n for r_arn, resource in zip(self.manager.get_arns(resources), resources):\n params['ResourceId'] = [{\"Value\": r_arn, \"Comparison\": \"EQUALS\"}]\n findings = client.get_findings(Filters=params).get(\"Findings\")\n if len(findings) > 0:\n resource[self.annotation_key] = findings\n found.append(resource)\n return found\n\n @classmethod\n def register_resources(klass, registry, resource_class):\n \"\"\" meta model subscriber on resource registration.\n\n SecurityHub Findings Filter\n \"\"\"\n for rtype, resource_manager in registry.items():\n if not 
resource_manager.has_arn():\n continue\n if 'post-finding' in resource_manager.action_registry:\n continue\n resource_class.filter_registry.register('finding', klass)\n\n\nresources.subscribe(resources.EVENT_REGISTER, SecurityHubFindingFilter.register_resources)\n", "path": "c7n/filters/securityhub.py"}]}
| 1,411 | 163 |
| gh_patches_debug_20277 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1080 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reduce detail level of timestamp on posts
**Is your feature request related to a problem? Please describe.**
I think the time when a post was posted is a tad too detailed. For posts in the last 24h, it changes every time you refresh.

**Describe the solution you'd like**
I think the firstmost unit would be enough.
Also, after a few days (I suggest 3), the date (Apr 28) rather than "2 weeks(, 4 days in the current version)" seems a bit more helpful. After 1 year, the date could be shown in "Apr 2021",
This is subjective of course, but imho Bookwyrm is a platform where the "when" doesn't really matter (in comparison to e.g. Mastodon where many are posting news and other stuff where the temporal context is more important).
**Describe alternatives you've considered**
Hovering over the time could show the exact time as a tooltip. I think of this rather as an addition than an alternative and think both would complement each other.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/templatetags/status_display.py`
Content:
```
1 """ template filters """
2 from django import template
3
4 from bookwyrm import models
5 from bookwyrm.templatetags.utilities import get_user_identifier
6
7
8 register = template.Library()
9
10
11 @register.filter(name="mentions")
12 def get_mentions(status, user):
13 """people to @ in a reply: the parent and all mentions"""
14 mentions = set([status.user] + list(status.mention_users.all()))
15 return (
16 " ".join("@" + get_user_identifier(m) for m in mentions if not m == user) + " "
17 )
18
19
20 @register.filter(name="replies")
21 def get_replies(status):
22 """get all direct replies to a status"""
23 # TODO: this limit could cause problems
24 return models.Status.objects.filter(
25 reply_parent=status,
26 deleted=False,
27 ).select_subclasses()[:10]
28
29
30 @register.filter(name="parent")
31 def get_parent(status):
32 """get the reply parent for a status"""
33 return (
34 models.Status.objects.filter(id=status.reply_parent_id)
35 .select_subclasses()
36 .get()
37 )
38
39
40 @register.filter(name="boosted_status")
41 def get_boosted(boost):
42 """load a boosted status. have to do this or it won't get foreign keys"""
43 return models.Status.objects.select_subclasses().get(id=boost.boosted_status.id)
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bookwyrm/templatetags/status_display.py b/bookwyrm/templatetags/status_display.py
--- a/bookwyrm/templatetags/status_display.py
+++ b/bookwyrm/templatetags/status_display.py
@@ -1,6 +1,8 @@
""" template filters """
+from dateutil.relativedelta import relativedelta
from django import template
-
+from django.contrib.humanize.templatetags.humanize import naturaltime, naturalday
+from django.utils import timezone
from bookwyrm import models
from bookwyrm.templatetags.utilities import get_user_identifier
@@ -41,3 +43,17 @@
def get_boosted(boost):
"""load a boosted status. have to do this or it won't get foreign keys"""
return models.Status.objects.select_subclasses().get(id=boost.boosted_status.id)
+
+
[email protected](name="published_date")
+def get_published_date(date):
+ """less verbose combo of humanize filters"""
+ if not date:
+ return ""
+ now = timezone.now()
+ delta = relativedelta(now, date)
+ if delta.years:
+ return naturalday(date)
+ if delta.days:
+ return naturalday(date, "M j")
+ return naturaltime(date)
|
{"golden_diff": "diff --git a/bookwyrm/templatetags/status_display.py b/bookwyrm/templatetags/status_display.py\n--- a/bookwyrm/templatetags/status_display.py\n+++ b/bookwyrm/templatetags/status_display.py\n@@ -1,6 +1,8 @@\n \"\"\" template filters \"\"\"\n+from dateutil.relativedelta import relativedelta\n from django import template\n-\n+from django.contrib.humanize.templatetags.humanize import naturaltime, naturalday\n+from django.utils import timezone\n from bookwyrm import models\n from bookwyrm.templatetags.utilities import get_user_identifier\n \n@@ -41,3 +43,17 @@\n def get_boosted(boost):\n \"\"\"load a boosted status. have to do this or it won't get foreign keys\"\"\"\n return models.Status.objects.select_subclasses().get(id=boost.boosted_status.id)\n+\n+\[email protected](name=\"published_date\")\n+def get_published_date(date):\n+ \"\"\"less verbose combo of humanize filters\"\"\"\n+ if not date:\n+ return \"\"\n+ now = timezone.now()\n+ delta = relativedelta(now, date)\n+ if delta.years:\n+ return naturalday(date)\n+ if delta.days:\n+ return naturalday(date, \"M j\")\n+ return naturaltime(date)\n", "issue": "Reduce detail level of timestamp on posts\n**Is your feature request related to a problem? Please describe.**\r\nI think the time when a post was posted is a tad too detailed. For posts in the last 24h, it changes every time you refresh.\r\n\r\n\r\n**Describe the solution you'd like**\r\nI think the firstmost unit would be enough.\r\n\r\nAlso, after a few days (I suggest 3), the date (Apr 28) rather than \"2 weeks(, 4 days in the current version)\" seems a bit more helpful. After 1 year, the date could be shown in \"Apr 2021\",\r\n\r\nThis is subjective of course, but imho Bookwyrm is a platform where the \"when\" doesn't really matter (in comparison to e.g. Mastodon where many are posting news and other stuff where the temporal context is more important). \r\n\r\n**Describe alternatives you've considered**\r\nHovering over the time could show the exact time as a tooltip. I think of this rather as an addition than an alternative and think both would complement each other.\n", "before_files": [{"content": "\"\"\" template filters \"\"\"\nfrom django import template\n\nfrom bookwyrm import models\nfrom bookwyrm.templatetags.utilities import get_user_identifier\n\n\nregister = template.Library()\n\n\[email protected](name=\"mentions\")\ndef get_mentions(status, user):\n \"\"\"people to @ in a reply: the parent and all mentions\"\"\"\n mentions = set([status.user] + list(status.mention_users.all()))\n return (\n \" \".join(\"@\" + get_user_identifier(m) for m in mentions if not m == user) + \" \"\n )\n\n\[email protected](name=\"replies\")\ndef get_replies(status):\n \"\"\"get all direct replies to a status\"\"\"\n # TODO: this limit could cause problems\n return models.Status.objects.filter(\n reply_parent=status,\n deleted=False,\n ).select_subclasses()[:10]\n\n\[email protected](name=\"parent\")\ndef get_parent(status):\n \"\"\"get the reply parent for a status\"\"\"\n return (\n models.Status.objects.filter(id=status.reply_parent_id)\n .select_subclasses()\n .get()\n )\n\n\[email protected](name=\"boosted_status\")\ndef get_boosted(boost):\n \"\"\"load a boosted status. 
have to do this or it won't get foreign keys\"\"\"\n return models.Status.objects.select_subclasses().get(id=boost.boosted_status.id)\n", "path": "bookwyrm/templatetags/status_display.py"}], "after_files": [{"content": "\"\"\" template filters \"\"\"\nfrom dateutil.relativedelta import relativedelta\nfrom django import template\nfrom django.contrib.humanize.templatetags.humanize import naturaltime, naturalday\nfrom django.utils import timezone\nfrom bookwyrm import models\nfrom bookwyrm.templatetags.utilities import get_user_identifier\n\n\nregister = template.Library()\n\n\[email protected](name=\"mentions\")\ndef get_mentions(status, user):\n \"\"\"people to @ in a reply: the parent and all mentions\"\"\"\n mentions = set([status.user] + list(status.mention_users.all()))\n return (\n \" \".join(\"@\" + get_user_identifier(m) for m in mentions if not m == user) + \" \"\n )\n\n\[email protected](name=\"replies\")\ndef get_replies(status):\n \"\"\"get all direct replies to a status\"\"\"\n # TODO: this limit could cause problems\n return models.Status.objects.filter(\n reply_parent=status,\n deleted=False,\n ).select_subclasses()[:10]\n\n\[email protected](name=\"parent\")\ndef get_parent(status):\n \"\"\"get the reply parent for a status\"\"\"\n return (\n models.Status.objects.filter(id=status.reply_parent_id)\n .select_subclasses()\n .get()\n )\n\n\[email protected](name=\"boosted_status\")\ndef get_boosted(boost):\n \"\"\"load a boosted status. have to do this or it won't get foreign keys\"\"\"\n return models.Status.objects.select_subclasses().get(id=boost.boosted_status.id)\n\n\[email protected](name=\"published_date\")\ndef get_published_date(date):\n \"\"\"less verbose combo of humanize filters\"\"\"\n if not date:\n return \"\"\n now = timezone.now()\n delta = relativedelta(now, date)\n if delta.years:\n return naturalday(date)\n if delta.days:\n return naturalday(date, \"M j\")\n return naturaltime(date)\n", "path": "bookwyrm/templatetags/status_display.py"}]}
| 910 | 291 |
gh_patches_debug_35071
|
rasdani/github-patches
|
git_diff
|
microsoft__playwright-python-53
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Auto release on PyPi on tags
General interest in that? Should be pretty easy with GitHub Actions, only have to set a PyPI API key on your end.
Example: https://github.com/microsoft/playwright-python/new/master?filename=.github%2Fworkflows%2Fpython-publish.yml&workflow_template=python-publish
--- END ISSUE ---
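The Python-side piece of an automated tag release is tag-derived versioning, so nothing in the repository has to be bumped by hand. A minimal sketch with setuptools_scm (the keys mirror the patch further down in this record; treat the file paths as illustrative):

```python
# Sketch: derive the package version from the latest git tag via setuptools_scm,
# so a CI job triggered on a tag can build and upload without editing setup.py.
import setuptools

setuptools.setup(
    name="playwright",
    use_scm_version={
        "version_scheme": "post-release",
        "write_to": "playwright/_repo_version.py",  # generated at build time
        "write_to_template": 'version = "{version}"\n',
    },
    setup_requires=["setuptools_scm"],
)
```

The upload itself would then live in a GitHub Actions workflow that runs on tag pushes and uses a PyPI API token stored as a repository secret.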
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `upload_package.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import subprocess
16
17 subprocess.run("python -m twine upload dist/*", shell=True)
18
```
Path: `setup.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import setuptools
16
17 with open("README.md", "r", encoding="utf-8") as fh:
18 long_description = fh.read()
19
20 setuptools.setup(
21 name="playwright",
22 version="0.0.3",
23 author="Microsoft Corporation",
24 author_email="",
25 description="A high-level API to automate web browsers",
26 long_description=long_description,
27 long_description_content_type="text/markdown",
28 url="https://github.com/Microsoft/playwright-python",
29 packages=setuptools.find_packages(),
30 include_package_data=True,
31 install_requires=["pyee", "typing-extensions",],
32 classifiers=[
33 "Topic :: Software Development :: Testing",
34 "Topic :: Internet :: WWW/HTTP :: Browsers",
35 "Intended Audience :: Developers",
36 "Programming Language :: Python :: 3",
37 "Programming Language :: Python :: 3.7",
38 "Programming Language :: Python :: 3.8",
39 "License :: OSI Approved :: Apache Software License",
40 "Operating System :: OS Independent",
41 ],
42 python_requires=">=3.7",
43 )
44
```
Path: `playwright/__init__.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from playwright.main import playwright_object
16 import playwright.helper as helper
17
18 chromium = playwright_object.chromium
19 firefox = playwright_object.firefox
20 webkit = playwright_object.webkit
21 devices = playwright_object.devices
22 browser_types = playwright_object.browser_types
23 Error = helper.Error
24 TimeoutError = helper.TimeoutError
25
26 __all__ = [
27 "browser_types",
28 "chromium",
29 "firefox",
30 "webkit",
31 "devices",
32 "Error",
33 "TimeoutError",
34 ]
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/playwright/__init__.py b/playwright/__init__.py
--- a/playwright/__init__.py
+++ b/playwright/__init__.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from playwright._repo_version import version as __version__ # noqa:F401
from playwright.main import playwright_object
import playwright.helper as helper
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -19,16 +19,15 @@
setuptools.setup(
name="playwright",
- version="0.0.3",
author="Microsoft Corporation",
author_email="",
description="A high-level API to automate web browsers",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Microsoft/playwright-python",
- packages=setuptools.find_packages(),
+ packages=["playwright"],
include_package_data=True,
- install_requires=["pyee", "typing-extensions",],
+ install_requires=["pyee", "typing-extensions"],
classifiers=[
"Topic :: Software Development :: Testing",
"Topic :: Internet :: WWW/HTTP :: Browsers",
@@ -40,4 +39,10 @@
"Operating System :: OS Independent",
],
python_requires=">=3.7",
+ use_scm_version={
+ "version_scheme": "post-release",
+ "write_to": "playwright/_repo_version.py",
+ "write_to_template": 'version = "{version}"\n',
+ },
+ setup_requires=["setuptools_scm"],
)
diff --git a/upload_package.py b/upload_package.py
deleted file mode 100644
--- a/upload_package.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) Microsoft Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import subprocess
-
-subprocess.run("python -m twine upload dist/*", shell=True)
|
{"golden_diff": "diff --git a/playwright/__init__.py b/playwright/__init__.py\n--- a/playwright/__init__.py\n+++ b/playwright/__init__.py\n@@ -12,6 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+from playwright._repo_version import version as __version__ # noqa:F401\n from playwright.main import playwright_object\n import playwright.helper as helper\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -19,16 +19,15 @@\n \n setuptools.setup(\n name=\"playwright\",\n- version=\"0.0.3\",\n author=\"Microsoft Corporation\",\n author_email=\"\",\n description=\"A high-level API to automate web browsers\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Microsoft/playwright-python\",\n- packages=setuptools.find_packages(),\n+ packages=[\"playwright\"],\n include_package_data=True,\n- install_requires=[\"pyee\", \"typing-extensions\",],\n+ install_requires=[\"pyee\", \"typing-extensions\"],\n classifiers=[\n \"Topic :: Software Development :: Testing\",\n \"Topic :: Internet :: WWW/HTTP :: Browsers\",\n@@ -40,4 +39,10 @@\n \"Operating System :: OS Independent\",\n ],\n python_requires=\">=3.7\",\n+ use_scm_version={\n+ \"version_scheme\": \"post-release\",\n+ \"write_to\": \"playwright/_repo_version.py\",\n+ \"write_to_template\": 'version = \"{version}\"\\n',\n+ },\n+ setup_requires=[\"setuptools_scm\"],\n )\ndiff --git a/upload_package.py b/upload_package.py\ndeleted file mode 100644\n--- a/upload_package.py\n+++ /dev/null\n@@ -1,17 +0,0 @@\n-# Copyright (c) Microsoft Corporation.\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-\n-import subprocess\n-\n-subprocess.run(\"python -m twine upload dist/*\", shell=True)\n", "issue": "Auto release on PyPi on tags\nGeneral interest in that? 
Should be pretty easy with GitHub Actions, only have to set the a Pypi API key on your end.\r\n\r\nExample: https://github.com/microsoft/playwright-python/new/master?filename=.github%2Fworkflows%2Fpython-publish.yml&workflow_template=python-publish\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport subprocess\n\nsubprocess.run(\"python -m twine upload dist/*\", shell=True)\n", "path": "upload_package.py"}, {"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"playwright\",\n version=\"0.0.3\",\n author=\"Microsoft Corporation\",\n author_email=\"\",\n description=\"A high-level API to automate web browsers\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Microsoft/playwright-python\",\n packages=setuptools.find_packages(),\n include_package_data=True,\n install_requires=[\"pyee\", \"typing-extensions\",],\n classifiers=[\n \"Topic :: Software Development :: Testing\",\n \"Topic :: Internet :: WWW/HTTP :: Browsers\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires=\">=3.7\",\n)\n", "path": "setup.py"}, {"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom playwright.main import playwright_object\nimport playwright.helper as helper\n\nchromium = playwright_object.chromium\nfirefox = playwright_object.firefox\nwebkit = playwright_object.webkit\ndevices = playwright_object.devices\nbrowser_types = playwright_object.browser_types\nError = helper.Error\nTimeoutError = 
helper.TimeoutError\n\n__all__ = [\n \"browser_types\",\n \"chromium\",\n \"firefox\",\n \"webkit\",\n \"devices\",\n \"Error\",\n \"TimeoutError\",\n]\n", "path": "playwright/__init__.py"}], "after_files": [{"content": null, "path": "upload_package.py"}, {"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"playwright\",\n author=\"Microsoft Corporation\",\n author_email=\"\",\n description=\"A high-level API to automate web browsers\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Microsoft/playwright-python\",\n packages=[\"playwright\"],\n include_package_data=True,\n install_requires=[\"pyee\", \"typing-extensions\"],\n classifiers=[\n \"Topic :: Software Development :: Testing\",\n \"Topic :: Internet :: WWW/HTTP :: Browsers\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires=\">=3.7\",\n use_scm_version={\n \"version_scheme\": \"post-release\",\n \"write_to\": \"playwright/_repo_version.py\",\n \"write_to_template\": 'version = \"{version}\"\\n',\n },\n setup_requires=[\"setuptools_scm\"],\n)\n", "path": "setup.py"}, {"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom playwright._repo_version import version as __version__ # noqa:F401\nfrom playwright.main import playwright_object\nimport playwright.helper as helper\n\nchromium = playwright_object.chromium\nfirefox = playwright_object.firefox\nwebkit = playwright_object.webkit\ndevices = playwright_object.devices\nbrowser_types = playwright_object.browser_types\nError = helper.Error\nTimeoutError = helper.TimeoutError\n\n__all__ = [\n \"browser_types\",\n \"chromium\",\n \"firefox\",\n \"webkit\",\n \"devices\",\n \"Error\",\n \"TimeoutError\",\n]\n", "path": "playwright/__init__.py"}]}
| 1,251 | 579 |
gh_patches_debug_36108
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-1463
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
User creation in the admin is broken
Sentry Issue: [CONCREXIT-3F](https://sentry.io/organizations/thalia/issues/1844597243/?referrer=github_integration)
```
FieldError: Unknown field(s) (password2, password1) specified for User
File "django/contrib/admin/options.py", line 702, in get_form
return modelform_factory(self.model, **defaults)
File "django/forms/models.py", line 554, in modelform_factory
return type(form)(class_name, (form,), form_class_attrs)
File "django/forms/models.py", line 267, in __new__
raise FieldError(message)
FieldError: Unknown field(s) (password2, password1) specified for User. Check fields/fieldsets/exclude attributes of class UserAdmin.
(15 additional frame(s) were not displayed)
...
File "django/utils/decorators.py", line 130, in _wrapped_view
response = view_func(request, *args, **kwargs)
File "django/contrib/admin/options.py", line 1522, in changeform_view
return self._changeform_view(request, object_id, form_url, extra_context)
File "django/contrib/admin/options.py", line 1555, in _changeform_view
ModelForm = self.get_form(request, obj, change=not add)
File "django/contrib/auth/admin.py", line 80, in get_form
return super().get_form(request, obj, **defaults)
File "django/contrib/admin/options.py", line 704, in get_form
raise FieldError(
```
--- END ISSUE ---
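The traceback comes from the add view: `UserAdmin` builds its add form from fieldsets that list `password1` and `password2`, so a custom creation form that shadows those fields with `None` leaves `modelform_factory` with field names it cannot resolve, hence the `FieldError`. A minimal sketch of a creation form that avoids the shadowing (the `Meta` inheritance here is illustrative, not the project's exact code):

```python
# Sketch: keep the stock password1/password2 fields from UserCreationForm and
# only customize the username handling, so UserAdmin.get_form can still build
# the add form from its fieldsets without raising FieldError.
from django.contrib.auth.forms import UserCreationForm as BaseUserCreationForm


class UserCreationForm(BaseUserCreationForm):
    """Lowercase the username on creation; leave password handling to the admin."""

    def clean(self):
        if "username" in self.cleaned_data:
            self.cleaned_data["username"] = self.cleaned_data["username"].lower()
        return super().clean()

    class Meta(BaseUserCreationForm.Meta):
        fields = ("username", "first_name", "last_name")
```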
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/members/forms.py`
Content:
```
1 """Forms defined by the members package."""
2 from django import forms
3 from django.conf import settings
4 from django.contrib.auth import get_user_model
5 from django.contrib.auth.forms import UserChangeForm as BaseUserChangeForm
6 from django.contrib.auth.forms import UserCreationForm as BaseUserCreationForm
7 from django.core.validators import RegexValidator
8 from django.utils.translation import gettext_lazy as _
9
10 from members import emails
11 from .models import Profile
12
13
14 class ProfileForm(forms.ModelForm):
15 """Form with all the user editable fields of a Profile model."""
16
17 class Meta:
18 fields = [
19 "show_birthday",
20 "address_street",
21 "address_street2",
22 "address_postal_code",
23 "address_city",
24 "address_country",
25 "phone_number",
26 "emergency_contact",
27 "emergency_contact_phone_number",
28 "website",
29 "profile_description",
30 "nickname",
31 "initials",
32 "display_name_preference",
33 "photo",
34 "receive_optin",
35 "receive_newsletter",
36 "receive_magazine",
37 "email_gsuite_only",
38 ]
39 model = Profile
40
41 def __init__(self, *args, **kwargs):
42 super().__init__(*args, **kwargs)
43 if not kwargs["instance"].user.is_staff:
44 self.fields["email_gsuite_only"].widget = self.fields[
45 "email_gsuite_only"
46 ].hidden_widget()
47
48
49 class UserCreationForm(BaseUserCreationForm):
50 """Custom Form that removes the password fields from user creation and sends a welcome message when a user is created."""
51
52 # Don't forget to edit the formset in admin.py!
53 # This is a stupid quirk of the user admin.
54
55 # shadow the password fields to prevent validation errors,
56 # since we generate the passwords dynamically.
57 password1 = None
58 password2 = None
59
60 def __init__(self, *args, **kwargs):
61 super().__init__(*args, **kwargs)
62 for field in ("email", "first_name", "last_name"):
63 self.fields[field].required = True
64
65 send_welcome_email = forms.BooleanField(
66 label=_("Send welcome email"),
67 help_text=_("This email will include the generated password"),
68 required=False,
69 initial=True,
70 )
71
72 def clean(self):
73 if "username" in self.cleaned_data:
74 self.cleaned_data["username"] = self.cleaned_data["username"].lower()
75 super().clean()
76
77 def save(self, commit=True):
78 password = get_user_model().objects.make_random_password(length=15)
79 # pass the password on as if it was filled in, so that save() works
80 self.cleaned_data["password1"] = password
81 user = super().save(commit=False)
82 user.set_password(password)
83 if commit:
84 user.save()
85 if self.cleaned_data["send_welcome_email"]:
86 language = settings.LANGUAGE_CODE
87 emails.send_welcome_message(user, password, language)
88 return user
89
90 class Meta:
91 fields = ("username", "first_name", "last_name", "send_welcome_email")
92
93
94 class UserChangeForm(BaseUserChangeForm):
95 """Custom user edit form that adds fields for first/last name and email.
96
97 It also force-lowercases the username on save
98 """
99
100 username = forms.CharField(
101 label=_("Username"),
102 required=True,
103 help_text=_("Required. 64 characters or fewer. Letters and digits only."),
104 widget=forms.TextInput(attrs={"class": "vTextField", "maxlength": 64}),
105 validators=[
106 RegexValidator(
107 regex="^[a-zA-Z0-9]{1,64}$",
108 message=_(
109 "Please use 64 characters or fewer. Letters and digits only."
110 ),
111 )
112 ],
113 )
114
115 first_name = forms.CharField(
116 label=_("First name"),
117 required=True,
118 widget=forms.TextInput(attrs={"class": "vTextField", "maxlength": 30}),
119 )
120 last_name = forms.CharField(
121 label=_("Last name"),
122 required=True,
123 widget=forms.TextInput(attrs={"class": "vTextField", "maxlength": 150}),
124 )
125 email = forms.CharField(
126 label=_("Email address"),
127 required=True,
128 widget=forms.EmailInput(attrs={"class": "vTextField", "maxlength": 254}),
129 )
130
131 def clean(self):
132 if "username" in self.cleaned_data:
133 self.cleaned_data["username"] = self.cleaned_data["username"].lower()
134 super().clean()
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/members/forms.py b/website/members/forms.py
--- a/website/members/forms.py
+++ b/website/members/forms.py
@@ -1,13 +1,10 @@
"""Forms defined by the members package."""
from django import forms
-from django.conf import settings
-from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserChangeForm as BaseUserChangeForm
from django.contrib.auth.forms import UserCreationForm as BaseUserCreationForm
from django.core.validators import RegexValidator
from django.utils.translation import gettext_lazy as _
-from members import emails
from .models import Profile
@@ -47,48 +44,15 @@
class UserCreationForm(BaseUserCreationForm):
- """Custom Form that removes the password fields from user creation and sends a welcome message when a user is created."""
-
- # Don't forget to edit the formset in admin.py!
- # This is a stupid quirk of the user admin.
-
- # shadow the password fields to prevent validation errors,
- # since we generate the passwords dynamically.
- password1 = None
- password2 = None
-
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
- for field in ("email", "first_name", "last_name"):
- self.fields[field].required = True
-
- send_welcome_email = forms.BooleanField(
- label=_("Send welcome email"),
- help_text=_("This email will include the generated password"),
- required=False,
- initial=True,
- )
+ """Custom Form that lowercases the username on creation."""
def clean(self):
if "username" in self.cleaned_data:
self.cleaned_data["username"] = self.cleaned_data["username"].lower()
super().clean()
- def save(self, commit=True):
- password = get_user_model().objects.make_random_password(length=15)
- # pass the password on as if it was filled in, so that save() works
- self.cleaned_data["password1"] = password
- user = super().save(commit=False)
- user.set_password(password)
- if commit:
- user.save()
- if self.cleaned_data["send_welcome_email"]:
- language = settings.LANGUAGE_CODE
- emails.send_welcome_message(user, password, language)
- return user
-
class Meta:
- fields = ("username", "first_name", "last_name", "send_welcome_email")
+ fields = ("username", "first_name", "last_name")
class UserChangeForm(BaseUserChangeForm):
|
{"golden_diff": "diff --git a/website/members/forms.py b/website/members/forms.py\n--- a/website/members/forms.py\n+++ b/website/members/forms.py\n@@ -1,13 +1,10 @@\n \"\"\"Forms defined by the members package.\"\"\"\n from django import forms\n-from django.conf import settings\n-from django.contrib.auth import get_user_model\n from django.contrib.auth.forms import UserChangeForm as BaseUserChangeForm\n from django.contrib.auth.forms import UserCreationForm as BaseUserCreationForm\n from django.core.validators import RegexValidator\n from django.utils.translation import gettext_lazy as _\n \n-from members import emails\n from .models import Profile\n \n \n@@ -47,48 +44,15 @@\n \n \n class UserCreationForm(BaseUserCreationForm):\n- \"\"\"Custom Form that removes the password fields from user creation and sends a welcome message when a user is created.\"\"\"\n-\n- # Don't forget to edit the formset in admin.py!\n- # This is a stupid quirk of the user admin.\n-\n- # shadow the password fields to prevent validation errors,\n- # since we generate the passwords dynamically.\n- password1 = None\n- password2 = None\n-\n- def __init__(self, *args, **kwargs):\n- super().__init__(*args, **kwargs)\n- for field in (\"email\", \"first_name\", \"last_name\"):\n- self.fields[field].required = True\n-\n- send_welcome_email = forms.BooleanField(\n- label=_(\"Send welcome email\"),\n- help_text=_(\"This email will include the generated password\"),\n- required=False,\n- initial=True,\n- )\n+ \"\"\"Custom Form that lowercases the username on creation.\"\"\"\n \n def clean(self):\n if \"username\" in self.cleaned_data:\n self.cleaned_data[\"username\"] = self.cleaned_data[\"username\"].lower()\n super().clean()\n \n- def save(self, commit=True):\n- password = get_user_model().objects.make_random_password(length=15)\n- # pass the password on as if it was filled in, so that save() works\n- self.cleaned_data[\"password1\"] = password\n- user = super().save(commit=False)\n- user.set_password(password)\n- if commit:\n- user.save()\n- if self.cleaned_data[\"send_welcome_email\"]:\n- language = settings.LANGUAGE_CODE\n- emails.send_welcome_message(user, password, language)\n- return user\n-\n class Meta:\n- fields = (\"username\", \"first_name\", \"last_name\", \"send_welcome_email\")\n+ fields = (\"username\", \"first_name\", \"last_name\")\n \n \n class UserChangeForm(BaseUserChangeForm):\n", "issue": "User creation in the admin is broken\nSentry Issue: [CONCREXIT-3F](https://sentry.io/organizations/thalia/issues/1844597243/?referrer=github_integration)\n\n```\nFieldError: Unknown field(s) (password2, password1) specified for User\n File \"django/contrib/admin/options.py\", line 702, in get_form\n return modelform_factory(self.model, **defaults)\n File \"django/forms/models.py\", line 554, in modelform_factory\n return type(form)(class_name, (form,), form_class_attrs)\n File \"django/forms/models.py\", line 267, in __new__\n raise FieldError(message)\n\nFieldError: Unknown field(s) (password2, password1) specified for User. 
Check fields/fieldsets/exclude attributes of class UserAdmin.\n(15 additional frame(s) were not displayed)\n...\n File \"django/utils/decorators.py\", line 130, in _wrapped_view\n response = view_func(request, *args, **kwargs)\n File \"django/contrib/admin/options.py\", line 1522, in changeform_view\n return self._changeform_view(request, object_id, form_url, extra_context)\n File \"django/contrib/admin/options.py\", line 1555, in _changeform_view\n ModelForm = self.get_form(request, obj, change=not add)\n File \"django/contrib/auth/admin.py\", line 80, in get_form\n return super().get_form(request, obj, **defaults)\n File \"django/contrib/admin/options.py\", line 704, in get_form\n raise FieldError(\n```\n", "before_files": [{"content": "\"\"\"Forms defined by the members package.\"\"\"\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.forms import UserChangeForm as BaseUserChangeForm\nfrom django.contrib.auth.forms import UserCreationForm as BaseUserCreationForm\nfrom django.core.validators import RegexValidator\nfrom django.utils.translation import gettext_lazy as _\n\nfrom members import emails\nfrom .models import Profile\n\n\nclass ProfileForm(forms.ModelForm):\n \"\"\"Form with all the user editable fields of a Profile model.\"\"\"\n\n class Meta:\n fields = [\n \"show_birthday\",\n \"address_street\",\n \"address_street2\",\n \"address_postal_code\",\n \"address_city\",\n \"address_country\",\n \"phone_number\",\n \"emergency_contact\",\n \"emergency_contact_phone_number\",\n \"website\",\n \"profile_description\",\n \"nickname\",\n \"initials\",\n \"display_name_preference\",\n \"photo\",\n \"receive_optin\",\n \"receive_newsletter\",\n \"receive_magazine\",\n \"email_gsuite_only\",\n ]\n model = Profile\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not kwargs[\"instance\"].user.is_staff:\n self.fields[\"email_gsuite_only\"].widget = self.fields[\n \"email_gsuite_only\"\n ].hidden_widget()\n\n\nclass UserCreationForm(BaseUserCreationForm):\n \"\"\"Custom Form that removes the password fields from user creation and sends a welcome message when a user is created.\"\"\"\n\n # Don't forget to edit the formset in admin.py!\n # This is a stupid quirk of the user admin.\n\n # shadow the password fields to prevent validation errors,\n # since we generate the passwords dynamically.\n password1 = None\n password2 = None\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field in (\"email\", \"first_name\", \"last_name\"):\n self.fields[field].required = True\n\n send_welcome_email = forms.BooleanField(\n label=_(\"Send welcome email\"),\n help_text=_(\"This email will include the generated password\"),\n required=False,\n initial=True,\n )\n\n def clean(self):\n if \"username\" in self.cleaned_data:\n self.cleaned_data[\"username\"] = self.cleaned_data[\"username\"].lower()\n super().clean()\n\n def save(self, commit=True):\n password = get_user_model().objects.make_random_password(length=15)\n # pass the password on as if it was filled in, so that save() works\n self.cleaned_data[\"password1\"] = password\n user = super().save(commit=False)\n user.set_password(password)\n if commit:\n user.save()\n if self.cleaned_data[\"send_welcome_email\"]:\n language = settings.LANGUAGE_CODE\n emails.send_welcome_message(user, password, language)\n return user\n\n class Meta:\n fields = (\"username\", \"first_name\", \"last_name\", 
\"send_welcome_email\")\n\n\nclass UserChangeForm(BaseUserChangeForm):\n \"\"\"Custom user edit form that adds fields for first/last name and email.\n\n It also force-lowercases the username on save\n \"\"\"\n\n username = forms.CharField(\n label=_(\"Username\"),\n required=True,\n help_text=_(\"Required. 64 characters or fewer. Letters and digits only.\"),\n widget=forms.TextInput(attrs={\"class\": \"vTextField\", \"maxlength\": 64}),\n validators=[\n RegexValidator(\n regex=\"^[a-zA-Z0-9]{1,64}$\",\n message=_(\n \"Please use 64 characters or fewer. Letters and digits only.\"\n ),\n )\n ],\n )\n\n first_name = forms.CharField(\n label=_(\"First name\"),\n required=True,\n widget=forms.TextInput(attrs={\"class\": \"vTextField\", \"maxlength\": 30}),\n )\n last_name = forms.CharField(\n label=_(\"Last name\"),\n required=True,\n widget=forms.TextInput(attrs={\"class\": \"vTextField\", \"maxlength\": 150}),\n )\n email = forms.CharField(\n label=_(\"Email address\"),\n required=True,\n widget=forms.EmailInput(attrs={\"class\": \"vTextField\", \"maxlength\": 254}),\n )\n\n def clean(self):\n if \"username\" in self.cleaned_data:\n self.cleaned_data[\"username\"] = self.cleaned_data[\"username\"].lower()\n super().clean()\n", "path": "website/members/forms.py"}], "after_files": [{"content": "\"\"\"Forms defined by the members package.\"\"\"\nfrom django import forms\nfrom django.contrib.auth.forms import UserChangeForm as BaseUserChangeForm\nfrom django.contrib.auth.forms import UserCreationForm as BaseUserCreationForm\nfrom django.core.validators import RegexValidator\nfrom django.utils.translation import gettext_lazy as _\n\nfrom .models import Profile\n\n\nclass ProfileForm(forms.ModelForm):\n \"\"\"Form with all the user editable fields of a Profile model.\"\"\"\n\n class Meta:\n fields = [\n \"show_birthday\",\n \"address_street\",\n \"address_street2\",\n \"address_postal_code\",\n \"address_city\",\n \"address_country\",\n \"phone_number\",\n \"emergency_contact\",\n \"emergency_contact_phone_number\",\n \"website\",\n \"profile_description\",\n \"nickname\",\n \"initials\",\n \"display_name_preference\",\n \"photo\",\n \"receive_optin\",\n \"receive_newsletter\",\n \"receive_magazine\",\n \"email_gsuite_only\",\n ]\n model = Profile\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not kwargs[\"instance\"].user.is_staff:\n self.fields[\"email_gsuite_only\"].widget = self.fields[\n \"email_gsuite_only\"\n ].hidden_widget()\n\n\nclass UserCreationForm(BaseUserCreationForm):\n \"\"\"Custom Form that lowercases the username on creation.\"\"\"\n\n def clean(self):\n if \"username\" in self.cleaned_data:\n self.cleaned_data[\"username\"] = self.cleaned_data[\"username\"].lower()\n super().clean()\n\n class Meta:\n fields = (\"username\", \"first_name\", \"last_name\")\n\n\nclass UserChangeForm(BaseUserChangeForm):\n \"\"\"Custom user edit form that adds fields for first/last name and email.\n\n It also force-lowercases the username on save\n \"\"\"\n\n username = forms.CharField(\n label=_(\"Username\"),\n required=True,\n help_text=_(\"Required. 64 characters or fewer. Letters and digits only.\"),\n widget=forms.TextInput(attrs={\"class\": \"vTextField\", \"maxlength\": 64}),\n validators=[\n RegexValidator(\n regex=\"^[a-zA-Z0-9]{1,64}$\",\n message=_(\n \"Please use 64 characters or fewer. 
Letters and digits only.\"\n ),\n )\n ],\n )\n\n first_name = forms.CharField(\n label=_(\"First name\"),\n required=True,\n widget=forms.TextInput(attrs={\"class\": \"vTextField\", \"maxlength\": 30}),\n )\n last_name = forms.CharField(\n label=_(\"Last name\"),\n required=True,\n widget=forms.TextInput(attrs={\"class\": \"vTextField\", \"maxlength\": 150}),\n )\n email = forms.CharField(\n label=_(\"Email address\"),\n required=True,\n widget=forms.EmailInput(attrs={\"class\": \"vTextField\", \"maxlength\": 254}),\n )\n\n def clean(self):\n if \"username\" in self.cleaned_data:\n self.cleaned_data[\"username\"] = self.cleaned_data[\"username\"].lower()\n super().clean()\n", "path": "website/members/forms.py"}]}
| 1,880 | 575 |
gh_patches_debug_166
|
rasdani/github-patches
|
git_diff
|
goauthentik__authentik-9516
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
2024.4.0 LongRunningTransaction
**Describe the bug**
Prometheus alert for a long running transaction.
I think the transaction is
```
SELECT pg_advisory_unlock($1)
```
**To Reproduce**
No activity, sitting idle
**Expected behavior**
Shouldn't have the alert
**Screenshots**
**Logs**
**Version and Deployment (please complete the following information):**
2024.4.0 kubernetes
**Additional context**
Add any other context about the problem here.
--- END ISSUE ---
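The unlock statement itself is quick; what keeps the alert firing is the migration connection staying open afterwards, with psycopg's implicit transaction still active while the process sits idle. A minimal sketch of the cleanup (psycopg 3 assumed; function and variable names are illustrative):

```python
# Sketch: release the advisory lock, then close the cursor and the connection,
# so no idle session with an open transaction is left behind after migrations.
from psycopg import connect

ADV_LOCK_UID = 1000  # advisory-lock key used by the migration handler


def run_migrations(dsn: str) -> None:
    conn = connect(dsn)
    curr = conn.cursor()
    try:
        curr.execute("SELECT pg_advisory_lock(%s)", (ADV_LOCK_UID,))
        # ... apply system and Django migrations here ...
    finally:
        curr.execute("SELECT pg_advisory_unlock(%s)", (ADV_LOCK_UID,))
        curr.close()
        conn.close()
```

Using the connection and cursor as context managers (`with connect(dsn) as conn, conn.cursor() as curr:`) gives the same cleanup with less bookkeeping.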
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lifecycle/migrate.py`
Content:
```
1 #!/usr/bin/env python
2 """System Migration handler"""
3 from importlib.util import module_from_spec, spec_from_file_location
4 from inspect import getmembers, isclass
5 from os import environ, system
6 from pathlib import Path
7 from typing import Any
8
9 from psycopg import Connection, Cursor, connect
10 from structlog.stdlib import get_logger
11
12 from authentik.lib.config import CONFIG
13
14 LOGGER = get_logger()
15 ADV_LOCK_UID = 1000
16 LOCKED = False
17
18
19 class CommandError(Exception):
20 """Error raised when a system_crit command fails"""
21
22
23 class BaseMigration:
24 """Base System Migration"""
25
26 cur: Cursor
27 con: Connection
28
29 def __init__(self, cur: Any, con: Any):
30 self.cur = cur
31 self.con = con
32
33 def system_crit(self, command: str):
34 """Run system command"""
35 LOGGER.debug("Running system_crit command", command=command)
36 retval = system(command) # nosec
37 if retval != 0:
38 raise CommandError("Migration error")
39
40 def fake_migration(self, *app_migration: tuple[str, str]):
41 """Fake apply a list of migrations, arguments are
42 expected to be tuples of (app_label, migration_name)"""
43 for app, _migration in app_migration:
44 self.system_crit(f"./manage.py migrate {app} {_migration} --fake")
45
46 def needs_migration(self) -> bool:
47 """Return true if Migration needs to be run"""
48 return False
49
50 def run(self):
51 """Run the actual migration"""
52
53
54 def wait_for_lock(cursor: Cursor):
55 """lock an advisory lock to prevent multiple instances from migrating at once"""
56 LOGGER.info("waiting to acquire database lock")
57 cursor.execute("SELECT pg_advisory_lock(%s)", (ADV_LOCK_UID,))
58
59 global LOCKED # noqa: PLW0603
60 LOCKED = True
61
62
63 def release_lock(cursor: Cursor):
64 """Release database lock"""
65 if not LOCKED:
66 return
67 LOGGER.info("releasing database lock")
68 cursor.execute("SELECT pg_advisory_unlock(%s)", (ADV_LOCK_UID,))
69
70
71 def run_migrations():
72 conn = connect(
73 dbname=CONFIG.get("postgresql.name"),
74 user=CONFIG.get("postgresql.user"),
75 password=CONFIG.get("postgresql.password"),
76 host=CONFIG.get("postgresql.host"),
77 port=CONFIG.get_int("postgresql.port"),
78 sslmode=CONFIG.get("postgresql.sslmode"),
79 sslrootcert=CONFIG.get("postgresql.sslrootcert"),
80 sslcert=CONFIG.get("postgresql.sslcert"),
81 sslkey=CONFIG.get("postgresql.sslkey"),
82 )
83 curr = conn.cursor()
84 try:
85 for migration_path in Path(__file__).parent.absolute().glob("system_migrations/*.py"):
86 spec = spec_from_file_location("lifecycle.system_migrations", migration_path)
87 if not spec:
88 continue
89 mod = module_from_spec(spec)
90 spec.loader.exec_module(mod)
91
92 for name, sub in getmembers(mod, isclass):
93 if name != "Migration":
94 continue
95 migration = sub(curr, conn)
96 if migration.needs_migration():
97 wait_for_lock(curr)
98 LOGGER.info("Migration needs to be applied", migration=migration_path.name)
99 migration.run()
100 LOGGER.info("Migration finished applying", migration=migration_path.name)
101 release_lock(curr)
102 LOGGER.info("applying django migrations")
103 environ.setdefault("DJANGO_SETTINGS_MODULE", "authentik.root.settings")
104 wait_for_lock(curr)
105 try:
106 from django.core.management import execute_from_command_line
107 except ImportError as exc:
108 raise ImportError(
109 "Couldn't import Django. Are you sure it's installed and "
110 "available on your PYTHONPATH environment variable? Did you "
111 "forget to activate a virtual environment?"
112 ) from exc
113 execute_from_command_line(["", "migrate_schemas"])
114 execute_from_command_line(["", "migrate_schemas", "--schema", "template", "--tenant"])
115 execute_from_command_line(
116 ["", "check"] + ([] if CONFIG.get_bool("debug") else ["--deploy"])
117 )
118 finally:
119 release_lock(curr)
120
121
122 if __name__ == "__main__":
123 run_migrations()
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lifecycle/migrate.py b/lifecycle/migrate.py
--- a/lifecycle/migrate.py
+++ b/lifecycle/migrate.py
@@ -117,6 +117,8 @@
)
finally:
release_lock(curr)
+ curr.close()
+ conn.close()
if __name__ == "__main__":
|
{"golden_diff": "diff --git a/lifecycle/migrate.py b/lifecycle/migrate.py\n--- a/lifecycle/migrate.py\n+++ b/lifecycle/migrate.py\n@@ -117,6 +117,8 @@\n )\n finally:\n release_lock(curr)\n+ curr.close()\n+ conn.close()\n \n \n if __name__ == \"__main__\":\n", "issue": "2024.4.0 LongRunningTransaction\n**Describe the bug**\r\nPrometheus alert for a long running transaction.\r\n\r\nI think the transaction is\r\n\r\n```\r\nSELECT pg_advisory_unlock($1)\r\n```\r\n\r\n**To Reproduce**\r\nNo activity, sitting idle\r\n\r\n**Expected behavior**\r\nShouldn't have the alert\r\n\r\n**Screenshots**\r\n\r\n**Logs**\r\n\r\n**Version and Deployment (please complete the following information):**\r\n2024.4.0 kubernetes\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"System Migration handler\"\"\"\nfrom importlib.util import module_from_spec, spec_from_file_location\nfrom inspect import getmembers, isclass\nfrom os import environ, system\nfrom pathlib import Path\nfrom typing import Any\n\nfrom psycopg import Connection, Cursor, connect\nfrom structlog.stdlib import get_logger\n\nfrom authentik.lib.config import CONFIG\n\nLOGGER = get_logger()\nADV_LOCK_UID = 1000\nLOCKED = False\n\n\nclass CommandError(Exception):\n \"\"\"Error raised when a system_crit command fails\"\"\"\n\n\nclass BaseMigration:\n \"\"\"Base System Migration\"\"\"\n\n cur: Cursor\n con: Connection\n\n def __init__(self, cur: Any, con: Any):\n self.cur = cur\n self.con = con\n\n def system_crit(self, command: str):\n \"\"\"Run system command\"\"\"\n LOGGER.debug(\"Running system_crit command\", command=command)\n retval = system(command) # nosec\n if retval != 0:\n raise CommandError(\"Migration error\")\n\n def fake_migration(self, *app_migration: tuple[str, str]):\n \"\"\"Fake apply a list of migrations, arguments are\n expected to be tuples of (app_label, migration_name)\"\"\"\n for app, _migration in app_migration:\n self.system_crit(f\"./manage.py migrate {app} {_migration} --fake\")\n\n def needs_migration(self) -> bool:\n \"\"\"Return true if Migration needs to be run\"\"\"\n return False\n\n def run(self):\n \"\"\"Run the actual migration\"\"\"\n\n\ndef wait_for_lock(cursor: Cursor):\n \"\"\"lock an advisory lock to prevent multiple instances from migrating at once\"\"\"\n LOGGER.info(\"waiting to acquire database lock\")\n cursor.execute(\"SELECT pg_advisory_lock(%s)\", (ADV_LOCK_UID,))\n\n global LOCKED # noqa: PLW0603\n LOCKED = True\n\n\ndef release_lock(cursor: Cursor):\n \"\"\"Release database lock\"\"\"\n if not LOCKED:\n return\n LOGGER.info(\"releasing database lock\")\n cursor.execute(\"SELECT pg_advisory_unlock(%s)\", (ADV_LOCK_UID,))\n\n\ndef run_migrations():\n conn = connect(\n dbname=CONFIG.get(\"postgresql.name\"),\n user=CONFIG.get(\"postgresql.user\"),\n password=CONFIG.get(\"postgresql.password\"),\n host=CONFIG.get(\"postgresql.host\"),\n port=CONFIG.get_int(\"postgresql.port\"),\n sslmode=CONFIG.get(\"postgresql.sslmode\"),\n sslrootcert=CONFIG.get(\"postgresql.sslrootcert\"),\n sslcert=CONFIG.get(\"postgresql.sslcert\"),\n sslkey=CONFIG.get(\"postgresql.sslkey\"),\n )\n curr = conn.cursor()\n try:\n for migration_path in Path(__file__).parent.absolute().glob(\"system_migrations/*.py\"):\n spec = spec_from_file_location(\"lifecycle.system_migrations\", migration_path)\n if not spec:\n continue\n mod = module_from_spec(spec)\n spec.loader.exec_module(mod)\n\n for name, sub in getmembers(mod, isclass):\n if name != 
\"Migration\":\n continue\n migration = sub(curr, conn)\n if migration.needs_migration():\n wait_for_lock(curr)\n LOGGER.info(\"Migration needs to be applied\", migration=migration_path.name)\n migration.run()\n LOGGER.info(\"Migration finished applying\", migration=migration_path.name)\n release_lock(curr)\n LOGGER.info(\"applying django migrations\")\n environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"authentik.root.settings\")\n wait_for_lock(curr)\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n raise ImportError(\n \"Couldn't import Django. Are you sure it's installed and \"\n \"available on your PYTHONPATH environment variable? Did you \"\n \"forget to activate a virtual environment?\"\n ) from exc\n execute_from_command_line([\"\", \"migrate_schemas\"])\n execute_from_command_line([\"\", \"migrate_schemas\", \"--schema\", \"template\", \"--tenant\"])\n execute_from_command_line(\n [\"\", \"check\"] + ([] if CONFIG.get_bool(\"debug\") else [\"--deploy\"])\n )\n finally:\n release_lock(curr)\n\n\nif __name__ == \"__main__\":\n run_migrations()\n", "path": "lifecycle/migrate.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"System Migration handler\"\"\"\nfrom importlib.util import module_from_spec, spec_from_file_location\nfrom inspect import getmembers, isclass\nfrom os import environ, system\nfrom pathlib import Path\nfrom typing import Any\n\nfrom psycopg import Connection, Cursor, connect\nfrom structlog.stdlib import get_logger\n\nfrom authentik.lib.config import CONFIG\n\nLOGGER = get_logger()\nADV_LOCK_UID = 1000\nLOCKED = False\n\n\nclass CommandError(Exception):\n \"\"\"Error raised when a system_crit command fails\"\"\"\n\n\nclass BaseMigration:\n \"\"\"Base System Migration\"\"\"\n\n cur: Cursor\n con: Connection\n\n def __init__(self, cur: Any, con: Any):\n self.cur = cur\n self.con = con\n\n def system_crit(self, command: str):\n \"\"\"Run system command\"\"\"\n LOGGER.debug(\"Running system_crit command\", command=command)\n retval = system(command) # nosec\n if retval != 0:\n raise CommandError(\"Migration error\")\n\n def fake_migration(self, *app_migration: tuple[str, str]):\n \"\"\"Fake apply a list of migrations, arguments are\n expected to be tuples of (app_label, migration_name)\"\"\"\n for app, _migration in app_migration:\n self.system_crit(f\"./manage.py migrate {app} {_migration} --fake\")\n\n def needs_migration(self) -> bool:\n \"\"\"Return true if Migration needs to be run\"\"\"\n return False\n\n def run(self):\n \"\"\"Run the actual migration\"\"\"\n\n\ndef wait_for_lock(cursor: Cursor):\n \"\"\"lock an advisory lock to prevent multiple instances from migrating at once\"\"\"\n LOGGER.info(\"waiting to acquire database lock\")\n cursor.execute(\"SELECT pg_advisory_lock(%s)\", (ADV_LOCK_UID,))\n\n global LOCKED # noqa: PLW0603\n LOCKED = True\n\n\ndef release_lock(cursor: Cursor):\n \"\"\"Release database lock\"\"\"\n if not LOCKED:\n return\n LOGGER.info(\"releasing database lock\")\n cursor.execute(\"SELECT pg_advisory_unlock(%s)\", (ADV_LOCK_UID,))\n\n\ndef run_migrations():\n conn = connect(\n dbname=CONFIG.get(\"postgresql.name\"),\n user=CONFIG.get(\"postgresql.user\"),\n password=CONFIG.get(\"postgresql.password\"),\n host=CONFIG.get(\"postgresql.host\"),\n port=CONFIG.get_int(\"postgresql.port\"),\n sslmode=CONFIG.get(\"postgresql.sslmode\"),\n sslrootcert=CONFIG.get(\"postgresql.sslrootcert\"),\n sslcert=CONFIG.get(\"postgresql.sslcert\"),\n 
sslkey=CONFIG.get(\"postgresql.sslkey\"),\n )\n curr = conn.cursor()\n try:\n for migration_path in Path(__file__).parent.absolute().glob(\"system_migrations/*.py\"):\n spec = spec_from_file_location(\"lifecycle.system_migrations\", migration_path)\n if not spec:\n continue\n mod = module_from_spec(spec)\n spec.loader.exec_module(mod)\n\n for name, sub in getmembers(mod, isclass):\n if name != \"Migration\":\n continue\n migration = sub(curr, conn)\n if migration.needs_migration():\n wait_for_lock(curr)\n LOGGER.info(\"Migration needs to be applied\", migration=migration_path.name)\n migration.run()\n LOGGER.info(\"Migration finished applying\", migration=migration_path.name)\n release_lock(curr)\n LOGGER.info(\"applying django migrations\")\n environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"authentik.root.settings\")\n wait_for_lock(curr)\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n raise ImportError(\n \"Couldn't import Django. Are you sure it's installed and \"\n \"available on your PYTHONPATH environment variable? Did you \"\n \"forget to activate a virtual environment?\"\n ) from exc\n execute_from_command_line([\"\", \"migrate_schemas\"])\n execute_from_command_line([\"\", \"migrate_schemas\", \"--schema\", \"template\", \"--tenant\"])\n execute_from_command_line(\n [\"\", \"check\"] + ([] if CONFIG.get_bool(\"debug\") else [\"--deploy\"])\n )\n finally:\n release_lock(curr)\n curr.close()\n conn.close()\n\n\nif __name__ == \"__main__\":\n run_migrations()\n", "path": "lifecycle/migrate.py"}]}
| 1,540 | 75 |
gh_patches_debug_31693
|
rasdani/github-patches
|
git_diff
|
mlflow__mlflow-10923
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Security Vulnerability
Please check it here https://huntr.com/bounties/e3d7a994-bfd6-4772-ac9b-9aee1aa16a5f/
--- END ISSUE ---
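The linked report describes a path traversal: the local artifact repository joins a caller-supplied relative path onto the artifact root, so a value such as `../../etc/passwd` can read files outside it. An illustrative guard (this is not MLflow's actual `validate_path_is_safe`, just a sketch of the kind of check needed before the join):

```python
# Sketch: reject artifact paths that are absolute or that escape the artifact
# root once normalized, before joining them onto the local artifact directory.
import os
import posixpath


def validate_relative_path(artifact_path: str) -> str:
    if os.path.isabs(artifact_path) or posixpath.isabs(artifact_path):
        raise ValueError(f"Invalid artifact path: '{artifact_path}'")
    normalized = posixpath.normpath(artifact_path.replace(os.sep, "/"))
    if normalized == ".." or normalized.startswith("../"):
        raise ValueError(f"Invalid artifact path: '{artifact_path}'")
    return artifact_path


def resolve_local_artifact(artifact_dir: str, artifact_path: str) -> str:
    artifact_path = validate_relative_path(artifact_path)
    return os.path.join(artifact_dir, os.path.normpath(artifact_path))
```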
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlflow/store/artifact/local_artifact_repo.py`
Content:
```
1 import os
2 import shutil
3
4 from mlflow.store.artifact.artifact_repo import ArtifactRepository, verify_artifact_path
5 from mlflow.utils.file_utils import (
6 get_file_info,
7 list_all,
8 local_file_uri_to_path,
9 mkdir,
10 relative_path_to_artifact_path,
11 )
12
13
14 class LocalArtifactRepository(ArtifactRepository):
15 """Stores artifacts as files in a local directory."""
16
17 def __init__(self, *args, **kwargs):
18 super().__init__(*args, **kwargs)
19 self._artifact_dir = local_file_uri_to_path(self.artifact_uri)
20
21 @property
22 def artifact_dir(self):
23 return self._artifact_dir
24
25 def log_artifact(self, local_file, artifact_path=None):
26 verify_artifact_path(artifact_path)
27 # NOTE: The artifact_path is expected to be in posix format.
28 # Posix paths work fine on windows but just in case we normalize it here.
29 if artifact_path:
30 artifact_path = os.path.normpath(artifact_path)
31
32 artifact_dir = (
33 os.path.join(self.artifact_dir, artifact_path) if artifact_path else self.artifact_dir
34 )
35 if not os.path.exists(artifact_dir):
36 mkdir(artifact_dir)
37 try:
38 shutil.copy2(local_file, os.path.join(artifact_dir, os.path.basename(local_file)))
39 except shutil.SameFileError:
40 pass
41
42 def _is_directory(self, artifact_path):
43 # NOTE: The path is expected to be in posix format.
44 # Posix paths work fine on windows but just in case we normalize it here.
45 path = os.path.normpath(artifact_path) if artifact_path else ""
46 list_dir = os.path.join(self.artifact_dir, path) if path else self.artifact_dir
47 return os.path.isdir(list_dir)
48
49 def log_artifacts(self, local_dir, artifact_path=None):
50 verify_artifact_path(artifact_path)
51 # NOTE: The artifact_path is expected to be in posix format.
52 # Posix paths work fine on windows but just in case we normalize it here.
53 if artifact_path:
54 artifact_path = os.path.normpath(artifact_path)
55 artifact_dir = (
56 os.path.join(self.artifact_dir, artifact_path) if artifact_path else self.artifact_dir
57 )
58 if not os.path.exists(artifact_dir):
59 mkdir(artifact_dir)
60 shutil.copytree(src=local_dir, dst=artifact_dir, dirs_exist_ok=True)
61
62 def download_artifacts(self, artifact_path, dst_path=None):
63 """
64 Artifacts tracked by ``LocalArtifactRepository`` already exist on the local filesystem.
65 If ``dst_path`` is ``None``, the absolute filesystem path of the specified artifact is
66 returned. If ``dst_path`` is not ``None``, the local artifact is copied to ``dst_path``.
67
68 :param artifact_path: Relative source path to the desired artifacts.
69 :param dst_path: Absolute path of the local filesystem destination directory to which to
70 download the specified artifacts. This directory must already exist. If
71 unspecified, the absolute path of the local artifact will be returned.
72
73 :return: Absolute path of the local filesystem location containing the desired artifacts.
74 """
75 if dst_path:
76 return super().download_artifacts(artifact_path, dst_path)
77 # NOTE: The artifact_path is expected to be in posix format.
78 # Posix paths work fine on windows but just in case we normalize it here.
79 local_artifact_path = os.path.join(self.artifact_dir, os.path.normpath(artifact_path))
80 if not os.path.exists(local_artifact_path):
81 raise OSError(f"No such file or directory: '{local_artifact_path}'")
82 return os.path.abspath(local_artifact_path)
83
84 def list_artifacts(self, path=None):
85 # NOTE: The path is expected to be in posix format.
86 # Posix paths work fine on windows but just in case we normalize it here.
87 if path:
88 path = os.path.normpath(path)
89 list_dir = os.path.join(self.artifact_dir, path) if path else self.artifact_dir
90 if os.path.isdir(list_dir):
91 artifact_files = list_all(list_dir, full_path=True)
92 infos = [
93 get_file_info(
94 f, relative_path_to_artifact_path(os.path.relpath(f, self.artifact_dir))
95 )
96 for f in artifact_files
97 ]
98 return sorted(infos, key=lambda f: f.path)
99 else:
100 return []
101
102 def _download_file(self, remote_file_path, local_path):
103 # NOTE: The remote_file_path is expected to be in posix format.
104 # Posix paths work fine on windows but just in case we normalize it here.
105 remote_file_path = os.path.join(self.artifact_dir, os.path.normpath(remote_file_path))
106 shutil.copy2(remote_file_path, local_path)
107
108 def delete_artifacts(self, artifact_path=None):
109 artifact_path = local_file_uri_to_path(
110 os.path.join(self._artifact_dir, artifact_path) if artifact_path else self._artifact_dir
111 )
112
113 if os.path.exists(artifact_path):
114 shutil.rmtree(artifact_path)
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mlflow/store/artifact/local_artifact_repo.py b/mlflow/store/artifact/local_artifact_repo.py
--- a/mlflow/store/artifact/local_artifact_repo.py
+++ b/mlflow/store/artifact/local_artifact_repo.py
@@ -9,6 +9,7 @@
mkdir,
relative_path_to_artifact_path,
)
+from mlflow.utils.uri import validate_path_is_safe
class LocalArtifactRepository(ArtifactRepository):
@@ -74,8 +75,9 @@
"""
if dst_path:
return super().download_artifacts(artifact_path, dst_path)
- # NOTE: The artifact_path is expected to be in posix format.
+ # NOTE: The artifact_path is expected to be a relative path in posix format.
# Posix paths work fine on windows but just in case we normalize it here.
+ artifact_path = validate_path_is_safe(artifact_path)
local_artifact_path = os.path.join(self.artifact_dir, os.path.normpath(artifact_path))
if not os.path.exists(local_artifact_path):
raise OSError(f"No such file or directory: '{local_artifact_path}'")
@@ -100,8 +102,9 @@
return []
def _download_file(self, remote_file_path, local_path):
- # NOTE: The remote_file_path is expected to be in posix format.
+ # NOTE: The remote_file_path is expected to be a relative path in posix format.
# Posix paths work fine on windows but just in case we normalize it here.
+ remote_file_path = validate_path_is_safe(remote_file_path)
remote_file_path = os.path.join(self.artifact_dir, os.path.normpath(remote_file_path))
shutil.copy2(remote_file_path, local_path)
|
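The fix above works by refusing to trust the caller-supplied relative path before it is joined onto the artifact root, in both `download_artifacts` and `_download_file`. As a rough sketch of the kind of check involved (a standalone illustration, not MLflow's actual `validate_path_is_safe` implementation):

```python
import posixpath

def reject_unsafe_relative_path(path: str) -> str:
    """Illustrative guard: accept only plain relative POSIX paths.

    Absolute paths and any '..' segment are rejected, so joining the result
    onto a base directory cannot escape that directory.
    """
    if posixpath.isabs(path) or ".." in path.split("/"):
        raise ValueError(f"unsafe artifact path: {path!r}")
    return path

# A traversal attempt is caught before any filesystem access happens.
try:
    reject_unsafe_relative_path("../../etc/passwd")
except ValueError as exc:
    print(exc)
print(reject_unsafe_relative_path("models/model.pkl"))
```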
{"golden_diff": "diff --git a/mlflow/store/artifact/local_artifact_repo.py b/mlflow/store/artifact/local_artifact_repo.py\n--- a/mlflow/store/artifact/local_artifact_repo.py\n+++ b/mlflow/store/artifact/local_artifact_repo.py\n@@ -9,6 +9,7 @@\n mkdir,\n relative_path_to_artifact_path,\n )\n+from mlflow.utils.uri import validate_path_is_safe\n \n \n class LocalArtifactRepository(ArtifactRepository):\n@@ -74,8 +75,9 @@\n \"\"\"\n if dst_path:\n return super().download_artifacts(artifact_path, dst_path)\n- # NOTE: The artifact_path is expected to be in posix format.\n+ # NOTE: The artifact_path is expected to be a relative path in posix format.\n # Posix paths work fine on windows but just in case we normalize it here.\n+ artifact_path = validate_path_is_safe(artifact_path)\n local_artifact_path = os.path.join(self.artifact_dir, os.path.normpath(artifact_path))\n if not os.path.exists(local_artifact_path):\n raise OSError(f\"No such file or directory: '{local_artifact_path}'\")\n@@ -100,8 +102,9 @@\n return []\n \n def _download_file(self, remote_file_path, local_path):\n- # NOTE: The remote_file_path is expected to be in posix format.\n+ # NOTE: The remote_file_path is expected to be a relative path in posix format.\n # Posix paths work fine on windows but just in case we normalize it here.\n+ remote_file_path = validate_path_is_safe(remote_file_path)\n remote_file_path = os.path.join(self.artifact_dir, os.path.normpath(remote_file_path))\n shutil.copy2(remote_file_path, local_path)\n", "issue": "[BUG] Security Vulnerability\nPlease check it here https://huntr.com/bounties/e3d7a994-bfd6-4772-ac9b-9aee1aa16a5f/\n", "before_files": [{"content": "import os\nimport shutil\n\nfrom mlflow.store.artifact.artifact_repo import ArtifactRepository, verify_artifact_path\nfrom mlflow.utils.file_utils import (\n get_file_info,\n list_all,\n local_file_uri_to_path,\n mkdir,\n relative_path_to_artifact_path,\n)\n\n\nclass LocalArtifactRepository(ArtifactRepository):\n \"\"\"Stores artifacts as files in a local directory.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._artifact_dir = local_file_uri_to_path(self.artifact_uri)\n\n @property\n def artifact_dir(self):\n return self._artifact_dir\n\n def log_artifact(self, local_file, artifact_path=None):\n verify_artifact_path(artifact_path)\n # NOTE: The artifact_path is expected to be in posix format.\n # Posix paths work fine on windows but just in case we normalize it here.\n if artifact_path:\n artifact_path = os.path.normpath(artifact_path)\n\n artifact_dir = (\n os.path.join(self.artifact_dir, artifact_path) if artifact_path else self.artifact_dir\n )\n if not os.path.exists(artifact_dir):\n mkdir(artifact_dir)\n try:\n shutil.copy2(local_file, os.path.join(artifact_dir, os.path.basename(local_file)))\n except shutil.SameFileError:\n pass\n\n def _is_directory(self, artifact_path):\n # NOTE: The path is expected to be in posix format.\n # Posix paths work fine on windows but just in case we normalize it here.\n path = os.path.normpath(artifact_path) if artifact_path else \"\"\n list_dir = os.path.join(self.artifact_dir, path) if path else self.artifact_dir\n return os.path.isdir(list_dir)\n\n def log_artifacts(self, local_dir, artifact_path=None):\n verify_artifact_path(artifact_path)\n # NOTE: The artifact_path is expected to be in posix format.\n # Posix paths work fine on windows but just in case we normalize it here.\n if artifact_path:\n artifact_path = os.path.normpath(artifact_path)\n artifact_dir = (\n 
os.path.join(self.artifact_dir, artifact_path) if artifact_path else self.artifact_dir\n )\n if not os.path.exists(artifact_dir):\n mkdir(artifact_dir)\n shutil.copytree(src=local_dir, dst=artifact_dir, dirs_exist_ok=True)\n\n def download_artifacts(self, artifact_path, dst_path=None):\n \"\"\"\n Artifacts tracked by ``LocalArtifactRepository`` already exist on the local filesystem.\n If ``dst_path`` is ``None``, the absolute filesystem path of the specified artifact is\n returned. If ``dst_path`` is not ``None``, the local artifact is copied to ``dst_path``.\n\n :param artifact_path: Relative source path to the desired artifacts.\n :param dst_path: Absolute path of the local filesystem destination directory to which to\n download the specified artifacts. This directory must already exist. If\n unspecified, the absolute path of the local artifact will be returned.\n\n :return: Absolute path of the local filesystem location containing the desired artifacts.\n \"\"\"\n if dst_path:\n return super().download_artifacts(artifact_path, dst_path)\n # NOTE: The artifact_path is expected to be in posix format.\n # Posix paths work fine on windows but just in case we normalize it here.\n local_artifact_path = os.path.join(self.artifact_dir, os.path.normpath(artifact_path))\n if not os.path.exists(local_artifact_path):\n raise OSError(f\"No such file or directory: '{local_artifact_path}'\")\n return os.path.abspath(local_artifact_path)\n\n def list_artifacts(self, path=None):\n # NOTE: The path is expected to be in posix format.\n # Posix paths work fine on windows but just in case we normalize it here.\n if path:\n path = os.path.normpath(path)\n list_dir = os.path.join(self.artifact_dir, path) if path else self.artifact_dir\n if os.path.isdir(list_dir):\n artifact_files = list_all(list_dir, full_path=True)\n infos = [\n get_file_info(\n f, relative_path_to_artifact_path(os.path.relpath(f, self.artifact_dir))\n )\n for f in artifact_files\n ]\n return sorted(infos, key=lambda f: f.path)\n else:\n return []\n\n def _download_file(self, remote_file_path, local_path):\n # NOTE: The remote_file_path is expected to be in posix format.\n # Posix paths work fine on windows but just in case we normalize it here.\n remote_file_path = os.path.join(self.artifact_dir, os.path.normpath(remote_file_path))\n shutil.copy2(remote_file_path, local_path)\n\n def delete_artifacts(self, artifact_path=None):\n artifact_path = local_file_uri_to_path(\n os.path.join(self._artifact_dir, artifact_path) if artifact_path else self._artifact_dir\n )\n\n if os.path.exists(artifact_path):\n shutil.rmtree(artifact_path)\n", "path": "mlflow/store/artifact/local_artifact_repo.py"}], "after_files": [{"content": "import os\nimport shutil\n\nfrom mlflow.store.artifact.artifact_repo import ArtifactRepository, verify_artifact_path\nfrom mlflow.utils.file_utils import (\n get_file_info,\n list_all,\n local_file_uri_to_path,\n mkdir,\n relative_path_to_artifact_path,\n)\nfrom mlflow.utils.uri import validate_path_is_safe\n\n\nclass LocalArtifactRepository(ArtifactRepository):\n \"\"\"Stores artifacts as files in a local directory.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._artifact_dir = local_file_uri_to_path(self.artifact_uri)\n\n @property\n def artifact_dir(self):\n return self._artifact_dir\n\n def log_artifact(self, local_file, artifact_path=None):\n verify_artifact_path(artifact_path)\n # NOTE: The artifact_path is expected to be in posix format.\n # Posix paths work fine on windows 
but just in case we normalize it here.\n if artifact_path:\n artifact_path = os.path.normpath(artifact_path)\n\n artifact_dir = (\n os.path.join(self.artifact_dir, artifact_path) if artifact_path else self.artifact_dir\n )\n if not os.path.exists(artifact_dir):\n mkdir(artifact_dir)\n try:\n shutil.copy2(local_file, os.path.join(artifact_dir, os.path.basename(local_file)))\n except shutil.SameFileError:\n pass\n\n def _is_directory(self, artifact_path):\n # NOTE: The path is expected to be in posix format.\n # Posix paths work fine on windows but just in case we normalize it here.\n path = os.path.normpath(artifact_path) if artifact_path else \"\"\n list_dir = os.path.join(self.artifact_dir, path) if path else self.artifact_dir\n return os.path.isdir(list_dir)\n\n def log_artifacts(self, local_dir, artifact_path=None):\n verify_artifact_path(artifact_path)\n # NOTE: The artifact_path is expected to be in posix format.\n # Posix paths work fine on windows but just in case we normalize it here.\n if artifact_path:\n artifact_path = os.path.normpath(artifact_path)\n artifact_dir = (\n os.path.join(self.artifact_dir, artifact_path) if artifact_path else self.artifact_dir\n )\n if not os.path.exists(artifact_dir):\n mkdir(artifact_dir)\n shutil.copytree(src=local_dir, dst=artifact_dir, dirs_exist_ok=True)\n\n def download_artifacts(self, artifact_path, dst_path=None):\n \"\"\"\n Artifacts tracked by ``LocalArtifactRepository`` already exist on the local filesystem.\n If ``dst_path`` is ``None``, the absolute filesystem path of the specified artifact is\n returned. If ``dst_path`` is not ``None``, the local artifact is copied to ``dst_path``.\n\n :param artifact_path: Relative source path to the desired artifacts.\n :param dst_path: Absolute path of the local filesystem destination directory to which to\n download the specified artifacts. This directory must already exist. 
If\n unspecified, the absolute path of the local artifact will be returned.\n\n :return: Absolute path of the local filesystem location containing the desired artifacts.\n \"\"\"\n if dst_path:\n return super().download_artifacts(artifact_path, dst_path)\n # NOTE: The artifact_path is expected to be a relative path in posix format.\n # Posix paths work fine on windows but just in case we normalize it here.\n artifact_path = validate_path_is_safe(artifact_path)\n local_artifact_path = os.path.join(self.artifact_dir, os.path.normpath(artifact_path))\n if not os.path.exists(local_artifact_path):\n raise OSError(f\"No such file or directory: '{local_artifact_path}'\")\n return os.path.abspath(local_artifact_path)\n\n def list_artifacts(self, path=None):\n # NOTE: The path is expected to be in posix format.\n # Posix paths work fine on windows but just in case we normalize it here.\n if path:\n path = os.path.normpath(path)\n list_dir = os.path.join(self.artifact_dir, path) if path else self.artifact_dir\n if os.path.isdir(list_dir):\n artifact_files = list_all(list_dir, full_path=True)\n infos = [\n get_file_info(\n f, relative_path_to_artifact_path(os.path.relpath(f, self.artifact_dir))\n )\n for f in artifact_files\n ]\n return sorted(infos, key=lambda f: f.path)\n else:\n return []\n\n def _download_file(self, remote_file_path, local_path):\n # NOTE: The remote_file_path is expected to be a relative path in posix format.\n # Posix paths work fine on windows but just in case we normalize it here.\n remote_file_path = validate_path_is_safe(remote_file_path)\n remote_file_path = os.path.join(self.artifact_dir, os.path.normpath(remote_file_path))\n shutil.copy2(remote_file_path, local_path)\n\n def delete_artifacts(self, artifact_path=None):\n artifact_path = local_file_uri_to_path(\n os.path.join(self._artifact_dir, artifact_path) if artifact_path else self._artifact_dir\n )\n\n if os.path.exists(artifact_path):\n shutil.rmtree(artifact_path)\n", "path": "mlflow/store/artifact/local_artifact_repo.py"}]}
| 1,642 | 377 |
gh_patches_debug_25018
|
rasdani/github-patches
|
git_diff
|
magenta__magenta-1851
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
KeyError: 'tfds_data_dir'(GANSynth)
Hi, I got this error on the GANSynth demo Colab. How can I resolve it?

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `magenta/models/gansynth/gansynth_generate.py`
Content:
```
1 # Copyright 2020 The Magenta Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # Lint as: python3
16 r"""Generate samples with a pretrained GANSynth model.
17
18 To use a config of hyperparameters and manual hparams:
19 >>> python magenta/models/gansynth/generate.py \
20 >>> --ckpt_dir=/path/to/ckpt/dir --output_dir=/path/to/output/dir \
21 >>> --midi_file=/path/to/file.mid
22
23 If a MIDI file is specified, notes are synthesized with interpolation between
24 latent vectors in time. If no MIDI file is given, a random batch of notes is
25 synthesized.
26 """
27
28 import os
29
30 import absl.flags
31 from magenta.models.gansynth.lib import flags as lib_flags
32 from magenta.models.gansynth.lib import generate_util as gu
33 from magenta.models.gansynth.lib import model as lib_model
34 from magenta.models.gansynth.lib import util
35 import tensorflow.compat.v1 as tf
36
37
38 absl.flags.DEFINE_string('ckpt_dir',
39 '/tmp/gansynth/acoustic_only',
40 'Path to the base directory of pretrained checkpoints.'
41 'The base directory should contain many '
42 '"stage_000*" subdirectories.')
43 absl.flags.DEFINE_string('output_dir',
44 '/tmp/gansynth/samples',
45 'Path to directory to save wave files.')
46 absl.flags.DEFINE_string('midi_file',
47 '',
48 'Path to a MIDI file (.mid) to synthesize.')
49 absl.flags.DEFINE_integer('batch_size', 8, 'Batch size for generation.')
50 absl.flags.DEFINE_float('secs_per_instrument', 6.0,
51 'In random interpolations, the seconds it takes to '
52 'interpolate from one instrument to another.')
53
54 FLAGS = absl.flags.FLAGS
55 tf.logging.set_verbosity(tf.logging.INFO)
56
57
58 def main(unused_argv):
59 absl.flags.FLAGS.alsologtostderr = True
60
61 # Load the model
62 flags = lib_flags.Flags({'batch_size_schedule': [FLAGS.batch_size]})
63 model = lib_model.Model.load_from_path(FLAGS.ckpt_dir, flags)
64
65 # Make an output directory if it doesn't exist
66 output_dir = util.expand_path(FLAGS.output_dir)
67 if not tf.gfile.Exists(output_dir):
68 tf.gfile.MakeDirs(output_dir)
69
70 if FLAGS.midi_file:
71 # If a MIDI file is provided, synthesize interpolations across the clip
72 unused_ns, notes = gu.load_midi(FLAGS.midi_file)
73
74 # Distribute latent vectors linearly in time
75 z_instruments, t_instruments = gu.get_random_instruments(
76 model,
77 notes['end_times'][-1],
78 secs_per_instrument=FLAGS.secs_per_instrument)
79
80 # Get latent vectors for each note
81 z_notes = gu.get_z_notes(notes['start_times'], z_instruments, t_instruments)
82
83 # Generate audio for each note
84 print('Generating {} samples...'.format(len(z_notes)))
85 audio_notes = model.generate_samples_from_z(z_notes, notes['pitches'])
86
87 # Make a single audio clip
88 audio_clip = gu.combine_notes(audio_notes,
89 notes['start_times'],
90 notes['end_times'],
91 notes['velocities'])
92
93 # Write the wave files
94 fname = os.path.join(output_dir, 'generated_clip.wav')
95 gu.save_wav(audio_clip, fname)
96 else:
97 # Otherwise, just generate a batch of random sounds
98 waves = model.generate_samples(FLAGS.batch_size)
99 # Write the wave files
100 for i in range(len(waves)):
101 fname = os.path.join(output_dir, 'generated_{}.wav'.format(i))
102 gu.save_wav(waves[i], fname)
103
104
105 def console_entry_point():
106 tf.disable_v2_behavior()
107 tf.app.run(main)
108
109
110 if __name__ == '__main__':
111 console_entry_point()
112
```
Path: `magenta/version.py`
Content:
```
1 # Copyright 2020 The Magenta Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 r"""Separate file for storing the current version of Magenta.
16
17 Stored in a separate file so that setup.py can reference the version without
18 pulling in all the dependencies in __init__.py.
19 """
20
21 __version__ = '2.1.2'
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/magenta/models/gansynth/gansynth_generate.py b/magenta/models/gansynth/gansynth_generate.py
--- a/magenta/models/gansynth/gansynth_generate.py
+++ b/magenta/models/gansynth/gansynth_generate.py
@@ -50,6 +50,9 @@
absl.flags.DEFINE_float('secs_per_instrument', 6.0,
'In random interpolations, the seconds it takes to '
'interpolate from one instrument to another.')
+absl.flags.DEFINE_string('tfds_data_dir',
+ 'gs://tfds-data/datasets',
+ 'Data directory for the TFDS dataset used to train.')
FLAGS = absl.flags.FLAGS
tf.logging.set_verbosity(tf.logging.INFO)
@@ -59,7 +62,11 @@
absl.flags.FLAGS.alsologtostderr = True
# Load the model
- flags = lib_flags.Flags({'batch_size_schedule': [FLAGS.batch_size]})
+ flags = lib_flags.Flags(
+ {
+ 'batch_size_schedule': [FLAGS.batch_size],
+ 'tfds_data_dir': FLAGS.tfds_data_dir
+ })
model = lib_model.Model.load_from_path(FLAGS.ckpt_dir, flags)
# Make an output directory if it doesn't exist
diff --git a/magenta/version.py b/magenta/version.py
--- a/magenta/version.py
+++ b/magenta/version.py
@@ -18,4 +18,4 @@
pulling in all the dependencies in __init__.py.
"""
-__version__ = '2.1.2'
+__version__ = '2.1.3'
|
{"golden_diff": "diff --git a/magenta/models/gansynth/gansynth_generate.py b/magenta/models/gansynth/gansynth_generate.py\n--- a/magenta/models/gansynth/gansynth_generate.py\n+++ b/magenta/models/gansynth/gansynth_generate.py\n@@ -50,6 +50,9 @@\n absl.flags.DEFINE_float('secs_per_instrument', 6.0,\n 'In random interpolations, the seconds it takes to '\n 'interpolate from one instrument to another.')\n+absl.flags.DEFINE_string('tfds_data_dir',\n+ 'gs://tfds-data/datasets',\n+ 'Data directory for the TFDS dataset used to train.')\n \n FLAGS = absl.flags.FLAGS\n tf.logging.set_verbosity(tf.logging.INFO)\n@@ -59,7 +62,11 @@\n absl.flags.FLAGS.alsologtostderr = True\n \n # Load the model\n- flags = lib_flags.Flags({'batch_size_schedule': [FLAGS.batch_size]})\n+ flags = lib_flags.Flags(\n+ {\n+ 'batch_size_schedule': [FLAGS.batch_size],\n+ 'tfds_data_dir': FLAGS.tfds_data_dir\n+ })\n model = lib_model.Model.load_from_path(FLAGS.ckpt_dir, flags)\n \n # Make an output directory if it doesn't exist\ndiff --git a/magenta/version.py b/magenta/version.py\n--- a/magenta/version.py\n+++ b/magenta/version.py\n@@ -18,4 +18,4 @@\n pulling in all the dependencies in __init__.py.\n \"\"\"\n \n-__version__ = '2.1.2'\n+__version__ = '2.1.3'\n", "issue": "KeyError: 'tfds_data_dir'(GANSynth)\nHi, I got this error on GANSynth demo colab . How can I resolve it?\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\nr\"\"\"Generate samples with a pretrained GANSynth model.\n\nTo use a config of hyperparameters and manual hparams:\n>>> python magenta/models/gansynth/generate.py \\\n>>> --ckpt_dir=/path/to/ckpt/dir --output_dir=/path/to/output/dir \\\n>>> --midi_file=/path/to/file.mid\n\nIf a MIDI file is specified, notes are synthesized with interpolation between\nlatent vectors in time. 
If no MIDI file is given, a random batch of notes is\nsynthesized.\n\"\"\"\n\nimport os\n\nimport absl.flags\nfrom magenta.models.gansynth.lib import flags as lib_flags\nfrom magenta.models.gansynth.lib import generate_util as gu\nfrom magenta.models.gansynth.lib import model as lib_model\nfrom magenta.models.gansynth.lib import util\nimport tensorflow.compat.v1 as tf\n\n\nabsl.flags.DEFINE_string('ckpt_dir',\n '/tmp/gansynth/acoustic_only',\n 'Path to the base directory of pretrained checkpoints.'\n 'The base directory should contain many '\n '\"stage_000*\" subdirectories.')\nabsl.flags.DEFINE_string('output_dir',\n '/tmp/gansynth/samples',\n 'Path to directory to save wave files.')\nabsl.flags.DEFINE_string('midi_file',\n '',\n 'Path to a MIDI file (.mid) to synthesize.')\nabsl.flags.DEFINE_integer('batch_size', 8, 'Batch size for generation.')\nabsl.flags.DEFINE_float('secs_per_instrument', 6.0,\n 'In random interpolations, the seconds it takes to '\n 'interpolate from one instrument to another.')\n\nFLAGS = absl.flags.FLAGS\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\ndef main(unused_argv):\n absl.flags.FLAGS.alsologtostderr = True\n\n # Load the model\n flags = lib_flags.Flags({'batch_size_schedule': [FLAGS.batch_size]})\n model = lib_model.Model.load_from_path(FLAGS.ckpt_dir, flags)\n\n # Make an output directory if it doesn't exist\n output_dir = util.expand_path(FLAGS.output_dir)\n if not tf.gfile.Exists(output_dir):\n tf.gfile.MakeDirs(output_dir)\n\n if FLAGS.midi_file:\n # If a MIDI file is provided, synthesize interpolations across the clip\n unused_ns, notes = gu.load_midi(FLAGS.midi_file)\n\n # Distribute latent vectors linearly in time\n z_instruments, t_instruments = gu.get_random_instruments(\n model,\n notes['end_times'][-1],\n secs_per_instrument=FLAGS.secs_per_instrument)\n\n # Get latent vectors for each note\n z_notes = gu.get_z_notes(notes['start_times'], z_instruments, t_instruments)\n\n # Generate audio for each note\n print('Generating {} samples...'.format(len(z_notes)))\n audio_notes = model.generate_samples_from_z(z_notes, notes['pitches'])\n\n # Make a single audio clip\n audio_clip = gu.combine_notes(audio_notes,\n notes['start_times'],\n notes['end_times'],\n notes['velocities'])\n\n # Write the wave files\n fname = os.path.join(output_dir, 'generated_clip.wav')\n gu.save_wav(audio_clip, fname)\n else:\n # Otherwise, just generate a batch of random sounds\n waves = model.generate_samples(FLAGS.batch_size)\n # Write the wave files\n for i in range(len(waves)):\n fname = os.path.join(output_dir, 'generated_{}.wav'.format(i))\n gu.save_wav(waves[i], fname)\n\n\ndef console_entry_point():\n tf.disable_v2_behavior()\n tf.app.run(main)\n\n\nif __name__ == '__main__':\n console_entry_point()\n", "path": "magenta/models/gansynth/gansynth_generate.py"}, {"content": "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Separate file for storing the current version of Magenta.\n\nStored in a separate file so that setup.py 
can reference the version without\npulling in all the dependencies in __init__.py.\n\"\"\"\n\n__version__ = '2.1.2'\n", "path": "magenta/version.py"}], "after_files": [{"content": "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\nr\"\"\"Generate samples with a pretrained GANSynth model.\n\nTo use a config of hyperparameters and manual hparams:\n>>> python magenta/models/gansynth/generate.py \\\n>>> --ckpt_dir=/path/to/ckpt/dir --output_dir=/path/to/output/dir \\\n>>> --midi_file=/path/to/file.mid\n\nIf a MIDI file is specified, notes are synthesized with interpolation between\nlatent vectors in time. If no MIDI file is given, a random batch of notes is\nsynthesized.\n\"\"\"\n\nimport os\n\nimport absl.flags\nfrom magenta.models.gansynth.lib import flags as lib_flags\nfrom magenta.models.gansynth.lib import generate_util as gu\nfrom magenta.models.gansynth.lib import model as lib_model\nfrom magenta.models.gansynth.lib import util\nimport tensorflow.compat.v1 as tf\n\n\nabsl.flags.DEFINE_string('ckpt_dir',\n '/tmp/gansynth/acoustic_only',\n 'Path to the base directory of pretrained checkpoints.'\n 'The base directory should contain many '\n '\"stage_000*\" subdirectories.')\nabsl.flags.DEFINE_string('output_dir',\n '/tmp/gansynth/samples',\n 'Path to directory to save wave files.')\nabsl.flags.DEFINE_string('midi_file',\n '',\n 'Path to a MIDI file (.mid) to synthesize.')\nabsl.flags.DEFINE_integer('batch_size', 8, 'Batch size for generation.')\nabsl.flags.DEFINE_float('secs_per_instrument', 6.0,\n 'In random interpolations, the seconds it takes to '\n 'interpolate from one instrument to another.')\nabsl.flags.DEFINE_string('tfds_data_dir',\n 'gs://tfds-data/datasets',\n 'Data directory for the TFDS dataset used to train.')\n\nFLAGS = absl.flags.FLAGS\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\ndef main(unused_argv):\n absl.flags.FLAGS.alsologtostderr = True\n\n # Load the model\n flags = lib_flags.Flags(\n {\n 'batch_size_schedule': [FLAGS.batch_size],\n 'tfds_data_dir': FLAGS.tfds_data_dir\n })\n model = lib_model.Model.load_from_path(FLAGS.ckpt_dir, flags)\n\n # Make an output directory if it doesn't exist\n output_dir = util.expand_path(FLAGS.output_dir)\n if not tf.gfile.Exists(output_dir):\n tf.gfile.MakeDirs(output_dir)\n\n if FLAGS.midi_file:\n # If a MIDI file is provided, synthesize interpolations across the clip\n unused_ns, notes = gu.load_midi(FLAGS.midi_file)\n\n # Distribute latent vectors linearly in time\n z_instruments, t_instruments = gu.get_random_instruments(\n model,\n notes['end_times'][-1],\n secs_per_instrument=FLAGS.secs_per_instrument)\n\n # Get latent vectors for each note\n z_notes = gu.get_z_notes(notes['start_times'], z_instruments, t_instruments)\n\n # Generate audio for each note\n print('Generating {} samples...'.format(len(z_notes)))\n audio_notes = model.generate_samples_from_z(z_notes, notes['pitches'])\n\n # Make a single audio clip\n audio_clip = gu.combine_notes(audio_notes,\n 
notes['start_times'],\n notes['end_times'],\n notes['velocities'])\n\n # Write the wave files\n fname = os.path.join(output_dir, 'generated_clip.wav')\n gu.save_wav(audio_clip, fname)\n else:\n # Otherwise, just generate a batch of random sounds\n waves = model.generate_samples(FLAGS.batch_size)\n # Write the wave files\n for i in range(len(waves)):\n fname = os.path.join(output_dir, 'generated_{}.wav'.format(i))\n gu.save_wav(waves[i], fname)\n\n\ndef console_entry_point():\n tf.disable_v2_behavior()\n tf.app.run(main)\n\n\nif __name__ == '__main__':\n console_entry_point()\n", "path": "magenta/models/gansynth/gansynth_generate.py"}, {"content": "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Separate file for storing the current version of Magenta.\n\nStored in a separate file so that setup.py can reference the version without\npulling in all the dependencies in __init__.py.\n\"\"\"\n\n__version__ = '2.1.3'\n", "path": "magenta/version.py"}]}
| 1,760 | 357 |
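The KeyError in this record comes from the model looking up a flag that the generation script never set. A minimal sketch of the failure mode and the shape of the fix, using a plain dict in place of the real `lib_flags.Flags` container (which, per the issue title, raises the same `KeyError` for a missing entry):

```python
# Dict stand-in for the flags container used by the generation script.
flags = {"batch_size_schedule": [8]}

try:
    flags["tfds_data_dir"]
except KeyError as exc:
    print("missing flag:", exc)  # -> missing flag: 'tfds_data_dir'

# The patch threads the value through from a new command-line flag, with
# 'gs://tfds-data/datasets' as the default shown in the diff above.
flags = {
    "batch_size_schedule": [8],
    "tfds_data_dir": "gs://tfds-data/datasets",
}
print(flags["tfds_data_dir"])
```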
gh_patches_debug_38325
|
rasdani/github-patches
|
git_diff
|
encode__starlette-8
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Request should present a scope-like interface
The `Request` class should present a dict-like interface so that it can be used in the same way as `scope`. It should also allow instantiation without a `receive` channel being set initially.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/request.py`
Content:
```
1 from starlette.datastructures import URL, Headers, QueryParams
2 import json
3
4
5 class Request:
6 def __init__(self, scope, receive):
7 self._scope = scope
8 self._receive = receive
9 self._stream_consumed = False
10
11 @property
12 def method(self):
13 return self._scope["method"]
14
15 @property
16 def url(self):
17 if not hasattr(self, "_url"):
18 scheme = self._scope["scheme"]
19 host, port = self._scope["server"]
20 path = self._scope["path"]
21 query_string = self._scope["query_string"]
22
23 if (scheme == "http" and port != 80) or (scheme == "https" and port != 443):
24 url = "%s://%s:%s%s" % (scheme, host, port, path)
25 else:
26 url = "%s://%s%s" % (scheme, host, path)
27
28 if query_string:
29 url += "?" + query_string.decode()
30
31 self._url = URL(url)
32 return self._url
33
34 @property
35 def headers(self):
36 if not hasattr(self, "_headers"):
37 self._headers = Headers(
38 [
39 (key.decode(), value.decode())
40 for key, value in self._scope["headers"]
41 ]
42 )
43 return self._headers
44
45 @property
46 def query_params(self):
47 if not hasattr(self, "_query_params"):
48 query_string = self._scope["query_string"].decode()
49 self._query_params = QueryParams(query_string)
50 return self._query_params
51
52 async def stream(self):
53 if hasattr(self, "_body"):
54 yield self._body
55 return
56
57 if self._stream_consumed:
58 raise RuntimeError("Stream consumed")
59
60 self._stream_consumed = True
61 while True:
62 message = await self._receive()
63 if message["type"] == "http.request":
64 yield message.get("body", b"")
65 if not message.get("more_body", False):
66 break
67
68 async def body(self):
69 if not hasattr(self, "_body"):
70 body = b""
71 async for chunk in self.stream():
72 body += chunk
73 self._body = body
74 return self._body
75
76 async def json(self):
77 if not hasattr(self, "_json"):
78 body = await self.body()
79 self._json = json.loads(body)
80 return self._json
81
```
Path: `starlette/decorators.py`
Content:
```
1 from starlette.request import Request
2 from starlette.response import Response
3 from starlette.types import ASGIInstance, Receive, Send, Scope
4
5
6 def asgi_application(func):
7 def app(scope: Scope) -> ASGIInstance:
8 async def awaitable(receive: Receive, send: Send) -> None:
9 request = Request(scope, receive)
10 response = func(request)
11 await response(receive, send)
12
13 return awaitable
14
15 return app
16
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/starlette/decorators.py b/starlette/decorators.py
--- a/starlette/decorators.py
+++ b/starlette/decorators.py
@@ -5,8 +5,10 @@
def asgi_application(func):
def app(scope: Scope) -> ASGIInstance:
+ request = Request(scope)
+
async def awaitable(receive: Receive, send: Send) -> None:
- request = Request(scope, receive)
+ request.set_receive_channel(receive)
response = func(request)
await response(receive, send)
diff --git a/starlette/request.py b/starlette/request.py
--- a/starlette/request.py
+++ b/starlette/request.py
@@ -1,19 +1,33 @@
from starlette.datastructures import URL, Headers, QueryParams
+from collections.abc import Mapping
import json
+import typing
-class Request:
- def __init__(self, scope, receive):
+class Request(Mapping):
+ def __init__(self, scope, receive=None):
self._scope = scope
self._receive = receive
self._stream_consumed = False
+ def __getitem__(self, key):
+ return self._scope[key]
+
+ def __iter__(self):
+ return iter(self._scope)
+
+ def __len__(self):
+ return len(self._scope)
+
+ def set_receive_channel(self, receive):
+ self._receive = receive
+
@property
- def method(self):
+ def method(self) -> str:
return self._scope["method"]
@property
- def url(self):
+ def url(self) -> URL:
if not hasattr(self, "_url"):
scheme = self._scope["scheme"]
host, port = self._scope["server"]
@@ -32,7 +46,7 @@
return self._url
@property
- def headers(self):
+ def headers(self) -> Headers:
if not hasattr(self, "_headers"):
self._headers = Headers(
[
@@ -43,7 +57,7 @@
return self._headers
@property
- def query_params(self):
+ def query_params(self) -> QueryParams:
if not hasattr(self, "_query_params"):
query_string = self._scope["query_string"].decode()
self._query_params = QueryParams(query_string)
@@ -57,6 +71,9 @@
if self._stream_consumed:
raise RuntimeError("Stream consumed")
+ if self._receive is None:
+ raise RuntimeError("Receive channel has not been made available")
+
self._stream_consumed = True
while True:
message = await self._receive()
|
{"golden_diff": "diff --git a/starlette/decorators.py b/starlette/decorators.py\n--- a/starlette/decorators.py\n+++ b/starlette/decorators.py\n@@ -5,8 +5,10 @@\n \n def asgi_application(func):\n def app(scope: Scope) -> ASGIInstance:\n+ request = Request(scope)\n+\n async def awaitable(receive: Receive, send: Send) -> None:\n- request = Request(scope, receive)\n+ request.set_receive_channel(receive)\n response = func(request)\n await response(receive, send)\n \ndiff --git a/starlette/request.py b/starlette/request.py\n--- a/starlette/request.py\n+++ b/starlette/request.py\n@@ -1,19 +1,33 @@\n from starlette.datastructures import URL, Headers, QueryParams\n+from collections.abc import Mapping\n import json\n+import typing\n \n \n-class Request:\n- def __init__(self, scope, receive):\n+class Request(Mapping):\n+ def __init__(self, scope, receive=None):\n self._scope = scope\n self._receive = receive\n self._stream_consumed = False\n \n+ def __getitem__(self, key):\n+ return self._scope[key]\n+\n+ def __iter__(self):\n+ return iter(self._scope)\n+\n+ def __len__(self):\n+ return len(self._scope)\n+\n+ def set_receive_channel(self, receive):\n+ self._receive = receive\n+\n @property\n- def method(self):\n+ def method(self) -> str:\n return self._scope[\"method\"]\n \n @property\n- def url(self):\n+ def url(self) -> URL:\n if not hasattr(self, \"_url\"):\n scheme = self._scope[\"scheme\"]\n host, port = self._scope[\"server\"]\n@@ -32,7 +46,7 @@\n return self._url\n \n @property\n- def headers(self):\n+ def headers(self) -> Headers:\n if not hasattr(self, \"_headers\"):\n self._headers = Headers(\n [\n@@ -43,7 +57,7 @@\n return self._headers\n \n @property\n- def query_params(self):\n+ def query_params(self) -> QueryParams:\n if not hasattr(self, \"_query_params\"):\n query_string = self._scope[\"query_string\"].decode()\n self._query_params = QueryParams(query_string)\n@@ -57,6 +71,9 @@\n if self._stream_consumed:\n raise RuntimeError(\"Stream consumed\")\n \n+ if self._receive is None:\n+ raise RuntimeError(\"Receive channel has not been made available\")\n+\n self._stream_consumed = True\n while True:\n message = await self._receive()\n", "issue": "Request should present a scope-like interface\nThe `Request` class should present a dict-like interface so that it can be used in the same way as `scope`. 
Should also allow it to be instantiated without a `receive` channel being set initially.\n", "before_files": [{"content": "from starlette.datastructures import URL, Headers, QueryParams\nimport json\n\n\nclass Request:\n def __init__(self, scope, receive):\n self._scope = scope\n self._receive = receive\n self._stream_consumed = False\n\n @property\n def method(self):\n return self._scope[\"method\"]\n\n @property\n def url(self):\n if not hasattr(self, \"_url\"):\n scheme = self._scope[\"scheme\"]\n host, port = self._scope[\"server\"]\n path = self._scope[\"path\"]\n query_string = self._scope[\"query_string\"]\n\n if (scheme == \"http\" and port != 80) or (scheme == \"https\" and port != 443):\n url = \"%s://%s:%s%s\" % (scheme, host, port, path)\n else:\n url = \"%s://%s%s\" % (scheme, host, path)\n\n if query_string:\n url += \"?\" + query_string.decode()\n\n self._url = URL(url)\n return self._url\n\n @property\n def headers(self):\n if not hasattr(self, \"_headers\"):\n self._headers = Headers(\n [\n (key.decode(), value.decode())\n for key, value in self._scope[\"headers\"]\n ]\n )\n return self._headers\n\n @property\n def query_params(self):\n if not hasattr(self, \"_query_params\"):\n query_string = self._scope[\"query_string\"].decode()\n self._query_params = QueryParams(query_string)\n return self._query_params\n\n async def stream(self):\n if hasattr(self, \"_body\"):\n yield self._body\n return\n\n if self._stream_consumed:\n raise RuntimeError(\"Stream consumed\")\n\n self._stream_consumed = True\n while True:\n message = await self._receive()\n if message[\"type\"] == \"http.request\":\n yield message.get(\"body\", b\"\")\n if not message.get(\"more_body\", False):\n break\n\n async def body(self):\n if not hasattr(self, \"_body\"):\n body = b\"\"\n async for chunk in self.stream():\n body += chunk\n self._body = body\n return self._body\n\n async def json(self):\n if not hasattr(self, \"_json\"):\n body = await self.body()\n self._json = json.loads(body)\n return self._json\n", "path": "starlette/request.py"}, {"content": "from starlette.request import Request\nfrom starlette.response import Response\nfrom starlette.types import ASGIInstance, Receive, Send, Scope\n\n\ndef asgi_application(func):\n def app(scope: Scope) -> ASGIInstance:\n async def awaitable(receive: Receive, send: Send) -> None:\n request = Request(scope, receive)\n response = func(request)\n await response(receive, send)\n\n return awaitable\n\n return app\n", "path": "starlette/decorators.py"}], "after_files": [{"content": "from starlette.datastructures import URL, Headers, QueryParams\nfrom collections.abc import Mapping\nimport json\nimport typing\n\n\nclass Request(Mapping):\n def __init__(self, scope, receive=None):\n self._scope = scope\n self._receive = receive\n self._stream_consumed = False\n\n def __getitem__(self, key):\n return self._scope[key]\n\n def __iter__(self):\n return iter(self._scope)\n\n def __len__(self):\n return len(self._scope)\n\n def set_receive_channel(self, receive):\n self._receive = receive\n\n @property\n def method(self) -> str:\n return self._scope[\"method\"]\n\n @property\n def url(self) -> URL:\n if not hasattr(self, \"_url\"):\n scheme = self._scope[\"scheme\"]\n host, port = self._scope[\"server\"]\n path = self._scope[\"path\"]\n query_string = self._scope[\"query_string\"]\n\n if (scheme == \"http\" and port != 80) or (scheme == \"https\" and port != 443):\n url = \"%s://%s:%s%s\" % (scheme, host, port, path)\n else:\n url = \"%s://%s%s\" % (scheme, host, 
path)\n\n if query_string:\n url += \"?\" + query_string.decode()\n\n self._url = URL(url)\n return self._url\n\n @property\n def headers(self) -> Headers:\n if not hasattr(self, \"_headers\"):\n self._headers = Headers(\n [\n (key.decode(), value.decode())\n for key, value in self._scope[\"headers\"]\n ]\n )\n return self._headers\n\n @property\n def query_params(self) -> QueryParams:\n if not hasattr(self, \"_query_params\"):\n query_string = self._scope[\"query_string\"].decode()\n self._query_params = QueryParams(query_string)\n return self._query_params\n\n async def stream(self):\n if hasattr(self, \"_body\"):\n yield self._body\n return\n\n if self._stream_consumed:\n raise RuntimeError(\"Stream consumed\")\n\n if self._receive is None:\n raise RuntimeError(\"Receive channel has not been made available\")\n\n self._stream_consumed = True\n while True:\n message = await self._receive()\n if message[\"type\"] == \"http.request\":\n yield message.get(\"body\", b\"\")\n if not message.get(\"more_body\", False):\n break\n\n async def body(self):\n if not hasattr(self, \"_body\"):\n body = b\"\"\n async for chunk in self.stream():\n body += chunk\n self._body = body\n return self._body\n\n async def json(self):\n if not hasattr(self, \"_json\"):\n body = await self.body()\n self._json = json.loads(body)\n return self._json\n", "path": "starlette/request.py"}, {"content": "from starlette.request import Request\nfrom starlette.response import Response\nfrom starlette.types import ASGIInstance, Receive, Send, Scope\n\n\ndef asgi_application(func):\n def app(scope: Scope) -> ASGIInstance:\n request = Request(scope)\n\n async def awaitable(receive: Receive, send: Send) -> None:\n request.set_receive_channel(receive)\n response = func(request)\n await response(receive, send)\n\n return awaitable\n\n return app\n", "path": "starlette/decorators.py"}]}
| 1,126 | 599 |
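The heart of this patch is inheriting from `collections.abc.Mapping`: once `__getitem__`, `__iter__`, and `__len__` are defined, the standard library supplies `keys()`, `items()`, `get()`, membership tests, and equality for free, which is what lets the request object stand in wherever a `scope` dict is expected. A self-contained sketch of the same pattern (not the Starlette class itself):

```python
from collections.abc import Mapping


class ScopeView(Mapping):
    """Read-only, dict-like view over an ASGI-style scope dict."""

    def __init__(self, scope, receive=None):
        self._scope = scope
        self._receive = receive  # may be attached later, as in the patch

    def __getitem__(self, key):
        return self._scope[key]

    def __iter__(self):
        return iter(self._scope)

    def __len__(self):
        return len(self._scope)


view = ScopeView({"method": "GET", "path": "/items"})
assert view["method"] == "GET"
assert "path" in view and len(view) == 2
assert dict(view) == {"method": "GET", "path": "/items"}
print(sorted(view.keys()))
```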
gh_patches_debug_16899
|
rasdani/github-patches
|
git_diff
|
open-mmlab__mmdetection-1099
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ImportError: cannot import name 'build_sampler' from 'mmdet.core.bbox.assign_sampling'
I have successfully installed mmdetection with the command "pip install -v -e .", but I hit this problem when testing. Would anyone help me?
(lab) gpuserver@ubuntu:~/ht/labs/mmdetection-master$ python
Python 3.7.3 (default, Mar 27 2019, 22:11:17)
[GCC 7.3.0] :: Anaconda, Inc. on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> from mmdet.apis import init_detector
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/apis/__init__.py", line 2, in <module>
from .inference import inference_detector, init_detector, show_result
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/apis/inference.py", line 9, in <module>
from mmdet.core import get_classes
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/__init__.py", line 1, in <module>
from .anchor import * # noqa: F401, F403
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/anchor/__init__.py", line 2, in <module>
from .anchor_target import anchor_inside_flags, anchor_target
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/anchor/anchor_target.py", line 3, in <module>
from ..bbox import PseudoSampler, assign_and_sample, bbox2delta, build_assigner
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/__init__.py", line 1, in <module>
from .assign_sampling import assign_and_sample, build_assigner, build_sampler
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/assign_sampling.py", line 3, in <module>
from . import assigners, samplers
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/samplers/__init__.py", line 2, in <module>
from .combined_sampler import CombinedSampler
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/samplers/combined_sampler.py", line 1, in <module>
from ..assign_sampling import build_sampler
ImportError: cannot import name 'build_sampler' from 'mmdet.core.bbox.assign_sampling' (/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/assign_sampling.py)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmdet/core/bbox/__init__.py`
Content:
```
1 from .assign_sampling import assign_and_sample, build_assigner, build_sampler
2 from .assigners import AssignResult, BaseAssigner, MaxIoUAssigner
3 from .bbox_target import bbox_target
4 from .geometry import bbox_overlaps
5 from .samplers import (BaseSampler, CombinedSampler,
6 InstanceBalancedPosSampler, IoUBalancedNegSampler,
7 PseudoSampler, RandomSampler, SamplingResult)
8 from .transforms import (bbox2delta, bbox2result, bbox2roi, bbox_flip,
9 bbox_mapping, bbox_mapping_back, delta2bbox,
10 distance2bbox, roi2bbox)
11
12 __all__ = [
13 'bbox_overlaps', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult',
14 'BaseSampler', 'PseudoSampler', 'RandomSampler',
15 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
16 'SamplingResult', 'build_assigner', 'build_sampler', 'assign_and_sample',
17 'bbox2delta', 'delta2bbox', 'bbox_flip', 'bbox_mapping',
18 'bbox_mapping_back', 'bbox2roi', 'roi2bbox', 'bbox2result',
19 'distance2bbox', 'bbox_target'
20 ]
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mmdet/core/bbox/__init__.py b/mmdet/core/bbox/__init__.py
--- a/mmdet/core/bbox/__init__.py
+++ b/mmdet/core/bbox/__init__.py
@@ -1,4 +1,3 @@
-from .assign_sampling import assign_and_sample, build_assigner, build_sampler
from .assigners import AssignResult, BaseAssigner, MaxIoUAssigner
from .bbox_target import bbox_target
from .geometry import bbox_overlaps
@@ -9,6 +8,9 @@
bbox_mapping, bbox_mapping_back, delta2bbox,
distance2bbox, roi2bbox)
+from .assign_sampling import ( # isort:skip, avoid recursive imports
+ assign_and_sample, build_assigner, build_sampler)
+
__all__ = [
'bbox_overlaps', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult',
'BaseSampler', 'PseudoSampler', 'RandomSampler',
|
{"golden_diff": "diff --git a/mmdet/core/bbox/__init__.py b/mmdet/core/bbox/__init__.py\n--- a/mmdet/core/bbox/__init__.py\n+++ b/mmdet/core/bbox/__init__.py\n@@ -1,4 +1,3 @@\n-from .assign_sampling import assign_and_sample, build_assigner, build_sampler\n from .assigners import AssignResult, BaseAssigner, MaxIoUAssigner\n from .bbox_target import bbox_target\n from .geometry import bbox_overlaps\n@@ -9,6 +8,9 @@\n bbox_mapping, bbox_mapping_back, delta2bbox,\n distance2bbox, roi2bbox)\n \n+from .assign_sampling import ( # isort:skip, avoid recursive imports\n+ assign_and_sample, build_assigner, build_sampler)\n+\n __all__ = [\n 'bbox_overlaps', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult',\n 'BaseSampler', 'PseudoSampler', 'RandomSampler',\n", "issue": "ImportError: cannot import name 'build_sampler' from 'mmdet.core.bbox.assign_sampling' \nI have successful install the mmdetection by the command \"pip install -v -e .\". But I have the problem in the test. Would anyone help me ?\r\n\r\n(lab) gpuserver@ubuntu:~/ht/labs/mmdetection-master$ python\r\nPython 3.7.3 (default, Mar 27 2019, 22:11:17) \r\n[GCC 7.3.0] :: Anaconda, Inc. on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> from mmdet.apis import init_detector\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/apis/__init__.py\", line 2, in <module>\r\n from .inference import inference_detector, init_detector, show_result\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/apis/inference.py\", line 9, in <module>\r\n from mmdet.core import get_classes\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/__init__.py\", line 1, in <module>\r\n from .anchor import * # noqa: F401, F403\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/anchor/__init__.py\", line 2, in <module>\r\n from .anchor_target import anchor_inside_flags, anchor_target\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/anchor/anchor_target.py\", line 3, in <module>\r\n from ..bbox import PseudoSampler, assign_and_sample, bbox2delta, build_assigner\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/__init__.py\", line 1, in <module>\r\n from .assign_sampling import assign_and_sample, build_assigner, build_sampler\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/assign_sampling.py\", line 3, in <module>\r\n from . 
import assigners, samplers\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/samplers/__init__.py\", line 2, in <module>\r\n from .combined_sampler import CombinedSampler\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/samplers/combined_sampler.py\", line 1, in <module>\r\n from ..assign_sampling import build_sampler\r\nImportError: cannot import name 'build_sampler' from 'mmdet.core.bbox.assign_sampling' (/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/assign_sampling.py)\n", "before_files": [{"content": "from .assign_sampling import assign_and_sample, build_assigner, build_sampler\nfrom .assigners import AssignResult, BaseAssigner, MaxIoUAssigner\nfrom .bbox_target import bbox_target\nfrom .geometry import bbox_overlaps\nfrom .samplers import (BaseSampler, CombinedSampler,\n InstanceBalancedPosSampler, IoUBalancedNegSampler,\n PseudoSampler, RandomSampler, SamplingResult)\nfrom .transforms import (bbox2delta, bbox2result, bbox2roi, bbox_flip,\n bbox_mapping, bbox_mapping_back, delta2bbox,\n distance2bbox, roi2bbox)\n\n__all__ = [\n 'bbox_overlaps', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult',\n 'BaseSampler', 'PseudoSampler', 'RandomSampler',\n 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',\n 'SamplingResult', 'build_assigner', 'build_sampler', 'assign_and_sample',\n 'bbox2delta', 'delta2bbox', 'bbox_flip', 'bbox_mapping',\n 'bbox_mapping_back', 'bbox2roi', 'roi2bbox', 'bbox2result',\n 'distance2bbox', 'bbox_target'\n]\n", "path": "mmdet/core/bbox/__init__.py"}], "after_files": [{"content": "from .assigners import AssignResult, BaseAssigner, MaxIoUAssigner\nfrom .bbox_target import bbox_target\nfrom .geometry import bbox_overlaps\nfrom .samplers import (BaseSampler, CombinedSampler,\n InstanceBalancedPosSampler, IoUBalancedNegSampler,\n PseudoSampler, RandomSampler, SamplingResult)\nfrom .transforms import (bbox2delta, bbox2result, bbox2roi, bbox_flip,\n bbox_mapping, bbox_mapping_back, delta2bbox,\n distance2bbox, roi2bbox)\n\nfrom .assign_sampling import ( # isort:skip, avoid recursive imports\n assign_and_sample, build_assigner, build_sampler)\n\n__all__ = [\n 'bbox_overlaps', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult',\n 'BaseSampler', 'PseudoSampler', 'RandomSampler',\n 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',\n 'SamplingResult', 'build_assigner', 'build_sampler', 'assign_and_sample',\n 'bbox2delta', 'delta2bbox', 'bbox_flip', 'bbox_mapping',\n 'bbox_mapping_back', 'bbox2roi', 'roi2bbox', 'bbox2result',\n 'distance2bbox', 'bbox_target'\n]\n", "path": "mmdet/core/bbox/__init__.py"}]}
| 1,217 | 217 |
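The traceback in this record is a circular import: `bbox/__init__.py` imports `assign_sampling`, which imports the `samplers` package, whose modules immediately import `build_sampler` back out of `assign_sampling` before that module has finished executing. The patch breaks the cycle by moving the `assign_sampling` import to the end of `__init__.py`. A commonly used alternative is to defer the import to call time; a generic sketch of that pattern (not the mmdetection code):

```python
def build_sampler(cfg):
    # Deferred import: the dependency is resolved when the function is first
    # called, not while the package is still initialising, so a module-level
    # cycle never gets a chance to bite. In a real package this line would be
    # something like "from . import samplers".
    import random  # stdlib stand-in for the sibling module in this sketch
    return random.Random(cfg.get("seed"))

sampler = build_sampler({"seed": 42})
print(sampler.random())
```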
gh_patches_debug_35992
|
rasdani/github-patches
|
git_diff
|
safe-global__safe-config-service-8
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Safe App Provider information
The service should (optionally) include information about the provider of the application.
The provider information should have at least the following fields:
```json
{
"name" : <string>,
"url": <string>
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/safe_apps/admin.py`
Content:
```
1 from django.contrib import admin
2
3 from .models import SafeApp
4
5 admin.site.register(SafeApp)
6
```
Path: `src/safe_apps/models.py`
Content:
```
1 from django.contrib.postgres.fields import ArrayField
2 from django.db import models
3
4
5 class SafeApp(models.Model):
6 url = models.URLField(primary_key=True)
7 name = models.CharField(max_length=200)
8 icon_url = models.URLField()
9 description = models.CharField(max_length=200)
10 networks = ArrayField(models.IntegerField())
11
```
Path: `src/safe_apps/migrations/0001_initial.py`
Content:
```
1 # Generated by Django 3.2 on 2021-04-28 13:39
2
3 import django.contrib.postgres.fields
4 from django.db import migrations, models
5
6
7 class Migration(migrations.Migration):
8
9 initial = True
10
11 dependencies = [
12 ]
13
14 operations = [
15 migrations.CreateModel(
16 name='SafeApp',
17 fields=[
18 ('url', models.URLField(primary_key=True, serialize=False)),
19 ('name', models.CharField(max_length=200)),
20 ('icon_url', models.URLField()),
21 ('description', models.CharField(max_length=200)),
22 ('networks', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),
23 ],
24 ),
25 ]
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/safe_apps/admin.py b/src/safe_apps/admin.py
--- a/src/safe_apps/admin.py
+++ b/src/safe_apps/admin.py
@@ -1,5 +1,6 @@
from django.contrib import admin
-from .models import SafeApp
+from .models import SafeApp, Provider
-admin.site.register(SafeApp)
+models = [SafeApp, Provider]
+admin.site.register(models)
diff --git a/src/safe_apps/migrations/0001_initial.py b/src/safe_apps/migrations/0001_initial.py
--- a/src/safe_apps/migrations/0001_initial.py
+++ b/src/safe_apps/migrations/0001_initial.py
@@ -1,7 +1,8 @@
-# Generated by Django 3.2 on 2021-04-28 13:39
+# Generated by Django 3.2 on 2021-04-29 08:22
import django.contrib.postgres.fields
from django.db import migrations, models
+import django.db.models.deletion
class Migration(migrations.Migration):
@@ -12,6 +13,13 @@
]
operations = [
+ migrations.CreateModel(
+ name='Provider',
+ fields=[
+ ('url', models.URLField(primary_key=True, serialize=False)),
+ ('name', models.CharField(max_length=200)),
+ ],
+ ),
migrations.CreateModel(
name='SafeApp',
fields=[
@@ -20,6 +28,7 @@
('icon_url', models.URLField()),
('description', models.CharField(max_length=200)),
('networks', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),
+ ('provider', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='safe_apps.provider')),
],
),
]
diff --git a/src/safe_apps/models.py b/src/safe_apps/models.py
--- a/src/safe_apps/models.py
+++ b/src/safe_apps/models.py
@@ -2,9 +2,21 @@
from django.db import models
+class Provider(models.Model):
+ url = models.URLField(primary_key=True)
+ name = models.CharField(max_length=200)
+
+ def __str__(self):
+ return f'{self.name} | {self.url}'
+
+
class SafeApp(models.Model):
url = models.URLField(primary_key=True)
name = models.CharField(max_length=200)
icon_url = models.URLField()
description = models.CharField(max_length=200)
networks = ArrayField(models.IntegerField())
+ provider = models.ForeignKey(Provider, null=True, on_delete=models.SET_NULL)
+
+ def __str__(self):
+ return f'{self.name} | {self.url} | networks={self.networks}'
|
{"golden_diff": "diff --git a/src/safe_apps/admin.py b/src/safe_apps/admin.py\n--- a/src/safe_apps/admin.py\n+++ b/src/safe_apps/admin.py\n@@ -1,5 +1,6 @@\n from django.contrib import admin\n \n-from .models import SafeApp\n+from .models import SafeApp, Provider\n \n-admin.site.register(SafeApp)\n+models = [SafeApp, Provider]\n+admin.site.register(models)\ndiff --git a/src/safe_apps/migrations/0001_initial.py b/src/safe_apps/migrations/0001_initial.py\n--- a/src/safe_apps/migrations/0001_initial.py\n+++ b/src/safe_apps/migrations/0001_initial.py\n@@ -1,7 +1,8 @@\n-# Generated by Django 3.2 on 2021-04-28 13:39\n+# Generated by Django 3.2 on 2021-04-29 08:22\n \n import django.contrib.postgres.fields\n from django.db import migrations, models\n+import django.db.models.deletion\n \n \n class Migration(migrations.Migration):\n@@ -12,6 +13,13 @@\n ]\n \n operations = [\n+ migrations.CreateModel(\n+ name='Provider',\n+ fields=[\n+ ('url', models.URLField(primary_key=True, serialize=False)),\n+ ('name', models.CharField(max_length=200)),\n+ ],\n+ ),\n migrations.CreateModel(\n name='SafeApp',\n fields=[\n@@ -20,6 +28,7 @@\n ('icon_url', models.URLField()),\n ('description', models.CharField(max_length=200)),\n ('networks', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),\n+ ('provider', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='safe_apps.provider')),\n ],\n ),\n ]\ndiff --git a/src/safe_apps/models.py b/src/safe_apps/models.py\n--- a/src/safe_apps/models.py\n+++ b/src/safe_apps/models.py\n@@ -2,9 +2,21 @@\n from django.db import models\n \n \n+class Provider(models.Model):\n+ url = models.URLField(primary_key=True)\n+ name = models.CharField(max_length=200)\n+\n+ def __str__(self):\n+ return f'{self.name} | {self.url}'\n+\n+\n class SafeApp(models.Model):\n url = models.URLField(primary_key=True)\n name = models.CharField(max_length=200)\n icon_url = models.URLField()\n description = models.CharField(max_length=200)\n networks = ArrayField(models.IntegerField())\n+ provider = models.ForeignKey(Provider, null=True, on_delete=models.SET_NULL)\n+\n+ def __str__(self):\n+ return f'{self.name} | {self.url} | networks={self.networks}'\n", "issue": "Add Safe App Provider information\nThe service should (optionally) include information about the provider of the application.\r\n\r\nThe provider information should have at least the following fields:\r\n\r\n```json\r\n{\r\n \"name\" : <string>,\r\n \"url\": <string>\r\n}\r\n```\n", "before_files": [{"content": "from django.contrib import admin\n\nfrom .models import SafeApp\n\nadmin.site.register(SafeApp)\n", "path": "src/safe_apps/admin.py"}, {"content": "from django.contrib.postgres.fields import ArrayField\nfrom django.db import models\n\n\nclass SafeApp(models.Model):\n url = models.URLField(primary_key=True)\n name = models.CharField(max_length=200)\n icon_url = models.URLField()\n description = models.CharField(max_length=200)\n networks = ArrayField(models.IntegerField())\n", "path": "src/safe_apps/models.py"}, {"content": "# Generated by Django 3.2 on 2021-04-28 13:39\n\nimport django.contrib.postgres.fields\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='SafeApp',\n fields=[\n ('url', models.URLField(primary_key=True, serialize=False)),\n ('name', models.CharField(max_length=200)),\n ('icon_url', models.URLField()),\n ('description', 
models.CharField(max_length=200)),\n ('networks', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),\n ],\n ),\n ]\n", "path": "src/safe_apps/migrations/0001_initial.py"}], "after_files": [{"content": "from django.contrib import admin\n\nfrom .models import SafeApp, Provider\n\nmodels = [SafeApp, Provider]\nadmin.site.register(models)\n", "path": "src/safe_apps/admin.py"}, {"content": "from django.contrib.postgres.fields import ArrayField\nfrom django.db import models\n\n\nclass Provider(models.Model):\n url = models.URLField(primary_key=True)\n name = models.CharField(max_length=200)\n\n def __str__(self):\n return f'{self.name} | {self.url}'\n\n\nclass SafeApp(models.Model):\n url = models.URLField(primary_key=True)\n name = models.CharField(max_length=200)\n icon_url = models.URLField()\n description = models.CharField(max_length=200)\n networks = ArrayField(models.IntegerField())\n provider = models.ForeignKey(Provider, null=True, on_delete=models.SET_NULL)\n\n def __str__(self):\n return f'{self.name} | {self.url} | networks={self.networks}'\n", "path": "src/safe_apps/models.py"}, {"content": "# Generated by Django 3.2 on 2021-04-29 08:22\n\nimport django.contrib.postgres.fields\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Provider',\n fields=[\n ('url', models.URLField(primary_key=True, serialize=False)),\n ('name', models.CharField(max_length=200)),\n ],\n ),\n migrations.CreateModel(\n name='SafeApp',\n fields=[\n ('url', models.URLField(primary_key=True, serialize=False)),\n ('name', models.CharField(max_length=200)),\n ('icon_url', models.URLField()),\n ('description', models.CharField(max_length=200)),\n ('networks', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),\n ('provider', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='safe_apps.provider')),\n ],\n ),\n ]\n", "path": "src/safe_apps/migrations/0001_initial.py"}]}
| 674 | 634 |
gh_patches_debug_17476
|
rasdani/github-patches
|
git_diff
|
ray-project__ray-840
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Not possible to install Ray from git
I want to install Ray for Python 3.
```
$ apt-get install -y cmake pkg-config python3-dev build-essential autoconf curl libtool libboost-all-dev unzip
$ pip3 install git+https://github.com/ray-project/ray.git@37282330c0ea687fd1b983176dce85731fcf189d#subdirectory=python
```
But this tries to install it for python2.7, and not python3, failing with:
```
CMake Error at cmake_modules/FindNumPy.cmake:62 (message):
NumPy import failure:
Traceback (most recent call last):
File "<string>", line 1, in <module>
ImportError: No module named numpy
```
Because numpy is installed only for Python 3.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/setup.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 import os
6 import shutil
7 import subprocess
8 import sys
9
10 from setuptools import setup, find_packages, Distribution
11 import setuptools.command.build_ext as _build_ext
12
13 # Ideally, we could include these files by putting them in a
14 # MANIFEST.in or using the package_data argument to setup, but the
15 # MANIFEST.in gets applied at the very beginning when setup.py runs
16 # before these files have been created, so we have to move the files
17 # manually.
18 ray_files = [
19 "ray/core/src/common/thirdparty/redis/src/redis-server",
20 "ray/core/src/common/redis_module/libray_redis_module.so",
21 "ray/core/src/plasma/plasma_store",
22 "ray/core/src/plasma/plasma_manager",
23 "ray/core/src/local_scheduler/local_scheduler",
24 "ray/core/src/local_scheduler/liblocal_scheduler_library.so",
25 "ray/core/src/numbuf/libnumbuf.so",
26 "ray/core/src/global_scheduler/global_scheduler",
27 "ray/WebUI.ipynb"
28 ]
29
30
31 class build_ext(_build_ext.build_ext):
32 def run(self):
33 # Note: We are passing in sys.executable so that we use the same
34 # version of Python to build pyarrow inside the build.sh script. Note
35 # that certain flags will not be passed along such as --user or sudo.
36 # TODO(rkn): Fix this.
37 subprocess.check_call(["../build.sh", sys.executable])
38
39 # We also need to install pyarrow along with Ray, so make sure that the
40 # relevant non-Python pyarrow files get copied.
41 pyarrow_files = [
42 os.path.join("ray/pyarrow_files/pyarrow", filename)
43 for filename in os.listdir("./ray/pyarrow_files/pyarrow")
44 if not os.path.isdir(os.path.join("ray/pyarrow_files/pyarrow",
45 filename))]
46
47 files_to_include = ray_files + pyarrow_files
48
49 for filename in files_to_include:
50 self.move_file(filename)
51 # Copy over the autogenerated flatbuffer Python bindings.
52 generated_python_directory = "ray/core/generated"
53 for filename in os.listdir(generated_python_directory):
54 if filename[-3:] == ".py":
55 self.move_file(os.path.join(generated_python_directory,
56 filename))
57
58 def move_file(self, filename):
59 # TODO(rkn): This feels very brittle. It may not handle all cases. See
60 # https://github.com/apache/arrow/blob/master/python/setup.py for an
61 # example.
62 source = filename
63 destination = os.path.join(self.build_lib, filename)
64 # Create the target directory if it doesn't already exist.
65 parent_directory = os.path.dirname(destination)
66 if not os.path.exists(parent_directory):
67 os.makedirs(parent_directory)
68 print("Copying {} to {}.".format(source, destination))
69 shutil.copy(source, destination)
70
71
72 class BinaryDistribution(Distribution):
73 def has_ext_modules(self):
74 return True
75
76
77 setup(name="ray",
78 version="0.1.2",
79 packages=find_packages(),
80 cmdclass={"build_ext": build_ext},
81 # The BinaryDistribution argument triggers build_ext.
82 distclass=BinaryDistribution,
83 install_requires=["numpy",
84 "funcsigs",
85 "click",
86 "colorama",
87 "psutil",
88 "redis",
89 "cloudpickle >= 0.2.2",
90 "flatbuffers"],
91 entry_points={"console_scripts": ["ray=ray.scripts.scripts:main"]},
92 include_package_data=True,
93 zip_safe=False,
94 license="Apache 2.0")
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/setup.py b/python/setup.py
--- a/python/setup.py
+++ b/python/setup.py
@@ -81,12 +81,15 @@
# The BinaryDistribution argument triggers build_ext.
distclass=BinaryDistribution,
install_requires=["numpy",
+ "cython",
"funcsigs",
"click",
"colorama",
"psutil",
"redis",
"cloudpickle >= 0.2.2",
+ # The six module is required by pyarrow.
+ "six >= 1.0.0",
"flatbuffers"],
entry_points={"console_scripts": ["ray=ray.scripts.scripts:main"]},
include_package_data=True,
|
{"golden_diff": "diff --git a/python/setup.py b/python/setup.py\n--- a/python/setup.py\n+++ b/python/setup.py\n@@ -81,12 +81,15 @@\n # The BinaryDistribution argument triggers build_ext.\n distclass=BinaryDistribution,\n install_requires=[\"numpy\",\n+ \"cython\",\n \"funcsigs\",\n \"click\",\n \"colorama\",\n \"psutil\",\n \"redis\",\n \"cloudpickle >= 0.2.2\",\n+ # The six module is required by pyarrow.\n+ \"six >= 1.0.0\",\n \"flatbuffers\"],\n entry_points={\"console_scripts\": [\"ray=ray.scripts.scripts:main\"]},\n include_package_data=True,\n", "issue": "Not possible to install Ray from git\nI want to install Ray for Python 3.\r\n\r\n```\r\n$ apt-get install -y cmake pkg-config python3-dev build-essential autoconf curl libtool libboost-all-dev unzip\r\n$ pip3 install git+https://github.com/ray-project/ray.git@37282330c0ea687fd1b983176dce85731fcf189d#subdirectory=python\r\n```\r\n\r\nBut this tries to install it for python2.7, and not python3, failing with:\r\n\r\n```\r\n CMake Error at cmake_modules/FindNumPy.cmake:62 (message):\r\n NumPy import failure:\r\n \r\n Traceback (most recent call last):\r\n \r\n File \"<string>\", line 1, in <module>\r\n \r\n ImportError: No module named numpy\r\n```\r\n\r\nBecause numpy is installed only for Python 3.\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup, find_packages, Distribution\nimport setuptools.command.build_ext as _build_ext\n\n# Ideally, we could include these files by putting them in a\n# MANIFEST.in or using the package_data argument to setup, but the\n# MANIFEST.in gets applied at the very beginning when setup.py runs\n# before these files have been created, so we have to move the files\n# manually.\nray_files = [\n \"ray/core/src/common/thirdparty/redis/src/redis-server\",\n \"ray/core/src/common/redis_module/libray_redis_module.so\",\n \"ray/core/src/plasma/plasma_store\",\n \"ray/core/src/plasma/plasma_manager\",\n \"ray/core/src/local_scheduler/local_scheduler\",\n \"ray/core/src/local_scheduler/liblocal_scheduler_library.so\",\n \"ray/core/src/numbuf/libnumbuf.so\",\n \"ray/core/src/global_scheduler/global_scheduler\",\n \"ray/WebUI.ipynb\"\n]\n\n\nclass build_ext(_build_ext.build_ext):\n def run(self):\n # Note: We are passing in sys.executable so that we use the same\n # version of Python to build pyarrow inside the build.sh script. Note\n # that certain flags will not be passed along such as --user or sudo.\n # TODO(rkn): Fix this.\n subprocess.check_call([\"../build.sh\", sys.executable])\n\n # We also need to install pyarrow along with Ray, so make sure that the\n # relevant non-Python pyarrow files get copied.\n pyarrow_files = [\n os.path.join(\"ray/pyarrow_files/pyarrow\", filename)\n for filename in os.listdir(\"./ray/pyarrow_files/pyarrow\")\n if not os.path.isdir(os.path.join(\"ray/pyarrow_files/pyarrow\",\n filename))]\n\n files_to_include = ray_files + pyarrow_files\n\n for filename in files_to_include:\n self.move_file(filename)\n # Copy over the autogenerated flatbuffer Python bindings.\n generated_python_directory = \"ray/core/generated\"\n for filename in os.listdir(generated_python_directory):\n if filename[-3:] == \".py\":\n self.move_file(os.path.join(generated_python_directory,\n filename))\n\n def move_file(self, filename):\n # TODO(rkn): This feels very brittle. It may not handle all cases. 
See\n # https://github.com/apache/arrow/blob/master/python/setup.py for an\n # example.\n source = filename\n destination = os.path.join(self.build_lib, filename)\n # Create the target directory if it doesn't already exist.\n parent_directory = os.path.dirname(destination)\n if not os.path.exists(parent_directory):\n os.makedirs(parent_directory)\n print(\"Copying {} to {}.\".format(source, destination))\n shutil.copy(source, destination)\n\n\nclass BinaryDistribution(Distribution):\n def has_ext_modules(self):\n return True\n\n\nsetup(name=\"ray\",\n version=\"0.1.2\",\n packages=find_packages(),\n cmdclass={\"build_ext\": build_ext},\n # The BinaryDistribution argument triggers build_ext.\n distclass=BinaryDistribution,\n install_requires=[\"numpy\",\n \"funcsigs\",\n \"click\",\n \"colorama\",\n \"psutil\",\n \"redis\",\n \"cloudpickle >= 0.2.2\",\n \"flatbuffers\"],\n entry_points={\"console_scripts\": [\"ray=ray.scripts.scripts:main\"]},\n include_package_data=True,\n zip_safe=False,\n license=\"Apache 2.0\")\n", "path": "python/setup.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup, find_packages, Distribution\nimport setuptools.command.build_ext as _build_ext\n\n# Ideally, we could include these files by putting them in a\n# MANIFEST.in or using the package_data argument to setup, but the\n# MANIFEST.in gets applied at the very beginning when setup.py runs\n# before these files have been created, so we have to move the files\n# manually.\nray_files = [\n \"ray/core/src/common/thirdparty/redis/src/redis-server\",\n \"ray/core/src/common/redis_module/libray_redis_module.so\",\n \"ray/core/src/plasma/plasma_store\",\n \"ray/core/src/plasma/plasma_manager\",\n \"ray/core/src/local_scheduler/local_scheduler\",\n \"ray/core/src/local_scheduler/liblocal_scheduler_library.so\",\n \"ray/core/src/numbuf/libnumbuf.so\",\n \"ray/core/src/global_scheduler/global_scheduler\",\n \"ray/WebUI.ipynb\"\n]\n\n\nclass build_ext(_build_ext.build_ext):\n def run(self):\n # Note: We are passing in sys.executable so that we use the same\n # version of Python to build pyarrow inside the build.sh script. Note\n # that certain flags will not be passed along such as --user or sudo.\n # TODO(rkn): Fix this.\n subprocess.check_call([\"../build.sh\", sys.executable])\n\n # We also need to install pyarrow along with Ray, so make sure that the\n # relevant non-Python pyarrow files get copied.\n pyarrow_files = [\n os.path.join(\"ray/pyarrow_files/pyarrow\", filename)\n for filename in os.listdir(\"./ray/pyarrow_files/pyarrow\")\n if not os.path.isdir(os.path.join(\"ray/pyarrow_files/pyarrow\",\n filename))]\n\n files_to_include = ray_files + pyarrow_files\n\n for filename in files_to_include:\n self.move_file(filename)\n # Copy over the autogenerated flatbuffer Python bindings.\n generated_python_directory = \"ray/core/generated\"\n for filename in os.listdir(generated_python_directory):\n if filename[-3:] == \".py\":\n self.move_file(os.path.join(generated_python_directory,\n filename))\n\n def move_file(self, filename):\n # TODO(rkn): This feels very brittle. It may not handle all cases. 
See\n # https://github.com/apache/arrow/blob/master/python/setup.py for an\n # example.\n source = filename\n destination = os.path.join(self.build_lib, filename)\n # Create the target directory if it doesn't already exist.\n parent_directory = os.path.dirname(destination)\n if not os.path.exists(parent_directory):\n os.makedirs(parent_directory)\n print(\"Copying {} to {}.\".format(source, destination))\n shutil.copy(source, destination)\n\n\nclass BinaryDistribution(Distribution):\n def has_ext_modules(self):\n return True\n\n\nsetup(name=\"ray\",\n version=\"0.1.2\",\n packages=find_packages(),\n cmdclass={\"build_ext\": build_ext},\n # The BinaryDistribution argument triggers build_ext.\n distclass=BinaryDistribution,\n install_requires=[\"numpy\",\n \"cython\",\n \"funcsigs\",\n \"click\",\n \"colorama\",\n \"psutil\",\n \"redis\",\n \"cloudpickle >= 0.2.2\",\n # The six module is required by pyarrow.\n \"six >= 1.0.0\",\n \"flatbuffers\"],\n entry_points={\"console_scripts\": [\"ray=ray.scripts.scripts:main\"]},\n include_package_data=True,\n zip_safe=False,\n license=\"Apache 2.0\")\n", "path": "python/setup.py"}]}
| 1,409 | 156 |
gh_patches_debug_15745
|
rasdani/github-patches
|
git_diff
|
pwndbg__pwndbg-874
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pwndbg gets stuck in GDB 10.1
<!--
Before reporting a new issue, make sure that we do not have any duplicates already open.
If there is one it might be good to take part in the discussion there.
Please make sure you have checked that the issue persists on LATEST pwndbg version.
Below is a template for BUG REPORTS.
Don't include it if this is a FEATURE REQUEST.
-->
### Description
pwndbg gets stuck in GDB 10.1
<!--
Briefly describe the problem you are having in a few paragraphs.
-->
### Steps to reproduce
<!--
What do we have to do to reproduce the problem?
If this is connected to particular C/asm code,
please provide the smallest C code that reproduces the issue.
-->
### My setup
<!--
Show us your gdb/python/pwndbg/OS/IDA Pro version (depending on your case).
NOTE: We are currently supporting only Ubuntu installations.
It is known that pwndbg is not fully working e.g. on Arch Linux (the heap stuff is not working there).
If you would like to change this situation - help us improving pwndbg and supporting other distros!
This can be displayed in pwndbg through `version` command.
If it is somehow unavailable, use:
* `show version` - for gdb
* `py import sys; print(sys.version)` - for python
* pwndbg version/git commit id
-->
```
Gdb: 10.1
Python: 3.8.6 (default, Sep 30 2020, 04:00:38) [GCC 10.2.0]
Pwndbg: 1.1.0
Capstone: 4.0.1024
Unicorn: 1.0.2
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/heap/__init__.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import pwndbg.heap.heap
5 import pwndbg.symbol
6
7 current = None
8
9 heap_chain_limit = pwndbg.config.Parameter('heap-dereference-limit', 8, 'number of bins to dereference')
10
11 @pwndbg.events.new_objfile
12 def update():
13 import pwndbg.heap.dlmalloc
14 import pwndbg.heap.ptmalloc
15
16 global current
17
18
19 if pwndbg.symbol.address('ptmalloc_init'):
20 current = pwndbg.heap.ptmalloc.Heap()
21
22 else:
23 # Default to ptmalloc heap for now until
24 # there are more implementations
25 current = pwndbg.heap.ptmalloc.Heap()
26
```
Path: `pwndbg/heap/dlmalloc.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import gdb
5
6 import pwndbg.events
7 import pwndbg.typeinfo
8
9
10 class Heap(pwndbg.heap.heap.BaseHeap):
11 pass
12
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pwndbg/heap/__init__.py b/pwndbg/heap/__init__.py
--- a/pwndbg/heap/__init__.py
+++ b/pwndbg/heap/__init__.py
@@ -8,18 +8,8 @@
heap_chain_limit = pwndbg.config.Parameter('heap-dereference-limit', 8, 'number of bins to dereference')
[email protected]_objfile
[email protected]
def update():
- import pwndbg.heap.dlmalloc
import pwndbg.heap.ptmalloc
-
global current
-
-
- if pwndbg.symbol.address('ptmalloc_init'):
- current = pwndbg.heap.ptmalloc.Heap()
-
- else:
- # Default to ptmalloc heap for now until
- # there are more implementations
- current = pwndbg.heap.ptmalloc.Heap()
+ current = pwndbg.heap.ptmalloc.Heap()
diff --git a/pwndbg/heap/dlmalloc.py b/pwndbg/heap/dlmalloc.py
deleted file mode 100644
--- a/pwndbg/heap/dlmalloc.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import gdb
-
-import pwndbg.events
-import pwndbg.typeinfo
-
-
-class Heap(pwndbg.heap.heap.BaseHeap):
- pass
|
{"golden_diff": "diff --git a/pwndbg/heap/__init__.py b/pwndbg/heap/__init__.py\n--- a/pwndbg/heap/__init__.py\n+++ b/pwndbg/heap/__init__.py\n@@ -8,18 +8,8 @@\n \n heap_chain_limit = pwndbg.config.Parameter('heap-dereference-limit', 8, 'number of bins to dereference')\n \[email protected]_objfile\[email protected]\n def update():\n- import pwndbg.heap.dlmalloc\n import pwndbg.heap.ptmalloc\n-\n global current\n-\n-\n- if pwndbg.symbol.address('ptmalloc_init'):\n- current = pwndbg.heap.ptmalloc.Heap()\n-\n- else:\n- # Default to ptmalloc heap for now until\n- # there are more implementations\n- current = pwndbg.heap.ptmalloc.Heap()\n+ current = pwndbg.heap.ptmalloc.Heap()\ndiff --git a/pwndbg/heap/dlmalloc.py b/pwndbg/heap/dlmalloc.py\ndeleted file mode 100644\n--- a/pwndbg/heap/dlmalloc.py\n+++ /dev/null\n@@ -1,11 +0,0 @@\n-#!/usr/bin/env python\n-# -*- coding: utf-8 -*-\n-\n-import gdb\n-\n-import pwndbg.events\n-import pwndbg.typeinfo\n-\n-\n-class Heap(pwndbg.heap.heap.BaseHeap):\n- pass\n", "issue": "pwndbg gets stuck in GDB 10.1\n<!--\r\nBefore reporting a new issue, make sure that we do not have any duplicates already open.\r\nIf there is one it might be good to take part in the discussion there.\r\n\r\nPlease make sure you have checked that the issue persists on LATEST pwndbg version.\r\n\r\nBelow is a template for BUG REPORTS.\r\nDon't include it if this is a FEATURE REQUEST.\r\n-->\r\n\r\n\r\n### Description\r\npwndbg gets stuck in GDB 10.1\r\n<!--\r\nBriefly describe the problem you are having in a few paragraphs.\r\n-->\r\n\r\n### Steps to reproduce\r\n\r\n\r\n<!--\r\nWhat do we have to do to reproduce the problem?\r\nIf this is connected to particular C/asm code, \r\nplease provide the smallest C code that reproduces the issue.\r\n-->\r\n\r\n### My setup\r\n\r\n<!--\r\nShow us your gdb/python/pwndbg/OS/IDA Pro version (depending on your case).\r\n\r\nNOTE: We are currently supporting only Ubuntu installations.\r\nIt is known that pwndbg is not fully working e.g. 
on Arch Linux (the heap stuff is not working there).\r\nIf you would like to change this situation - help us improving pwndbg and supporting other distros!\r\n\r\nThis can be displayed in pwndbg through `version` command.\r\n\r\nIf it is somehow unavailable, use:\r\n* `show version` - for gdb\r\n* `py import sys; print(sys.version)` - for python\r\n* pwndbg version/git commit id\r\n-->\r\n```\r\nGdb: 10.1\r\nPython: 3.8.6 (default, Sep 30 2020, 04:00:38) [GCC 10.2.0]\r\nPwndbg: 1.1.0\r\nCapstone: 4.0.1024\r\nUnicorn: 1.0.2\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pwndbg.heap.heap\nimport pwndbg.symbol\n\ncurrent = None\n\nheap_chain_limit = pwndbg.config.Parameter('heap-dereference-limit', 8, 'number of bins to dereference')\n\[email protected]_objfile\ndef update():\n import pwndbg.heap.dlmalloc\n import pwndbg.heap.ptmalloc\n\n global current\n\n\n if pwndbg.symbol.address('ptmalloc_init'):\n current = pwndbg.heap.ptmalloc.Heap()\n\n else:\n # Default to ptmalloc heap for now until\n # there are more implementations\n current = pwndbg.heap.ptmalloc.Heap()\n", "path": "pwndbg/heap/__init__.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport gdb\n\nimport pwndbg.events\nimport pwndbg.typeinfo\n\n\nclass Heap(pwndbg.heap.heap.BaseHeap):\n pass\n", "path": "pwndbg/heap/dlmalloc.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pwndbg.heap.heap\nimport pwndbg.symbol\n\ncurrent = None\n\nheap_chain_limit = pwndbg.config.Parameter('heap-dereference-limit', 8, 'number of bins to dereference')\n\[email protected]\ndef update():\n import pwndbg.heap.ptmalloc\n global current\n current = pwndbg.heap.ptmalloc.Heap()\n", "path": "pwndbg/heap/__init__.py"}, {"content": null, "path": "pwndbg/heap/dlmalloc.py"}]}
| 1,027 | 326 |
gh_patches_debug_20387
|
rasdani/github-patches
|
git_diff
|
unionai-oss__pandera-416
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
improve contribution instructions and expose in the docs
#### Location of the documentation
NA
#### Documentation problem
Currently, the contribution documentation is in [github](https://github.com/pandera-dev/pandera/blob/master/.github/CONTRIBUTING.md) but it would be nice to also expose it in the online documentation.
Additionally, we also want to document the process of contributing:
Enhancements (New features)
- create a `feature/<my-feature>` or `feature/<issue-number>` branch
- make a pull request to `dev`
Bugs
- create a `bugfix/<my-fix>` or `bugfix/<issue-number>` branch
- make a pull request to `master`
Docs
- create a `docs/<my-docs>` or `docs/<issue-number>` branch
- make a pull request to `master`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/conf.py`
Content:
```
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # http://www.sphinx-doc.org/en/master/config
6
7 # -- Path setup --------------------------------------------------------------
8
9 import doctest
10 import logging as pylogging
11
12 # If extensions (or modules to document with autodoc) are in another directory,
13 # add these directories to sys.path here. If the directory is relative to the
14 # documentation root, use os.path.abspath to make it absolute, like shown here.
15 #
16 import os
17 import sys
18
19 from sphinx.util import logging
20
21 sys.path.insert(0, os.path.abspath("../../pandera"))
22
23
24 # -- Project information -----------------------------------------------------
25
26 project = "pandera"
27 copyright = "2019, Niels Bantilan, Nigel Markey, Jean-Francois Zinque"
28 author = "Niels Bantilan, Nigel Markey, Jean-Francois Zinque"
29
30
31 # -- General configuration ---------------------------------------------------
32
33 # Add any Sphinx extension module names here, as strings. They can be
34 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
35 # ones.
36 extensions = [
37 "sphinx.ext.autodoc",
38 "sphinx.ext.autosummary",
39 "sphinx.ext.intersphinx",
40 "sphinx.ext.doctest",
41 "sphinx_autodoc_typehints",
42 "sphinx.ext.viewcode",
43 ]
44
45 doctest_global_setup = """
46 import sys
47 import pandas as pd
48 import numpy as np
49 from packaging import version
50 pd.options.display.max_columns = None # For Travis on macOS
51 pd.options.display.max_rows = None # For Travis on macOS
52
53 try:
54 import hypothesis
55 except ImportError:
56 SKIP_STRATEGY = True
57 else:
58 SKIP_STRATEGY = False
59
60 SKIP = sys.version_info < (3, 6)
61 PY36 = sys.version_info < (3, 7)
62 SKIP_PANDAS_LT_V1 = version.parse(pd.__version__).release < (1, 0) or PY36
63 """
64
65 doctest_default_flags = (
66 0
67 | doctest.DONT_ACCEPT_TRUE_FOR_1
68 | doctest.ELLIPSIS
69 | doctest.IGNORE_EXCEPTION_DETAIL
70 | doctest.NORMALIZE_WHITESPACE
71 )
72
73 # Add any paths that contain templates here, relative to this directory.
74 templates_path = ["_templates"]
75
76 # The master toctree document.
77 master_doc = "index"
78
79 # List of patterns, relative to source directory, that match files and
80 # directories to ignore when looking for source files.
81 # This pattern also affects html_static_path and html_extra_path.
82 exclude_patterns = []
83
84 autoclass_content = "both"
85 pygments_style = None
86
87 autodoc_default_options = {
88 # 'special-members': '__call__',
89 "undoc-members": False,
90 # 'exclude-members': '__weakref__'
91 }
92
93 # -- Options for HTML output -------------------------------------------------
94
95 # The theme to use for HTML and HTML Help pages. See the documentation for
96 # a list of builtin themes.
97 #
98 html_theme = "sphinx_rtd_theme"
99
100 # Theme options are theme-specific and customize the look and feel of a theme
101 # further. For a list of options available for each theme, see the
102 # documentation.
103
104 html_logo = "_static/pandera-banner-white.png"
105 html_theme_options = {
106 "collapse_navigation": False,
107 "logo_only": True,
108 "analytics_id": "UA-71018060-2",
109 }
110
111 # Add any paths that contain custom static files (such as style sheets) here,
112 # relative to this directory. They are copied after the builtin static files,
113 # so a file named "default.css" will overwrite the builtin "default.css".
114 html_static_path = ["_static"]
115
116 rst_prolog = """
117 .. role:: red
118 .. role:: green
119 """
120
121 autosummary_generate = ["API_reference.rst"]
122 autosummary_filename_map = {
123 "pandera.Check": "pandera.Check",
124 "pandera.check": "pandera.check_decorator",
125 }
126
127 intersphinx_mapping = {
128 "python": ("https://docs.python.org/3/", None),
129 "numpy": ("https://docs.scipy.org/doc/numpy/", None),
130 "pandas": ("http://pandas.pydata.org/pandas-docs/stable/", None),
131 }
132
133
134 # this is a workaround to filter out forward reference issue in
135 # sphinx_autodoc_typehints
136 class FilterPandasTypeAnnotationWarning(pylogging.Filter):
137 def filter(self, record: pylogging.LogRecord) -> bool:
138 # You probably should make this check more specific by checking
139 # that dataclass name is in the message, so that you don't filter out
140 # other meaningful warnings
141 return not record.getMessage().startswith(
142 "Cannot resolve forward reference in type annotations of "
143 '"pandera.typing.DataFrame"'
144 )
145
146
147 logging.getLogger("sphinx_autodoc_typehints").logger.addFilter(
148 FilterPandasTypeAnnotationWarning()
149 )
150
151
152 def setup(app):
153 app.add_css_file("default.css")
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -14,6 +14,7 @@
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
+import shutil
import sys
from sphinx.util import logging
@@ -40,6 +41,7 @@
"sphinx.ext.doctest",
"sphinx_autodoc_typehints",
"sphinx.ext.viewcode",
+ "recommonmark",
]
doctest_global_setup = """
@@ -70,6 +72,19 @@
| doctest.NORMALIZE_WHITESPACE
)
+source_suffix = {
+ ".rst": "restructuredtext",
+ ".md": "markdown",
+}
+
+# copy CONTRIBUTING.md docs into source directory
+shutil.copyfile(
+ os.path.join(
+ os.path.dirname(__file__), "..", "..", ".github", "CONTRIBUTING.md"
+ ),
+ "CONTRIBUTING.md",
+)
+
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
|
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -14,6 +14,7 @@\n # documentation root, use os.path.abspath to make it absolute, like shown here.\n #\n import os\n+import shutil\n import sys\n \n from sphinx.util import logging\n@@ -40,6 +41,7 @@\n \"sphinx.ext.doctest\",\n \"sphinx_autodoc_typehints\",\n \"sphinx.ext.viewcode\",\n+ \"recommonmark\",\n ]\n \n doctest_global_setup = \"\"\"\n@@ -70,6 +72,19 @@\n | doctest.NORMALIZE_WHITESPACE\n )\n \n+source_suffix = {\n+ \".rst\": \"restructuredtext\",\n+ \".md\": \"markdown\",\n+}\n+\n+# copy CONTRIBUTING.md docs into source directory\n+shutil.copyfile(\n+ os.path.join(\n+ os.path.dirname(__file__), \"..\", \"..\", \".github\", \"CONTRIBUTING.md\"\n+ ),\n+ \"CONTRIBUTING.md\",\n+)\n+\n # Add any paths that contain templates here, relative to this directory.\n templates_path = [\"_templates\"]\n", "issue": "improve contribution instructions and expose in the docs\n#### Location of the documentation\r\n\r\nNA\r\n\r\n#### Documentation problem\r\n\r\nCurrently, the contribution documentation is in [github](https://github.com/pandera-dev/pandera/blob/master/.github/CONTRIBUTING.md) but it would be nice to also expose it in the online documentation.\r\n\r\nAdditionally, we also want to document the process of contributing:\r\n\r\nEnhancements (New features)\r\n- create a `feature/<my-feature>` or `feature/<issue-number>` branch\r\n- make a pull request to `dev`\r\n\r\nBugs\r\n- create a `bugfix/<my-fix>` or `bugfix/<issue-number>` branch\r\n- make a pull request to `master`\r\n\r\nDocs\r\n- create a `docs/<my-docs>` or `docs/<issue-number>` branch\r\n- make a pull request to `master`\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\nimport doctest\nimport logging as pylogging\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nfrom sphinx.util import logging\n\nsys.path.insert(0, os.path.abspath(\"../../pandera\"))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pandera\"\ncopyright = \"2019, Niels Bantilan, Nigel Markey, Jean-Francois Zinque\"\nauthor = \"Niels Bantilan, Nigel Markey, Jean-Francois Zinque\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.doctest\",\n \"sphinx_autodoc_typehints\",\n \"sphinx.ext.viewcode\",\n]\n\ndoctest_global_setup = \"\"\"\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom packaging import version\npd.options.display.max_columns = None # For Travis on macOS\npd.options.display.max_rows = None # For Travis on macOS\n\ntry:\n import hypothesis\nexcept ImportError:\n SKIP_STRATEGY = True\nelse:\n SKIP_STRATEGY = False\n\nSKIP = sys.version_info < (3, 6)\nPY36 = sys.version_info < (3, 7)\nSKIP_PANDAS_LT_V1 = version.parse(pd.__version__).release < (1, 0) or PY36\n\"\"\"\n\ndoctest_default_flags = (\n 0\n | doctest.DONT_ACCEPT_TRUE_FOR_1\n | doctest.ELLIPSIS\n | doctest.IGNORE_EXCEPTION_DETAIL\n | doctest.NORMALIZE_WHITESPACE\n)\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\nautoclass_content = \"both\"\npygments_style = None\n\nautodoc_default_options = {\n # 'special-members': '__call__',\n \"undoc-members\": False,\n # 'exclude-members': '__weakref__'\n}\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n\nhtml_logo = \"_static/pandera-banner-white.png\"\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"logo_only\": True,\n \"analytics_id\": \"UA-71018060-2\",\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\nrst_prolog = \"\"\"\n.. role:: red\n.. 
role:: green\n\"\"\"\n\nautosummary_generate = [\"API_reference.rst\"]\nautosummary_filename_map = {\n \"pandera.Check\": \"pandera.Check\",\n \"pandera.check\": \"pandera.check_decorator\",\n}\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"numpy\": (\"https://docs.scipy.org/doc/numpy/\", None),\n \"pandas\": (\"http://pandas.pydata.org/pandas-docs/stable/\", None),\n}\n\n\n# this is a workaround to filter out forward reference issue in\n# sphinx_autodoc_typehints\nclass FilterPandasTypeAnnotationWarning(pylogging.Filter):\n def filter(self, record: pylogging.LogRecord) -> bool:\n # You probably should make this check more specific by checking\n # that dataclass name is in the message, so that you don't filter out\n # other meaningful warnings\n return not record.getMessage().startswith(\n \"Cannot resolve forward reference in type annotations of \"\n '\"pandera.typing.DataFrame\"'\n )\n\n\nlogging.getLogger(\"sphinx_autodoc_typehints\").logger.addFilter(\n FilterPandasTypeAnnotationWarning()\n)\n\n\ndef setup(app):\n app.add_css_file(\"default.css\")\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\nimport doctest\nimport logging as pylogging\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport shutil\nimport sys\n\nfrom sphinx.util import logging\n\nsys.path.insert(0, os.path.abspath(\"../../pandera\"))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pandera\"\ncopyright = \"2019, Niels Bantilan, Nigel Markey, Jean-Francois Zinque\"\nauthor = \"Niels Bantilan, Nigel Markey, Jean-Francois Zinque\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.doctest\",\n \"sphinx_autodoc_typehints\",\n \"sphinx.ext.viewcode\",\n \"recommonmark\",\n]\n\ndoctest_global_setup = \"\"\"\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom packaging import version\npd.options.display.max_columns = None # For Travis on macOS\npd.options.display.max_rows = None # For Travis on macOS\n\ntry:\n import hypothesis\nexcept ImportError:\n SKIP_STRATEGY = True\nelse:\n SKIP_STRATEGY = False\n\nSKIP = sys.version_info < (3, 6)\nPY36 = sys.version_info < (3, 7)\nSKIP_PANDAS_LT_V1 = version.parse(pd.__version__).release < (1, 0) or PY36\n\"\"\"\n\ndoctest_default_flags = (\n 0\n | doctest.DONT_ACCEPT_TRUE_FOR_1\n | doctest.ELLIPSIS\n | doctest.IGNORE_EXCEPTION_DETAIL\n | doctest.NORMALIZE_WHITESPACE\n)\n\nsource_suffix = {\n \".rst\": \"restructuredtext\",\n \".md\": \"markdown\",\n}\n\n# copy CONTRIBUTING.md docs into source directory\nshutil.copyfile(\n os.path.join(\n os.path.dirname(__file__), \"..\", \"..\", \".github\", \"CONTRIBUTING.md\"\n ),\n \"CONTRIBUTING.md\",\n)\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\nautoclass_content = \"both\"\npygments_style = None\n\nautodoc_default_options = {\n # 'special-members': '__call__',\n \"undoc-members\": False,\n # 'exclude-members': '__weakref__'\n}\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n\nhtml_logo = \"_static/pandera-banner-white.png\"\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"logo_only\": True,\n \"analytics_id\": \"UA-71018060-2\",\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\nrst_prolog = \"\"\"\n.. role:: red\n.. 
role:: green\n\"\"\"\n\nautosummary_generate = [\"API_reference.rst\"]\nautosummary_filename_map = {\n \"pandera.Check\": \"pandera.Check\",\n \"pandera.check\": \"pandera.check_decorator\",\n}\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"numpy\": (\"https://docs.scipy.org/doc/numpy/\", None),\n \"pandas\": (\"http://pandas.pydata.org/pandas-docs/stable/\", None),\n}\n\n\n# this is a workaround to filter out forward reference issue in\n# sphinx_autodoc_typehints\nclass FilterPandasTypeAnnotationWarning(pylogging.Filter):\n def filter(self, record: pylogging.LogRecord) -> bool:\n # You probably should make this check more specific by checking\n # that dataclass name is in the message, so that you don't filter out\n # other meaningful warnings\n return not record.getMessage().startswith(\n \"Cannot resolve forward reference in type annotations of \"\n '\"pandera.typing.DataFrame\"'\n )\n\n\nlogging.getLogger(\"sphinx_autodoc_typehints\").logger.addFilter(\n FilterPandasTypeAnnotationWarning()\n)\n\n\ndef setup(app):\n app.add_css_file(\"default.css\")\n", "path": "docs/source/conf.py"}]}
| 1,895 | 259 |
gh_patches_debug_14934
|
rasdani/github-patches
|
git_diff
|
hylang__hy-932
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Multiple implementations overwrite each others' entrypoint hooks
It's not uncommon to find Python 2 and Python 3 coexisting on the same system, and it's not unreasonable for the user to also want Hy for both Pythons. However, where Python handles this gracefully by providing separate executables (`python2` and `python3` alongside `python`, `pip2` and `pip3` alongside `pip` etc) Hy simply bulldozes an existing `hy`, `hyc` etc.
Sorry if this has already been reported; I tried to look for previous issues but nothing much came to mind when I was trying to play guess-the-keyword.
Multiple implementations overwrite each others' entrypoint hooks
It's not uncommon to find Python 2 and Python 3 coexisting on the same system, and it's not unreasonable for the user to also want Hy for both Pythons. However, where Python handles this gracefully by providing separate executables (`python2` and `python3` alongside `python`, `pip2` and `pip3` alongside `pip` etc) Hy simply bulldozes an existing `hy`, `hyc` etc.
Sorry if this has already been reported; I tried to look for previous issues but nothing much came to mind when I was trying to play guess-the-keyword.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # Copyright (c) 2012, 2013 Paul Tagliamonte <[email protected]>
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining a
5 # copy of this software and associated documentation files (the "Software"),
6 # to deal in the Software without restriction, including without limitation
7 # the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 # and/or sell copies of the Software, and to permit persons to whom the
9 # Software is furnished to do so, subject to the following conditions:
10 #
11 # The above copyright notice and this permission notice shall be included in
12 # all copies or substantial portions of the Software.
13 #
14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 # DEALINGS IN THE SOFTWARE.
21
22 import os
23 import re
24 import sys
25
26 from setuptools import find_packages, setup
27
28 PKG = "hy"
29 VERSIONFILE = os.path.join(PKG, "version.py")
30 verstr = "unknown"
31 try:
32 verstrline = open(VERSIONFILE, "rt").read()
33 except EnvironmentError:
34 pass # Okay, there is no version file.
35 else:
36 VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
37 mo = re.search(VSRE, verstrline, re.M)
38 if mo:
39 __version__ = mo.group(1)
40 else:
41 msg = "if %s.py exists, it is required to be well-formed" % VERSIONFILE
42 raise RuntimeError(msg)
43
44 long_description = """Hy is a Python <--> Lisp layer. It helps
45 make things work nicer, and lets Python and the Hy lisp variant play
46 nice together. """
47
48 install_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']
49 if sys.version_info[:2] < (2, 7):
50 install_requires.append('argparse>=1.2.1')
51 install_requires.append('importlib>=1.0.2')
52 if os.name == 'nt':
53 install_requires.append('pyreadline==2.0')
54
55 setup(
56 name=PKG,
57 version=__version__,
58 install_requires=install_requires,
59 entry_points={
60 'console_scripts': [
61 'hy = hy.cmdline:hy_main',
62 'hyc = hy.cmdline:hyc_main',
63 'hy2py = hy.cmdline:hy2py_main',
64 ]
65 },
66 packages=find_packages(exclude=['tests*']),
67 package_data={
68 'hy.contrib': ['*.hy'],
69 'hy.core': ['*.hy'],
70 },
71 author="Paul Tagliamonte",
72 author_email="[email protected]",
73 long_description=long_description,
74 description='Lisp and Python love each other.',
75 license="Expat",
76 url="http://hylang.org/",
77 platforms=['any'],
78 classifiers=[
79 "Development Status :: 4 - Beta",
80 "Intended Audience :: Developers",
81 "License :: DFSG approved",
82 "License :: OSI Approved :: MIT License", # Really "Expat". Ugh.
83 "Operating System :: OS Independent",
84 "Programming Language :: Lisp",
85 "Programming Language :: Python",
86 "Programming Language :: Python :: 2",
87 "Programming Language :: Python :: 2.6",
88 "Programming Language :: Python :: 2.7",
89 "Programming Language :: Python :: 3",
90 "Programming Language :: Python :: 3.3",
91 "Programming Language :: Python :: 3.4",
92 "Topic :: Software Development :: Code Generators",
93 "Topic :: Software Development :: Compilers",
94 "Topic :: Software Development :: Libraries",
95 ]
96 )
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -52,6 +52,8 @@
if os.name == 'nt':
install_requires.append('pyreadline==2.0')
+ver = sys.version_info[0]
+
setup(
name=PKG,
version=__version__,
@@ -59,8 +61,11 @@
entry_points={
'console_scripts': [
'hy = hy.cmdline:hy_main',
+ 'hy%d = hy.cmdline:hy_main' % ver,
'hyc = hy.cmdline:hyc_main',
+ 'hyc%d = hy.cmdline:hyc_main' % ver,
'hy2py = hy.cmdline:hy2py_main',
+ 'hy2py%d = hy.cmdline:hy2py_main' % ver,
]
},
packages=find_packages(exclude=['tests*']),
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -52,6 +52,8 @@\n if os.name == 'nt':\n install_requires.append('pyreadline==2.0')\n \n+ver = sys.version_info[0]\n+\n setup(\n name=PKG,\n version=__version__,\n@@ -59,8 +61,11 @@\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n+ 'hy%d = hy.cmdline:hy_main' % ver,\n 'hyc = hy.cmdline:hyc_main',\n+ 'hyc%d = hy.cmdline:hyc_main' % ver,\n 'hy2py = hy.cmdline:hy2py_main',\n+ 'hy2py%d = hy.cmdline:hy2py_main' % ver,\n ]\n },\n packages=find_packages(exclude=['tests*']),\n", "issue": "Multiple implementations overwrite each others' entrypoint hooks\nIt's not uncommon to find Python 2 and Python 3 coexisting on the same system, and it's not unreasonable for the user to also want Hy for both Pythons. However, where Python handles this gracefully by providing separate executables (`python2` and `python3` alongside `python`, `pip2` and `pip3` alongside `pip` etc) Hy simply bulldozes an existing `hy`, `hyc` etc.\n\nSorry if this has already been reported; I tried to look for previous issues but nothing much came to mind when I was trying to play guess-the-keyword.\n\nMultiple implementations overwrite each others' entrypoint hooks\nIt's not uncommon to find Python 2 and Python 3 coexisting on the same system, and it's not unreasonable for the user to also want Hy for both Pythons. However, where Python handles this gracefully by providing separate executables (`python2` and `python3` alongside `python`, `pip2` and `pip3` alongside `pip` etc) Hy simply bulldozes an existing `hy`, `hyc` etc.\n\nSorry if this has already been reported; I tried to look for previous issues but nothing much came to mind when I was trying to play guess-the-keyword.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright (c) 2012, 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nPKG = \"hy\"\nVERSIONFILE = os.path.join(PKG, \"version.py\")\nverstr = \"unknown\"\ntry:\n verstrline = open(VERSIONFILE, \"rt\").read()\nexcept EnvironmentError:\n pass # Okay, there is no version file.\nelse:\n VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n mo = re.search(VSRE, verstrline, re.M)\n if mo:\n __version__ = mo.group(1)\n else:\n msg = \"if %s.py exists, it is required to be well-formed\" % VERSIONFILE\n raise RuntimeError(msg)\n\nlong_description = \"\"\"Hy is a Python <--> Lisp layer. It helps\nmake things work nicer, and lets Python and the Hy lisp variant play\nnice together. \"\"\"\n\ninstall_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']\nif sys.version_info[:2] < (2, 7):\n install_requires.append('argparse>=1.2.1')\n install_requires.append('importlib>=1.0.2')\nif os.name == 'nt':\n install_requires.append('pyreadline==2.0')\n\nsetup(\n name=PKG,\n version=__version__,\n install_requires=install_requires,\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n 'hyc = hy.cmdline:hyc_main',\n 'hy2py = hy.cmdline:hy2py_main',\n ]\n },\n packages=find_packages(exclude=['tests*']),\n package_data={\n 'hy.contrib': ['*.hy'],\n 'hy.core': ['*.hy'],\n },\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description='Lisp and Python love each other.',\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=['any'],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# Copyright (c) 2012, 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nPKG = \"hy\"\nVERSIONFILE = os.path.join(PKG, \"version.py\")\nverstr = \"unknown\"\ntry:\n verstrline = open(VERSIONFILE, \"rt\").read()\nexcept EnvironmentError:\n pass # Okay, there is no version file.\nelse:\n VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n mo = re.search(VSRE, verstrline, re.M)\n if mo:\n __version__ = mo.group(1)\n else:\n msg = \"if %s.py exists, it is required to be well-formed\" % VERSIONFILE\n raise RuntimeError(msg)\n\nlong_description = \"\"\"Hy is a Python <--> Lisp layer. It helps\nmake things work nicer, and lets Python and the Hy lisp variant play\nnice together. \"\"\"\n\ninstall_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']\nif sys.version_info[:2] < (2, 7):\n install_requires.append('argparse>=1.2.1')\n install_requires.append('importlib>=1.0.2')\nif os.name == 'nt':\n install_requires.append('pyreadline==2.0')\n\nver = sys.version_info[0]\n\nsetup(\n name=PKG,\n version=__version__,\n install_requires=install_requires,\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n 'hy%d = hy.cmdline:hy_main' % ver,\n 'hyc = hy.cmdline:hyc_main',\n 'hyc%d = hy.cmdline:hyc_main' % ver,\n 'hy2py = hy.cmdline:hy2py_main',\n 'hy2py%d = hy.cmdline:hy2py_main' % ver,\n ]\n },\n packages=find_packages(exclude=['tests*']),\n package_data={\n 'hy.contrib': ['*.hy'],\n 'hy.core': ['*.hy'],\n },\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description='Lisp and Python love each other.',\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=['any'],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ]\n)\n", "path": "setup.py"}]}
| 1,593 | 203 |
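
The golden diff in the record above adds major-version-suffixed console scripts (`hy2`/`hy3`, `hyc2`/`hyc3`, ...) so parallel Python installs stop overwriting each other's entry points. The sketch below shows that pattern as a standalone helper; `versioned_entry_points` is a hypothetical name for illustration and is not part of Hy's actual setup.py, which inlines the `% ver` formatting instead.

```python
# Illustrative sketch: build console_scripts with both plain and
# interpreter-major-version-suffixed names so installs can coexist.
import sys

def versioned_entry_points(names_to_targets):
    """Return an entry_points dict with plain and version-suffixed scripts."""
    major = sys.version_info[0]
    scripts = []
    for name, target in names_to_targets.items():
        scripts.append("%s = %s" % (name, target))            # e.g. "hy = hy.cmdline:hy_main"
        scripts.append("%s%d = %s" % (name, major, target))    # e.g. "hy3 = hy.cmdline:hy_main"
    return {"console_scripts": scripts}

if __name__ == "__main__":
    # Usage example: the dict returned here would be passed to setup(entry_points=...).
    print(versioned_entry_points({"hy": "hy.cmdline:hy_main"}))
```
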
gh_patches_debug_28507
|
rasdani/github-patches
|
git_diff
|
sql-machine-learning__elasticdl-1815
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Worker hangs when connects to PS.
The worker will wait the grpc channel of PS is ready using `channel_ready_future`. We should set timeout and retry.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/worker/main.py`
Content:
```
1 import grpc
2
3 from elasticdl.python.common import log_utils
4 from elasticdl.python.common.args import parse_worker_args
5 from elasticdl.python.common.grpc_utils import build_channel
6 from elasticdl.python.worker.worker import Worker
7
8
9 def main():
10 args = parse_worker_args()
11 logger = log_utils.get_logger(__name__)
12 logger.info("Starting worker %d", args.worker_id)
13 if args.master_addr is None:
14 raise ValueError("master_addr is missing for worker")
15
16 master_channel = build_channel(args.master_addr)
17
18 ps_channels = []
19 if args.ps_addrs:
20 ps_addrs = args.ps_addrs.split(",")
21
22 for addr in ps_addrs:
23 # addr is in the form as "ps-pod-name.namespace.svc:port"
24 channel = build_channel(addr)
25
26 # Wait the channel is ready by a Future object.
27 grpc.channel_ready_future(channel).result()
28 logger.info(
29 "grpc channel %s to connect pod %s is ready"
30 % (addr, addr.split(".")[0])
31 )
32 ps_channels.append(channel)
33
34 worker = Worker(
35 args,
36 channel=master_channel,
37 ps_channels=ps_channels,
38 set_parallelism=True,
39 )
40 worker.run()
41
42
43 if __name__ == "__main__":
44 main()
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/elasticdl/python/worker/main.py b/elasticdl/python/worker/main.py
--- a/elasticdl/python/worker/main.py
+++ b/elasticdl/python/worker/main.py
@@ -5,6 +5,9 @@
from elasticdl.python.common.grpc_utils import build_channel
from elasticdl.python.worker.worker import Worker
+CONNECT_PS_MAX_RETRIES = 3
+CONNECT_PS_TIMEOUT = 60
+
def main():
args = parse_worker_args()
@@ -23,13 +26,29 @@
# addr is in the form as "ps-pod-name.namespace.svc:port"
channel = build_channel(addr)
- # Wait the channel is ready by a Future object.
- grpc.channel_ready_future(channel).result()
- logger.info(
- "grpc channel %s to connect pod %s is ready"
- % (addr, addr.split(".")[0])
- )
- ps_channels.append(channel)
+ succeeded = False
+ for i in range(CONNECT_PS_MAX_RETRIES):
+ try:
+ grpc.channel_ready_future(channel).result(
+ timeout=CONNECT_PS_TIMEOUT
+ )
+ logger.info(
+ "grpc channel %s to connect pod %s is ready"
+ % (addr, addr.split(".")[0])
+ )
+ ps_channels.append(channel)
+ succeeded = True
+ break
+ except grpc.FutureTimeoutError:
+ logger.warning(
+ "Failed to connect pod %s with %d retry"
+ % (addr.split(".")[0], i)
+ )
+ if not succeeded:
+ raise TimeoutError(
+ "Time out to connect pod %s with 3 retries"
+ % addr.split(".")[0]
+ )
worker = Worker(
args,
|
{"golden_diff": "diff --git a/elasticdl/python/worker/main.py b/elasticdl/python/worker/main.py\n--- a/elasticdl/python/worker/main.py\n+++ b/elasticdl/python/worker/main.py\n@@ -5,6 +5,9 @@\n from elasticdl.python.common.grpc_utils import build_channel\n from elasticdl.python.worker.worker import Worker\n \n+CONNECT_PS_MAX_RETRIES = 3\n+CONNECT_PS_TIMEOUT = 60\n+\n \n def main():\n args = parse_worker_args()\n@@ -23,13 +26,29 @@\n # addr is in the form as \"ps-pod-name.namespace.svc:port\"\n channel = build_channel(addr)\n \n- # Wait the channel is ready by a Future object.\n- grpc.channel_ready_future(channel).result()\n- logger.info(\n- \"grpc channel %s to connect pod %s is ready\"\n- % (addr, addr.split(\".\")[0])\n- )\n- ps_channels.append(channel)\n+ succeeded = False\n+ for i in range(CONNECT_PS_MAX_RETRIES):\n+ try:\n+ grpc.channel_ready_future(channel).result(\n+ timeout=CONNECT_PS_TIMEOUT\n+ )\n+ logger.info(\n+ \"grpc channel %s to connect pod %s is ready\"\n+ % (addr, addr.split(\".\")[0])\n+ )\n+ ps_channels.append(channel)\n+ succeeded = True\n+ break\n+ except grpc.FutureTimeoutError:\n+ logger.warning(\n+ \"Failed to connect pod %s with %d retry\"\n+ % (addr.split(\".\")[0], i)\n+ )\n+ if not succeeded:\n+ raise TimeoutError(\n+ \"Time out to connect pod %s with 3 retries\"\n+ % addr.split(\".\")[0]\n+ )\n \n worker = Worker(\n args,\n", "issue": "Worker hangs when connects to PS.\nThe worker will wait the grpc channel of PS is ready using `channel_ready_future`. We should set timeout and retry.\r\n\r\n\n", "before_files": [{"content": "import grpc\n\nfrom elasticdl.python.common import log_utils\nfrom elasticdl.python.common.args import parse_worker_args\nfrom elasticdl.python.common.grpc_utils import build_channel\nfrom elasticdl.python.worker.worker import Worker\n\n\ndef main():\n args = parse_worker_args()\n logger = log_utils.get_logger(__name__)\n logger.info(\"Starting worker %d\", args.worker_id)\n if args.master_addr is None:\n raise ValueError(\"master_addr is missing for worker\")\n\n master_channel = build_channel(args.master_addr)\n\n ps_channels = []\n if args.ps_addrs:\n ps_addrs = args.ps_addrs.split(\",\")\n\n for addr in ps_addrs:\n # addr is in the form as \"ps-pod-name.namespace.svc:port\"\n channel = build_channel(addr)\n\n # Wait the channel is ready by a Future object.\n grpc.channel_ready_future(channel).result()\n logger.info(\n \"grpc channel %s to connect pod %s is ready\"\n % (addr, addr.split(\".\")[0])\n )\n ps_channels.append(channel)\n\n worker = Worker(\n args,\n channel=master_channel,\n ps_channels=ps_channels,\n set_parallelism=True,\n )\n worker.run()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl/python/worker/main.py"}], "after_files": [{"content": "import grpc\n\nfrom elasticdl.python.common import log_utils\nfrom elasticdl.python.common.args import parse_worker_args\nfrom elasticdl.python.common.grpc_utils import build_channel\nfrom elasticdl.python.worker.worker import Worker\n\nCONNECT_PS_MAX_RETRIES = 3\nCONNECT_PS_TIMEOUT = 60\n\n\ndef main():\n args = parse_worker_args()\n logger = log_utils.get_logger(__name__)\n logger.info(\"Starting worker %d\", args.worker_id)\n if args.master_addr is None:\n raise ValueError(\"master_addr is missing for worker\")\n\n master_channel = build_channel(args.master_addr)\n\n ps_channels = []\n if args.ps_addrs:\n ps_addrs = args.ps_addrs.split(\",\")\n\n for addr in ps_addrs:\n # addr is in the form as \"ps-pod-name.namespace.svc:port\"\n channel = build_channel(addr)\n\n 
succeeded = False\n for i in range(CONNECT_PS_MAX_RETRIES):\n try:\n grpc.channel_ready_future(channel).result(\n timeout=CONNECT_PS_TIMEOUT\n )\n logger.info(\n \"grpc channel %s to connect pod %s is ready\"\n % (addr, addr.split(\".\")[0])\n )\n ps_channels.append(channel)\n succeeded = True\n break\n except grpc.FutureTimeoutError:\n logger.warning(\n \"Failed to connect pod %s with %d retry\"\n % (addr.split(\".\")[0], i)\n )\n if not succeeded:\n raise TimeoutError(\n \"Time out to connect pod %s with 3 retries\"\n % addr.split(\".\")[0]\n )\n\n worker = Worker(\n args,\n channel=master_channel,\n ps_channels=ps_channels,\n set_parallelism=True,\n )\n worker.run()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl/python/worker/main.py"}]}
| 712 | 397 |
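
To make the fix in the record above easier to follow in isolation, here is a minimal sketch of the same retry-with-timeout pattern around `grpc.channel_ready_future`. It assumes the `grpcio` package is installed; `connect_with_retry` and the two constants are illustrative names rather than ElasticDL's actual API.

```python
# Standalone sketch of retrying channel readiness with a per-attempt timeout
# instead of blocking forever on channel_ready_future().result().
import grpc

CONNECT_MAX_RETRIES = 3
CONNECT_TIMEOUT = 60  # seconds per attempt

def connect_with_retry(addr):
    """Return a ready channel to `addr`, or raise TimeoutError after retries."""
    channel = grpc.insecure_channel(addr)
    for attempt in range(CONNECT_MAX_RETRIES):
        try:
            # Block until the channel is ready, but never longer than the timeout.
            grpc.channel_ready_future(channel).result(timeout=CONNECT_TIMEOUT)
            return channel
        except grpc.FutureTimeoutError:
            print("attempt %d to reach %s timed out" % (attempt + 1, addr))
    raise TimeoutError(
        "could not connect to %s after %d attempts" % (addr, CONNECT_MAX_RETRIES)
    )
```
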
gh_patches_debug_31741
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__pyro-738
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support arbitrary sample_shape in Rejector
This will require the `proposal` to accept an optional `sample_shape` arg and the `log_prob_accept()` method to support broadcasting.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyro/distributions/rejector.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 import torch
4 from pyro.distributions.distribution import Distribution
5 from pyro.distributions.score_parts import ScoreParts
6 from pyro.distributions.util import copy_docs_from
7
8
9 @copy_docs_from(Distribution)
10 class Rejector(Distribution):
11 """
12 Rejection sampled distribution given an acceptance rate function.
13
14 :param Distribution propose: A proposal distribution that samples batched
15 propsals via `propose()`.
16 :param callable log_prob_accept: A callable that inputs a batch of
17 proposals and returns a batch of log acceptance probabilities.
18 :param log_scale: Total log probability of acceptance.
19 """
20 stateful = True
21 reparameterized = True
22
23 def __init__(self, propose, log_prob_accept, log_scale):
24 self.propose = propose
25 self.log_prob_accept = log_prob_accept
26 self._log_scale = log_scale
27
28 # These LRU(1) caches allow work to be shared across different method calls.
29 self._log_prob_accept_cache = None, None
30 self._propose_batch_log_pdf_cache = None, None
31
32 def _log_prob_accept(self, x):
33 if x is not self._log_prob_accept_cache[0]:
34 self._log_prob_accept_cache = x, self.log_prob_accept(x) - self._log_scale
35 return self._log_prob_accept_cache[1]
36
37 def _propose_batch_log_pdf(self, x):
38 if x is not self._propose_batch_log_pdf_cache[0]:
39 self._propose_batch_log_pdf_cache = x, self.propose.log_prob(x)
40 return self._propose_batch_log_pdf_cache[1]
41
42 def sample(self, sample_shape=torch.Size()):
43 if sample_shape:
44 raise ValueError("Arbitrary `sample_shape` not supported by Rejector class.")
45 # Implements parallel batched accept-reject sampling.
46 x = self.propose()
47 log_prob_accept = self.log_prob_accept(x)
48 probs = torch.exp(log_prob_accept).clamp_(0.0, 1.0)
49 done = torch.bernoulli(probs).byte()
50 while not done.all():
51 proposed_x = self.propose()
52 log_prob_accept = self.log_prob_accept(proposed_x)
53 prob_accept = torch.exp(log_prob_accept).clamp_(0.0, 1.0)
54 accept = torch.bernoulli(prob_accept).byte() & ~done
55 if accept.any():
56 x[accept] = proposed_x[accept]
57 done |= accept
58 return x
59
60 def log_prob(self, x):
61 return self._propose_batch_log_pdf(x) + self._log_prob_accept(x)
62
63 def score_parts(self, x):
64 score_function = self._log_prob_accept(x)
65 log_pdf = self.log_prob(x)
66 return ScoreParts(log_pdf, score_function, log_pdf)
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyro/distributions/rejector.py b/pyro/distributions/rejector.py
--- a/pyro/distributions/rejector.py
+++ b/pyro/distributions/rejector.py
@@ -12,7 +12,8 @@
Rejection sampled distribution given an acceptance rate function.
:param Distribution propose: A proposal distribution that samples batched
- propsals via `propose()`.
+ proposals via ``propose()``. :meth:`sample` supports a ``sample_shape``
+ arg only if ``propose()`` supports a ``sample_shape`` arg.
:param callable log_prob_accept: A callable that inputs a batch of
proposals and returns a batch of log acceptance probabilities.
:param log_scale: Total log probability of acceptance.
@@ -40,15 +41,13 @@
return self._propose_batch_log_pdf_cache[1]
def sample(self, sample_shape=torch.Size()):
- if sample_shape:
- raise ValueError("Arbitrary `sample_shape` not supported by Rejector class.")
# Implements parallel batched accept-reject sampling.
- x = self.propose()
+ x = self.propose(sample_shape) if sample_shape else self.propose()
log_prob_accept = self.log_prob_accept(x)
probs = torch.exp(log_prob_accept).clamp_(0.0, 1.0)
done = torch.bernoulli(probs).byte()
while not done.all():
- proposed_x = self.propose()
+ proposed_x = self.propose(sample_shape) if sample_shape else self.propose()
log_prob_accept = self.log_prob_accept(proposed_x)
prob_accept = torch.exp(log_prob_accept).clamp_(0.0, 1.0)
accept = torch.bernoulli(prob_accept).byte() & ~done
|
{"golden_diff": "diff --git a/pyro/distributions/rejector.py b/pyro/distributions/rejector.py\n--- a/pyro/distributions/rejector.py\n+++ b/pyro/distributions/rejector.py\n@@ -12,7 +12,8 @@\n Rejection sampled distribution given an acceptance rate function.\n \n :param Distribution propose: A proposal distribution that samples batched\n- propsals via `propose()`.\n+ proposals via ``propose()``. :meth:`sample` supports a ``sample_shape``\n+ arg only if ``propose()`` supports a ``sample_shape`` arg.\n :param callable log_prob_accept: A callable that inputs a batch of\n proposals and returns a batch of log acceptance probabilities.\n :param log_scale: Total log probability of acceptance.\n@@ -40,15 +41,13 @@\n return self._propose_batch_log_pdf_cache[1]\n \n def sample(self, sample_shape=torch.Size()):\n- if sample_shape:\n- raise ValueError(\"Arbitrary `sample_shape` not supported by Rejector class.\")\n # Implements parallel batched accept-reject sampling.\n- x = self.propose()\n+ x = self.propose(sample_shape) if sample_shape else self.propose()\n log_prob_accept = self.log_prob_accept(x)\n probs = torch.exp(log_prob_accept).clamp_(0.0, 1.0)\n done = torch.bernoulli(probs).byte()\n while not done.all():\n- proposed_x = self.propose()\n+ proposed_x = self.propose(sample_shape) if sample_shape else self.propose()\n log_prob_accept = self.log_prob_accept(proposed_x)\n prob_accept = torch.exp(log_prob_accept).clamp_(0.0, 1.0)\n accept = torch.bernoulli(prob_accept).byte() & ~done\n", "issue": "Support arbitrary sample_shape in Rejector\nThis will require the `proposal` to accept an optional `sample_shape` arg and the `log_prob_accept()` method to support broadcasting.\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport torch\nfrom pyro.distributions.distribution import Distribution\nfrom pyro.distributions.score_parts import ScoreParts\nfrom pyro.distributions.util import copy_docs_from\n\n\n@copy_docs_from(Distribution)\nclass Rejector(Distribution):\n \"\"\"\n Rejection sampled distribution given an acceptance rate function.\n\n :param Distribution propose: A proposal distribution that samples batched\n propsals via `propose()`.\n :param callable log_prob_accept: A callable that inputs a batch of\n proposals and returns a batch of log acceptance probabilities.\n :param log_scale: Total log probability of acceptance.\n \"\"\"\n stateful = True\n reparameterized = True\n\n def __init__(self, propose, log_prob_accept, log_scale):\n self.propose = propose\n self.log_prob_accept = log_prob_accept\n self._log_scale = log_scale\n\n # These LRU(1) caches allow work to be shared across different method calls.\n self._log_prob_accept_cache = None, None\n self._propose_batch_log_pdf_cache = None, None\n\n def _log_prob_accept(self, x):\n if x is not self._log_prob_accept_cache[0]:\n self._log_prob_accept_cache = x, self.log_prob_accept(x) - self._log_scale\n return self._log_prob_accept_cache[1]\n\n def _propose_batch_log_pdf(self, x):\n if x is not self._propose_batch_log_pdf_cache[0]:\n self._propose_batch_log_pdf_cache = x, self.propose.log_prob(x)\n return self._propose_batch_log_pdf_cache[1]\n\n def sample(self, sample_shape=torch.Size()):\n if sample_shape:\n raise ValueError(\"Arbitrary `sample_shape` not supported by Rejector class.\")\n # Implements parallel batched accept-reject sampling.\n x = self.propose()\n log_prob_accept = self.log_prob_accept(x)\n probs = torch.exp(log_prob_accept).clamp_(0.0, 1.0)\n done = 
torch.bernoulli(probs).byte()\n while not done.all():\n proposed_x = self.propose()\n log_prob_accept = self.log_prob_accept(proposed_x)\n prob_accept = torch.exp(log_prob_accept).clamp_(0.0, 1.0)\n accept = torch.bernoulli(prob_accept).byte() & ~done\n if accept.any():\n x[accept] = proposed_x[accept]\n done |= accept\n return x\n\n def log_prob(self, x):\n return self._propose_batch_log_pdf(x) + self._log_prob_accept(x)\n\n def score_parts(self, x):\n score_function = self._log_prob_accept(x)\n log_pdf = self.log_prob(x)\n return ScoreParts(log_pdf, score_function, log_pdf)\n", "path": "pyro/distributions/rejector.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport torch\nfrom pyro.distributions.distribution import Distribution\nfrom pyro.distributions.score_parts import ScoreParts\nfrom pyro.distributions.util import copy_docs_from\n\n\n@copy_docs_from(Distribution)\nclass Rejector(Distribution):\n \"\"\"\n Rejection sampled distribution given an acceptance rate function.\n\n :param Distribution propose: A proposal distribution that samples batched\n proposals via ``propose()``. :meth:`sample` supports a ``sample_shape``\n arg only if ``propose()`` supports a ``sample_shape`` arg.\n :param callable log_prob_accept: A callable that inputs a batch of\n proposals and returns a batch of log acceptance probabilities.\n :param log_scale: Total log probability of acceptance.\n \"\"\"\n stateful = True\n reparameterized = True\n\n def __init__(self, propose, log_prob_accept, log_scale):\n self.propose = propose\n self.log_prob_accept = log_prob_accept\n self._log_scale = log_scale\n\n # These LRU(1) caches allow work to be shared across different method calls.\n self._log_prob_accept_cache = None, None\n self._propose_batch_log_pdf_cache = None, None\n\n def _log_prob_accept(self, x):\n if x is not self._log_prob_accept_cache[0]:\n self._log_prob_accept_cache = x, self.log_prob_accept(x) - self._log_scale\n return self._log_prob_accept_cache[1]\n\n def _propose_batch_log_pdf(self, x):\n if x is not self._propose_batch_log_pdf_cache[0]:\n self._propose_batch_log_pdf_cache = x, self.propose.log_prob(x)\n return self._propose_batch_log_pdf_cache[1]\n\n def sample(self, sample_shape=torch.Size()):\n # Implements parallel batched accept-reject sampling.\n x = self.propose(sample_shape) if sample_shape else self.propose()\n log_prob_accept = self.log_prob_accept(x)\n probs = torch.exp(log_prob_accept).clamp_(0.0, 1.0)\n done = torch.bernoulli(probs).byte()\n while not done.all():\n proposed_x = self.propose(sample_shape) if sample_shape else self.propose()\n log_prob_accept = self.log_prob_accept(proposed_x)\n prob_accept = torch.exp(log_prob_accept).clamp_(0.0, 1.0)\n accept = torch.bernoulli(prob_accept).byte() & ~done\n if accept.any():\n x[accept] = proposed_x[accept]\n done |= accept\n return x\n\n def log_prob(self, x):\n return self._propose_batch_log_pdf(x) + self._log_prob_accept(x)\n\n def score_parts(self, x):\n score_function = self._log_prob_accept(x)\n log_pdf = self.log_prob(x)\n return ScoreParts(log_pdf, score_function, log_pdf)\n", "path": "pyro/distributions/rejector.py"}]}
| 1,041 | 398 |
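
The patch in the record above threads `sample_shape` through to `propose()` inside a parallel accept-reject loop. Below is a self-contained sketch of that batched rejection-sampling pattern, assuming PyTorch is available; the `propose` and `log_prob_accept` toys stand in for Pyro's real proposal distribution and acceptance function.

```python
# Batched accept-reject sampling that honors an arbitrary sample_shape.
import torch

def rejection_sample(propose, log_prob_accept, sample_shape=torch.Size()):
    """Draw samples of shape `sample_shape` by parallel accept-reject."""
    x = propose(sample_shape)
    done = torch.bernoulli(log_prob_accept(x).exp().clamp(0.0, 1.0)).bool()
    while not done.all():
        proposed = propose(sample_shape)
        prob = log_prob_accept(proposed).exp().clamp(0.0, 1.0)
        accept = torch.bernoulli(prob).bool() & ~done
        x = torch.where(accept, proposed, x)   # keep earlier accepted draws
        done |= accept
    return x

if __name__ == "__main__":
    # Toy usage: standard normal proposals, accept only non-negative draws,
    # i.e. a rejection sampler for the half-normal distribution.
    propose = lambda shape: torch.randn(shape)
    log_prob_accept = lambda x: torch.where(
        x >= 0, torch.zeros_like(x), torch.full_like(x, float("-inf"))
    )
    print(rejection_sample(propose, log_prob_accept, torch.Size([4, 3])).shape)
```
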
gh_patches_debug_10405
|
rasdani/github-patches
|
git_diff
|
e-valuation__EvaP-340
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
A user must never evaluate him/herself
In rare cases a user can be participant and contributor in a course (e.g. participating student and tutor for the exercises at the same time).
The system has to make sure that this user can't answer questions related to him/herself.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/student/views.py`
Content:
```
1 from django.contrib import messages
2 from django.core.exceptions import PermissionDenied
3 from django.db import transaction
4 from django.shortcuts import get_object_or_404, redirect, render_to_response
5 from django.template import RequestContext
6 from django.utils.datastructures import SortedDict
7 from django.utils.translation import ugettext as _
8
9 from evap.evaluation.auth import login_required
10 from evap.evaluation.models import Course, Semester
11 from evap.evaluation.tools import questionnaires_and_contributions
12 from evap.student.forms import QuestionsForm
13 from evap.student.tools import make_form_identifier
14
15 from datetime import datetime
16
17
18 @login_required
19 def index(request):
20 # retrieve all courses, which the user can evaluate at some point
21 users_courses = Course.objects.filter(
22 participants=request.user
23 ).exclude(
24 voters=request.user
25 )
26 # split up into current and future courses
27 current_courses = users_courses.filter(state='inEvaluation')
28 future_courses = users_courses.filter(state='approved')
29
30 return render_to_response(
31 "student_index.html",
32 dict(current_courses=current_courses,
33 future_courses=future_courses),
34 context_instance=RequestContext(request))
35
36
37 @login_required
38 def vote(request, course_id):
39 # retrieve course and make sure that the user is allowed to vote
40 course = get_object_or_404(Course, id=course_id)
41 if not course.can_user_vote(request.user):
42 raise PermissionDenied
43
44 # build forms
45 forms = SortedDict()
46 for questionnaire, contribution in questionnaires_and_contributions(course):
47 form = QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire)
48 forms[(contribution, questionnaire)] = form
49
50 if all(form.is_valid() for form in forms.values()):
51 # begin vote operation
52 with transaction.commit_on_success():
53 for (contribution, questionnaire), form in forms.items():
54 for question in questionnaire.question_set.all():
55 identifier = make_form_identifier(contribution, questionnaire, question)
56 value = form.cleaned_data.get(identifier)
57
58 if type(value) in [str, unicode]:
59 value = value.strip()
60
61 if value == 6: #no answer
62 value = None
63
64 # store the answer if one was given
65 if value:
66 question.answer_class.objects.create(
67 contribution=contribution,
68 question=question,
69 answer=value)
70
71 # remember that the user voted already
72 course.voters.add(request.user)
73
74 messages.add_message(request, messages.INFO, _("Your vote was recorded."))
75 return redirect('evap.student.views.index')
76 else:
77 return render_to_response(
78 "student_vote.html",
79 dict(forms=forms.values(),
80 course=course),
81 context_instance=RequestContext(request))
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/evap/student/views.py b/evap/student/views.py
--- a/evap/student/views.py
+++ b/evap/student/views.py
@@ -45,6 +45,8 @@
forms = SortedDict()
for questionnaire, contribution in questionnaires_and_contributions(course):
form = QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire)
+ if form.contribution.contributor == request.user:
+ continue # users shall not vote about themselves
forms[(contribution, questionnaire)] = form
if all(form.is_valid() for form in forms.values()):
|
{"golden_diff": "diff --git a/evap/student/views.py b/evap/student/views.py\n--- a/evap/student/views.py\n+++ b/evap/student/views.py\n@@ -45,6 +45,8 @@\n forms = SortedDict()\n for questionnaire, contribution in questionnaires_and_contributions(course):\n form = QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire)\n+ if form.contribution.contributor == request.user:\n+ continue # users shall not vote about themselves\n forms[(contribution, questionnaire)] = form\n \n if all(form.is_valid() for form in forms.values()):\n", "issue": "A user must never evaluate him/herself\nIn rare cases a user can be participant and contributor in a course (e.g. participating student and tutor for the exercises at the same time).\nThe system has to make sure that this user can't answer questions related to him/herself.\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import transaction\nfrom django.shortcuts import get_object_or_404, redirect, render_to_response\nfrom django.template import RequestContext\nfrom django.utils.datastructures import SortedDict\nfrom django.utils.translation import ugettext as _\n\nfrom evap.evaluation.auth import login_required\nfrom evap.evaluation.models import Course, Semester\nfrom evap.evaluation.tools import questionnaires_and_contributions\nfrom evap.student.forms import QuestionsForm\nfrom evap.student.tools import make_form_identifier\n\nfrom datetime import datetime\n\n\n@login_required\ndef index(request):\n # retrieve all courses, which the user can evaluate at some point\n users_courses = Course.objects.filter(\n participants=request.user\n ).exclude(\n voters=request.user\n )\n # split up into current and future courses\n current_courses = users_courses.filter(state='inEvaluation')\n future_courses = users_courses.filter(state='approved')\n\n return render_to_response(\n \"student_index.html\",\n dict(current_courses=current_courses,\n future_courses=future_courses),\n context_instance=RequestContext(request))\n\n\n@login_required\ndef vote(request, course_id):\n # retrieve course and make sure that the user is allowed to vote\n course = get_object_or_404(Course, id=course_id)\n if not course.can_user_vote(request.user):\n raise PermissionDenied\n\n # build forms\n forms = SortedDict()\n for questionnaire, contribution in questionnaires_and_contributions(course):\n form = QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire)\n forms[(contribution, questionnaire)] = form\n\n if all(form.is_valid() for form in forms.values()):\n # begin vote operation\n with transaction.commit_on_success():\n for (contribution, questionnaire), form in forms.items():\n for question in questionnaire.question_set.all():\n identifier = make_form_identifier(contribution, questionnaire, question)\n value = form.cleaned_data.get(identifier)\n\n if type(value) in [str, unicode]:\n value = value.strip()\n\n if value == 6: #no answer\n value = None\n\n # store the answer if one was given\n if value:\n question.answer_class.objects.create(\n contribution=contribution,\n question=question,\n answer=value)\n\n # remember that the user voted already\n course.voters.add(request.user)\n\n messages.add_message(request, messages.INFO, _(\"Your vote was recorded.\"))\n return redirect('evap.student.views.index')\n else:\n return render_to_response(\n \"student_vote.html\",\n dict(forms=forms.values(),\n course=course),\n 
context_instance=RequestContext(request))\n", "path": "evap/student/views.py"}], "after_files": [{"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import transaction\nfrom django.shortcuts import get_object_or_404, redirect, render_to_response\nfrom django.template import RequestContext\nfrom django.utils.datastructures import SortedDict\nfrom django.utils.translation import ugettext as _\n\nfrom evap.evaluation.auth import login_required\nfrom evap.evaluation.models import Course, Semester\nfrom evap.evaluation.tools import questionnaires_and_contributions\nfrom evap.student.forms import QuestionsForm\nfrom evap.student.tools import make_form_identifier\n\nfrom datetime import datetime\n\n\n@login_required\ndef index(request):\n # retrieve all courses, which the user can evaluate at some point\n users_courses = Course.objects.filter(\n participants=request.user\n ).exclude(\n voters=request.user\n )\n # split up into current and future courses\n current_courses = users_courses.filter(state='inEvaluation')\n future_courses = users_courses.filter(state='approved')\n\n return render_to_response(\n \"student_index.html\",\n dict(current_courses=current_courses,\n future_courses=future_courses),\n context_instance=RequestContext(request))\n\n\n@login_required\ndef vote(request, course_id):\n # retrieve course and make sure that the user is allowed to vote\n course = get_object_or_404(Course, id=course_id)\n if not course.can_user_vote(request.user):\n raise PermissionDenied\n\n # build forms\n forms = SortedDict()\n for questionnaire, contribution in questionnaires_and_contributions(course):\n form = QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire)\n if form.contribution.contributor == request.user:\n continue # users shall not vote about themselves\n forms[(contribution, questionnaire)] = form\n\n if all(form.is_valid() for form in forms.values()):\n # begin vote operation\n with transaction.commit_on_success():\n for (contribution, questionnaire), form in forms.items():\n for question in questionnaire.question_set.all():\n identifier = make_form_identifier(contribution, questionnaire, question)\n value = form.cleaned_data.get(identifier)\n\n if type(value) in [str, unicode]:\n value = value.strip()\n\n if value == 6: #no answer\n value = None\n\n # store the answer if one was given\n if value:\n question.answer_class.objects.create(\n contribution=contribution,\n question=question,\n answer=value)\n\n # remember that the user voted already\n course.voters.add(request.user)\n\n messages.add_message(request, messages.INFO, _(\"Your vote was recorded.\"))\n return redirect('evap.student.views.index')\n else:\n return render_to_response(\n \"student_vote.html\",\n dict(forms=forms.values(),\n course=course),\n context_instance=RequestContext(request))\n", "path": "evap/student/views.py"}]}
| 1,051 | 134 |
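
The fix in the record above simply skips any form whose contribution belongs to the requesting user. A minimal non-Django sketch of that guard follows; the namedtuple model is a simplification for illustration, not EvaP's actual ORM classes.

```python
# Filter out contributions where the voter would be evaluating themselves.
from collections import namedtuple

Contribution = namedtuple("Contribution", ["contributor", "questionnaire"])

def votable_contributions(contributions, voter):
    """Return only the contributions `voter` may vote on (never about themselves)."""
    return [c for c in contributions if c.contributor != voter]

if __name__ == "__main__":
    pairs = [
        Contribution("alice", "Lecture feedback"),
        Contribution("bob", "Exercise feedback"),
    ]
    # Bob is both participant and contributor: his own contribution is skipped.
    print(votable_contributions(pairs, "bob"))
```
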
gh_patches_debug_5905
|
rasdani/github-patches
|
git_diff
|
scoutapp__scout_apm_python-746
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecation notice about urllib3[secure]
### Description
pyOpenSSL and urllib3[secure] are deprecated in the upcoming release (1.26.12)
https://github.com/urllib3/urllib3/issues/2680
Removed 'urllib3[secure] < 1.25 ; python_version < "3.5"' and 'urllib3[secure] < 2 ; python_version >= "3.5"'
Closes #746
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import os
5 import sys
6
7 from setuptools import Extension, find_packages, setup
8
9 with open("README.md", "r") as fp:
10 long_description = fp.read()
11
12 packages = find_packages("src")
13 if sys.version_info < (3, 6):
14 packages = [p for p in packages if not p.startswith("scout_apm.async_")]
15
16 compile_extensions = (
17 # Python 3+
18 sys.version_info >= (3,)
19 # Not Jython
20 and not sys.platform.startswith("java")
21 # Not PyPy
22 and "__pypy__" not in sys.builtin_module_names
23 # Not explicitly disabled
24 and (os.environ.get("SCOUT_DISABLE_EXTENSIONS", "") == "")
25 )
26 if compile_extensions:
27 ext_modules = [
28 Extension(
29 name=str("scout_apm.core._objtrace"),
30 sources=[str("src/scout_apm/core/_objtrace.c")],
31 optional=True,
32 )
33 ]
34 else:
35 ext_modules = []
36
37 setup(
38 name="scout_apm",
39 version="2.26.1",
40 description="Scout Application Performance Monitoring Agent",
41 long_description=long_description,
42 long_description_content_type="text/markdown",
43 url="https://github.com/scoutapp/scout_apm_python",
44 project_urls={
45 "Documentation": "https://docs.scoutapm.com/#python-agent",
46 "Changelog": (
47 "https://github.com/scoutapp/scout_apm_python/blob/master/CHANGELOG.md"
48 ),
49 },
50 author="Scout",
51 author_email="[email protected]",
52 license="MIT",
53 zip_safe=False,
54 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4",
55 packages=packages,
56 package_dir={str(""): str("src")},
57 ext_modules=ext_modules,
58 entry_points={
59 "console_scripts": [
60 "core-agent-manager = scout_apm.core.cli.core_agent_manager:main"
61 ]
62 },
63 install_requires=[
64 'asgiref ; python_version >= "3.5"',
65 'contextvars ; python_version >= "3.6" and python_version < "3.7"',
66 'importlib-metadata ; python_version < "3.8"',
67 "psutil>=5,<6",
68 'urllib3[secure] < 1.25 ; python_version < "3.5"',
69 'urllib3[secure] < 2 ; python_version >= "3.5"',
70 "wrapt>=1.10,<2.0",
71 ],
72 keywords=["apm", "performance monitoring", "development"],
73 classifiers=[
74 "Development Status :: 5 - Production/Stable",
75 "Framework :: Bottle",
76 "Framework :: Django",
77 "Framework :: Django :: 1.8",
78 "Framework :: Django :: 1.9",
79 "Framework :: Django :: 1.10",
80 "Framework :: Django :: 1.11",
81 "Framework :: Django :: 2.0",
82 "Framework :: Django :: 2.1",
83 "Framework :: Django :: 2.2",
84 "Framework :: Django :: 3.0",
85 "Framework :: Django :: 3.1",
86 "Framework :: Django :: 3.2",
87 "Framework :: Django :: 4.0",
88 "Framework :: Flask",
89 "Framework :: Pyramid",
90 "Intended Audience :: Developers",
91 "Topic :: System :: Monitoring",
92 "License :: OSI Approved :: MIT License",
93 "Operating System :: MacOS",
94 "Operating System :: POSIX",
95 "Operating System :: POSIX :: Linux",
96 "Programming Language :: Python :: 2",
97 "Programming Language :: Python :: 2.7",
98 "Programming Language :: Python :: 3",
99 "Programming Language :: Python :: 3.4",
100 "Programming Language :: Python :: 3.5",
101 "Programming Language :: Python :: 3.6",
102 "Programming Language :: Python :: 3.7",
103 "Programming Language :: Python :: 3.8",
104 "Programming Language :: Python :: 3.9",
105 "Programming Language :: Python :: 3.10",
106 ],
107 )
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -66,7 +66,7 @@
'importlib-metadata ; python_version < "3.8"',
"psutil>=5,<6",
'urllib3[secure] < 1.25 ; python_version < "3.5"',
- 'urllib3[secure] < 2 ; python_version >= "3.5"',
+ 'urllib3 < 2 ; python_version >= "3.5"',
"wrapt>=1.10,<2.0",
],
keywords=["apm", "performance monitoring", "development"],
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -66,7 +66,7 @@\n 'importlib-metadata ; python_version < \"3.8\"',\n \"psutil>=5,<6\",\n 'urllib3[secure] < 1.25 ; python_version < \"3.5\"',\n- 'urllib3[secure] < 2 ; python_version >= \"3.5\"',\n+ 'urllib3 < 2 ; python_version >= \"3.5\"',\n \"wrapt>=1.10,<2.0\",\n ],\n keywords=[\"apm\", \"performance monitoring\", \"development\"],\n", "issue": "Deprecation notice about urllib3[secure]\n ### Description\r\n\r\n pyOpenSSL and urllib3[secure] are deprecated in the upcoming release (1.26.12)\r\n https://github.com/urllib3/urllib3/issues/2680\r\n Removed 'urllib3[secure] < 1.25 ; python_version < \"3.5\"' and 'urllib3[secure] < 2 ; python_version >= \"3.5\"'\r\n\r\nCloses #746\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport sys\n\nfrom setuptools import Extension, find_packages, setup\n\nwith open(\"README.md\", \"r\") as fp:\n long_description = fp.read()\n\npackages = find_packages(\"src\")\nif sys.version_info < (3, 6):\n packages = [p for p in packages if not p.startswith(\"scout_apm.async_\")]\n\ncompile_extensions = (\n # Python 3+\n sys.version_info >= (3,)\n # Not Jython\n and not sys.platform.startswith(\"java\")\n # Not PyPy\n and \"__pypy__\" not in sys.builtin_module_names\n # Not explicitly disabled\n and (os.environ.get(\"SCOUT_DISABLE_EXTENSIONS\", \"\") == \"\")\n)\nif compile_extensions:\n ext_modules = [\n Extension(\n name=str(\"scout_apm.core._objtrace\"),\n sources=[str(\"src/scout_apm/core/_objtrace.c\")],\n optional=True,\n )\n ]\nelse:\n ext_modules = []\n\nsetup(\n name=\"scout_apm\",\n version=\"2.26.1\",\n description=\"Scout Application Performance Monitoring Agent\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/scoutapp/scout_apm_python\",\n project_urls={\n \"Documentation\": \"https://docs.scoutapm.com/#python-agent\",\n \"Changelog\": (\n \"https://github.com/scoutapp/scout_apm_python/blob/master/CHANGELOG.md\"\n ),\n },\n author=\"Scout\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n zip_safe=False,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n packages=packages,\n package_dir={str(\"\"): str(\"src\")},\n ext_modules=ext_modules,\n entry_points={\n \"console_scripts\": [\n \"core-agent-manager = scout_apm.core.cli.core_agent_manager:main\"\n ]\n },\n install_requires=[\n 'asgiref ; python_version >= \"3.5\"',\n 'contextvars ; python_version >= \"3.6\" and python_version < \"3.7\"',\n 'importlib-metadata ; python_version < \"3.8\"',\n \"psutil>=5,<6\",\n 'urllib3[secure] < 1.25 ; python_version < \"3.5\"',\n 'urllib3[secure] < 2 ; python_version >= \"3.5\"',\n \"wrapt>=1.10,<2.0\",\n ],\n keywords=[\"apm\", \"performance monitoring\", \"development\"],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Framework :: Bottle\",\n \"Framework :: Django\",\n \"Framework :: Django :: 1.8\",\n \"Framework :: Django :: 1.9\",\n \"Framework :: Django :: 1.10\",\n \"Framework :: Django :: 1.11\",\n \"Framework :: Django :: 2.0\",\n \"Framework :: Django :: 2.1\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Django :: 3.1\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.0\",\n \"Framework :: Flask\",\n \"Framework :: Pyramid\",\n \"Intended Audience :: Developers\",\n \"Topic :: System :: 
Monitoring\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport sys\n\nfrom setuptools import Extension, find_packages, setup\n\nwith open(\"README.md\", \"r\") as fp:\n long_description = fp.read()\n\npackages = find_packages(\"src\")\nif sys.version_info < (3, 6):\n packages = [p for p in packages if not p.startswith(\"scout_apm.async_\")]\n\ncompile_extensions = (\n # Python 3+\n sys.version_info >= (3,)\n # Not Jython\n and not sys.platform.startswith(\"java\")\n # Not PyPy\n and \"__pypy__\" not in sys.builtin_module_names\n # Not explicitly disabled\n and (os.environ.get(\"SCOUT_DISABLE_EXTENSIONS\", \"\") == \"\")\n)\nif compile_extensions:\n ext_modules = [\n Extension(\n name=str(\"scout_apm.core._objtrace\"),\n sources=[str(\"src/scout_apm/core/_objtrace.c\")],\n optional=True,\n )\n ]\nelse:\n ext_modules = []\n\nsetup(\n name=\"scout_apm\",\n version=\"2.26.1\",\n description=\"Scout Application Performance Monitoring Agent\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/scoutapp/scout_apm_python\",\n project_urls={\n \"Documentation\": \"https://docs.scoutapm.com/#python-agent\",\n \"Changelog\": (\n \"https://github.com/scoutapp/scout_apm_python/blob/master/CHANGELOG.md\"\n ),\n },\n author=\"Scout\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n zip_safe=False,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n packages=packages,\n package_dir={str(\"\"): str(\"src\")},\n ext_modules=ext_modules,\n entry_points={\n \"console_scripts\": [\n \"core-agent-manager = scout_apm.core.cli.core_agent_manager:main\"\n ]\n },\n install_requires=[\n 'asgiref ; python_version >= \"3.5\"',\n 'contextvars ; python_version >= \"3.6\" and python_version < \"3.7\"',\n 'importlib-metadata ; python_version < \"3.8\"',\n \"psutil>=5,<6\",\n 'urllib3[secure] < 1.25 ; python_version < \"3.5\"',\n 'urllib3 < 2 ; python_version >= \"3.5\"',\n \"wrapt>=1.10,<2.0\",\n ],\n keywords=[\"apm\", \"performance monitoring\", \"development\"],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Framework :: Bottle\",\n \"Framework :: Django\",\n \"Framework :: Django :: 1.8\",\n \"Framework :: Django :: 1.9\",\n \"Framework :: Django :: 1.10\",\n \"Framework :: Django :: 1.11\",\n \"Framework :: Django :: 2.0\",\n \"Framework :: Django :: 2.1\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Django :: 3.1\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.0\",\n \"Framework :: Flask\",\n \"Framework :: Pyramid\",\n \"Intended Audience :: Developers\",\n \"Topic :: System :: Monitoring\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating 
System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n)\n", "path": "setup.py"}]}
| 1,511 | 146 |
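
The change in the record above only swaps one requirement string, but the PEP 508 environment markers do the real work of targeting Python versions. Here is a small sketch of inspecting such requirements, assuming the third-party `packaging` library is installed; the requirement strings are copied from the record, with the deprecated `[secure]` extra dropped on the Python 3 line.

```python
# Parse and evaluate PEP 508 requirement strings with environment markers.
from packaging.requirements import Requirement

reqs = [
    'urllib3[secure] < 1.25 ; python_version < "3.5"',
    'urllib3 < 2 ; python_version >= "3.5"',
]

for line in reqs:
    req = Requirement(line)
    applies = req.marker.evaluate() if req.marker else True
    print(f"{req.name} {req.specifier} extras={sorted(req.extras)} "
          f"applies_on_this_interpreter={applies}")
```
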
gh_patches_debug_31075
|
rasdani/github-patches
|
git_diff
|
databricks__koalas-104
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix pypi description
See https://pypi.org/project/databricks-koalas/
It just pulls in our entire README in markdown format.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 #
4 # Copyright (C) 2019 Databricks, Inc.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17 #
18
19 from setuptools import setup
20
21
22 install_requires = [
23 'pandas>=0.23',
24 'decorator',
25 'pyarrow>=0.10,<0.11', # See https://github.com/databricks/spark-pandas/issues/26
26 ]
27
28 setup(
29 name='databricks-koalas',
30 version='0.0.6',
31 packages=['databricks', 'databricks.koalas', 'databricks.koalas.dask',
32 'databricks.koalas.missing'],
33 extras_require={
34 'spark': ['pyspark>=2.4.0'],
35 },
36 install_requires=install_requires,
37 author="Timothy Hunter",
38 author_email="[email protected]",
39 license='http://www.apache.org/licenses/LICENSE-2.0',
40 long_description=open('README.md').read(),
41 )
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,24 +18,46 @@
from setuptools import setup
+DESCRIPTION = "Pandas DataFrame API on Apache Spark"
-install_requires = [
- 'pandas>=0.23',
- 'decorator',
- 'pyarrow>=0.10,<0.11', # See https://github.com/databricks/spark-pandas/issues/26
-]
+LONG_DESCRIPTION = """
+Koalas makes data scientists more productive when interacting with big data,
+by augmenting Apache Spark's Python DataFrame API to be compatible with
+Pandas'.
+
+Pandas is the de facto standard (single-node) dataframe implementation in
+Python, while Spark is the de facto standard for big data processing.
+With this package, data scientists can:
+
+- Be immediately productive with Spark, with no learning curve, if one
+ is already familiar with Pandas.
+- Have a single codebase that works both with Pandas (tests, smaller datasets)
+ and with Spark (distributed datasets).
+"""
setup(
- name='databricks-koalas',
+ name='koalas',
version='0.0.6',
packages=['databricks', 'databricks.koalas', 'databricks.koalas.dask',
'databricks.koalas.missing'],
extras_require={
'spark': ['pyspark>=2.4.0'],
},
- install_requires=install_requires,
- author="Timothy Hunter",
- author_email="[email protected]",
+ python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
+ install_requires=[
+ 'pandas>=0.23',
+ 'decorator',
+ 'pyarrow>=0.10,<0.11', # See https://github.com/databricks/spark-pandas/issues/26
+ ],
+ maintainer="Databricks",
+ maintainer_email="[email protected]",
license='http://www.apache.org/licenses/LICENSE-2.0',
- long_description=open('README.md').read(),
+ url="https://github.com/databricks/spark-pandas",
+ project_urls={
+ 'Bug Tracker': 'https://github.com/databricks/spark-pandas/issues',
+ # 'Documentation': '',
+ 'Source Code': 'https://github.com/databricks/spark-pandas'
+ },
+ description=DESCRIPTION,
+ long_description=LONG_DESCRIPTION,
)
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,24 +18,46 @@\n \n from setuptools import setup\n \n+DESCRIPTION = \"Pandas DataFrame API on Apache Spark\"\n \n-install_requires = [\n- 'pandas>=0.23',\n- 'decorator',\n- 'pyarrow>=0.10,<0.11', # See https://github.com/databricks/spark-pandas/issues/26\n-]\n+LONG_DESCRIPTION = \"\"\"\n+Koalas makes data scientists more productive when interacting with big data,\n+by augmenting Apache Spark's Python DataFrame API to be compatible with\n+Pandas'.\n+\n+Pandas is the de facto standard (single-node) dataframe implementation in\n+Python, while Spark is the de facto standard for big data processing.\n+With this package, data scientists can:\n+\n+- Be immediately productive with Spark, with no learning curve, if one\n+ is already familiar with Pandas.\n+- Have a single codebase that works both with Pandas (tests, smaller datasets)\n+ and with Spark (distributed datasets).\n+\"\"\"\n \n setup(\n- name='databricks-koalas',\n+ name='koalas',\n version='0.0.6',\n packages=['databricks', 'databricks.koalas', 'databricks.koalas.dask',\n 'databricks.koalas.missing'],\n extras_require={\n 'spark': ['pyspark>=2.4.0'],\n },\n- install_requires=install_requires,\n- author=\"Timothy Hunter\",\n- author_email=\"[email protected]\",\n+ python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',\n+ install_requires=[\n+ 'pandas>=0.23',\n+ 'decorator',\n+ 'pyarrow>=0.10,<0.11', # See https://github.com/databricks/spark-pandas/issues/26\n+ ],\n+ maintainer=\"Databricks\",\n+ maintainer_email=\"[email protected]\",\n license='http://www.apache.org/licenses/LICENSE-2.0',\n- long_description=open('README.md').read(),\n+ url=\"https://github.com/databricks/spark-pandas\",\n+ project_urls={\n+ 'Bug Tracker': 'https://github.com/databricks/spark-pandas/issues',\n+ # 'Documentation': '',\n+ 'Source Code': 'https://github.com/databricks/spark-pandas'\n+ },\n+ description=DESCRIPTION,\n+ long_description=LONG_DESCRIPTION,\n )\n", "issue": "Fix pypi description\nSee https://pypi.org/project/databricks-koalas/\r\n\r\nIt just pulls in our entire README in markdown format.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom setuptools import setup\n\n\ninstall_requires = [\n 'pandas>=0.23',\n 'decorator',\n 'pyarrow>=0.10,<0.11', # See https://github.com/databricks/spark-pandas/issues/26\n]\n\nsetup(\n name='databricks-koalas',\n version='0.0.6',\n packages=['databricks', 'databricks.koalas', 'databricks.koalas.dask',\n 'databricks.koalas.missing'],\n extras_require={\n 'spark': ['pyspark>=2.4.0'],\n },\n install_requires=install_requires,\n author=\"Timothy Hunter\",\n author_email=\"[email protected]\",\n license='http://www.apache.org/licenses/LICENSE-2.0',\n long_description=open('README.md').read(),\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n#\n# Copyright (C) 2019 Databricks, 
Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom setuptools import setup\n\nDESCRIPTION = \"Pandas DataFrame API on Apache Spark\"\n\nLONG_DESCRIPTION = \"\"\"\nKoalas makes data scientists more productive when interacting with big data,\nby augmenting Apache Spark's Python DataFrame API to be compatible with\nPandas'.\n\nPandas is the de facto standard (single-node) dataframe implementation in\nPython, while Spark is the de facto standard for big data processing.\nWith this package, data scientists can:\n\n- Be immediately productive with Spark, with no learning curve, if one\n is already familiar with Pandas.\n- Have a single codebase that works both with Pandas (tests, smaller datasets)\n and with Spark (distributed datasets).\n\"\"\"\n\nsetup(\n name='koalas',\n version='0.0.6',\n packages=['databricks', 'databricks.koalas', 'databricks.koalas.dask',\n 'databricks.koalas.missing'],\n extras_require={\n 'spark': ['pyspark>=2.4.0'],\n },\n python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',\n install_requires=[\n 'pandas>=0.23',\n 'decorator',\n 'pyarrow>=0.10,<0.11', # See https://github.com/databricks/spark-pandas/issues/26\n ],\n maintainer=\"Databricks\",\n maintainer_email=\"[email protected]\",\n license='http://www.apache.org/licenses/LICENSE-2.0',\n url=\"https://github.com/databricks/spark-pandas\",\n project_urls={\n 'Bug Tracker': 'https://github.com/databricks/spark-pandas/issues',\n # 'Documentation': '',\n 'Source Code': 'https://github.com/databricks/spark-pandas'\n },\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n)\n", "path": "setup.py"}]}
| 705 | 597 |
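
The patch in the record above replaces the raw README dump with an explicit short description and a hand-written long description. A toy sketch of keeping those pieces separate is below; note that passing `long_description_content_type="text/markdown"` to setuptools would be the alternative if the Markdown README were kept as the long description.

```python
# Keep the short PyPI summary and the long description as separate strings
# instead of reading the whole README into long_description.
DESCRIPTION = "Pandas DataFrame API on Apache Spark"

LONG_DESCRIPTION = """
Koalas makes data scientists more productive when interacting with big data,
by augmenting Apache Spark's Python DataFrame API to be compatible with Pandas'.
"""

def pypi_metadata():
    """Return the subset of setup() keyword arguments that drive the PyPI page."""
    return {
        "description": DESCRIPTION,
        "long_description": LONG_DESCRIPTION.strip(),
    }

if __name__ == "__main__":
    for key, value in pypi_metadata().items():
        print(f"{key}: {value[:60]}...")
```
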
gh_patches_debug_31276
|
rasdani/github-patches
|
git_diff
|
pulp__pulpcore-3857
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Global tasks like repair or reclaim should probably not run in parallel
**Version**
Please provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.
**Describe the bug**
Global tasks that involve all repos like repair or reclaim should run one at a time, like orphan cleanup https://github.com/pulp/pulpcore/blob/main/pulpcore/app/viewsets/orphans.py#L29
**To Reproduce**
Steps to reproduce the behavior:
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
Add any other context about the problem here. Please provide links to any previous discussions via Discourse or Bugzilla.
--- END ISSUE ---
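Purely as an illustration of the pattern the issue asks for, the sketch below shows a global task dispatched with an exclusive resource string, so a second dispatch of the same task waits until the first one finishes. The lock URI `/api/v3/repair/` and the hard-coded `verify_checksums` value are assumptions for this example, not the project's actual values.
```python
from pulpcore.app.tasks import repair_all_artifacts
from pulpcore.tasking.tasks import dispatch

# Dispatching with a fixed exclusive resource serializes this global task:
# only one task holding "/api/v3/repair/" can run at any given time.
task = dispatch(
    repair_all_artifacts,
    exclusive_resources=["/api/v3/repair/"],  # assumed lock name
    args=[True],  # verify_checksums
)
```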
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/views/repair.py`
Content:
```
1 from drf_spectacular.utils import extend_schema
2 from rest_framework.views import APIView
3
4 from pulpcore.app.response import OperationPostponedResponse
5 from pulpcore.app.serializers import AsyncOperationResponseSerializer, RepairSerializer
6 from pulpcore.app.tasks import repair_all_artifacts
7 from pulpcore.tasking.tasks import dispatch
8
9
10 class RepairView(APIView):
11 @extend_schema(
12 description=(
13 "Trigger an asynchronous task that checks for missing "
14 "or corrupted artifacts, and attempts to redownload them."
15 ),
16 summary="Repair Artifact Storage",
17 request=RepairSerializer,
18 responses={202: AsyncOperationResponseSerializer},
19 )
20 def post(self, request):
21 """
22 Repair artifacts.
23 """
24 serializer = RepairSerializer(data=request.data)
25 serializer.is_valid(raise_exception=True)
26
27 verify_checksums = serializer.validated_data["verify_checksums"]
28
29 task = dispatch(repair_all_artifacts, args=[verify_checksums])
30
31 return OperationPostponedResponse(task, request)
32
```
Path: `pulpcore/app/viewsets/reclaim.py`
Content:
```
1 from drf_spectacular.utils import extend_schema
2 from rest_framework.viewsets import ViewSet
3
4 from pulpcore.app.response import OperationPostponedResponse
5 from pulpcore.app.serializers import AsyncOperationResponseSerializer, ReclaimSpaceSerializer
6 from pulpcore.app.tasks import reclaim_space
7 from pulpcore.tasking.tasks import dispatch
8
9
10 class ReclaimSpaceViewSet(ViewSet):
11 """
12 Viewset for reclaim disk space endpoint.
13 """
14
15 serializer_class = ReclaimSpaceSerializer
16
17 @extend_schema(
18 description="Trigger an asynchronous space reclaim operation.",
19 responses={202: AsyncOperationResponseSerializer},
20 )
21 def reclaim(self, request):
22 """
23 Triggers an asynchronous space reclaim operation.
24 """
25 serializer = ReclaimSpaceSerializer(data=request.data)
26 serializer.is_valid(raise_exception=True)
27
28 repos = serializer.validated_data.get("repo_hrefs", [])
29 keeplist = serializer.validated_data.get("repo_versions_keeplist", [])
30 reclaim_repo_pks = []
31 keeplist_rv_pks = []
32 for repo in repos:
33 reclaim_repo_pks.append(repo.pk)
34 for rv in keeplist:
35 repos.append(rv.repository)
36 keeplist_rv_pks.append(rv.pk)
37
38 task = dispatch(
39 reclaim_space,
40 shared_resources=repos,
41 kwargs={
42 "repo_pks": reclaim_repo_pks,
43 "keeplist_rv_pks": keeplist_rv_pks,
44 },
45 )
46
47 return OperationPostponedResponse(task, request)
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pulpcore/app/views/repair.py b/pulpcore/app/views/repair.py
--- a/pulpcore/app/views/repair.py
+++ b/pulpcore/app/views/repair.py
@@ -1,4 +1,5 @@
from drf_spectacular.utils import extend_schema
+from django.conf import settings
from rest_framework.views import APIView
from pulpcore.app.response import OperationPostponedResponse
@@ -26,6 +27,9 @@
verify_checksums = serializer.validated_data["verify_checksums"]
- task = dispatch(repair_all_artifacts, args=[verify_checksums])
+ uri = "/api/v3/repair/"
+ if settings.DOMAIN_ENABLED:
+ uri = f"/{request.pulp_domain.name}{uri}"
+ task = dispatch(repair_all_artifacts, exclusive_resources=[uri], args=[verify_checksums])
return OperationPostponedResponse(task, request)
diff --git a/pulpcore/app/viewsets/reclaim.py b/pulpcore/app/viewsets/reclaim.py
--- a/pulpcore/app/viewsets/reclaim.py
+++ b/pulpcore/app/viewsets/reclaim.py
@@ -1,4 +1,5 @@
from drf_spectacular.utils import extend_schema
+from django.conf import settings
from rest_framework.viewsets import ViewSet
from pulpcore.app.response import OperationPostponedResponse
@@ -35,8 +36,17 @@
repos.append(rv.repository)
keeplist_rv_pks.append(rv.pk)
+ if repos:
+ exclusive_resources = None
+ else:
+ uri = "/api/v3/repositories/reclaim_space/"
+ if settings.DOMAIN_ENABLED:
+ uri = f"/{request.pulp_domain.name}{uri}"
+ exclusive_resources = [uri]
+
task = dispatch(
reclaim_space,
+ exclusive_resources=exclusive_resources,
shared_resources=repos,
kwargs={
"repo_pks": reclaim_repo_pks,
|
{"golden_diff": "diff --git a/pulpcore/app/views/repair.py b/pulpcore/app/views/repair.py\n--- a/pulpcore/app/views/repair.py\n+++ b/pulpcore/app/views/repair.py\n@@ -1,4 +1,5 @@\n from drf_spectacular.utils import extend_schema\n+from django.conf import settings\n from rest_framework.views import APIView\n \n from pulpcore.app.response import OperationPostponedResponse\n@@ -26,6 +27,9 @@\n \n verify_checksums = serializer.validated_data[\"verify_checksums\"]\n \n- task = dispatch(repair_all_artifacts, args=[verify_checksums])\n+ uri = \"/api/v3/repair/\"\n+ if settings.DOMAIN_ENABLED:\n+ uri = f\"/{request.pulp_domain.name}{uri}\"\n+ task = dispatch(repair_all_artifacts, exclusive_resources=[uri], args=[verify_checksums])\n \n return OperationPostponedResponse(task, request)\ndiff --git a/pulpcore/app/viewsets/reclaim.py b/pulpcore/app/viewsets/reclaim.py\n--- a/pulpcore/app/viewsets/reclaim.py\n+++ b/pulpcore/app/viewsets/reclaim.py\n@@ -1,4 +1,5 @@\n from drf_spectacular.utils import extend_schema\n+from django.conf import settings\n from rest_framework.viewsets import ViewSet\n \n from pulpcore.app.response import OperationPostponedResponse\n@@ -35,8 +36,17 @@\n repos.append(rv.repository)\n keeplist_rv_pks.append(rv.pk)\n \n+ if repos:\n+ exclusive_resources = None\n+ else:\n+ uri = \"/api/v3/repositories/reclaim_space/\"\n+ if settings.DOMAIN_ENABLED:\n+ uri = f\"/{request.pulp_domain.name}{uri}\"\n+ exclusive_resources = [uri]\n+\n task = dispatch(\n reclaim_space,\n+ exclusive_resources=exclusive_resources,\n shared_resources=repos,\n kwargs={\n \"repo_pks\": reclaim_repo_pks,\n", "issue": "Global tasks like repair or reclaim should probably not run in parallel\n**Version**\r\nPlease provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.\r\n\r\n**Describe the bug**\r\nGlobal tasks that involve all repos like repair or reclaim should run one at a time, like orphan cleanup https://github.com/pulp/pulpcore/blob/main/pulpcore/app/viewsets/orphans.py#L29\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Additional context**\r\nAdd any other context about the problem here. 
Please provide links to any previous discussions via Discourse or Bugzilla.\r\n\n", "before_files": [{"content": "from drf_spectacular.utils import extend_schema\nfrom rest_framework.views import APIView\n\nfrom pulpcore.app.response import OperationPostponedResponse\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, RepairSerializer\nfrom pulpcore.app.tasks import repair_all_artifacts\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass RepairView(APIView):\n @extend_schema(\n description=(\n \"Trigger an asynchronous task that checks for missing \"\n \"or corrupted artifacts, and attempts to redownload them.\"\n ),\n summary=\"Repair Artifact Storage\",\n request=RepairSerializer,\n responses={202: AsyncOperationResponseSerializer},\n )\n def post(self, request):\n \"\"\"\n Repair artifacts.\n \"\"\"\n serializer = RepairSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n verify_checksums = serializer.validated_data[\"verify_checksums\"]\n\n task = dispatch(repair_all_artifacts, args=[verify_checksums])\n\n return OperationPostponedResponse(task, request)\n", "path": "pulpcore/app/views/repair.py"}, {"content": "from drf_spectacular.utils import extend_schema\nfrom rest_framework.viewsets import ViewSet\n\nfrom pulpcore.app.response import OperationPostponedResponse\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, ReclaimSpaceSerializer\nfrom pulpcore.app.tasks import reclaim_space\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass ReclaimSpaceViewSet(ViewSet):\n \"\"\"\n Viewset for reclaim disk space endpoint.\n \"\"\"\n\n serializer_class = ReclaimSpaceSerializer\n\n @extend_schema(\n description=\"Trigger an asynchronous space reclaim operation.\",\n responses={202: AsyncOperationResponseSerializer},\n )\n def reclaim(self, request):\n \"\"\"\n Triggers an asynchronous space reclaim operation.\n \"\"\"\n serializer = ReclaimSpaceSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n repos = serializer.validated_data.get(\"repo_hrefs\", [])\n keeplist = serializer.validated_data.get(\"repo_versions_keeplist\", [])\n reclaim_repo_pks = []\n keeplist_rv_pks = []\n for repo in repos:\n reclaim_repo_pks.append(repo.pk)\n for rv in keeplist:\n repos.append(rv.repository)\n keeplist_rv_pks.append(rv.pk)\n\n task = dispatch(\n reclaim_space,\n shared_resources=repos,\n kwargs={\n \"repo_pks\": reclaim_repo_pks,\n \"keeplist_rv_pks\": keeplist_rv_pks,\n },\n )\n\n return OperationPostponedResponse(task, request)\n", "path": "pulpcore/app/viewsets/reclaim.py"}], "after_files": [{"content": "from drf_spectacular.utils import extend_schema\nfrom django.conf import settings\nfrom rest_framework.views import APIView\n\nfrom pulpcore.app.response import OperationPostponedResponse\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, RepairSerializer\nfrom pulpcore.app.tasks import repair_all_artifacts\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass RepairView(APIView):\n @extend_schema(\n description=(\n \"Trigger an asynchronous task that checks for missing \"\n \"or corrupted artifacts, and attempts to redownload them.\"\n ),\n summary=\"Repair Artifact Storage\",\n request=RepairSerializer,\n responses={202: AsyncOperationResponseSerializer},\n )\n def post(self, request):\n \"\"\"\n Repair artifacts.\n \"\"\"\n serializer = RepairSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n verify_checksums = 
serializer.validated_data[\"verify_checksums\"]\n\n uri = \"/api/v3/repair/\"\n if settings.DOMAIN_ENABLED:\n uri = f\"/{request.pulp_domain.name}{uri}\"\n task = dispatch(repair_all_artifacts, exclusive_resources=[uri], args=[verify_checksums])\n\n return OperationPostponedResponse(task, request)\n", "path": "pulpcore/app/views/repair.py"}, {"content": "from drf_spectacular.utils import extend_schema\nfrom django.conf import settings\nfrom rest_framework.viewsets import ViewSet\n\nfrom pulpcore.app.response import OperationPostponedResponse\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, ReclaimSpaceSerializer\nfrom pulpcore.app.tasks import reclaim_space\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass ReclaimSpaceViewSet(ViewSet):\n \"\"\"\n Viewset for reclaim disk space endpoint.\n \"\"\"\n\n serializer_class = ReclaimSpaceSerializer\n\n @extend_schema(\n description=\"Trigger an asynchronous space reclaim operation.\",\n responses={202: AsyncOperationResponseSerializer},\n )\n def reclaim(self, request):\n \"\"\"\n Triggers an asynchronous space reclaim operation.\n \"\"\"\n serializer = ReclaimSpaceSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n repos = serializer.validated_data.get(\"repo_hrefs\", [])\n keeplist = serializer.validated_data.get(\"repo_versions_keeplist\", [])\n reclaim_repo_pks = []\n keeplist_rv_pks = []\n for repo in repos:\n reclaim_repo_pks.append(repo.pk)\n for rv in keeplist:\n repos.append(rv.repository)\n keeplist_rv_pks.append(rv.pk)\n\n if repos:\n exclusive_resources = None\n else:\n uri = \"/api/v3/repositories/reclaim_space/\"\n if settings.DOMAIN_ENABLED:\n uri = f\"/{request.pulp_domain.name}{uri}\"\n exclusive_resources = [uri]\n\n task = dispatch(\n reclaim_space,\n exclusive_resources=exclusive_resources,\n shared_resources=repos,\n kwargs={\n \"repo_pks\": reclaim_repo_pks,\n \"keeplist_rv_pks\": keeplist_rv_pks,\n },\n )\n\n return OperationPostponedResponse(task, request)\n", "path": "pulpcore/app/viewsets/reclaim.py"}]}
| 1,123 | 433 |
gh_patches_debug_6402
|
rasdani/github-patches
|
git_diff
|
engnadeau__pybotics-425
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Strip links/badges/images from README for PyPi
- While the new PyPi Warehouse supports markdown, images/badges/links are slow to load and will not work if they use relative paths (e.g., https://test.pypi.org/project/pybotics/201803222157/#description)
- These elements should be stripped from the README prior to upload
--- END ISSUE ---
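As a rough sketch of the clean-up step described above (removing badges, images and their links from the README before it becomes the PyPI long description), a regex-based helper is shown below. The patterns are heuristics assumed for this example; a real implementation would need tuning against the actual README contents.
```python
import re


def strip_badges(markdown: str) -> str:
    # Linked badges of the form [![alt](badge.svg)](https://ci.example).
    markdown = re.sub(r"\[!\[[^\]]*\]\([^)]*\)\]\([^)]*\)", "", markdown)
    # Plain image embeds of the form ![alt](image.png).
    markdown = re.sub(r"!\[[^\]]*\]\([^)]*\)", "", markdown)
    # Raw HTML image tags.
    markdown = re.sub(r"<img[^>]*>", "", markdown)
    return markdown
```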
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """Setup module."""
2 import logging
3 from pathlib import Path
4
5 from setuptools import find_packages, setup # type: ignore
6
7
8 def main() -> None:
9 """Run setup."""
10 # run setup
11 setup(name='pybotics',
12 packages=find_packages(include=['pybotics']),
13 url='https://github.com/nnadeau/pybotics',
14 license='MIT',
15 author='Nicholas Nadeau',
16 author_email='[email protected]',
17 description='Python Toolbox for Robotics',
18 long_description=get_readme(),
19 long_description_content_type='text/markdown',
20 use_scm_version=True,
21 setup_requires=['setuptools_scm'],
22 install_requires=get_requirements(), # type: ignore
23 tests_require=['pytest'],
24 classifiers=[
25 'Development Status :: 4 - Beta',
26 'Intended Audience :: Developers',
27 'Intended Audience :: Education',
28 'Intended Audience :: End Users/Desktop',
29 'Intended Audience :: Manufacturing',
30 'Intended Audience :: Science/Research',
31 'Topic :: Education',
32 'Topic :: Scientific/Engineering',
33 'Topic :: Scientific/Engineering :: Artificial Intelligence',
34 'Topic :: Scientific/Engineering :: Human Machine Interfaces',
35 'Topic :: Scientific/Engineering :: Mathematics',
36 'Topic :: Scientific/Engineering :: Physics',
37 'Topic :: Utilities',
38 'License :: OSI Approved :: MIT License',
39 'Programming Language :: Python :: 3 :: Only',
40 'Programming Language :: Python :: 3',
41 'Programming Language :: Python :: 3.4',
42 'Programming Language :: Python :: 3.5',
43 'Programming Language :: Python :: 3.6',
44 ],
45 keywords='python robot robotics research '
46 'automation kinematics geometry')
47
48
49 def get_readme() -> str:
50 """Get README text."""
51 # description
52 readme_path = Path(__file__).parent / 'README.md'
53 logging.info('README path: {}'.format(readme_path.resolve()))
54 with open(str(readme_path)) as f:
55 readme = f.read()
56 return readme
57
58
59 # don't want to import typing... so ignore
60 def get_requirements(): # type: ignore
61 """Get requirements list."""
62 # requirements
63 requirements_path = Path(__file__).parent / 'requirements.txt'
64 logging.info('Requirements path: {}'.format(requirements_path.resolve()))
65 with open(str(requirements_path)) as f:
66 requirements = f.read().splitlines()
67 for i, req in enumerate(requirements):
68 requirements[i] = req.split()[0]
69 return requirements
70
71
72 if __name__ == '__main__':
73 logging.basicConfig(level=logging.INFO)
74 main()
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@
long_description=get_readme(),
long_description_content_type='text/markdown',
use_scm_version=True,
- setup_requires=['setuptools_scm'],
+ setup_requires=['setuptools', 'setuptools_scm'],
install_requires=get_requirements(), # type: ignore
tests_require=['pytest'],
classifiers=[
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,7 +18,7 @@\n long_description=get_readme(),\n long_description_content_type='text/markdown',\n use_scm_version=True,\n- setup_requires=['setuptools_scm'],\n+ setup_requires=['setuptools', 'setuptools_scm'],\n install_requires=get_requirements(), # type: ignore\n tests_require=['pytest'],\n classifiers=[\n", "issue": "Strip links/badges/images from README for PyPi\n- While the new PyPi Warehouse supports markdown, images/badges/links are slow to load and will not work if they use relative paths (e.g., https://test.pypi.org/project/pybotics/201803222157/#description)\r\n- These elements should be stripped from the README prior to upload\n", "before_files": [{"content": "\"\"\"Setup module.\"\"\"\nimport logging\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup # type: ignore\n\n\ndef main() -> None:\n \"\"\"Run setup.\"\"\"\n # run setup\n setup(name='pybotics',\n packages=find_packages(include=['pybotics']),\n url='https://github.com/nnadeau/pybotics',\n license='MIT',\n author='Nicholas Nadeau',\n author_email='[email protected]',\n description='Python Toolbox for Robotics',\n long_description=get_readme(),\n long_description_content_type='text/markdown',\n use_scm_version=True,\n setup_requires=['setuptools_scm'],\n install_requires=get_requirements(), # type: ignore\n tests_require=['pytest'],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Manufacturing',\n 'Intended Audience :: Science/Research',\n 'Topic :: Education',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Human Machine Interfaces',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords='python robot robotics research '\n 'automation kinematics geometry')\n\n\ndef get_readme() -> str:\n \"\"\"Get README text.\"\"\"\n # description\n readme_path = Path(__file__).parent / 'README.md'\n logging.info('README path: {}'.format(readme_path.resolve()))\n with open(str(readme_path)) as f:\n readme = f.read()\n return readme\n\n\n# don't want to import typing... 
so ignore\ndef get_requirements(): # type: ignore\n \"\"\"Get requirements list.\"\"\"\n # requirements\n requirements_path = Path(__file__).parent / 'requirements.txt'\n logging.info('Requirements path: {}'.format(requirements_path.resolve()))\n with open(str(requirements_path)) as f:\n requirements = f.read().splitlines()\n for i, req in enumerate(requirements):\n requirements[i] = req.split()[0]\n return requirements\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n main()\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"Setup module.\"\"\"\nimport logging\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup # type: ignore\n\n\ndef main() -> None:\n \"\"\"Run setup.\"\"\"\n # run setup\n setup(name='pybotics',\n packages=find_packages(include=['pybotics']),\n url='https://github.com/nnadeau/pybotics',\n license='MIT',\n author='Nicholas Nadeau',\n author_email='[email protected]',\n description='Python Toolbox for Robotics',\n long_description=get_readme(),\n long_description_content_type='text/markdown',\n use_scm_version=True,\n setup_requires=['setuptools', 'setuptools_scm'],\n install_requires=get_requirements(), # type: ignore\n tests_require=['pytest'],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Manufacturing',\n 'Intended Audience :: Science/Research',\n 'Topic :: Education',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Human Machine Interfaces',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords='python robot robotics research '\n 'automation kinematics geometry')\n\n\ndef get_readme() -> str:\n \"\"\"Get README text.\"\"\"\n # description\n readme_path = Path(__file__).parent / 'README.md'\n logging.info('README path: {}'.format(readme_path.resolve()))\n with open(str(readme_path)) as f:\n readme = f.read()\n return readme\n\n\n# don't want to import typing... so ignore\ndef get_requirements(): # type: ignore\n \"\"\"Get requirements list.\"\"\"\n # requirements\n requirements_path = Path(__file__).parent / 'requirements.txt'\n logging.info('Requirements path: {}'.format(requirements_path.resolve()))\n with open(str(requirements_path)) as f:\n requirements = f.read().splitlines()\n for i, req in enumerate(requirements):\n requirements[i] = req.split()[0]\n return requirements\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n main()\n", "path": "setup.py"}]}
| 1,043 | 101 |
gh_patches_debug_32678
|
rasdani/github-patches
|
git_diff
|
raspiblitz__raspiblitz-1227
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move BlitzTUI log to RAM disk
The RAM disk (`/var/cache/raspiblitz/`) is a nice way to avoid reads and writes to either the SD card or the external disk for non-persistent data. Several things can be moved; the BlitzTUI log should be a prime example.
--- END ISSUE ---
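To make the idea concrete, here is a minimal sketch of pointing a rotating log handler at the RAM disk with a per-user subdirectory, falling back to the home directory when the RAM disk is absent. The directory layout and the 2 MB rotation size are illustrative assumptions.
```python
import getpass
import logging.handlers
import os

RAM_DIR = "/var/cache/raspiblitz"

if os.path.isdir(RAM_DIR):
    # Keep the non-persistent log on the RAM disk, one subdirectory per user.
    log_dir = os.path.join(RAM_DIR, getpass.getuser())
    os.makedirs(log_dir, exist_ok=True)
    log_file = os.path.join(log_dir, "blitz-tui.log")
else:
    # Fall back to the home directory when the RAM disk is not mounted.
    log_file = os.path.expanduser("~/blitz-tui.log")

handler = logging.handlers.RotatingFileHandler(
    log_file, maxBytes=2 * 1024 * 1024, backupCount=1, encoding="utf8"
)
```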
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `home.admin/BlitzTUI/blitztui/version.py`
Content:
```
1 """ Store the version here so:
2 # 1) we don't load dependencies by storing it in __init__.py
3 # 2) we can import it in setup.py for the same reason
4 # 3) we can import it into your module module
5 """
6
7 __version_info__ = ('0', '47', '0')
8 __version__ = '.'.join(__version_info__)
9
```
Path: `home.admin/BlitzTUI/blitztui/file_logger.py`
Content:
```
1 import json
2 import logging
3 import logging.config
4 import os
5 import sys
6
7 IS_WIN32_ENV = sys.platform == "win32"
8
9
10 def setup_logging(default_path=os.path.abspath(os.path.expanduser('~/.blitz-tui.json')), log_level="INFO"):
11 """Setup logging configuration"""
12 path = default_path
13 if os.path.exists(path):
14 with open(path, 'rt') as f:
15 config = json.load(f)
16 logging.config.dictConfig(config)
17
18 else: # if $default_path does not exist use the following default log setup
19
20 if IS_WIN32_ENV:
21 log_file = "blitz-tui.log"
22 else:
23 log_file = os.path.abspath(os.path.expanduser('~/blitz-tui.log'))
24
25 default_config_as_dict = dict(
26 version=1,
27 disable_existing_loggers=False,
28 formatters={'simple': {'format': '%(asctime)s - %(levelname)s - %(message)s'},
29 'extended': {
30 'format': '%(asctime)s - %(name)s - %(levelname)s - %(module)s:%(lineno)d - %(message)s'}},
31 handlers={'console': {'class': 'logging.StreamHandler',
32 'level': 'INFO',
33 'formatter': 'extended',
34 'stream': 'ext://sys.stdout'},
35 'file_handler': {'class': 'logging.handlers.RotatingFileHandler',
36 'level': log_level,
37 'formatter': 'extended',
38 'filename': log_file,
39 'maxBytes': 10485760,
40 'backupCount': 0,
41 'encoding': 'utf8'}},
42 loggers={'infoblitz': {'level': 'DEBUG',
43 'handlers': ['console', 'file_handler'],
44 'propagate': 'no'}},
45 root={'level': 'DEBUG', 'handlers': ['console', 'file_handler']}
46 )
47
48 logging.config.dictConfig(default_config_as_dict)
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/home.admin/BlitzTUI/blitztui/file_logger.py b/home.admin/BlitzTUI/blitztui/file_logger.py
--- a/home.admin/BlitzTUI/blitztui/file_logger.py
+++ b/home.admin/BlitzTUI/blitztui/file_logger.py
@@ -1,3 +1,4 @@
+import getpass
import json
import logging
import logging.config
@@ -20,7 +21,14 @@
if IS_WIN32_ENV:
log_file = "blitz-tui.log"
else:
- log_file = os.path.abspath(os.path.expanduser('~/blitz-tui.log'))
+ if os.path.isdir('/var/cache/raspiblitz'):
+ try:
+ os.mkdir('/var/cache/raspiblitz/{}'.format(getpass.getuser()))
+ except FileExistsError:
+ pass
+ log_file = os.path.abspath('/var/cache/raspiblitz/{}/blitz-tui.log'.format(getpass.getuser()))
+ else:
+ log_file = os.path.abspath(os.path.expanduser('~/blitz-tui.log'))
default_config_as_dict = dict(
version=1,
@@ -36,8 +44,8 @@
'level': log_level,
'formatter': 'extended',
'filename': log_file,
- 'maxBytes': 10485760,
- 'backupCount': 0,
+ 'maxBytes': 2*1024*1024, # 2 MB
+ 'backupCount': 1,
'encoding': 'utf8'}},
loggers={'infoblitz': {'level': 'DEBUG',
'handlers': ['console', 'file_handler'],
diff --git a/home.admin/BlitzTUI/blitztui/version.py b/home.admin/BlitzTUI/blitztui/version.py
--- a/home.admin/BlitzTUI/blitztui/version.py
+++ b/home.admin/BlitzTUI/blitztui/version.py
@@ -4,5 +4,5 @@
# 3) we can import it into your module module
"""
-__version_info__ = ('0', '47', '0')
+__version_info__ = ('0', '48', '1')
__version__ = '.'.join(__version_info__)
|
{"golden_diff": "diff --git a/home.admin/BlitzTUI/blitztui/file_logger.py b/home.admin/BlitzTUI/blitztui/file_logger.py\n--- a/home.admin/BlitzTUI/blitztui/file_logger.py\n+++ b/home.admin/BlitzTUI/blitztui/file_logger.py\n@@ -1,3 +1,4 @@\n+import getpass\n import json\n import logging\n import logging.config\n@@ -20,7 +21,14 @@\n if IS_WIN32_ENV:\n log_file = \"blitz-tui.log\"\n else:\n- log_file = os.path.abspath(os.path.expanduser('~/blitz-tui.log'))\n+ if os.path.isdir('/var/cache/raspiblitz'):\n+ try:\n+ os.mkdir('/var/cache/raspiblitz/{}'.format(getpass.getuser()))\n+ except FileExistsError:\n+ pass\n+ log_file = os.path.abspath('/var/cache/raspiblitz/{}/blitz-tui.log'.format(getpass.getuser()))\n+ else:\n+ log_file = os.path.abspath(os.path.expanduser('~/blitz-tui.log'))\n \n default_config_as_dict = dict(\n version=1,\n@@ -36,8 +44,8 @@\n 'level': log_level,\n 'formatter': 'extended',\n 'filename': log_file,\n- 'maxBytes': 10485760,\n- 'backupCount': 0,\n+ 'maxBytes': 2*1024*1024, # 2 MB\n+ 'backupCount': 1,\n 'encoding': 'utf8'}},\n loggers={'infoblitz': {'level': 'DEBUG',\n 'handlers': ['console', 'file_handler'],\ndiff --git a/home.admin/BlitzTUI/blitztui/version.py b/home.admin/BlitzTUI/blitztui/version.py\n--- a/home.admin/BlitzTUI/blitztui/version.py\n+++ b/home.admin/BlitzTUI/blitztui/version.py\n@@ -4,5 +4,5 @@\n # 3) we can import it into your module module\n \"\"\"\n \n-__version_info__ = ('0', '47', '0')\n+__version_info__ = ('0', '48', '1')\n __version__ = '.'.join(__version_info__)\n", "issue": "Move BlitzTUI log to RAM disk\nThe RAM disk (`/var/cache/raspiblitz/`) is a nice way to avoid reads and writes to either the SD card or the external disk for non-persistent data. Several things can be moved; the BlitzTUI log should be a prime example.\n", "before_files": [{"content": "\"\"\" Store the version here so:\n# 1) we don't load dependencies by storing it in __init__.py\n# 2) we can import it in setup.py for the same reason\n# 3) we can import it into your module module\n\"\"\"\n\n__version_info__ = ('0', '47', '0')\n__version__ = '.'.join(__version_info__)\n", "path": "home.admin/BlitzTUI/blitztui/version.py"}, {"content": "import json\nimport logging\nimport logging.config\nimport os\nimport sys\n\nIS_WIN32_ENV = sys.platform == \"win32\"\n\n\ndef setup_logging(default_path=os.path.abspath(os.path.expanduser('~/.blitz-tui.json')), log_level=\"INFO\"):\n \"\"\"Setup logging configuration\"\"\"\n path = default_path\n if os.path.exists(path):\n with open(path, 'rt') as f:\n config = json.load(f)\n logging.config.dictConfig(config)\n\n else: # if $default_path does not exist use the following default log setup\n\n if IS_WIN32_ENV:\n log_file = \"blitz-tui.log\"\n else:\n log_file = os.path.abspath(os.path.expanduser('~/blitz-tui.log'))\n\n default_config_as_dict = dict(\n version=1,\n disable_existing_loggers=False,\n formatters={'simple': {'format': '%(asctime)s - %(levelname)s - %(message)s'},\n 'extended': {\n 'format': '%(asctime)s - %(name)s - %(levelname)s - %(module)s:%(lineno)d - %(message)s'}},\n handlers={'console': {'class': 'logging.StreamHandler',\n 'level': 'INFO',\n 'formatter': 'extended',\n 'stream': 'ext://sys.stdout'},\n 'file_handler': {'class': 'logging.handlers.RotatingFileHandler',\n 'level': log_level,\n 'formatter': 'extended',\n 'filename': log_file,\n 'maxBytes': 10485760,\n 'backupCount': 0,\n 'encoding': 'utf8'}},\n loggers={'infoblitz': {'level': 'DEBUG',\n 'handlers': ['console', 'file_handler'],\n 'propagate': 'no'}},\n root={'level': 
'DEBUG', 'handlers': ['console', 'file_handler']}\n )\n\n logging.config.dictConfig(default_config_as_dict)\n", "path": "home.admin/BlitzTUI/blitztui/file_logger.py"}], "after_files": [{"content": "\"\"\" Store the version here so:\n# 1) we don't load dependencies by storing it in __init__.py\n# 2) we can import it in setup.py for the same reason\n# 3) we can import it into your module module\n\"\"\"\n\n__version_info__ = ('0', '48', '1')\n__version__ = '.'.join(__version_info__)\n", "path": "home.admin/BlitzTUI/blitztui/version.py"}, {"content": "import getpass\nimport json\nimport logging\nimport logging.config\nimport os\nimport sys\n\nIS_WIN32_ENV = sys.platform == \"win32\"\n\n\ndef setup_logging(default_path=os.path.abspath(os.path.expanduser('~/.blitz-tui.json')), log_level=\"INFO\"):\n \"\"\"Setup logging configuration\"\"\"\n path = default_path\n if os.path.exists(path):\n with open(path, 'rt') as f:\n config = json.load(f)\n logging.config.dictConfig(config)\n\n else: # if $default_path does not exist use the following default log setup\n\n if IS_WIN32_ENV:\n log_file = \"blitz-tui.log\"\n else:\n if os.path.isdir('/var/cache/raspiblitz'):\n try:\n os.mkdir('/var/cache/raspiblitz/{}'.format(getpass.getuser()))\n except FileExistsError:\n pass\n log_file = os.path.abspath('/var/cache/raspiblitz/{}/blitz-tui.log'.format(getpass.getuser()))\n else:\n log_file = os.path.abspath(os.path.expanduser('~/blitz-tui.log'))\n\n default_config_as_dict = dict(\n version=1,\n disable_existing_loggers=False,\n formatters={'simple': {'format': '%(asctime)s - %(levelname)s - %(message)s'},\n 'extended': {\n 'format': '%(asctime)s - %(name)s - %(levelname)s - %(module)s:%(lineno)d - %(message)s'}},\n handlers={'console': {'class': 'logging.StreamHandler',\n 'level': 'INFO',\n 'formatter': 'extended',\n 'stream': 'ext://sys.stdout'},\n 'file_handler': {'class': 'logging.handlers.RotatingFileHandler',\n 'level': log_level,\n 'formatter': 'extended',\n 'filename': log_file,\n 'maxBytes': 2*1024*1024, # 2 MB\n 'backupCount': 1,\n 'encoding': 'utf8'}},\n loggers={'infoblitz': {'level': 'DEBUG',\n 'handlers': ['console', 'file_handler'],\n 'propagate': 'no'}},\n root={'level': 'DEBUG', 'handlers': ['console', 'file_handler']}\n )\n\n logging.config.dictConfig(default_config_as_dict)\n", "path": "home.admin/BlitzTUI/blitztui/file_logger.py"}]}
| 951 | 523 |
gh_patches_debug_1532
|
rasdani/github-patches
|
git_diff
|
mne-tools__mne-bids-259
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update text before release
The setup.py still states that this is experimental. I think it's grown up a fair bit :) this is the text that ends up on pypi. we should update it to reflect the package's aim
https://github.com/mne-tools/mne-bids/blob/f8f267b45ac36e1600ea9ceb5540299e1bf3ab21/setup.py#L17
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #! /usr/bin/env python
2 """Setup MNE-BIDS."""
3 import os
4 from setuptools import setup, find_packages
5
6 # get the version
7 version = None
8 with open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:
9 for line in (line.strip() for line in fid):
10 if line.startswith('__version__'):
11 version = line.split('=')[1].strip().strip('\'')
12 break
13 if version is None:
14 raise RuntimeError('Could not determine version')
15
16
17 descr = """Experimental code for BIDS using MNE."""
18
19 DISTNAME = 'mne-bids'
20 DESCRIPTION = descr
21 MAINTAINER = 'Mainak Jas'
22 MAINTAINER_EMAIL = '[email protected]'
23 URL = 'https://mne-tools.github.io/mne-bids/'
24 LICENSE = 'BSD (3-clause)'
25 DOWNLOAD_URL = 'http://github.com/mne-tools/mne-bids'
26 VERSION = version
27
28 if __name__ == "__main__":
29 setup(name=DISTNAME,
30 maintainer=MAINTAINER,
31 maintainer_email=MAINTAINER_EMAIL,
32 description=DESCRIPTION,
33 license=LICENSE,
34 url=URL,
35 version=VERSION,
36 download_url=DOWNLOAD_URL,
37 long_description=open('README.rst').read(),
38 long_description_content_type='text/x-rst',
39 classifiers=[
40 'Intended Audience :: Science/Research',
41 'Intended Audience :: Developers',
42 'License :: OSI Approved',
43 'Programming Language :: Python',
44 'Topic :: Software Development',
45 'Topic :: Scientific/Engineering',
46 'Operating System :: Microsoft :: Windows',
47 'Operating System :: POSIX',
48 'Operating System :: Unix',
49 'Operating System :: MacOS',
50 ],
51 platforms='any',
52 packages=find_packages(),
53 scripts=['bin/mne_bids']
54 )
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -14,7 +14,8 @@
raise RuntimeError('Could not determine version')
-descr = """Experimental code for BIDS using MNE."""
+descr = ('An MNE project for organizing and formatting MEG and EEG data '
+ 'according to the BIDS specification.')
DISTNAME = 'mne-bids'
DESCRIPTION = descr
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,7 +14,8 @@\n raise RuntimeError('Could not determine version')\n \n \n-descr = \"\"\"Experimental code for BIDS using MNE.\"\"\"\n+descr = ('An MNE project for organizing and formatting MEG and EEG data '\n+ 'according to the BIDS specification.')\n \n DISTNAME = 'mne-bids'\n DESCRIPTION = descr\n", "issue": "Update text before release\nThe setup.py still states that this is experimental. I think it's grown up a fair bit :) this is the text that ends up on pypi. we should update it to reflect the package's aim\r\n\r\nhttps://github.com/mne-tools/mne-bids/blob/f8f267b45ac36e1600ea9ceb5540299e1bf3ab21/setup.py#L17\n", "before_files": [{"content": "#! /usr/bin/env python\n\"\"\"Setup MNE-BIDS.\"\"\"\nimport os\nfrom setuptools import setup, find_packages\n\n# get the version\nversion = None\nwith open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:\n for line in (line.strip() for line in fid):\n if line.startswith('__version__'):\n version = line.split('=')[1].strip().strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\ndescr = \"\"\"Experimental code for BIDS using MNE.\"\"\"\n\nDISTNAME = 'mne-bids'\nDESCRIPTION = descr\nMAINTAINER = 'Mainak Jas'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://mne-tools.github.io/mne-bids/'\nLICENSE = 'BSD (3-clause)'\nDOWNLOAD_URL = 'http://github.com/mne-tools/mne-bids'\nVERSION = version\n\nif __name__ == \"__main__\":\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=VERSION,\n download_url=DOWNLOAD_URL,\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n platforms='any',\n packages=find_packages(),\n scripts=['bin/mne_bids']\n )\n", "path": "setup.py"}], "after_files": [{"content": "#! 
/usr/bin/env python\n\"\"\"Setup MNE-BIDS.\"\"\"\nimport os\nfrom setuptools import setup, find_packages\n\n# get the version\nversion = None\nwith open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:\n for line in (line.strip() for line in fid):\n if line.startswith('__version__'):\n version = line.split('=')[1].strip().strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\ndescr = ('An MNE project for organizing and formatting MEG and EEG data '\n 'according to the BIDS specification.')\n\nDISTNAME = 'mne-bids'\nDESCRIPTION = descr\nMAINTAINER = 'Mainak Jas'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://mne-tools.github.io/mne-bids/'\nLICENSE = 'BSD (3-clause)'\nDOWNLOAD_URL = 'http://github.com/mne-tools/mne-bids'\nVERSION = version\n\nif __name__ == \"__main__\":\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=VERSION,\n download_url=DOWNLOAD_URL,\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n platforms='any',\n packages=find_packages(),\n scripts=['bin/mne_bids']\n )\n", "path": "setup.py"}]}
| 847 | 97 |
gh_patches_debug_22071
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-1919
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Failure to get the container id
With #1888 in place, there's a regression when running inside a container. There's an assumption in https://github.com/pre-commit/pre-commit/blob/master/pre_commit/languages/docker.py#L32 that the hostname is the container ID which is not always the case (it's easy enough to set a different hostname with `docker run --hostname foo`). It causes the `docker inspect` command that follows to fail. A more reliable way to get the container id is from `/proc/1/cpuset` or from the first line in `/proc/1/cgroup` which is already checked in `_is_in_docker`.
Thanks @asottile and @okainov for your work on #1387 and pre-commit in general.
--- END ISSUE ---
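A short sketch of the `/proc/1/cgroup` approach suggested in the issue is shown below; it assumes the cgroup v1 layout where each line looks like `N:controller:/docker/<container-id>`, and the helper name is illustrative rather than part of the repository.
```python
import os


def get_container_id() -> str:
    # The basename of the cpuset entry in /proc/1/cgroup is the container id.
    with open("/proc/1/cgroup", "rb") as f:
        for line in f:
            if line.split(b":")[1] == b"cpuset":
                return os.path.basename(line.split(b":")[2]).strip().decode()
    raise RuntimeError("container id not found in /proc/1/cgroup")
```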
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/languages/docker.py`
Content:
```
1 import hashlib
2 import json
3 import os
4 import socket
5 from typing import Sequence
6 from typing import Tuple
7
8 import pre_commit.constants as C
9 from pre_commit.hook import Hook
10 from pre_commit.languages import helpers
11 from pre_commit.prefix import Prefix
12 from pre_commit.util import clean_path_on_failure
13 from pre_commit.util import cmd_output_b
14
15 ENVIRONMENT_DIR = 'docker'
16 PRE_COMMIT_LABEL = 'PRE_COMMIT'
17 get_default_version = helpers.basic_get_default_version
18 healthy = helpers.basic_healthy
19
20
21 def _is_in_docker() -> bool:
22 try:
23 with open('/proc/1/cgroup', 'rb') as f:
24 return b'docker' in f.read()
25 except FileNotFoundError:
26 return False
27
28
29 def _get_docker_path(path: str) -> str:
30 if not _is_in_docker():
31 return path
32 hostname = socket.gethostname()
33
34 _, out, _ = cmd_output_b('docker', 'inspect', hostname)
35
36 container, = json.loads(out)
37 for mount in container['Mounts']:
38 src_path = mount['Source']
39 to_path = mount['Destination']
40 if os.path.commonpath((path, to_path)) == to_path:
41 # So there is something in common,
42 # and we can proceed remapping it
43 return path.replace(to_path, src_path)
44 # we're in Docker, but the path is not mounted, cannot really do anything,
45 # so fall back to original path
46 return path
47
48
49 def md5(s: str) -> str: # pragma: win32 no cover
50 return hashlib.md5(s.encode()).hexdigest()
51
52
53 def docker_tag(prefix: Prefix) -> str: # pragma: win32 no cover
54 md5sum = md5(os.path.basename(prefix.prefix_dir)).lower()
55 return f'pre-commit-{md5sum}'
56
57
58 def build_docker_image(
59 prefix: Prefix,
60 *,
61 pull: bool,
62 ) -> None: # pragma: win32 no cover
63 cmd: Tuple[str, ...] = (
64 'docker', 'build',
65 '--tag', docker_tag(prefix),
66 '--label', PRE_COMMIT_LABEL,
67 )
68 if pull:
69 cmd += ('--pull',)
70 # This must come last for old versions of docker. See #477
71 cmd += ('.',)
72 helpers.run_setup_cmd(prefix, cmd)
73
74
75 def install_environment(
76 prefix: Prefix, version: str, additional_dependencies: Sequence[str],
77 ) -> None: # pragma: win32 no cover
78 helpers.assert_version_default('docker', version)
79 helpers.assert_no_additional_deps('docker', additional_dependencies)
80
81 directory = prefix.path(
82 helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),
83 )
84
85 # Docker doesn't really have relevant disk environment, but pre-commit
86 # still needs to cleanup its state files on failure
87 with clean_path_on_failure(directory):
88 build_docker_image(prefix, pull=True)
89 os.mkdir(directory)
90
91
92 def get_docker_user() -> Tuple[str, ...]: # pragma: win32 no cover
93 try:
94 return ('-u', f'{os.getuid()}:{os.getgid()}')
95 except AttributeError:
96 return ()
97
98
99 def docker_cmd() -> Tuple[str, ...]: # pragma: win32 no cover
100 return (
101 'docker', 'run',
102 '--rm',
103 *get_docker_user(),
104 # https://docs.docker.com/engine/reference/commandline/run/#mount-volumes-from-container-volumes-from
105 # The `Z` option tells Docker to label the content with a private
106 # unshared label. Only the current container can use a private volume.
107 '-v', f'{_get_docker_path(os.getcwd())}:/src:rw,Z',
108 '--workdir', '/src',
109 )
110
111
112 def run_hook(
113 hook: Hook,
114 file_args: Sequence[str],
115 color: bool,
116 ) -> Tuple[int, bytes]: # pragma: win32 no cover
117 # Rebuild the docker image in case it has gone missing, as many people do
118 # automated cleanup of docker images.
119 build_docker_image(hook.prefix, pull=False)
120
121 entry_exe, *cmd_rest = hook.cmd
122
123 entry_tag = ('--entrypoint', entry_exe, docker_tag(hook.prefix))
124 cmd = (*docker_cmd(), *entry_tag, *cmd_rest)
125 return helpers.run_xargs(hook, cmd, file_args, color=color)
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/languages/docker.py b/pre_commit/languages/docker.py
--- a/pre_commit/languages/docker.py
+++ b/pre_commit/languages/docker.py
@@ -1,7 +1,6 @@
import hashlib
import json
import os
-import socket
from typing import Sequence
from typing import Tuple
@@ -26,12 +25,24 @@
return False
+def _get_container_id() -> str:
+ # It's assumed that we already check /proc/1/cgroup in _is_in_docker. The
+ # cpuset cgroup controller existed since cgroups were introduced so this
+ # way of getting the container ID is pretty reliable.
+ with open('/proc/1/cgroup', 'rb') as f:
+ for line in f.readlines():
+ if line.split(b':')[1] == b'cpuset':
+ return os.path.basename(line.split(b':')[2]).strip().decode()
+ raise RuntimeError('Failed to find the container ID in /proc/1/cgroup.')
+
+
def _get_docker_path(path: str) -> str:
if not _is_in_docker():
return path
- hostname = socket.gethostname()
- _, out, _ = cmd_output_b('docker', 'inspect', hostname)
+ container_id = _get_container_id()
+
+ _, out, _ = cmd_output_b('docker', 'inspect', container_id)
container, = json.loads(out)
for mount in container['Mounts']:
|
{"golden_diff": "diff --git a/pre_commit/languages/docker.py b/pre_commit/languages/docker.py\n--- a/pre_commit/languages/docker.py\n+++ b/pre_commit/languages/docker.py\n@@ -1,7 +1,6 @@\n import hashlib\n import json\n import os\n-import socket\n from typing import Sequence\n from typing import Tuple\n \n@@ -26,12 +25,24 @@\n return False\n \n \n+def _get_container_id() -> str:\n+ # It's assumed that we already check /proc/1/cgroup in _is_in_docker. The\n+ # cpuset cgroup controller existed since cgroups were introduced so this\n+ # way of getting the container ID is pretty reliable.\n+ with open('/proc/1/cgroup', 'rb') as f:\n+ for line in f.readlines():\n+ if line.split(b':')[1] == b'cpuset':\n+ return os.path.basename(line.split(b':')[2]).strip().decode()\n+ raise RuntimeError('Failed to find the container ID in /proc/1/cgroup.')\n+\n+\n def _get_docker_path(path: str) -> str:\n if not _is_in_docker():\n return path\n- hostname = socket.gethostname()\n \n- _, out, _ = cmd_output_b('docker', 'inspect', hostname)\n+ container_id = _get_container_id()\n+\n+ _, out, _ = cmd_output_b('docker', 'inspect', container_id)\n \n container, = json.loads(out)\n for mount in container['Mounts']:\n", "issue": "Failure to get the container id\nWith #1888 in place, there's a regression when running inside a container. There's an assumption in https://github.com/pre-commit/pre-commit/blob/master/pre_commit/languages/docker.py#L32 that the hostname is the container ID which is not always the case (it's easy enough to set a different hostname with `docker run --hostname foo`). It causes the `docker inspect` command that follows to fail. A more reliable way to get the container id is from `/proc/1/cpuset` or from the first line in `/proc/1/cgroup` which is already checked in `_is_in_docker`.\r\n\r\nThanks @asottile and @okainov for your work on #1387 and pre-commit in general.\n", "before_files": [{"content": "import hashlib\nimport json\nimport os\nimport socket\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'docker'\nPRE_COMMIT_LABEL = 'PRE_COMMIT'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef _is_in_docker() -> bool:\n try:\n with open('/proc/1/cgroup', 'rb') as f:\n return b'docker' in f.read()\n except FileNotFoundError:\n return False\n\n\ndef _get_docker_path(path: str) -> str:\n if not _is_in_docker():\n return path\n hostname = socket.gethostname()\n\n _, out, _ = cmd_output_b('docker', 'inspect', hostname)\n\n container, = json.loads(out)\n for mount in container['Mounts']:\n src_path = mount['Source']\n to_path = mount['Destination']\n if os.path.commonpath((path, to_path)) == to_path:\n # So there is something in common,\n # and we can proceed remapping it\n return path.replace(to_path, src_path)\n # we're in Docker, but the path is not mounted, cannot really do anything,\n # so fall back to original path\n return path\n\n\ndef md5(s: str) -> str: # pragma: win32 no cover\n return hashlib.md5(s.encode()).hexdigest()\n\n\ndef docker_tag(prefix: Prefix) -> str: # pragma: win32 no cover\n md5sum = md5(os.path.basename(prefix.prefix_dir)).lower()\n return f'pre-commit-{md5sum}'\n\n\ndef build_docker_image(\n prefix: Prefix,\n *,\n pull: bool,\n) -> None: # pragma: win32 no 
cover\n cmd: Tuple[str, ...] = (\n 'docker', 'build',\n '--tag', docker_tag(prefix),\n '--label', PRE_COMMIT_LABEL,\n )\n if pull:\n cmd += ('--pull',)\n # This must come last for old versions of docker. See #477\n cmd += ('.',)\n helpers.run_setup_cmd(prefix, cmd)\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None: # pragma: win32 no cover\n helpers.assert_version_default('docker', version)\n helpers.assert_no_additional_deps('docker', additional_dependencies)\n\n directory = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n\n # Docker doesn't really have relevant disk environment, but pre-commit\n # still needs to cleanup its state files on failure\n with clean_path_on_failure(directory):\n build_docker_image(prefix, pull=True)\n os.mkdir(directory)\n\n\ndef get_docker_user() -> Tuple[str, ...]: # pragma: win32 no cover\n try:\n return ('-u', f'{os.getuid()}:{os.getgid()}')\n except AttributeError:\n return ()\n\n\ndef docker_cmd() -> Tuple[str, ...]: # pragma: win32 no cover\n return (\n 'docker', 'run',\n '--rm',\n *get_docker_user(),\n # https://docs.docker.com/engine/reference/commandline/run/#mount-volumes-from-container-volumes-from\n # The `Z` option tells Docker to label the content with a private\n # unshared label. Only the current container can use a private volume.\n '-v', f'{_get_docker_path(os.getcwd())}:/src:rw,Z',\n '--workdir', '/src',\n )\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]: # pragma: win32 no cover\n # Rebuild the docker image in case it has gone missing, as many people do\n # automated cleanup of docker images.\n build_docker_image(hook.prefix, pull=False)\n\n entry_exe, *cmd_rest = hook.cmd\n\n entry_tag = ('--entrypoint', entry_exe, docker_tag(hook.prefix))\n cmd = (*docker_cmd(), *entry_tag, *cmd_rest)\n return helpers.run_xargs(hook, cmd, file_args, color=color)\n", "path": "pre_commit/languages/docker.py"}], "after_files": [{"content": "import hashlib\nimport json\nimport os\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'docker'\nPRE_COMMIT_LABEL = 'PRE_COMMIT'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef _is_in_docker() -> bool:\n try:\n with open('/proc/1/cgroup', 'rb') as f:\n return b'docker' in f.read()\n except FileNotFoundError:\n return False\n\n\ndef _get_container_id() -> str:\n # It's assumed that we already check /proc/1/cgroup in _is_in_docker. 
The\n # cpuset cgroup controller existed since cgroups were introduced so this\n # way of getting the container ID is pretty reliable.\n with open('/proc/1/cgroup', 'rb') as f:\n for line in f.readlines():\n if line.split(b':')[1] == b'cpuset':\n return os.path.basename(line.split(b':')[2]).strip().decode()\n raise RuntimeError('Failed to find the container ID in /proc/1/cgroup.')\n\n\ndef _get_docker_path(path: str) -> str:\n if not _is_in_docker():\n return path\n\n container_id = _get_container_id()\n\n _, out, _ = cmd_output_b('docker', 'inspect', container_id)\n\n container, = json.loads(out)\n for mount in container['Mounts']:\n src_path = mount['Source']\n to_path = mount['Destination']\n if os.path.commonpath((path, to_path)) == to_path:\n # So there is something in common,\n # and we can proceed remapping it\n return path.replace(to_path, src_path)\n # we're in Docker, but the path is not mounted, cannot really do anything,\n # so fall back to original path\n return path\n\n\ndef md5(s: str) -> str: # pragma: win32 no cover\n return hashlib.md5(s.encode()).hexdigest()\n\n\ndef docker_tag(prefix: Prefix) -> str: # pragma: win32 no cover\n md5sum = md5(os.path.basename(prefix.prefix_dir)).lower()\n return f'pre-commit-{md5sum}'\n\n\ndef build_docker_image(\n prefix: Prefix,\n *,\n pull: bool,\n) -> None: # pragma: win32 no cover\n cmd: Tuple[str, ...] = (\n 'docker', 'build',\n '--tag', docker_tag(prefix),\n '--label', PRE_COMMIT_LABEL,\n )\n if pull:\n cmd += ('--pull',)\n # This must come last for old versions of docker. See #477\n cmd += ('.',)\n helpers.run_setup_cmd(prefix, cmd)\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None: # pragma: win32 no cover\n helpers.assert_version_default('docker', version)\n helpers.assert_no_additional_deps('docker', additional_dependencies)\n\n directory = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n\n # Docker doesn't really have relevant disk environment, but pre-commit\n # still needs to cleanup its state files on failure\n with clean_path_on_failure(directory):\n build_docker_image(prefix, pull=True)\n os.mkdir(directory)\n\n\ndef get_docker_user() -> Tuple[str, ...]: # pragma: win32 no cover\n try:\n return ('-u', f'{os.getuid()}:{os.getgid()}')\n except AttributeError:\n return ()\n\n\ndef docker_cmd() -> Tuple[str, ...]: # pragma: win32 no cover\n return (\n 'docker', 'run',\n '--rm',\n *get_docker_user(),\n # https://docs.docker.com/engine/reference/commandline/run/#mount-volumes-from-container-volumes-from\n # The `Z` option tells Docker to label the content with a private\n # unshared label. Only the current container can use a private volume.\n '-v', f'{_get_docker_path(os.getcwd())}:/src:rw,Z',\n '--workdir', '/src',\n )\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]: # pragma: win32 no cover\n # Rebuild the docker image in case it has gone missing, as many people do\n # automated cleanup of docker images.\n build_docker_image(hook.prefix, pull=False)\n\n entry_exe, *cmd_rest = hook.cmd\n\n entry_tag = ('--entrypoint', entry_exe, docker_tag(hook.prefix))\n cmd = (*docker_cmd(), *entry_tag, *cmd_rest)\n return helpers.run_xargs(hook, cmd, file_args, color=color)\n", "path": "pre_commit/languages/docker.py"}]}
| 1,658 | 332 |
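For readers skimming the record above, its core is a cgroup-based container-ID lookup inside the patched `docker.py`. The snippet below is only an illustrative, stand-alone restatement of that lookup, assuming the cgroup v1 layout the original code relies on; it is not part of the dataset record itself.

```python
# Illustrative sketch of the /proc/1/cgroup lookup from the record above.
# Assumes cgroup v1, where a line looks like b"7:cpuset:/docker/<container-id>".
import os

def container_id_from_cgroup(path: str = '/proc/1/cgroup') -> str:
    with open(path, 'rb') as f:
        for line in f:
            if line.split(b':')[1] == b'cpuset':
                return os.path.basename(line.split(b':')[2]).strip().decode()
    raise RuntimeError('Failed to find the container ID in /proc/1/cgroup.')
```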
gh_patches_debug_6310
|
rasdani/github-patches
|
git_diff
|
kornia__kornia-1421
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PyPI tarball missing required files
### Describe the bug
The tarball uploaded to PyPI does not contain `requirements/*` files which are required to run the `setup.py` file.
### Reproduction steps
```bash
$ wget https://files.pythonhosted.org/packages/source/k/kornia/kornia-0.6.0.tar.gz
$ tar zxf kornia-0.6.0.tar.gz
$ cd kornia-0.6.0
$ python setup.py install
...
Traceback (most recent call last):
File "setup.py", line 43, in <module>
"x": load_requirements("requirements/x.txt"),
File "setup.py", line 38, in load_requirements
with open(filename) as f:
FileNotFoundError: [Errno 2] No such file or directory: 'requirements/x.txt'
```
### Expected behavior
I would expect the `setup.py` to function correctly. I believe there's a setuptools option to control which files get included in the upload tarball.
### Environment
```shell
- PyTorch Version (e.g., 1.0): 1.10
- OS (e.g., Linux): macOS
- How you installed PyTorch (`conda`, `pip`, source): `spack`
- Build command you used (if compiling from source): `python setup.py install`
- Python version: 3.8.11
- CUDA/cuDNN version: N/A
- GPU models and configuration: N/A
- Any other relevant information: N/A
```
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Welcome to the Kornia setup.py.
2 #
3 import re
4 import sys
5
6 # Make sure that kornia is running on Python 3.6.0 or later
7 # (to avoid running into this bug: https://bugs.python.org/issue29246)
8
9 if sys.version_info < (3, 6, 0):
10 raise RuntimeError("Kornia requires Python 3.6.0 or later.")
11
12
13 from setuptools import find_packages, setup
14
15
16 def find_version(file_path: str) -> str:
17 version_file = open(file_path).read()
18 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
19 if not version_match:
20 raise RuntimeError(f"Unable to find version string in {file_path}")
21 return version_match.group(1)
22
23
24 VERSION = find_version("kornia/_version.py")
25
26
27 # NOTE: kornia MUST only require PyTorch
28 requirements = [
29 'torch>=1.8.1', 'packaging',
30 ]
31
32 # open readme file and set long description
33 with open("README.md", encoding="utf-8") as fh:
34 long_description = fh.read()
35
36
37 def load_requirements(filename: str):
38 with open(filename) as f:
39 return [x.strip() for x in f.readlines() if "-r" != x[0:2]]
40
41
42 requirements_extras = {
43 "x": load_requirements("requirements/x.txt"),
44 "dev": load_requirements("requirements/dev.txt")
45 }
46 requirements_extras["all"] = requirements_extras["x"] + requirements_extras["dev"]
47
48
49 if __name__ == '__main__':
50 setup(
51 name='kornia',
52 version=VERSION,
53 author='Edgar Riba',
54 author_email='[email protected]',
55 url='https://www.kornia.org',
56 download_url='https://github.com/kornia/kornia',
57 license='Apache License 2.0',
58 description='Open Source Differentiable Computer Vision Library for PyTorch',
59 long_description=long_description,
60 long_description_content_type='text/markdown',
61 python_requires='>=3.6',
62 setup_requires=['pytest-runner'],
63 tests_require=['pytest'],
64 packages=find_packages(exclude=('docs', 'test', 'examples')),
65 package_data={"kornia": ["py.typed"]},
66 zip_safe=True,
67 install_requires=requirements,
68 extras_require=requirements_extras,
69 keywords=['computer vision', 'deep learning', 'pytorch'],
70 project_urls={
71 "Bug Tracker": "https://github.com/kornia/kornia/issues",
72 "Documentation": "https://kornia.readthedocs.io/en/latest",
73 "Source Code": "https://github.com/kornia/kornia",
74 },
75 classifiers=[
76 'Environment :: GPU',
77 'Environment :: Console',
78 'Natural Language :: English',
79 # How mature is this project? Common values are
80 # 3 - Alpha, 4 - Beta, 5 - Production/Stable
81 'Development Status :: 4 - Beta',
82 # Indicate who your project is intended for
83 'Intended Audience :: Developers',
84 'Intended Audience :: Education',
85 'Intended Audience :: Science/Research',
86 'Intended Audience :: Information Technology',
87 'Topic :: Software Development :: Libraries',
88 'Topic :: Scientific/Engineering :: Artificial Intelligence',
89 'Topic :: Scientific/Engineering :: Image Processing',
90 # Pick your license as you wish
91 'License :: OSI Approved :: Apache Software License',
92 'Operating System :: OS Independent',
93 # Specify the Python versions you support here. In particular, ensure
94 # that you indicate whether you support Python 2, Python 3 or both.
95 'Programming Language :: Python :: 3',
96 'Programming Language :: Python :: 3.6',
97 'Programming Language :: Python :: 3.7',
98 'Programming Language :: Python :: 3.8',
99 ],
100 )
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -63,6 +63,7 @@
tests_require=['pytest'],
packages=find_packages(exclude=('docs', 'test', 'examples')),
package_data={"kornia": ["py.typed"]},
+ data_files=[('', ['requirements/x.txt', 'requirements/dev.txt'])],
zip_safe=True,
install_requires=requirements,
extras_require=requirements_extras,
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -63,6 +63,7 @@\n tests_require=['pytest'],\n packages=find_packages(exclude=('docs', 'test', 'examples')),\n package_data={\"kornia\": [\"py.typed\"]},\n+ data_files=[('', ['requirements/x.txt', 'requirements/dev.txt'])],\n zip_safe=True,\n install_requires=requirements,\n extras_require=requirements_extras,\n", "issue": "PyPI tarball missing required files\n### Describe the bug\r\n\r\nThe tarball uploaded to PyPI does not contain `requirements/*` files which are required to run the `setup.py` file.\r\n\r\n### Reproduction steps\r\n\r\n```bash\r\n$ wget https://files.pythonhosted.org/packages/source/k/kornia/kornia-0.6.0.tar.gz\r\n$ tar zxf kornia-0.6.0.tar.gz\r\n$ cd kornia-0.6.0\r\n$ python setup.py install\r\n...\r\nTraceback (most recent call last):\r\n File \"setup.py\", line 43, in <module>\r\n \"x\": load_requirements(\"requirements/x.txt\"),\r\n File \"setup.py\", line 38, in load_requirements\r\n with open(filename) as f:\r\nFileNotFoundError: [Errno 2] No such file or directory: 'requirements/x.txt'\r\n```\r\n\r\n\r\n### Expected behavior\r\n\r\nI would expect the `setup.py` to function correctly. I believe there's a setuptools option to control which files get included in the upload tarball.\r\n\r\n### Environment\r\n\r\n```shell\r\n- PyTorch Version (e.g., 1.0): 1.10\r\n- OS (e.g., Linux): macOS\r\n- How you installed PyTorch (`conda`, `pip`, source): `spack`\r\n- Build command you used (if compiling from source): `python setup.py install`\r\n- Python version: 3.8.11\r\n- CUDA/cuDNN version: N/A\r\n- GPU models and configuration: N/A\r\n- Any other relevant information: N/A\r\n```\r\n\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "# Welcome to the Kornia setup.py.\n#\nimport re\nimport sys\n\n# Make sure that kornia is running on Python 3.6.0 or later\n# (to avoid running into this bug: https://bugs.python.org/issue29246)\n\nif sys.version_info < (3, 6, 0):\n raise RuntimeError(\"Kornia requires Python 3.6.0 or later.\")\n\n\nfrom setuptools import find_packages, setup\n\n\ndef find_version(file_path: str) -> str:\n version_file = open(file_path).read()\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M)\n if not version_match:\n raise RuntimeError(f\"Unable to find version string in {file_path}\")\n return version_match.group(1)\n\n\nVERSION = find_version(\"kornia/_version.py\")\n\n\n# NOTE: kornia MUST only require PyTorch\nrequirements = [\n 'torch>=1.8.1', 'packaging',\n]\n\n# open readme file and set long description\nwith open(\"README.md\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\n\ndef load_requirements(filename: str):\n with open(filename) as f:\n return [x.strip() for x in f.readlines() if \"-r\" != x[0:2]]\n\n\nrequirements_extras = {\n \"x\": load_requirements(\"requirements/x.txt\"),\n \"dev\": load_requirements(\"requirements/dev.txt\")\n}\nrequirements_extras[\"all\"] = requirements_extras[\"x\"] + requirements_extras[\"dev\"]\n\n\nif __name__ == '__main__':\n setup(\n name='kornia',\n version=VERSION,\n author='Edgar Riba',\n author_email='[email protected]',\n url='https://www.kornia.org',\n download_url='https://github.com/kornia/kornia',\n license='Apache License 2.0',\n description='Open Source Differentiable Computer Vision Library for PyTorch',\n long_description=long_description,\n long_description_content_type='text/markdown',\n python_requires='>=3.6',\n 
setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n packages=find_packages(exclude=('docs', 'test', 'examples')),\n package_data={\"kornia\": [\"py.typed\"]},\n zip_safe=True,\n install_requires=requirements,\n extras_require=requirements_extras,\n keywords=['computer vision', 'deep learning', 'pytorch'],\n project_urls={\n \"Bug Tracker\": \"https://github.com/kornia/kornia/issues\",\n \"Documentation\": \"https://kornia.readthedocs.io/en/latest\",\n \"Source Code\": \"https://github.com/kornia/kornia\",\n },\n classifiers=[\n 'Environment :: GPU',\n 'Environment :: Console',\n 'Natural Language :: English',\n # How mature is this project? Common values are\n # 3 - Alpha, 4 - Beta, 5 - Production/Stable\n 'Development Status :: 4 - Beta',\n # Indicate who your project is intended for\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Information Technology',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Image Processing',\n # Pick your license as you wish\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n )\n", "path": "setup.py"}], "after_files": [{"content": "# Welcome to the Kornia setup.py.\n#\nimport re\nimport sys\n\n# Make sure that kornia is running on Python 3.6.0 or later\n# (to avoid running into this bug: https://bugs.python.org/issue29246)\n\nif sys.version_info < (3, 6, 0):\n raise RuntimeError(\"Kornia requires Python 3.6.0 or later.\")\n\n\nfrom setuptools import find_packages, setup\n\n\ndef find_version(file_path: str) -> str:\n version_file = open(file_path).read()\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M)\n if not version_match:\n raise RuntimeError(f\"Unable to find version string in {file_path}\")\n return version_match.group(1)\n\n\nVERSION = find_version(\"kornia/_version.py\")\n\n\n# NOTE: kornia MUST only require PyTorch\nrequirements = [\n 'torch>=1.8.1', 'packaging',\n]\n\n# open readme file and set long description\nwith open(\"README.md\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\n\ndef load_requirements(filename: str):\n with open(filename) as f:\n return [x.strip() for x in f.readlines() if \"-r\" != x[0:2]]\n\n\nrequirements_extras = {\n \"x\": load_requirements(\"requirements/x.txt\"),\n \"dev\": load_requirements(\"requirements/dev.txt\")\n}\nrequirements_extras[\"all\"] = requirements_extras[\"x\"] + requirements_extras[\"dev\"]\n\n\nif __name__ == '__main__':\n setup(\n name='kornia',\n version=VERSION,\n author='Edgar Riba',\n author_email='[email protected]',\n url='https://www.kornia.org',\n download_url='https://github.com/kornia/kornia',\n license='Apache License 2.0',\n description='Open Source Differentiable Computer Vision Library for PyTorch',\n long_description=long_description,\n long_description_content_type='text/markdown',\n python_requires='>=3.6',\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n packages=find_packages(exclude=('docs', 'test', 'examples')),\n package_data={\"kornia\": 
[\"py.typed\"]},\n data_files=[('', ['requirements/x.txt', 'requirements/dev.txt'])],\n zip_safe=True,\n install_requires=requirements,\n extras_require=requirements_extras,\n keywords=['computer vision', 'deep learning', 'pytorch'],\n project_urls={\n \"Bug Tracker\": \"https://github.com/kornia/kornia/issues\",\n \"Documentation\": \"https://kornia.readthedocs.io/en/latest\",\n \"Source Code\": \"https://github.com/kornia/kornia\",\n },\n classifiers=[\n 'Environment :: GPU',\n 'Environment :: Console',\n 'Natural Language :: English',\n # How mature is this project? Common values are\n # 3 - Alpha, 4 - Beta, 5 - Production/Stable\n 'Development Status :: 4 - Beta',\n # Indicate who your project is intended for\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Information Technology',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Image Processing',\n # Pick your license as you wish\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n )\n", "path": "setup.py"}]}
| 1,654 | 103 |
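As a side note on the record above: the merged fix ships the requirement files via `data_files`. A defensive variant, shown here only as an illustrative sketch and not as the kornia project's actual code, would let `load_requirements` tolerate a missing `requirements/` directory so an incomplete sdist fails softly instead of raising `FileNotFoundError`.

```python
# Hypothetical defensive variant of load_requirements (not the merged patch).
import os

def load_requirements(filename: str):
    if not os.path.exists(filename):  # requirements/ not shipped in this sdist
        return []
    with open(filename) as f:
        return [x.strip() for x in f.readlines() if x[0:2] != "-r"]
```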
gh_patches_debug_16698
|
rasdani/github-patches
|
git_diff
|
GeotrekCE__Geotrek-admin-2462
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Outdoor - Orientations
- [x] Add the field names in the filters
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mapentity/filters.py`
Content:
```
1 from django.db.models.fields.related import ManyToOneRel
2 from django.conf import settings
3
4 from django_filters import FilterSet, Filter
5 from django_filters.filterset import get_model_field
6 from django.contrib.gis import forms
7
8 from .settings import app_settings, API_SRID
9 from .widgets import HiddenGeometryWidget
10
11
12 class PolygonFilter(Filter):
13
14 field_class = forms.PolygonField
15
16 def __init__(self, *args, **kwargs):
17 kwargs.setdefault('field_name', app_settings['GEOM_FIELD_NAME'])
18 kwargs.setdefault('widget', HiddenGeometryWidget)
19 kwargs.setdefault('lookup_expr', 'intersects')
20 super(PolygonFilter, self).__init__(*args, **kwargs)
21
22
23 class PythonPolygonFilter(PolygonFilter):
24
25 def filter(self, qs, value):
26 if not value:
27 return qs
28 if not value.srid:
29 value.srid = API_SRID
30 value.transform(settings.SRID)
31 filtered = []
32 for o in qs.all():
33 geom = getattr(o, self.field_name)
34 if geom and geom.valid and not geom.empty:
35 if getattr(geom, self.lookup_expr)(value):
36 filtered.append(o.pk)
37 else:
38 filtered.append(o.pk)
39 return qs.filter(pk__in=filtered)
40
41
42 class BaseMapEntityFilterSet(FilterSet):
43 def __init__(self, *args, **kwargs):
44 super(BaseMapEntityFilterSet, self).__init__(*args, **kwargs)
45 self.__bypass_labels()
46
47 def __bypass_labels(self):
48 """
49 These hacks allow to bypass field labels. Using either placeholders,
50 empty choices label, etc. This allows to greatly save space in form layout,
51 which is required for concise filter forms.
52 """
53 for fieldname in self.base_filters.keys():
54 field = self.form.fields[fieldname]
55 if isinstance(field, forms.MultiValueField):
56 for i, widget in enumerate(field.widget.widgets):
57 self.__set_placeholder(field.fields[i], widget)
58 elif isinstance(field, forms.ChoiceField):
59 field.empty_label = field.label
60 self.__set_placeholder(field, field.widget)
61 elif isinstance(field, forms.NullBooleanField):
62 choices = [(u'1', field.label)] + field.widget.choices[1:]
63 field.widget.choices = choices
64 self.__set_placeholder(field, field.widget)
65 else:
66 self.__set_placeholder(field, field.widget)
67
68 def __set_placeholder(self, field, widget):
69 field.help_text = '' # Hide help text
70 widget.attrs['placeholder'] = field.label
71 widget.attrs['data-placeholder'] = field.label
72 widget.attrs['title'] = field.label
73 widget.attrs['data-label'] = field.label
74
75 @classmethod
76 def add_filter(cls, name, filter_=None):
77 field = get_model_field(cls._meta.model, name)
78 if filter_ is None:
79 if isinstance(field, ManyToOneRel):
80 filter_ = cls.filter_for_reverse_field(field, name)
81 else:
82 filter_ = cls.filter_for_field(field, name)
83 cls.base_filters[name] = filter_
84
85 @classmethod
86 def add_filters(cls, filters):
87 for name, filter_ in filters.items():
88 filter_.field_name = name
89 cls.add_filter(name, filter_)
90
91
92 class MapEntityFilterSet(BaseMapEntityFilterSet):
93 bbox = PolygonFilter()
94
95 class Meta:
96 fields = ['bbox']
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mapentity/filters.py b/mapentity/filters.py
--- a/mapentity/filters.py
+++ b/mapentity/filters.py
@@ -2,6 +2,7 @@
from django.conf import settings
from django_filters import FilterSet, Filter
+from django_filters.fields import ChoiceField
from django_filters.filterset import get_model_field
from django.contrib.gis import forms
@@ -42,6 +43,9 @@
class BaseMapEntityFilterSet(FilterSet):
def __init__(self, *args, **kwargs):
super(BaseMapEntityFilterSet, self).__init__(*args, **kwargs)
+ for filter_ in self.filters.values():
+ if filter_.field_class == ChoiceField:
+ filter_.extra.setdefault('empty_label', filter_.label)
self.__bypass_labels()
def __bypass_labels(self):
|
{"golden_diff": "diff --git a/mapentity/filters.py b/mapentity/filters.py\n--- a/mapentity/filters.py\n+++ b/mapentity/filters.py\n@@ -2,6 +2,7 @@\n from django.conf import settings\n \n from django_filters import FilterSet, Filter\n+from django_filters.fields import ChoiceField\n from django_filters.filterset import get_model_field\n from django.contrib.gis import forms\n \n@@ -42,6 +43,9 @@\n class BaseMapEntityFilterSet(FilterSet):\n def __init__(self, *args, **kwargs):\n super(BaseMapEntityFilterSet, self).__init__(*args, **kwargs)\n+ for filter_ in self.filters.values():\n+ if filter_.field_class == ChoiceField:\n+ filter_.extra.setdefault('empty_label', filter_.label)\n self.__bypass_labels()\n \n def __bypass_labels(self):\n", "issue": "Outdoor - Orientations\n- [x] Ajouter les noms des champs dans les filtres\n", "before_files": [{"content": "from django.db.models.fields.related import ManyToOneRel\nfrom django.conf import settings\n\nfrom django_filters import FilterSet, Filter\nfrom django_filters.filterset import get_model_field\nfrom django.contrib.gis import forms\n\nfrom .settings import app_settings, API_SRID\nfrom .widgets import HiddenGeometryWidget\n\n\nclass PolygonFilter(Filter):\n\n field_class = forms.PolygonField\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('field_name', app_settings['GEOM_FIELD_NAME'])\n kwargs.setdefault('widget', HiddenGeometryWidget)\n kwargs.setdefault('lookup_expr', 'intersects')\n super(PolygonFilter, self).__init__(*args, **kwargs)\n\n\nclass PythonPolygonFilter(PolygonFilter):\n\n def filter(self, qs, value):\n if not value:\n return qs\n if not value.srid:\n value.srid = API_SRID\n value.transform(settings.SRID)\n filtered = []\n for o in qs.all():\n geom = getattr(o, self.field_name)\n if geom and geom.valid and not geom.empty:\n if getattr(geom, self.lookup_expr)(value):\n filtered.append(o.pk)\n else:\n filtered.append(o.pk)\n return qs.filter(pk__in=filtered)\n\n\nclass BaseMapEntityFilterSet(FilterSet):\n def __init__(self, *args, **kwargs):\n super(BaseMapEntityFilterSet, self).__init__(*args, **kwargs)\n self.__bypass_labels()\n\n def __bypass_labels(self):\n \"\"\"\n These hacks allow to bypass field labels. Using either placeholders,\n empty choices label, etc. 
This allows to greatly save space in form layout,\n which is required for concise filter forms.\n \"\"\"\n for fieldname in self.base_filters.keys():\n field = self.form.fields[fieldname]\n if isinstance(field, forms.MultiValueField):\n for i, widget in enumerate(field.widget.widgets):\n self.__set_placeholder(field.fields[i], widget)\n elif isinstance(field, forms.ChoiceField):\n field.empty_label = field.label\n self.__set_placeholder(field, field.widget)\n elif isinstance(field, forms.NullBooleanField):\n choices = [(u'1', field.label)] + field.widget.choices[1:]\n field.widget.choices = choices\n self.__set_placeholder(field, field.widget)\n else:\n self.__set_placeholder(field, field.widget)\n\n def __set_placeholder(self, field, widget):\n field.help_text = '' # Hide help text\n widget.attrs['placeholder'] = field.label\n widget.attrs['data-placeholder'] = field.label\n widget.attrs['title'] = field.label\n widget.attrs['data-label'] = field.label\n\n @classmethod\n def add_filter(cls, name, filter_=None):\n field = get_model_field(cls._meta.model, name)\n if filter_ is None:\n if isinstance(field, ManyToOneRel):\n filter_ = cls.filter_for_reverse_field(field, name)\n else:\n filter_ = cls.filter_for_field(field, name)\n cls.base_filters[name] = filter_\n\n @classmethod\n def add_filters(cls, filters):\n for name, filter_ in filters.items():\n filter_.field_name = name\n cls.add_filter(name, filter_)\n\n\nclass MapEntityFilterSet(BaseMapEntityFilterSet):\n bbox = PolygonFilter()\n\n class Meta:\n fields = ['bbox']\n", "path": "mapentity/filters.py"}], "after_files": [{"content": "from django.db.models.fields.related import ManyToOneRel\nfrom django.conf import settings\n\nfrom django_filters import FilterSet, Filter\nfrom django_filters.fields import ChoiceField\nfrom django_filters.filterset import get_model_field\nfrom django.contrib.gis import forms\n\nfrom .settings import app_settings, API_SRID\nfrom .widgets import HiddenGeometryWidget\n\n\nclass PolygonFilter(Filter):\n\n field_class = forms.PolygonField\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('field_name', app_settings['GEOM_FIELD_NAME'])\n kwargs.setdefault('widget', HiddenGeometryWidget)\n kwargs.setdefault('lookup_expr', 'intersects')\n super(PolygonFilter, self).__init__(*args, **kwargs)\n\n\nclass PythonPolygonFilter(PolygonFilter):\n\n def filter(self, qs, value):\n if not value:\n return qs\n if not value.srid:\n value.srid = API_SRID\n value.transform(settings.SRID)\n filtered = []\n for o in qs.all():\n geom = getattr(o, self.field_name)\n if geom and geom.valid and not geom.empty:\n if getattr(geom, self.lookup_expr)(value):\n filtered.append(o.pk)\n else:\n filtered.append(o.pk)\n return qs.filter(pk__in=filtered)\n\n\nclass BaseMapEntityFilterSet(FilterSet):\n def __init__(self, *args, **kwargs):\n super(BaseMapEntityFilterSet, self).__init__(*args, **kwargs)\n for filter_ in self.filters.values():\n if filter_.field_class == ChoiceField:\n filter_.extra.setdefault('empty_label', filter_.label)\n self.__bypass_labels()\n\n def __bypass_labels(self):\n \"\"\"\n These hacks allow to bypass field labels. Using either placeholders,\n empty choices label, etc. 
This allows to greatly save space in form layout,\n which is required for concise filter forms.\n \"\"\"\n for fieldname in self.base_filters.keys():\n field = self.form.fields[fieldname]\n if isinstance(field, forms.MultiValueField):\n for i, widget in enumerate(field.widget.widgets):\n self.__set_placeholder(field.fields[i], widget)\n elif isinstance(field, forms.ChoiceField):\n field.empty_label = field.label\n self.__set_placeholder(field, field.widget)\n elif isinstance(field, forms.NullBooleanField):\n choices = [(u'1', field.label)] + field.widget.choices[1:]\n field.widget.choices = choices\n self.__set_placeholder(field, field.widget)\n else:\n self.__set_placeholder(field, field.widget)\n\n def __set_placeholder(self, field, widget):\n field.help_text = '' # Hide help text\n widget.attrs['placeholder'] = field.label\n widget.attrs['data-placeholder'] = field.label\n widget.attrs['title'] = field.label\n widget.attrs['data-label'] = field.label\n\n @classmethod\n def add_filter(cls, name, filter_=None):\n field = get_model_field(cls._meta.model, name)\n if filter_ is None:\n if isinstance(field, ManyToOneRel):\n filter_ = cls.filter_for_reverse_field(field, name)\n else:\n filter_ = cls.filter_for_field(field, name)\n cls.base_filters[name] = filter_\n\n @classmethod\n def add_filters(cls, filters):\n for name, filter_ in filters.items():\n filter_.field_name = name\n cls.add_filter(name, filter_)\n\n\nclass MapEntityFilterSet(BaseMapEntityFilterSet):\n bbox = PolygonFilter()\n\n class Meta:\n fields = ['bbox']\n", "path": "mapentity/filters.py"}]}
| 1,188 | 186 |
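To restate the idea behind the patch above outside of the class: each choice-based filter gets an `empty_label` equal to its own label, so the field name shows up as the placeholder choice in the rendered filter form. The helper below is only an illustrative sketch assuming `django_filters` is installed; it is not part of the Geotrek codebase.

```python
# Illustrative helper mirroring the patch above (assumes django_filters).
from django_filters.fields import ChoiceField

def label_empty_choices(filterset):
    for filter_ in filterset.filters.values():
        if filter_.field_class == ChoiceField:
            # Reuse the filter's own label as the empty choice, e.g. "Orientation".
            filter_.extra.setdefault('empty_label', filter_.label)
```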
gh_patches_debug_27824
|
rasdani/github-patches
|
git_diff
|
pytorch__ignite-976
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve Frequency
## 🚀 Feature
If we would like to log datapoints/second every 100 iterations, we most probably do like this
```python
wps_metric = Frequency(output_transform=lambda x: x['ntokens'])
wps_metric.attach(trainer, name='wps', event_name=Events.ITERATION_COMPLETED(every=100))
```
however, it seems like this won't take into account all other iterations while computing the total number of tokens.
```python
class Frequency(Metric):
....
def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):
engine.add_event_handler(Events.EPOCH_STARTED, self.started)
engine.add_event_handler(event_name, self.iteration_completed)
engine.add_event_handler(event_name, self.completed, name)
```
IMO, should be
```python
class Frequency(Metric):
....
def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):
engine.add_event_handler(Events.EPOCH_STARTED, self.started)
engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)
engine.add_event_handler(event_name, self.completed, name)
```
cc @erip
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/metrics/frequency.py`
Content:
```
1 import torch
2 import torch.distributed as dist
3
4 from ignite.engine import Events
5 from ignite.metrics import Metric
6 from ignite.handlers.timing import Timer
7 from ignite.metrics.metric import sync_all_reduce, reinit__is_reduced
8
9
10 class Frequency(Metric):
11 """Provides metrics for the number of examples processed per second.
12
13 Examples:
14
15 .. code-block:: python
16
17 # Compute number of tokens processed
18 wps_metric = Frequency(output_transform=lambda x: x['ntokens'])
19 wps_metric.attach(trainer, name='wps')
20 # Logging with TQDM
21 ProgressBar(persist=True).attach(trainer, metric_names=['wps'])
22 # Progress bar will looks like
23 # Epoch [2/10]: [12/24] 50%|█████ , wps=400 [00:17<1:23]
24 """
25
26 def __init__(self, output_transform=lambda x: x, device=None):
27 self._timer = None
28 self._acc = None
29 self._n = None
30 self._elapsed = None
31 super(Frequency, self).__init__(output_transform=output_transform, device=device)
32
33 @reinit__is_reduced
34 def reset(self):
35 self._timer = Timer()
36 self._acc = 0
37 self._n = 0
38 self._elapsed = 0.0
39 super(Frequency, self).reset()
40
41 @reinit__is_reduced
42 def update(self, output):
43 self._acc += output
44 self._n = self._acc
45 self._elapsed = torch.tensor(self._timer.value(), device=self._device)
46
47 @sync_all_reduce("_n", "_elapsed")
48 def compute(self):
49 time_divisor = 1.0
50
51 if dist.is_available() and dist.is_initialized():
52 time_divisor *= dist.get_world_size()
53
54 # Returns the average processed objects per second across all workers
55 return self._n / self._elapsed.item() * time_divisor
56
57 def completed(self, engine, name):
58 engine.state.metrics[name] = int(self.compute())
59
60 def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):
61 engine.add_event_handler(Events.EPOCH_STARTED, self.started)
62 engine.add_event_handler(event_name, self.iteration_completed)
63 engine.add_event_handler(event_name, self.completed, name)
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ignite/metrics/frequency.py b/ignite/metrics/frequency.py
--- a/ignite/metrics/frequency.py
+++ b/ignite/metrics/frequency.py
@@ -19,8 +19,21 @@
wps_metric.attach(trainer, name='wps')
# Logging with TQDM
ProgressBar(persist=True).attach(trainer, metric_names=['wps'])
- # Progress bar will looks like
+ # Progress bar will look like
# Epoch [2/10]: [12/24] 50%|█████ , wps=400 [00:17<1:23]
+
+
+ To compute examples processed per second every 50th iteration:
+
+ .. code-block:: python
+
+ # Compute number of tokens processed
+ wps_metric = Frequency(output_transform=lambda x: x['ntokens'])
+ wps_metric.attach(trainer, name='wps', event_name=Events.ITERATION_COMPLETED(every=50))
+ # Logging with TQDM
+ ProgressBar(persist=True).attach(trainer, metric_names=['wps'])
+ # Progress bar will look like
+ # Epoch [2/10]: [50/100] 50%|█████ , wps=400 [00:17<00:35]
"""
def __init__(self, output_transform=lambda x: x, device=None):
@@ -59,5 +72,5 @@
def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):
engine.add_event_handler(Events.EPOCH_STARTED, self.started)
- engine.add_event_handler(event_name, self.iteration_completed)
+ engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)
engine.add_event_handler(event_name, self.completed, name)
|
{"golden_diff": "diff --git a/ignite/metrics/frequency.py b/ignite/metrics/frequency.py\n--- a/ignite/metrics/frequency.py\n+++ b/ignite/metrics/frequency.py\n@@ -19,8 +19,21 @@\n wps_metric.attach(trainer, name='wps')\n # Logging with TQDM\n ProgressBar(persist=True).attach(trainer, metric_names=['wps'])\n- # Progress bar will looks like\n+ # Progress bar will look like\n # Epoch [2/10]: [12/24] 50%|\u2588\u2588\u2588\u2588\u2588 , wps=400 [00:17<1:23]\n+\n+\n+ To compute examples processed per second every 50th iteration:\n+\n+ .. code-block:: python\n+\n+ # Compute number of tokens processed\n+ wps_metric = Frequency(output_transform=lambda x: x['ntokens'])\n+ wps_metric.attach(trainer, name='wps', event_name=Events.ITERATION_COMPLETED(every=50))\n+ # Logging with TQDM\n+ ProgressBar(persist=True).attach(trainer, metric_names=['wps'])\n+ # Progress bar will look like\n+ # Epoch [2/10]: [50/100] 50%|\u2588\u2588\u2588\u2588\u2588 , wps=400 [00:17<00:35]\n \"\"\"\n \n def __init__(self, output_transform=lambda x: x, device=None):\n@@ -59,5 +72,5 @@\n \n def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):\n engine.add_event_handler(Events.EPOCH_STARTED, self.started)\n- engine.add_event_handler(event_name, self.iteration_completed)\n+ engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)\n engine.add_event_handler(event_name, self.completed, name)\n", "issue": "Improve Frequency\n## \ud83d\ude80 Feature\r\n\r\nIf we would like to log datapoints/second every 100 iterations, we most probably do like this \r\n```python\r\nwps_metric = Frequency(output_transformer=lambda x: x['ntokens'])\r\nwps_metric.attach(trainer, name='wps', event_name=Events.ITERATION_COMPLETED(every=100))\r\n```\r\nhowever, seems like this wont take into account all other iterations while computing the total number of tokens.\r\n```python\r\nclass Frequency(Metric):\r\n ....\r\n def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):\r\n engine.add_event_handler(Events.EPOCH_STARTED, self.started)\r\n engine.add_event_handler(event_name, self.iteration_completed)\r\n engine.add_event_handler(event_name, self.completed, name)\r\n```\r\nIMO, should be \r\n```python\r\nclass Frequency(Metric):\r\n ....\r\n def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):\r\n engine.add_event_handler(Events.EPOCH_STARTED, self.started)\r\n engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)\r\n engine.add_event_handler(event_name, self.completed, name)\r\n```\r\n\r\ncc @erip \r\n\nImprove Frequency\n## \ud83d\ude80 Feature\r\n\r\nIf we would like to log datapoints/second every 100 iterations, we most probably do like this \r\n```python\r\nwps_metric = Frequency(output_transformer=lambda x: x['ntokens'])\r\nwps_metric.attach(trainer, name='wps', event_name=Events.ITERATION_COMPLETED(every=100))\r\n```\r\nhowever, seems like this wont take into account all other iterations while computing the total number of tokens.\r\n```python\r\nclass Frequency(Metric):\r\n ....\r\n def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):\r\n engine.add_event_handler(Events.EPOCH_STARTED, self.started)\r\n engine.add_event_handler(event_name, self.iteration_completed)\r\n engine.add_event_handler(event_name, self.completed, name)\r\n```\r\nIMO, should be \r\n```python\r\nclass Frequency(Metric):\r\n ....\r\n def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):\r\n engine.add_event_handler(Events.EPOCH_STARTED, 
self.started)\r\n engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)\r\n engine.add_event_handler(event_name, self.completed, name)\r\n```\r\n\r\ncc @erip \r\n\n", "before_files": [{"content": "import torch\nimport torch.distributed as dist\n\nfrom ignite.engine import Events\nfrom ignite.metrics import Metric\nfrom ignite.handlers.timing import Timer\nfrom ignite.metrics.metric import sync_all_reduce, reinit__is_reduced\n\n\nclass Frequency(Metric):\n \"\"\"Provides metrics for the number of examples processed per second.\n\n Examples:\n\n .. code-block:: python\n\n # Compute number of tokens processed\n wps_metric = Frequency(output_transform=lambda x: x['ntokens'])\n wps_metric.attach(trainer, name='wps')\n # Logging with TQDM\n ProgressBar(persist=True).attach(trainer, metric_names=['wps'])\n # Progress bar will looks like\n # Epoch [2/10]: [12/24] 50%|\u2588\u2588\u2588\u2588\u2588 , wps=400 [00:17<1:23]\n \"\"\"\n\n def __init__(self, output_transform=lambda x: x, device=None):\n self._timer = None\n self._acc = None\n self._n = None\n self._elapsed = None\n super(Frequency, self).__init__(output_transform=output_transform, device=device)\n\n @reinit__is_reduced\n def reset(self):\n self._timer = Timer()\n self._acc = 0\n self._n = 0\n self._elapsed = 0.0\n super(Frequency, self).reset()\n\n @reinit__is_reduced\n def update(self, output):\n self._acc += output\n self._n = self._acc\n self._elapsed = torch.tensor(self._timer.value(), device=self._device)\n\n @sync_all_reduce(\"_n\", \"_elapsed\")\n def compute(self):\n time_divisor = 1.0\n\n if dist.is_available() and dist.is_initialized():\n time_divisor *= dist.get_world_size()\n\n # Returns the average processed objects per second across all workers\n return self._n / self._elapsed.item() * time_divisor\n\n def completed(self, engine, name):\n engine.state.metrics[name] = int(self.compute())\n\n def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):\n engine.add_event_handler(Events.EPOCH_STARTED, self.started)\n engine.add_event_handler(event_name, self.iteration_completed)\n engine.add_event_handler(event_name, self.completed, name)\n", "path": "ignite/metrics/frequency.py"}], "after_files": [{"content": "import torch\nimport torch.distributed as dist\n\nfrom ignite.engine import Events\nfrom ignite.metrics import Metric\nfrom ignite.handlers.timing import Timer\nfrom ignite.metrics.metric import sync_all_reduce, reinit__is_reduced\n\n\nclass Frequency(Metric):\n \"\"\"Provides metrics for the number of examples processed per second.\n\n Examples:\n\n .. code-block:: python\n\n # Compute number of tokens processed\n wps_metric = Frequency(output_transform=lambda x: x['ntokens'])\n wps_metric.attach(trainer, name='wps')\n # Logging with TQDM\n ProgressBar(persist=True).attach(trainer, metric_names=['wps'])\n # Progress bar will look like\n # Epoch [2/10]: [12/24] 50%|\u2588\u2588\u2588\u2588\u2588 , wps=400 [00:17<1:23]\n\n\n To compute examples processed per second every 50th iteration:\n\n .. 
code-block:: python\n\n # Compute number of tokens processed\n wps_metric = Frequency(output_transform=lambda x: x['ntokens'])\n wps_metric.attach(trainer, name='wps', event_name=Events.ITERATION_COMPLETED(every=50))\n # Logging with TQDM\n ProgressBar(persist=True).attach(trainer, metric_names=['wps'])\n # Progress bar will look like\n # Epoch [2/10]: [50/100] 50%|\u2588\u2588\u2588\u2588\u2588 , wps=400 [00:17<00:35]\n \"\"\"\n\n def __init__(self, output_transform=lambda x: x, device=None):\n self._timer = None\n self._acc = None\n self._n = None\n self._elapsed = None\n super(Frequency, self).__init__(output_transform=output_transform, device=device)\n\n @reinit__is_reduced\n def reset(self):\n self._timer = Timer()\n self._acc = 0\n self._n = 0\n self._elapsed = 0.0\n super(Frequency, self).reset()\n\n @reinit__is_reduced\n def update(self, output):\n self._acc += output\n self._n = self._acc\n self._elapsed = torch.tensor(self._timer.value(), device=self._device)\n\n @sync_all_reduce(\"_n\", \"_elapsed\")\n def compute(self):\n time_divisor = 1.0\n\n if dist.is_available() and dist.is_initialized():\n time_divisor *= dist.get_world_size()\n\n # Returns the average processed objects per second across all workers\n return self._n / self._elapsed.item() * time_divisor\n\n def completed(self, engine, name):\n engine.state.metrics[name] = int(self.compute())\n\n def attach(self, engine, name, event_name=Events.ITERATION_COMPLETED):\n engine.add_event_handler(Events.EPOCH_STARTED, self.started)\n engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)\n engine.add_event_handler(event_name, self.completed, name)\n", "path": "ignite/metrics/frequency.py"}]}
| 1,421 | 425 |
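For completeness, here is a small usage sketch of the behaviour the patch above enables: `update` now runs on every iteration, while the computed value is written to `engine.state.metrics` only on the filtered event passed to `attach`. The toy step function and token counts are made up for illustration, and it assumes `Frequency` is importable from `ignite.metrics` as in the record's docstring.

```python
# Toy usage sketch (assumes an ignite version containing the fix above).
from ignite.engine import Engine, Events
from ignite.metrics import Frequency

def step(engine, batch):
    return {'ntokens': len(batch)}  # made-up token count per batch

trainer = Engine(step)
wps = Frequency(output_transform=lambda x: x['ntokens'])
wps.attach(trainer, name='wps', event_name=Events.ITERATION_COMPLETED(every=100))
```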
gh_patches_debug_24002
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-831
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_K8S_31 failure with RuntimeDefault configured for workloads
**Describe the bug**
Please see #710
**To Reproduce**
Please see #710
**Expected behavior**
Please see #710
**Additional context**
The bug reported in #710 needs to be fixed for workloads in https://github.com/bridgecrewio/checkov/blob/master/checkov/kubernetes/checks/Seccomp.py#L44:L48 as well.
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
....
spec:
...
...
template:
...
...
spec:
....
....
securityContext:
allowPrivilegeEscalation: false
seccompProfile:
type: RuntimeDefault
```
**Related PRs**
#711
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/kubernetes/checks/Seccomp.py`
Content:
```
1 import dpath
2
3 from checkov.common.models.enums import CheckCategories, CheckResult
4 from checkov.kubernetes.base_spec_check import BaseK8Check
5
6
7 class Seccomp(BaseK8Check):
8
9 def __init__(self):
10 # CIS-1.5 5.7.2
11 name = "Ensure that the seccomp profile is set to docker/default or runtime/default"
12 id = "CKV_K8S_31"
13 # Location: Pod.metadata.annotations.seccomp.security.alpha.kubernetes.io/pod
14 # Location: CronJob.spec.jobTemplate.spec.template.metadata.annotations.seccomp.security.alpha.kubernetes.io/pod
15 # Location: *.spec.template.metadata.annotations.seccomp.security.alpha.kubernetes.io/pod
16 # Location: *.spec.securityContext.seccompProfile.type
17 supported_kind = ['Pod', 'Deployment', 'DaemonSet', 'StatefulSet', 'ReplicaSet', 'ReplicationController', 'Job', 'CronJob']
18 categories = [CheckCategories.KUBERNETES]
19 super().__init__(name=name, id=id, categories=categories, supported_entities=supported_kind)
20
21 def get_resource_id(self, conf):
22 if "namespace" in conf["metadata"]:
23 return "{}.{}.{}".format(conf["kind"], conf["metadata"]["name"], conf["metadata"]["namespace"])
24 else:
25 return "{}.{}.default".format(conf["kind"], conf["metadata"]["name"])
26
27 def scan_spec_conf(self, conf):
28 metadata = {}
29
30 if conf['kind'] == 'Pod':
31 security_profile = dpath.search(conf, 'spec/securityContext/seccompProfile/type')
32 if security_profile:
33 security_profile = dpath.get(conf, 'spec/securityContext/seccompProfile/type')
34 return CheckResult.PASSED if security_profile == 'RuntimeDefault' else CheckResult.FAILED
35 if "metadata" in conf:
36 metadata = conf["metadata"]
37 elif conf['kind'] == 'CronJob':
38 if "spec" in conf:
39 if "jobTemplate" in conf["spec"]:
40 if "spec" in conf["spec"]["jobTemplate"]:
41 if "template" in conf["spec"]["jobTemplate"]["spec"]:
42 if "metadata" in conf["spec"]["jobTemplate"]["spec"]["template"]:
43 metadata = conf["spec"]["jobTemplate"]["spec"]["template"]["metadata"]
44 else:
45 if "spec" in conf:
46 if "template" in conf["spec"]:
47 if "metadata" in conf["spec"]["template"]:
48 metadata = conf["spec"]["template"]["metadata"]
49
50 if metadata:
51 if "annotations" in metadata and isinstance(metadata['annotations'], dict):
52 if "seccomp.security.alpha.kubernetes.io/pod" in metadata["annotations"]:
53 if ("docker/default" in metadata["annotations"]["seccomp.security.alpha.kubernetes.io/pod"] or
54 "runtime/default" in metadata["annotations"]["seccomp.security.alpha.kubernetes.io/pod"]):
55 return CheckResult.PASSED
56 return CheckResult.FAILED
57
58
59 check = Seccomp()
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checkov/kubernetes/checks/Seccomp.py b/checkov/kubernetes/checks/Seccomp.py
--- a/checkov/kubernetes/checks/Seccomp.py
+++ b/checkov/kubernetes/checks/Seccomp.py
@@ -34,6 +34,20 @@
return CheckResult.PASSED if security_profile == 'RuntimeDefault' else CheckResult.FAILED
if "metadata" in conf:
metadata = conf["metadata"]
+ if conf['kind'] == 'Deployment':
+ security_profile = dpath.search(conf, 'spec/template/spec/securityContext/seccompProfile/type')
+ if security_profile:
+ security_profile = dpath.get(conf, 'spec/template/spec/securityContext/seccompProfile/type')
+ return CheckResult.PASSED if security_profile == 'RuntimeDefault' else CheckResult.FAILED
+ if "metadata" in conf:
+ metadata = conf["metadata"]
+ if conf['kind'] == 'StatefulSet':
+ security_profile = dpath.search(conf, 'spec/template/spec/securityContext/seccompProfile/type')
+ if security_profile:
+ security_profile = dpath.get(conf, 'spec/template/spec/securityContext/seccompProfile/type')
+ return CheckResult.PASSED if security_profile == 'RuntimeDefault' else CheckResult.FAILED
+ if "metadata" in conf:
+ metadata = conf["metadata"]
elif conf['kind'] == 'CronJob':
if "spec" in conf:
if "jobTemplate" in conf["spec"]:
|
{"golden_diff": "diff --git a/checkov/kubernetes/checks/Seccomp.py b/checkov/kubernetes/checks/Seccomp.py\n--- a/checkov/kubernetes/checks/Seccomp.py\n+++ b/checkov/kubernetes/checks/Seccomp.py\n@@ -34,6 +34,20 @@\n return CheckResult.PASSED if security_profile == 'RuntimeDefault' else CheckResult.FAILED\n if \"metadata\" in conf:\n metadata = conf[\"metadata\"]\n+ if conf['kind'] == 'Deployment':\n+ security_profile = dpath.search(conf, 'spec/template/spec/securityContext/seccompProfile/type')\n+ if security_profile:\n+ security_profile = dpath.get(conf, 'spec/template/spec/securityContext/seccompProfile/type')\n+ return CheckResult.PASSED if security_profile == 'RuntimeDefault' else CheckResult.FAILED\n+ if \"metadata\" in conf:\n+ metadata = conf[\"metadata\"]\n+ if conf['kind'] == 'StatefulSet':\n+ security_profile = dpath.search(conf, 'spec/template/spec/securityContext/seccompProfile/type')\n+ if security_profile:\n+ security_profile = dpath.get(conf, 'spec/template/spec/securityContext/seccompProfile/type')\n+ return CheckResult.PASSED if security_profile == 'RuntimeDefault' else CheckResult.FAILED\n+ if \"metadata\" in conf:\n+ metadata = conf[\"metadata\"] \n elif conf['kind'] == 'CronJob':\n if \"spec\" in conf:\n if \"jobTemplate\" in conf[\"spec\"]:\n", "issue": "CKV_K8S_31 failure with RuntimeDefault configured for workloads\n**Describe the bug**\r\nPlease see #710\r\n\r\n**To Reproduce**\r\nPlease see #710\r\n\r\n**Expected behavior**\r\nPlease see #710\r\n\r\n**Additional context**\r\nThe bug reported in #710 needs to be fixed for workloads in https://github.com/bridgecrewio/checkov/blob/master/checkov/kubernetes/checks/Seccomp.py#L44:L48 as well.\r\n\r\n```yaml\r\napiVersion: apps/v1\r\nkind: Deployment\r\nmetadata:\r\n....\r\nspec:\r\n...\r\n...\r\n template:\r\n ...\r\n ...\r\n spec:\r\n ....\r\n ....\r\n securityContext:\r\n allowPrivilegeEscalation: false\r\n seccompProfile:\r\n type: RuntimeDefault\r\n```\r\n\r\n**Related PRs**\r\n#711 \r\n\n", "before_files": [{"content": "import dpath\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.kubernetes.base_spec_check import BaseK8Check\n\n\nclass Seccomp(BaseK8Check):\n\n def __init__(self):\n # CIS-1.5 5.7.2\n name = \"Ensure that the seccomp profile is set to docker/default or runtime/default\"\n id = \"CKV_K8S_31\"\n # Location: Pod.metadata.annotations.seccomp.security.alpha.kubernetes.io/pod\n # Location: CronJob.spec.jobTemplate.spec.template.metadata.annotations.seccomp.security.alpha.kubernetes.io/pod\n # Location: *.spec.template.metadata.annotations.seccomp.security.alpha.kubernetes.io/pod\n # Location: *.spec.securityContext.seccompProfile.type\n supported_kind = ['Pod', 'Deployment', 'DaemonSet', 'StatefulSet', 'ReplicaSet', 'ReplicationController', 'Job', 'CronJob']\n categories = [CheckCategories.KUBERNETES]\n super().__init__(name=name, id=id, categories=categories, supported_entities=supported_kind)\n\n def get_resource_id(self, conf):\n if \"namespace\" in conf[\"metadata\"]:\n return \"{}.{}.{}\".format(conf[\"kind\"], conf[\"metadata\"][\"name\"], conf[\"metadata\"][\"namespace\"])\n else:\n return \"{}.{}.default\".format(conf[\"kind\"], conf[\"metadata\"][\"name\"])\n\n def scan_spec_conf(self, conf):\n metadata = {}\n\n if conf['kind'] == 'Pod':\n security_profile = dpath.search(conf, 'spec/securityContext/seccompProfile/type')\n if security_profile:\n security_profile = dpath.get(conf, 'spec/securityContext/seccompProfile/type')\n return CheckResult.PASSED if 
security_profile == 'RuntimeDefault' else CheckResult.FAILED\n if \"metadata\" in conf:\n metadata = conf[\"metadata\"]\n elif conf['kind'] == 'CronJob':\n if \"spec\" in conf:\n if \"jobTemplate\" in conf[\"spec\"]:\n if \"spec\" in conf[\"spec\"][\"jobTemplate\"]:\n if \"template\" in conf[\"spec\"][\"jobTemplate\"][\"spec\"]:\n if \"metadata\" in conf[\"spec\"][\"jobTemplate\"][\"spec\"][\"template\"]:\n metadata = conf[\"spec\"][\"jobTemplate\"][\"spec\"][\"template\"][\"metadata\"]\n else:\n if \"spec\" in conf:\n if \"template\" in conf[\"spec\"]:\n if \"metadata\" in conf[\"spec\"][\"template\"]:\n metadata = conf[\"spec\"][\"template\"][\"metadata\"]\n\n if metadata:\n if \"annotations\" in metadata and isinstance(metadata['annotations'], dict):\n if \"seccomp.security.alpha.kubernetes.io/pod\" in metadata[\"annotations\"]:\n if (\"docker/default\" in metadata[\"annotations\"][\"seccomp.security.alpha.kubernetes.io/pod\"] or\n \"runtime/default\" in metadata[\"annotations\"][\"seccomp.security.alpha.kubernetes.io/pod\"]):\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = Seccomp()\n", "path": "checkov/kubernetes/checks/Seccomp.py"}], "after_files": [{"content": "import dpath\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.kubernetes.base_spec_check import BaseK8Check\n\n\nclass Seccomp(BaseK8Check):\n\n def __init__(self):\n # CIS-1.5 5.7.2\n name = \"Ensure that the seccomp profile is set to docker/default or runtime/default\"\n id = \"CKV_K8S_31\"\n # Location: Pod.metadata.annotations.seccomp.security.alpha.kubernetes.io/pod\n # Location: CronJob.spec.jobTemplate.spec.template.metadata.annotations.seccomp.security.alpha.kubernetes.io/pod\n # Location: *.spec.template.metadata.annotations.seccomp.security.alpha.kubernetes.io/pod\n # Location: *.spec.securityContext.seccompProfile.type\n supported_kind = ['Pod', 'Deployment', 'DaemonSet', 'StatefulSet', 'ReplicaSet', 'ReplicationController', 'Job', 'CronJob']\n categories = [CheckCategories.KUBERNETES]\n super().__init__(name=name, id=id, categories=categories, supported_entities=supported_kind)\n\n def get_resource_id(self, conf):\n if \"namespace\" in conf[\"metadata\"]:\n return \"{}.{}.{}\".format(conf[\"kind\"], conf[\"metadata\"][\"name\"], conf[\"metadata\"][\"namespace\"])\n else:\n return \"{}.{}.default\".format(conf[\"kind\"], conf[\"metadata\"][\"name\"])\n\n def scan_spec_conf(self, conf):\n metadata = {}\n\n if conf['kind'] == 'Pod':\n security_profile = dpath.search(conf, 'spec/securityContext/seccompProfile/type')\n if security_profile:\n security_profile = dpath.get(conf, 'spec/securityContext/seccompProfile/type')\n return CheckResult.PASSED if security_profile == 'RuntimeDefault' else CheckResult.FAILED\n if \"metadata\" in conf:\n metadata = conf[\"metadata\"]\n if conf['kind'] == 'Deployment':\n security_profile = dpath.search(conf, 'spec/template/spec/securityContext/seccompProfile/type')\n if security_profile:\n security_profile = dpath.get(conf, 'spec/template/spec/securityContext/seccompProfile/type')\n return CheckResult.PASSED if security_profile == 'RuntimeDefault' else CheckResult.FAILED\n if \"metadata\" in conf:\n metadata = conf[\"metadata\"]\n if conf['kind'] == 'StatefulSet':\n security_profile = dpath.search(conf, 'spec/template/spec/securityContext/seccompProfile/type')\n if security_profile:\n security_profile = dpath.get(conf, 'spec/template/spec/securityContext/seccompProfile/type')\n return CheckResult.PASSED if security_profile == 
'RuntimeDefault' else CheckResult.FAILED\n if \"metadata\" in conf:\n metadata = conf[\"metadata\"] \n elif conf['kind'] == 'CronJob':\n if \"spec\" in conf:\n if \"jobTemplate\" in conf[\"spec\"]:\n if \"spec\" in conf[\"spec\"][\"jobTemplate\"]:\n if \"template\" in conf[\"spec\"][\"jobTemplate\"][\"spec\"]:\n if \"metadata\" in conf[\"spec\"][\"jobTemplate\"][\"spec\"][\"template\"]:\n metadata = conf[\"spec\"][\"jobTemplate\"][\"spec\"][\"template\"][\"metadata\"]\n else:\n if \"spec\" in conf:\n if \"template\" in conf[\"spec\"]:\n if \"metadata\" in conf[\"spec\"][\"template\"]:\n metadata = conf[\"spec\"][\"template\"][\"metadata\"]\n\n if metadata:\n if \"annotations\" in metadata and isinstance(metadata['annotations'], dict):\n if \"seccomp.security.alpha.kubernetes.io/pod\" in metadata[\"annotations\"]:\n if (\"docker/default\" in metadata[\"annotations\"][\"seccomp.security.alpha.kubernetes.io/pod\"] or\n \"runtime/default\" in metadata[\"annotations\"][\"seccomp.security.alpha.kubernetes.io/pod\"]):\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = Seccomp()\n", "path": "checkov/kubernetes/checks/Seccomp.py"}]}
| 1,215 | 338 |
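One way to read the patch above is that the seccomp profile type can live either directly under the Pod spec or under a workload's pod template. The helper below is an illustrative generalisation of that lookup, not the merged checkov code; it reuses the same `dpath` calls as the original check.

```python
# Illustrative lookup covering both Pod and workload-template paths (not the merged patch).
import dpath

def seccomp_profile_type(conf):
    for path in ('spec/securityContext/seccompProfile/type',
                 'spec/template/spec/securityContext/seccompProfile/type'):
        if dpath.search(conf, path):
            return dpath.get(conf, path)
    return None
```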
gh_patches_debug_18031
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-2725
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Internal server error when importing CSVs with long names
Follow the same steps as reported in #2634 and observe the error shown in the screenshot below:
API: `http://localhost/api/db/v0/tables/12/records/?limit=500&offset=0`
<img width="1512" alt="Screenshot 2023-03-20 at 5 29 52 AM" src="https://user-images.githubusercontent.com/11032856/226218521-75355de8-eee0-4b5e-9a9c-47aa3ff67da2.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/identifiers.py`
Content:
```
1 import hashlib
2
3
4 def truncate_if_necessary(identifier):
5 """
6 Takes an identifier and returns it, truncating it, if it is too long. The truncated version
7 will end with a hash of the passed identifier, therefore column name collision should be very
8 rare.
9
10 Iteratively removes characters from the end of the identifier, until the resulting string, with
11 the suffix hash of the identifier appended, is short enough that it doesn't need to be truncated
12 anymore. Whitespace is trimmed from the truncated identifier before appending the suffix.
13 """
14 assert type(identifier) is str
15 if not is_identifier_too_long(identifier):
16 return identifier
17 right_side = "-" + _get_truncation_hash(identifier)
18 identifier_length = len(identifier)
19 assert len(right_side) < identifier_length # Sanity check
20 range_of_num_of_chars_to_remove = range(1, identifier_length)
21 for num_of_chars_to_remove in range_of_num_of_chars_to_remove:
22 left_side = identifier[:num_of_chars_to_remove * -1]
23 left_side = left_side.rstrip()
24 truncated_identifier = left_side + right_side
25 if not is_identifier_too_long(truncated_identifier):
26 return truncated_identifier
27 raise Exception(
28 "Acceptable truncation not found; should never happen."
29 )
30
31
32 def is_identifier_too_long(identifier):
33 postgres_identifier_size_limit = 63
34 size = _get_size_of_identifier_in_bytes(identifier)
35 return size > postgres_identifier_size_limit
36
37
38 def _get_truncation_hash(identifier):
39 """
40 Produces an 8-character string hash of the passed identifier.
41
42 Using hash function blake2s, because it seems fairly recommended and it seems to be better
43 suited for shorter digests than blake2b. We want short digests to not take up too much of the
44 truncated identifier in whose construction this will be used.
45 """
46 h = hashlib.blake2s(digest_size=4)
47 bytes = _get_identifier_in_bytes(identifier)
48 h.update(bytes)
49 return h.hexdigest()
50
51
52 def _get_size_of_identifier_in_bytes(s):
53 bytes = _get_identifier_in_bytes(s)
54 return len(bytes)
55
56
57 def _get_identifier_in_bytes(s):
58 """
59 Afaict, following Postgres doc [0] says that UTF-8 supports all languages; therefore, different
60 server locale configurations should not break this.
61
62 [0] https://www.postgresql.org/docs/13/multibyte.html
63 """
64 return s.encode('utf-8')
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/db/identifiers.py b/db/identifiers.py
--- a/db/identifiers.py
+++ b/db/identifiers.py
@@ -1,6 +1,9 @@
import hashlib
+POSTGRES_IDENTIFIER_SIZE_LIMIT = 63
+
+
def truncate_if_necessary(identifier):
"""
Takes an identifier and returns it, truncating it, if it is too long. The truncated version
@@ -30,9 +33,13 @@
def is_identifier_too_long(identifier):
- postgres_identifier_size_limit = 63
+ # TODO we should support POSTGRES_IDENTIFIER_SIZE_LIMIT here;
+ # Our current limit due to an unknown bug that manifests at least
+ # when importing CSVs seems to be 57 bytes. Here we're setting it even
+ # lower just in case.
+ our_temporary_identifier_size_limit = 48
size = _get_size_of_identifier_in_bytes(identifier)
- return size > postgres_identifier_size_limit
+ return size > our_temporary_identifier_size_limit
def _get_truncation_hash(identifier):
|
{"golden_diff": "diff --git a/db/identifiers.py b/db/identifiers.py\n--- a/db/identifiers.py\n+++ b/db/identifiers.py\n@@ -1,6 +1,9 @@\n import hashlib\n \n \n+POSTGRES_IDENTIFIER_SIZE_LIMIT = 63\n+\n+\n def truncate_if_necessary(identifier):\n \"\"\"\n Takes an identifier and returns it, truncating it, if it is too long. The truncated version\n@@ -30,9 +33,13 @@\n \n \n def is_identifier_too_long(identifier):\n- postgres_identifier_size_limit = 63\n+ # TODO we should support POSTGRES_IDENTIFIER_SIZE_LIMIT here;\n+ # Our current limit due to an unknown bug that manifests at least\n+ # when importing CSVs seems to be 57 bytes. Here we're setting it even\n+ # lower just in case.\n+ our_temporary_identifier_size_limit = 48\n size = _get_size_of_identifier_in_bytes(identifier)\n- return size > postgres_identifier_size_limit\n+ return size > our_temporary_identifier_size_limit\n \n \n def _get_truncation_hash(identifier):\n", "issue": "Internal server error when importing CSVs with long names\nFollow the same steps as reported in #2634 and observer the error from the screenshot below: \r\n\r\nAPI: `http://localhost/api/db/v0/tables/12/records/?limit=500&offset=0`\r\n\r\n<img width=\"1512\" alt=\"Screenshot 2023-03-20 at 5 29 52 AM\" src=\"https://user-images.githubusercontent.com/11032856/226218521-75355de8-eee0-4b5e-9a9c-47aa3ff67da2.png\">\r\n\n", "before_files": [{"content": "import hashlib\n\n\ndef truncate_if_necessary(identifier):\n \"\"\"\n Takes an identifier and returns it, truncating it, if it is too long. The truncated version\n will end with a hash of the passed identifier, therefore column name collision should be very\n rare.\n\n Iteratively removes characters from the end of the identifier, until the resulting string, with\n the suffix hash of the identifier appended, is short enough that it doesn't need to be truncated\n anymore. Whitespace is trimmed from the truncated identifier before appending the suffix.\n \"\"\"\n assert type(identifier) is str\n if not is_identifier_too_long(identifier):\n return identifier\n right_side = \"-\" + _get_truncation_hash(identifier)\n identifier_length = len(identifier)\n assert len(right_side) < identifier_length # Sanity check\n range_of_num_of_chars_to_remove = range(1, identifier_length)\n for num_of_chars_to_remove in range_of_num_of_chars_to_remove:\n left_side = identifier[:num_of_chars_to_remove * -1]\n left_side = left_side.rstrip()\n truncated_identifier = left_side + right_side\n if not is_identifier_too_long(truncated_identifier):\n return truncated_identifier\n raise Exception(\n \"Acceptable truncation not found; should never happen.\"\n )\n\n\ndef is_identifier_too_long(identifier):\n postgres_identifier_size_limit = 63\n size = _get_size_of_identifier_in_bytes(identifier)\n return size > postgres_identifier_size_limit\n\n\ndef _get_truncation_hash(identifier):\n \"\"\"\n Produces an 8-character string hash of the passed identifier.\n\n Using hash function blake2s, because it seems fairly recommended and it seems to be better\n suited for shorter digests than blake2b. 
We want short digests to not take up too much of the\n truncated identifier in whose construction this will be used.\n \"\"\"\n h = hashlib.blake2s(digest_size=4)\n bytes = _get_identifier_in_bytes(identifier)\n h.update(bytes)\n return h.hexdigest()\n\n\ndef _get_size_of_identifier_in_bytes(s):\n bytes = _get_identifier_in_bytes(s)\n return len(bytes)\n\n\ndef _get_identifier_in_bytes(s):\n \"\"\"\n Afaict, following Postgres doc [0] says that UTF-8 supports all languages; therefore, different\n server locale configurations should not break this.\n\n [0] https://www.postgresql.org/docs/13/multibyte.html\n \"\"\"\n return s.encode('utf-8')\n", "path": "db/identifiers.py"}], "after_files": [{"content": "import hashlib\n\n\nPOSTGRES_IDENTIFIER_SIZE_LIMIT = 63\n\n\ndef truncate_if_necessary(identifier):\n \"\"\"\n Takes an identifier and returns it, truncating it, if it is too long. The truncated version\n will end with a hash of the passed identifier, therefore column name collision should be very\n rare.\n\n Iteratively removes characters from the end of the identifier, until the resulting string, with\n the suffix hash of the identifier appended, is short enough that it doesn't need to be truncated\n anymore. Whitespace is trimmed from the truncated identifier before appending the suffix.\n \"\"\"\n assert type(identifier) is str\n if not is_identifier_too_long(identifier):\n return identifier\n right_side = \"-\" + _get_truncation_hash(identifier)\n identifier_length = len(identifier)\n assert len(right_side) < identifier_length # Sanity check\n range_of_num_of_chars_to_remove = range(1, identifier_length)\n for num_of_chars_to_remove in range_of_num_of_chars_to_remove:\n left_side = identifier[:num_of_chars_to_remove * -1]\n left_side = left_side.rstrip()\n truncated_identifier = left_side + right_side\n if not is_identifier_too_long(truncated_identifier):\n return truncated_identifier\n raise Exception(\n \"Acceptable truncation not found; should never happen.\"\n )\n\n\ndef is_identifier_too_long(identifier):\n # TODO we should support POSTGRES_IDENTIFIER_SIZE_LIMIT here;\n # Our current limit due to an unknown bug that manifests at least\n # when importing CSVs seems to be 57 bytes. Here we're setting it even\n # lower just in case.\n our_temporary_identifier_size_limit = 48\n size = _get_size_of_identifier_in_bytes(identifier)\n return size > our_temporary_identifier_size_limit\n\n\ndef _get_truncation_hash(identifier):\n \"\"\"\n Produces an 8-character string hash of the passed identifier.\n\n Using hash function blake2s, because it seems fairly recommended and it seems to be better\n suited for shorter digests than blake2b. We want short digests to not take up too much of the\n truncated identifier in whose construction this will be used.\n \"\"\"\n h = hashlib.blake2s(digest_size=4)\n bytes = _get_identifier_in_bytes(identifier)\n h.update(bytes)\n return h.hexdigest()\n\n\ndef _get_size_of_identifier_in_bytes(s):\n bytes = _get_identifier_in_bytes(s)\n return len(bytes)\n\n\ndef _get_identifier_in_bytes(s):\n \"\"\"\n Afaict, following Postgres doc [0] says that UTF-8 supports all languages; therefore, different\n server locale configurations should not break this.\n\n [0] https://www.postgresql.org/docs/13/multibyte.html\n \"\"\"\n return s.encode('utf-8')\n", "path": "db/identifiers.py"}]}
| 1,081 | 240 |
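To illustrate the truncation scheme patched above, here is a standalone sketch (not Mathesar's actual module) that mirrors `truncate_if_necessary` with the lowered 48-byte limit from the golden diff; the column name is made up:

```python
import hashlib

LIMIT = 48  # the temporary identifier size limit introduced by the patch above


def truncate(identifier: str) -> str:
    # Short enough already: return unchanged.
    if len(identifier.encode("utf-8")) <= LIMIT:
        return identifier
    # 8-hex-character blake2s digest of the full name, as in db/identifiers.py.
    suffix = "-" + hashlib.blake2s(identifier.encode("utf-8"), digest_size=4).hexdigest()
    # Strip characters from the end until the suffixed name fits.
    for n in range(1, len(identifier)):
        candidate = identifier[:-n].rstrip() + suffix
        if len(candidate.encode("utf-8")) <= LIMIT:
            return candidate
    raise Exception("Acceptable truncation not found; should never happen.")


print(truncate("a_very_long_csv_column_header_that_exceeds_postgres_limits"))
# ends with '-' plus an 8-character blake2s hex digest, total at most 48 bytes
```

Because the suffix is derived from a hash of the full original name, two different long CSV headers rarely collapse to the same truncated identifier.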
gh_patches_debug_63591 | rasdani/github-patches | git_diff | openai__gym-1092 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ImportError when installing on Windows 10 and [33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>
Dears,
Would you please let me know how I could solve this warning and this error? (Windows 10)
Using TensorFlow backend.
[33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.[0m
File "C:\Users\fi\Desktop\rl\code\3.6\stock_market_reinforcement_learning-master\environment.py", line 43, in __init__
self.reset()
File "C:\Users\fi\Anaconda30\envs\tensorflow\lib\site-packages\gym\core.py", line 70, in reset
raise NotImplementedError
NotImplementedErrorr
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gym/envs/mujoco/mujoco_env.py`
Content:
```
1 import os
2
3 from gym import error, spaces
4 from gym.utils import seeding
5 import numpy as np
6 from os import path
7 import gym
8 import six
9
10 try:
11 import mujoco_py
12 except ImportError as e:
13 raise error.DependencyNotInstalled("{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)".format(e))
14
15 DEFAULT_SIZE = 500
16
17 class MujocoEnv(gym.Env):
18 """Superclass for all MuJoCo environments.
19 """
20
21 def __init__(self, model_path, frame_skip):
22 if model_path.startswith("/"):
23 fullpath = model_path
24 else:
25 fullpath = os.path.join(os.path.dirname(__file__), "assets", model_path)
26 if not path.exists(fullpath):
27 raise IOError("File %s does not exist" % fullpath)
28 self.frame_skip = frame_skip
29 self.model = mujoco_py.load_model_from_path(fullpath)
30 self.sim = mujoco_py.MjSim(self.model)
31 self.data = self.sim.data
32 self.viewer = None
33 self._viewers = {}
34
35 self.metadata = {
36 'render.modes': ['human', 'rgb_array'],
37 'video.frames_per_second': int(np.round(1.0 / self.dt))
38 }
39
40 self.init_qpos = self.sim.data.qpos.ravel().copy()
41 self.init_qvel = self.sim.data.qvel.ravel().copy()
42 observation, _reward, done, _info = self.step(np.zeros(self.model.nu))
43 assert not done
44 self.obs_dim = observation.size
45
46 bounds = self.model.actuator_ctrlrange.copy()
47 low = bounds[:, 0]
48 high = bounds[:, 1]
49 self.action_space = spaces.Box(low=low, high=high)
50
51 high = np.inf*np.ones(self.obs_dim)
52 low = -high
53 self.observation_space = spaces.Box(low, high)
54
55 self.seed()
56
57 def seed(self, seed=None):
58 self.np_random, seed = seeding.np_random(seed)
59 return [seed]
60
61 # methods to override:
62 # ----------------------------
63
64 def reset_model(self):
65 """
66 Reset the robot degrees of freedom (qpos and qvel).
67 Implement this in each subclass.
68 """
69 raise NotImplementedError
70
71 def viewer_setup(self):
72 """
73 This method is called when the viewer is initialized and after every reset
74 Optionally implement this method, if you need to tinker with camera position
75 and so forth.
76 """
77 pass
78
79 # -----------------------------
80
81 def reset(self):
82 self.sim.reset()
83 ob = self.reset_model()
84 old_viewer = self.viewer
85 for v in self._viewers.values():
86 self.viewer = v
87 self.viewer_setup()
88 self.viewer = old_viewer
89 return ob
90
91 def set_state(self, qpos, qvel):
92 assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,)
93 old_state = self.sim.get_state()
94 new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel,
95 old_state.act, old_state.udd_state)
96 self.sim.set_state(new_state)
97 self.sim.forward()
98
99 @property
100 def dt(self):
101 return self.model.opt.timestep * self.frame_skip
102
103 def do_simulation(self, ctrl, n_frames):
104 self.sim.data.ctrl[:] = ctrl
105 for _ in range(n_frames):
106 self.sim.step()
107
108 def render(self, mode='human', width=DEFAULT_SIZE, height=DEFAULT_SIZE):
109 if mode == 'rgb_array':
110 self._get_viewer(mode).render(width, height)
111 # window size used for old mujoco-py:
112 data = self._get_viewer(mode).read_pixels(width, height, depth=False)
113 # original image is upside-down, so flip it
114 return data[::-1, :, :]
115 elif mode == 'human':
116 self._get_viewer(mode).render()
117
118 def close(self):
119 if self.viewer is not None:
120 # self.viewer.finish()
121 self.viewer = None
122 self._viewers = {}
123
124 def _get_viewer(self, mode):
125 self.viewer = self._viewers.get(mode)
126 if self.viewer is None:
127 if mode == 'human':
128 self.viewer = mujoco_py.MjViewer(self.sim)
129 elif mode == 'rgb_array':
130 self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, 0)
131 self.viewer_setup()
132 self._viewers[mode] = self.viewer
133 return self.viewer
134
135 def get_body_com(self, body_name):
136 return self.data.get_body_xpos(body_name)
137
138 def state_vector(self):
139 return np.concatenate([
140 self.sim.data.qpos.flat,
141 self.sim.data.qvel.flat
142 ])
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gym/envs/mujoco/mujoco_env.py b/gym/envs/mujoco/mujoco_env.py
--- a/gym/envs/mujoco/mujoco_env.py
+++ b/gym/envs/mujoco/mujoco_env.py
@@ -46,7 +46,7 @@
bounds = self.model.actuator_ctrlrange.copy()
low = bounds[:, 0]
high = bounds[:, 1]
- self.action_space = spaces.Box(low=low, high=high)
+ self.action_space = spaces.Box(low=low, high=high, dtype=np.float32)
high = np.inf*np.ones(self.obs_dim)
low = -high
|
{"golden_diff": "diff --git a/gym/envs/mujoco/mujoco_env.py b/gym/envs/mujoco/mujoco_env.py\n--- a/gym/envs/mujoco/mujoco_env.py\n+++ b/gym/envs/mujoco/mujoco_env.py\n@@ -46,7 +46,7 @@\n bounds = self.model.actuator_ctrlrange.copy()\n low = bounds[:, 0]\n high = bounds[:, 1]\n- self.action_space = spaces.Box(low=low, high=high)\n+ self.action_space = spaces.Box(low=low, high=high, dtype=np.float32)\n \n high = np.inf*np.ones(self.obs_dim)\n low = -high\n", "issue": "ImportError when installing on Windows 10 and [33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>\nDears,\r\nWould you please let me know how I could solve this warning and this error? (Windows 10)\r\n\r\nUsing TensorFlow backend.\r\n\u001b[33mWARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.\u001b[0m\r\n\r\n File \"C:\\Users\\fi\\Desktop\\rl\\code\\3.6\\stock_market_reinforcement_learning-master\\environment.py\", line 43, in __init__\r\n self.reset()\r\n File \"C:\\Users\\fi\\Anaconda30\\envs\\tensorflow\\lib\\site-packages\\gym\\core.py\", line 70, in reset\r\n raise NotImplementedError\r\nNotImplementedErrorr\r\n\n", "before_files": [{"content": "import os\n\nfrom gym import error, spaces\nfrom gym.utils import seeding\nimport numpy as np\nfrom os import path\nimport gym\nimport six\n\ntry:\n import mujoco_py\nexcept ImportError as e:\n raise error.DependencyNotInstalled(\"{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)\".format(e))\n\nDEFAULT_SIZE = 500\n\nclass MujocoEnv(gym.Env):\n \"\"\"Superclass for all MuJoCo environments.\n \"\"\"\n\n def __init__(self, model_path, frame_skip):\n if model_path.startswith(\"/\"):\n fullpath = model_path\n else:\n fullpath = os.path.join(os.path.dirname(__file__), \"assets\", model_path)\n if not path.exists(fullpath):\n raise IOError(\"File %s does not exist\" % fullpath)\n self.frame_skip = frame_skip\n self.model = mujoco_py.load_model_from_path(fullpath)\n self.sim = mujoco_py.MjSim(self.model)\n self.data = self.sim.data\n self.viewer = None\n self._viewers = {}\n\n self.metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': int(np.round(1.0 / self.dt))\n }\n\n self.init_qpos = self.sim.data.qpos.ravel().copy()\n self.init_qvel = self.sim.data.qvel.ravel().copy()\n observation, _reward, done, _info = self.step(np.zeros(self.model.nu))\n assert not done\n self.obs_dim = observation.size\n\n bounds = self.model.actuator_ctrlrange.copy()\n low = bounds[:, 0]\n high = bounds[:, 1]\n self.action_space = spaces.Box(low=low, high=high)\n\n high = np.inf*np.ones(self.obs_dim)\n low = -high\n self.observation_space = spaces.Box(low, high)\n\n self.seed()\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n # methods to override:\n # ----------------------------\n\n def reset_model(self):\n \"\"\"\n Reset the robot degrees of freedom (qpos and qvel).\n Implement this in each subclass.\n \"\"\"\n raise NotImplementedError\n\n def viewer_setup(self):\n \"\"\"\n This method is called when the viewer is initialized and after every reset\n Optionally implement this method, if you need to tinker with camera position\n and so forth.\n \"\"\"\n pass\n\n # -----------------------------\n\n def reset(self):\n self.sim.reset()\n ob = self.reset_model()\n old_viewer = self.viewer\n for v in self._viewers.values():\n self.viewer = v\n self.viewer_setup()\n self.viewer = old_viewer\n 
return ob\n\n def set_state(self, qpos, qvel):\n assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,)\n old_state = self.sim.get_state()\n new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel,\n old_state.act, old_state.udd_state)\n self.sim.set_state(new_state)\n self.sim.forward()\n\n @property\n def dt(self):\n return self.model.opt.timestep * self.frame_skip\n\n def do_simulation(self, ctrl, n_frames):\n self.sim.data.ctrl[:] = ctrl\n for _ in range(n_frames):\n self.sim.step()\n\n def render(self, mode='human', width=DEFAULT_SIZE, height=DEFAULT_SIZE):\n if mode == 'rgb_array':\n self._get_viewer(mode).render(width, height)\n # window size used for old mujoco-py:\n data = self._get_viewer(mode).read_pixels(width, height, depth=False)\n # original image is upside-down, so flip it\n return data[::-1, :, :]\n elif mode == 'human':\n self._get_viewer(mode).render()\n\n def close(self):\n if self.viewer is not None:\n # self.viewer.finish()\n self.viewer = None\n self._viewers = {}\n\n def _get_viewer(self, mode):\n self.viewer = self._viewers.get(mode)\n if self.viewer is None:\n if mode == 'human':\n self.viewer = mujoco_py.MjViewer(self.sim)\n elif mode == 'rgb_array':\n self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, 0)\n self.viewer_setup()\n self._viewers[mode] = self.viewer\n return self.viewer\n\n def get_body_com(self, body_name):\n return self.data.get_body_xpos(body_name)\n\n def state_vector(self):\n return np.concatenate([\n self.sim.data.qpos.flat,\n self.sim.data.qvel.flat\n ])\n", "path": "gym/envs/mujoco/mujoco_env.py"}], "after_files": [{"content": "import os\n\nfrom gym import error, spaces\nfrom gym.utils import seeding\nimport numpy as np\nfrom os import path\nimport gym\nimport six\n\ntry:\n import mujoco_py\nexcept ImportError as e:\n raise error.DependencyNotInstalled(\"{}. 
(HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)\".format(e))\n\nDEFAULT_SIZE = 500\n\nclass MujocoEnv(gym.Env):\n \"\"\"Superclass for all MuJoCo environments.\n \"\"\"\n\n def __init__(self, model_path, frame_skip):\n if model_path.startswith(\"/\"):\n fullpath = model_path\n else:\n fullpath = os.path.join(os.path.dirname(__file__), \"assets\", model_path)\n if not path.exists(fullpath):\n raise IOError(\"File %s does not exist\" % fullpath)\n self.frame_skip = frame_skip\n self.model = mujoco_py.load_model_from_path(fullpath)\n self.sim = mujoco_py.MjSim(self.model)\n self.data = self.sim.data\n self.viewer = None\n self._viewers = {}\n\n self.metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': int(np.round(1.0 / self.dt))\n }\n\n self.init_qpos = self.sim.data.qpos.ravel().copy()\n self.init_qvel = self.sim.data.qvel.ravel().copy()\n observation, _reward, done, _info = self.step(np.zeros(self.model.nu))\n assert not done\n self.obs_dim = observation.size\n\n bounds = self.model.actuator_ctrlrange.copy()\n low = bounds[:, 0]\n high = bounds[:, 1]\n self.action_space = spaces.Box(low=low, high=high, dtype=np.float32)\n\n high = np.inf*np.ones(self.obs_dim)\n low = -high\n self.observation_space = spaces.Box(low, high)\n\n self.seed()\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n # methods to override:\n # ----------------------------\n\n def reset_model(self):\n \"\"\"\n Reset the robot degrees of freedom (qpos and qvel).\n Implement this in each subclass.\n \"\"\"\n raise NotImplementedError\n\n def viewer_setup(self):\n \"\"\"\n This method is called when the viewer is initialized and after every reset\n Optionally implement this method, if you need to tinker with camera position\n and so forth.\n \"\"\"\n pass\n\n # -----------------------------\n\n def reset(self):\n self.sim.reset()\n ob = self.reset_model()\n old_viewer = self.viewer\n for v in self._viewers.values():\n self.viewer = v\n self.viewer_setup()\n self.viewer = old_viewer\n return ob\n\n def set_state(self, qpos, qvel):\n assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,)\n old_state = self.sim.get_state()\n new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel,\n old_state.act, old_state.udd_state)\n self.sim.set_state(new_state)\n self.sim.forward()\n\n @property\n def dt(self):\n return self.model.opt.timestep * self.frame_skip\n\n def do_simulation(self, ctrl, n_frames):\n self.sim.data.ctrl[:] = ctrl\n for _ in range(n_frames):\n self.sim.step()\n\n def render(self, mode='human', width=DEFAULT_SIZE, height=DEFAULT_SIZE):\n if mode == 'rgb_array':\n self._get_viewer(mode).render(width, height)\n # window size used for old mujoco-py:\n data = self._get_viewer(mode).read_pixels(width, height, depth=False)\n # original image is upside-down, so flip it\n return data[::-1, :, :]\n elif mode == 'human':\n self._get_viewer(mode).render()\n\n def close(self):\n if self.viewer is not None:\n # self.viewer.finish()\n self.viewer = None\n self._viewers = {}\n\n def _get_viewer(self, mode):\n self.viewer = self._viewers.get(mode)\n if self.viewer is None:\n if mode == 'human':\n self.viewer = mujoco_py.MjViewer(self.sim)\n elif mode == 'rgb_array':\n self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, 0)\n self.viewer_setup()\n self._viewers[mode] = self.viewer\n return self.viewer\n\n def get_body_com(self, body_name):\n return 
self.data.get_body_xpos(body_name)\n\n def state_vector(self):\n return np.concatenate([\n self.sim.data.qpos.flat,\n self.sim.data.qvel.flat\n ])\n", "path": "gym/envs/mujoco/mujoco_env.py"}]}
| 1,828 | 155 |
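The warning quoted in the issue above comes from constructing `spaces.Box` without an explicit `dtype`; the patch adds `dtype=np.float32` to the MuJoCo action space. A minimal sketch of the fixed call, assuming the gym version whose sources are shown here (the bound values are stand-ins for `self.model.actuator_ctrlrange`):

```python
import numpy as np
from gym import spaces

# Stand-in for the actuator control-range bounds read from the MuJoCo model.
low = np.array([-1.0, -2.0])
high = np.array([2.0, 4.0])

# Passing dtype explicitly silences "Box autodetected dtype as <class 'numpy.float32'>".
action_space = spaces.Box(low=low, high=high, dtype=np.float32)
print(action_space)  # Box(2,)
```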
gh_patches_debug_7763 | rasdani/github-patches | git_diff | plotly__dash-808 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Defer `pytest` import?
Looks like `pytest` isn't the safest dependency, causing issues with scikit-learn: https://community.plot.ly/t/pytest-transient-dependency/25383
Could we move the `import pytest` into the testing module/class/function itself and not require it upon install? We could even have a separate install with setup.py's "extras" feature (https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies) like `pip install dash[testing]` or something.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import io
2 from setuptools import setup, find_packages
3
4 main_ns = {}
5 exec(open("dash/version.py").read(), main_ns) # pylint: disable=exec-used
6
7
8 def read_req_file(req_type):
9 with open("requires-{}.txt".format(req_type)) as fp:
10 requires = (line.strip() for line in fp)
11 return [req for req in requires if req and not req.startswith("#")]
12
13
14 setup(
15 name="dash",
16 version=main_ns["__version__"],
17 author="chris p",
18 author_email="[email protected]",
19 packages=find_packages(exclude=["tests*"]),
20 include_package_data=True,
21 license="MIT",
22 description=(
23 "A Python framework for building reactive web-apps. "
24 "Developed by Plotly."
25 ),
26 long_description=io.open("README.md", encoding="utf-8").read(),
27 long_description_content_type="text/markdown",
28 install_requires=read_req_file("install"),
29 extras_require={"ci": read_req_file("ci")},
30 entry_points={
31 "console_scripts": [
32 "dash-generate-components ="
33 " dash.development.component_generator:cli"
34 ],
35 "pytest11": ["dash = dash.testing.plugin"],
36 },
37 url="https://plot.ly/dash",
38 classifiers=[
39 "Development Status :: 5 - Production/Stable",
40 "Environment :: Web Environment",
41 "Framework :: Flask",
42 "Intended Audience :: Developers",
43 "Intended Audience :: Education",
44 "Intended Audience :: Financial and Insurance Industry",
45 "Intended Audience :: Healthcare Industry",
46 "Intended Audience :: Manufacturing",
47 "Intended Audience :: Science/Research",
48 "License :: OSI Approved :: MIT License",
49 "Programming Language :: Python",
50 "Programming Language :: Python :: 2",
51 "Programming Language :: Python :: 2.7",
52 "Programming Language :: Python :: 3",
53 "Programming Language :: Python :: 3.3",
54 "Programming Language :: Python :: 3.4",
55 "Programming Language :: Python :: 3.5",
56 "Programming Language :: Python :: 3.6",
57 "Programming Language :: Python :: 3.7",
58 "Topic :: Database :: Front-Ends",
59 "Topic :: Office/Business :: Financial :: Spreadsheet",
60 "Topic :: Scientific/Engineering :: Visualization",
61 "Topic :: Software Development :: Libraries :: Application Frameworks",
62 "Topic :: Software Development :: Widget Sets",
63 ],
64 )
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,10 @@
long_description=io.open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
install_requires=read_req_file("install"),
- extras_require={"ci": read_req_file("ci")},
+ extras_require={
+ "ci": read_req_file("ci"),
+ "testing": read_req_file("testing"),
+ },
entry_points={
"console_scripts": [
"dash-generate-components ="
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,7 +26,10 @@\n long_description=io.open(\"README.md\", encoding=\"utf-8\").read(),\n long_description_content_type=\"text/markdown\",\n install_requires=read_req_file(\"install\"),\n- extras_require={\"ci\": read_req_file(\"ci\")},\n+ extras_require={\n+ \"ci\": read_req_file(\"ci\"),\n+ \"testing\": read_req_file(\"testing\"),\n+ },\n entry_points={\n \"console_scripts\": [\n \"dash-generate-components =\"\n", "issue": "Defer `pytest` import?\nLooks like `pytest` isn't the safest dependency, causing issues with scikit-learn: https://community.plot.ly/t/pytest-transient-dependency/25383\r\n\r\nCould we move the `import pytest` into the testing module/class/function itself and not require it upon install? We could even have a separate install with setup.py's \"extras\" feature (https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies) like `pip install dash[testing]` or something.\n", "before_files": [{"content": "import io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open(\"dash/version.py\").read(), main_ns) # pylint: disable=exec-used\n\n\ndef read_req_file(req_type):\n with open(\"requires-{}.txt\".format(req_type)) as fp:\n requires = (line.strip() for line in fp)\n return [req for req in requires if req and not req.startswith(\"#\")]\n\n\nsetup(\n name=\"dash\",\n version=main_ns[\"__version__\"],\n author=\"chris p\",\n author_email=\"[email protected]\",\n packages=find_packages(exclude=[\"tests*\"]),\n include_package_data=True,\n license=\"MIT\",\n description=(\n \"A Python framework for building reactive web-apps. \"\n \"Developed by Plotly.\"\n ),\n long_description=io.open(\"README.md\", encoding=\"utf-8\").read(),\n long_description_content_type=\"text/markdown\",\n install_requires=read_req_file(\"install\"),\n extras_require={\"ci\": read_req_file(\"ci\")},\n entry_points={\n \"console_scripts\": [\n \"dash-generate-components =\"\n \" dash.development.component_generator:cli\"\n ],\n \"pytest11\": [\"dash = dash.testing.plugin\"],\n },\n url=\"https://plot.ly/dash\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Flask\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Financial and Insurance Industry\",\n \"Intended Audience :: Healthcare Industry\",\n \"Intended Audience :: Manufacturing\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Database :: Front-Ends\",\n \"Topic :: Office/Business :: Financial :: Spreadsheet\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Topic :: Software Development :: Libraries :: Application Frameworks\",\n \"Topic :: Software Development :: Widget Sets\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "import io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open(\"dash/version.py\").read(), main_ns) # pylint: disable=exec-used\n\n\ndef 
read_req_file(req_type):\n with open(\"requires-{}.txt\".format(req_type)) as fp:\n requires = (line.strip() for line in fp)\n return [req for req in requires if req and not req.startswith(\"#\")]\n\n\nsetup(\n name=\"dash\",\n version=main_ns[\"__version__\"],\n author=\"chris p\",\n author_email=\"[email protected]\",\n packages=find_packages(exclude=[\"tests*\"]),\n include_package_data=True,\n license=\"MIT\",\n description=(\n \"A Python framework for building reactive web-apps. \"\n \"Developed by Plotly.\"\n ),\n long_description=io.open(\"README.md\", encoding=\"utf-8\").read(),\n long_description_content_type=\"text/markdown\",\n install_requires=read_req_file(\"install\"),\n extras_require={\n \"ci\": read_req_file(\"ci\"),\n \"testing\": read_req_file(\"testing\"),\n },\n entry_points={\n \"console_scripts\": [\n \"dash-generate-components =\"\n \" dash.development.component_generator:cli\"\n ],\n \"pytest11\": [\"dash = dash.testing.plugin\"],\n },\n url=\"https://plot.ly/dash\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Flask\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Financial and Insurance Industry\",\n \"Intended Audience :: Healthcare Industry\",\n \"Intended Audience :: Manufacturing\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Database :: Front-Ends\",\n \"Topic :: Office/Business :: Financial :: Spreadsheet\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Topic :: Software Development :: Libraries :: Application Frameworks\",\n \"Topic :: Software Development :: Widget Sets\",\n ],\n)\n", "path": "setup.py"}]}
| 1,028 | 131 |
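The extras mechanism used in the patch above keeps the test-only dependencies optional. A hedged sketch of the relevant `setup()` call after the change, assuming a `requires-testing.txt` file exists alongside the other requirement files (mirroring the repository's naming convention):

```python
from setuptools import setup, find_packages


def read_req_file(req_type):
    # Same helper as in dash's setup.py: one requirement per line, comments skipped.
    with open("requires-{}.txt".format(req_type)) as fp:
        requires = (line.strip() for line in fp)
        return [req for req in requires if req and not req.startswith("#")]


setup(
    name="dash",
    packages=find_packages(exclude=["tests*"]),
    install_requires=read_req_file("install"),
    extras_require={
        "ci": read_req_file("ci"),
        "testing": read_req_file("testing"),  # opt in via: pip install dash[testing]
    },
)
```

With this split, `pip install dash` alone stays free of pytest, while `pip install dash[testing]` pulls in the test-only dependencies on demand.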
gh_patches_debug_42715 | rasdani/github-patches | git_diff | openai__gym-1878 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Box bound precision warning
I get this warning a lot when using Box environments:
```
.../gym/logger.py:30: UserWarning: WARN: Box bound precision lowered by casting to float32
```
This is particularly annoying, especially because the [default dtype for Box is](https://github.com/openai/gym/blob/master/gym/spaces/box.py#L24) `np.float(32)`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gym/spaces/box.py`
Content:
```
1 import numpy as np
2
3 from .space import Space
4 from gym import logger
5
6
7 class Box(Space):
8 """
9 A (possibly unbounded) box in R^n. Specifically, a Box represents the
10 Cartesian product of n closed intervals. Each interval has the form of one
11 of [a, b], (-oo, b], [a, oo), or (-oo, oo).
12
13 There are two common use cases:
14
15 * Identical bound for each dimension::
16 >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
17 Box(3, 4)
18
19 * Independent bound for each dimension::
20 >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)
21 Box(2,)
22
23 """
24 def __init__(self, low, high, shape=None, dtype=np.float32):
25 assert dtype is not None, 'dtype must be explicitly provided. '
26 self.dtype = np.dtype(dtype)
27
28 if shape is None:
29 assert low.shape == high.shape, 'box dimension mismatch. '
30 self.shape = low.shape
31 self.low = low
32 self.high = high
33 else:
34 assert np.isscalar(low) and np.isscalar(high), 'box requires scalar bounds. '
35 self.shape = tuple(shape)
36 self.low = np.full(self.shape, low)
37 self.high = np.full(self.shape, high)
38
39 def _get_precision(dtype):
40 if np.issubdtype(dtype, np.floating):
41 return np.finfo(dtype).precision
42 else:
43 return np.inf
44 low_precision = _get_precision(self.low.dtype)
45 high_precision = _get_precision(self.high.dtype)
46 dtype_precision = _get_precision(self.dtype)
47 if min(low_precision, high_precision) > dtype_precision:
48 logger.warn("Box bound precision lowered by casting to {}".format(self.dtype))
49 self.low = self.low.astype(self.dtype)
50 self.high = self.high.astype(self.dtype)
51
52 # Boolean arrays which indicate the interval type for each coordinate
53 self.bounded_below = -np.inf < self.low
54 self.bounded_above = np.inf > self.high
55
56 super(Box, self).__init__(self.shape, self.dtype)
57
58 def is_bounded(self, manner="both"):
59 below = np.all(self.bounded_below)
60 above = np.all(self.bounded_above)
61 if manner == "both":
62 return below and above
63 elif manner == "below":
64 return below
65 elif manner == "above":
66 return above
67 else:
68 raise ValueError("manner is not in {'below', 'above', 'both'}")
69
70 def sample(self):
71 """
72 Generates a single random sample inside of the Box.
73
74 In creating a sample of the box, each coordinate is sampled according to
75 the form of the interval:
76
77 * [a, b] : uniform distribution
78 * [a, oo) : shifted exponential distribution
79 * (-oo, b] : shifted negative exponential distribution
80 * (-oo, oo) : normal distribution
81 """
82 high = self.high if self.dtype.kind == 'f' \
83 else self.high.astype('int64') + 1
84 sample = np.empty(self.shape)
85
86 # Masking arrays which classify the coordinates according to interval
87 # type
88 unbounded = ~self.bounded_below & ~self.bounded_above
89 upp_bounded = ~self.bounded_below & self.bounded_above
90 low_bounded = self.bounded_below & ~self.bounded_above
91 bounded = self.bounded_below & self.bounded_above
92
93
94 # Vectorized sampling by interval type
95 sample[unbounded] = self.np_random.normal(
96 size=unbounded[unbounded].shape)
97
98 sample[low_bounded] = self.np_random.exponential(
99 size=low_bounded[low_bounded].shape) + self.low[low_bounded]
100
101 sample[upp_bounded] = -self.np_random.exponential(
102 size=upp_bounded[upp_bounded].shape) + self.high[upp_bounded]
103
104 sample[bounded] = self.np_random.uniform(low=self.low[bounded],
105 high=high[bounded],
106 size=bounded[bounded].shape)
107 if self.dtype.kind == 'i':
108 sample = np.floor(sample)
109
110 return sample.astype(self.dtype)
111
112 def contains(self, x):
113 if isinstance(x, list):
114 x = np.array(x) # Promote list to array for contains check
115 return x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)
116
117 def to_jsonable(self, sample_n):
118 return np.array(sample_n).tolist()
119
120 def from_jsonable(self, sample_n):
121 return [np.asarray(sample) for sample in sample_n]
122
123 def __repr__(self):
124 return "Box" + str(self.shape)
125
126 def __eq__(self, other):
127 return isinstance(other, Box) and (self.shape == other.shape) and np.allclose(self.low, other.low) and np.allclose(self.high, other.high)
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gym/spaces/box.py b/gym/spaces/box.py
--- a/gym/spaces/box.py
+++ b/gym/spaces/box.py
@@ -9,13 +9,13 @@
A (possibly unbounded) box in R^n. Specifically, a Box represents the
Cartesian product of n closed intervals. Each interval has the form of one
of [a, b], (-oo, b], [a, oo), or (-oo, oo).
-
+
There are two common use cases:
-
+
* Identical bound for each dimension::
>>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
Box(3, 4)
-
+
* Independent bound for each dimension::
>>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)
Box(2,)
@@ -33,8 +33,8 @@
else:
assert np.isscalar(low) and np.isscalar(high), 'box requires scalar bounds. '
self.shape = tuple(shape)
- self.low = np.full(self.shape, low)
- self.high = np.full(self.shape, high)
+ self.low = np.full(self.shape, low, dtype=dtype)
+ self.high = np.full(self.shape, high, dtype=dtype)
def _get_precision(dtype):
if np.issubdtype(dtype, np.floating):
@@ -69,12 +69,12 @@
def sample(self):
"""
- Generates a single random sample inside of the Box.
+ Generates a single random sample inside of the Box.
In creating a sample of the box, each coordinate is sampled according to
the form of the interval:
-
- * [a, b] : uniform distribution
+
+ * [a, b] : uniform distribution
* [a, oo) : shifted exponential distribution
* (-oo, b] : shifted negative exponential distribution
* (-oo, oo) : normal distribution
@@ -89,7 +89,7 @@
upp_bounded = ~self.bounded_below & self.bounded_above
low_bounded = self.bounded_below & ~self.bounded_above
bounded = self.bounded_below & self.bounded_above
-
+
# Vectorized sampling by interval type
sample[unbounded] = self.np_random.normal(
@@ -97,18 +97,18 @@
sample[low_bounded] = self.np_random.exponential(
size=low_bounded[low_bounded].shape) + self.low[low_bounded]
-
+
sample[upp_bounded] = -self.np_random.exponential(
size=upp_bounded[upp_bounded].shape) + self.high[upp_bounded]
-
- sample[bounded] = self.np_random.uniform(low=self.low[bounded],
+
+ sample[bounded] = self.np_random.uniform(low=self.low[bounded],
high=high[bounded],
size=bounded[bounded].shape)
if self.dtype.kind == 'i':
sample = np.floor(sample)
return sample.astype(self.dtype)
-
+
def contains(self, x):
if isinstance(x, list):
x = np.array(x) # Promote list to array for contains check
|
{"golden_diff": "diff --git a/gym/spaces/box.py b/gym/spaces/box.py\n--- a/gym/spaces/box.py\n+++ b/gym/spaces/box.py\n@@ -9,13 +9,13 @@\n A (possibly unbounded) box in R^n. Specifically, a Box represents the\n Cartesian product of n closed intervals. Each interval has the form of one\n of [a, b], (-oo, b], [a, oo), or (-oo, oo).\n- \n+\n There are two common use cases:\n- \n+\n * Identical bound for each dimension::\n >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)\n Box(3, 4)\n- \n+\n * Independent bound for each dimension::\n >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)\n Box(2,)\n@@ -33,8 +33,8 @@\n else:\n assert np.isscalar(low) and np.isscalar(high), 'box requires scalar bounds. '\n self.shape = tuple(shape)\n- self.low = np.full(self.shape, low)\n- self.high = np.full(self.shape, high)\n+ self.low = np.full(self.shape, low, dtype=dtype)\n+ self.high = np.full(self.shape, high, dtype=dtype)\n \n def _get_precision(dtype):\n if np.issubdtype(dtype, np.floating):\n@@ -69,12 +69,12 @@\n \n def sample(self):\n \"\"\"\n- Generates a single random sample inside of the Box. \n+ Generates a single random sample inside of the Box.\n \n In creating a sample of the box, each coordinate is sampled according to\n the form of the interval:\n- \n- * [a, b] : uniform distribution \n+\n+ * [a, b] : uniform distribution\n * [a, oo) : shifted exponential distribution\n * (-oo, b] : shifted negative exponential distribution\n * (-oo, oo) : normal distribution\n@@ -89,7 +89,7 @@\n upp_bounded = ~self.bounded_below & self.bounded_above\n low_bounded = self.bounded_below & ~self.bounded_above\n bounded = self.bounded_below & self.bounded_above\n- \n+\n \n # Vectorized sampling by interval type\n sample[unbounded] = self.np_random.normal(\n@@ -97,18 +97,18 @@\n \n sample[low_bounded] = self.np_random.exponential(\n size=low_bounded[low_bounded].shape) + self.low[low_bounded]\n- \n+\n sample[upp_bounded] = -self.np_random.exponential(\n size=upp_bounded[upp_bounded].shape) + self.high[upp_bounded]\n- \n- sample[bounded] = self.np_random.uniform(low=self.low[bounded], \n+\n+ sample[bounded] = self.np_random.uniform(low=self.low[bounded],\n high=high[bounded],\n size=bounded[bounded].shape)\n if self.dtype.kind == 'i':\n sample = np.floor(sample)\n \n return sample.astype(self.dtype)\n- \n+\n def contains(self, x):\n if isinstance(x, list):\n x = np.array(x) # Promote list to array for contains check\n", "issue": "Box bound precision warning\nI get this warning a lot when using Box environments:\r\n\r\n```\r\n.../gym/logger.py:30: UserWarning: WARN: Box bound precision lowered by casting to float32\r\n```\r\nThis is particularly annoying, especially because the [default dtype for Box is](https://github.com/openai/gym/blob/master/gym/spaces/box.py#L24) `np.float(32)`\n", "before_files": [{"content": "import numpy as np\n\nfrom .space import Space\nfrom gym import logger\n\n\nclass Box(Space):\n \"\"\"\n A (possibly unbounded) box in R^n. Specifically, a Box represents the\n Cartesian product of n closed intervals. 
Each interval has the form of one\n of [a, b], (-oo, b], [a, oo), or (-oo, oo).\n \n There are two common use cases:\n \n * Identical bound for each dimension::\n >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)\n Box(3, 4)\n \n * Independent bound for each dimension::\n >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)\n Box(2,)\n\n \"\"\"\n def __init__(self, low, high, shape=None, dtype=np.float32):\n assert dtype is not None, 'dtype must be explicitly provided. '\n self.dtype = np.dtype(dtype)\n\n if shape is None:\n assert low.shape == high.shape, 'box dimension mismatch. '\n self.shape = low.shape\n self.low = low\n self.high = high\n else:\n assert np.isscalar(low) and np.isscalar(high), 'box requires scalar bounds. '\n self.shape = tuple(shape)\n self.low = np.full(self.shape, low)\n self.high = np.full(self.shape, high)\n\n def _get_precision(dtype):\n if np.issubdtype(dtype, np.floating):\n return np.finfo(dtype).precision\n else:\n return np.inf\n low_precision = _get_precision(self.low.dtype)\n high_precision = _get_precision(self.high.dtype)\n dtype_precision = _get_precision(self.dtype)\n if min(low_precision, high_precision) > dtype_precision:\n logger.warn(\"Box bound precision lowered by casting to {}\".format(self.dtype))\n self.low = self.low.astype(self.dtype)\n self.high = self.high.astype(self.dtype)\n\n # Boolean arrays which indicate the interval type for each coordinate\n self.bounded_below = -np.inf < self.low\n self.bounded_above = np.inf > self.high\n\n super(Box, self).__init__(self.shape, self.dtype)\n\n def is_bounded(self, manner=\"both\"):\n below = np.all(self.bounded_below)\n above = np.all(self.bounded_above)\n if manner == \"both\":\n return below and above\n elif manner == \"below\":\n return below\n elif manner == \"above\":\n return above\n else:\n raise ValueError(\"manner is not in {'below', 'above', 'both'}\")\n\n def sample(self):\n \"\"\"\n Generates a single random sample inside of the Box. 
\n\n In creating a sample of the box, each coordinate is sampled according to\n the form of the interval:\n \n * [a, b] : uniform distribution \n * [a, oo) : shifted exponential distribution\n * (-oo, b] : shifted negative exponential distribution\n * (-oo, oo) : normal distribution\n \"\"\"\n high = self.high if self.dtype.kind == 'f' \\\n else self.high.astype('int64') + 1\n sample = np.empty(self.shape)\n\n # Masking arrays which classify the coordinates according to interval\n # type\n unbounded = ~self.bounded_below & ~self.bounded_above\n upp_bounded = ~self.bounded_below & self.bounded_above\n low_bounded = self.bounded_below & ~self.bounded_above\n bounded = self.bounded_below & self.bounded_above\n \n\n # Vectorized sampling by interval type\n sample[unbounded] = self.np_random.normal(\n size=unbounded[unbounded].shape)\n\n sample[low_bounded] = self.np_random.exponential(\n size=low_bounded[low_bounded].shape) + self.low[low_bounded]\n \n sample[upp_bounded] = -self.np_random.exponential(\n size=upp_bounded[upp_bounded].shape) + self.high[upp_bounded]\n \n sample[bounded] = self.np_random.uniform(low=self.low[bounded], \n high=high[bounded],\n size=bounded[bounded].shape)\n if self.dtype.kind == 'i':\n sample = np.floor(sample)\n\n return sample.astype(self.dtype)\n \n def contains(self, x):\n if isinstance(x, list):\n x = np.array(x) # Promote list to array for contains check\n return x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)\n\n def to_jsonable(self, sample_n):\n return np.array(sample_n).tolist()\n\n def from_jsonable(self, sample_n):\n return [np.asarray(sample) for sample in sample_n]\n\n def __repr__(self):\n return \"Box\" + str(self.shape)\n\n def __eq__(self, other):\n return isinstance(other, Box) and (self.shape == other.shape) and np.allclose(self.low, other.low) and np.allclose(self.high, other.high)\n", "path": "gym/spaces/box.py"}], "after_files": [{"content": "import numpy as np\n\nfrom .space import Space\nfrom gym import logger\n\n\nclass Box(Space):\n \"\"\"\n A (possibly unbounded) box in R^n. Specifically, a Box represents the\n Cartesian product of n closed intervals. Each interval has the form of one\n of [a, b], (-oo, b], [a, oo), or (-oo, oo).\n\n There are two common use cases:\n\n * Identical bound for each dimension::\n >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)\n Box(3, 4)\n\n * Independent bound for each dimension::\n >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)\n Box(2,)\n\n \"\"\"\n def __init__(self, low, high, shape=None, dtype=np.float32):\n assert dtype is not None, 'dtype must be explicitly provided. '\n self.dtype = np.dtype(dtype)\n\n if shape is None:\n assert low.shape == high.shape, 'box dimension mismatch. '\n self.shape = low.shape\n self.low = low\n self.high = high\n else:\n assert np.isscalar(low) and np.isscalar(high), 'box requires scalar bounds. 
'\n self.shape = tuple(shape)\n self.low = np.full(self.shape, low, dtype=dtype)\n self.high = np.full(self.shape, high, dtype=dtype)\n\n def _get_precision(dtype):\n if np.issubdtype(dtype, np.floating):\n return np.finfo(dtype).precision\n else:\n return np.inf\n low_precision = _get_precision(self.low.dtype)\n high_precision = _get_precision(self.high.dtype)\n dtype_precision = _get_precision(self.dtype)\n if min(low_precision, high_precision) > dtype_precision:\n logger.warn(\"Box bound precision lowered by casting to {}\".format(self.dtype))\n self.low = self.low.astype(self.dtype)\n self.high = self.high.astype(self.dtype)\n\n # Boolean arrays which indicate the interval type for each coordinate\n self.bounded_below = -np.inf < self.low\n self.bounded_above = np.inf > self.high\n\n super(Box, self).__init__(self.shape, self.dtype)\n\n def is_bounded(self, manner=\"both\"):\n below = np.all(self.bounded_below)\n above = np.all(self.bounded_above)\n if manner == \"both\":\n return below and above\n elif manner == \"below\":\n return below\n elif manner == \"above\":\n return above\n else:\n raise ValueError(\"manner is not in {'below', 'above', 'both'}\")\n\n def sample(self):\n \"\"\"\n Generates a single random sample inside of the Box.\n\n In creating a sample of the box, each coordinate is sampled according to\n the form of the interval:\n\n * [a, b] : uniform distribution\n * [a, oo) : shifted exponential distribution\n * (-oo, b] : shifted negative exponential distribution\n * (-oo, oo) : normal distribution\n \"\"\"\n high = self.high if self.dtype.kind == 'f' \\\n else self.high.astype('int64') + 1\n sample = np.empty(self.shape)\n\n # Masking arrays which classify the coordinates according to interval\n # type\n unbounded = ~self.bounded_below & ~self.bounded_above\n upp_bounded = ~self.bounded_below & self.bounded_above\n low_bounded = self.bounded_below & ~self.bounded_above\n bounded = self.bounded_below & self.bounded_above\n\n\n # Vectorized sampling by interval type\n sample[unbounded] = self.np_random.normal(\n size=unbounded[unbounded].shape)\n\n sample[low_bounded] = self.np_random.exponential(\n size=low_bounded[low_bounded].shape) + self.low[low_bounded]\n\n sample[upp_bounded] = -self.np_random.exponential(\n size=upp_bounded[upp_bounded].shape) + self.high[upp_bounded]\n\n sample[bounded] = self.np_random.uniform(low=self.low[bounded],\n high=high[bounded],\n size=bounded[bounded].shape)\n if self.dtype.kind == 'i':\n sample = np.floor(sample)\n\n return sample.astype(self.dtype)\n\n def contains(self, x):\n if isinstance(x, list):\n x = np.array(x) # Promote list to array for contains check\n return x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)\n\n def to_jsonable(self, sample_n):\n return np.array(sample_n).tolist()\n\n def from_jsonable(self, sample_n):\n return [np.asarray(sample) for sample in sample_n]\n\n def __repr__(self):\n return \"Box\" + str(self.shape)\n\n def __eq__(self, other):\n return isinstance(other, Box) and (self.shape == other.shape) and np.allclose(self.low, other.low) and np.allclose(self.high, other.high)\n", "path": "gym/spaces/box.py"}]}
| 1,765 | 769 |
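The root cause is visible in the patch above: `np.full` defaults to float64 when given a Python float, so the scalar-bound path created higher-precision bounds than the target dtype and tripped the warning on every cast. A small demonstration in plain NumPy, outside gym:

```python
import numpy as np

shape, low = (3, 4), -1.0

# Before the patch: np.full infers float64 from the Python float ...
old_low = np.full(shape, low)
print(old_low.dtype)  # float64 -> later down-cast to float32, triggering the warning

# After the patch: the bounds are created directly in the requested dtype.
new_low = np.full(shape, low, dtype=np.float32)
print(new_low.dtype)  # float32 -> no precision is lowered, no warning
```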
gh_patches_debug_23301 | rasdani/github-patches | git_diff | chainer__chainer-2266 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
matplotlib.use('Agg') conflicts with user code
In this line in `chainer/chainer/training/extensions/plot_report.py` the `matplotlib` backend is changed [Source](https://github.com/pfnet/chainer/blob/master/chainer/training/extensions/plot_report.py#L16):
matplotlib.use('Agg')
Unfortunately, this can interfere with users code. For example, when the user sets the backend himself anywhere, it is not known, whether his setting or the Chainer settings wins (is imported first).
The `plot_report` gets imported, when `extensions` is imported. For now, I just removed from the corresponding `__init__` file locally, which is definitely not a clean solution.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/training/extensions/plot_report.py`
Content:
```
1 import json
2 from os import path
3 import warnings
4
5 import numpy
6 import six
7
8 from chainer import reporter
9 import chainer.serializer as serializer_module
10 from chainer.training import extension
11 import chainer.training.trigger as trigger_module
12
13 try:
14 import matplotlib
15
16 matplotlib.use('Agg')
17 from matplotlib import pyplot as plot
18
19 _available = True
20
21 except ImportError:
22 _available = False
23
24
25 def _check_available():
26 if not _available:
27 warnings.warn('matplotlib is not installed on your environment, '
28 'so nothing will be plotted at this time. '
29 'Please install matplotlib to plot figures.\n\n'
30 ' $ pip install matplotlib\n')
31
32
33 class PlotReport(extension.Extension):
34
35 """Trainer extension to output plots.
36
37 This extension accumulates the observations of the trainer to
38 :class:`~chainer.DictSummary` at a regular interval specified by a supplied
39 trigger, and plot a graph with using them.
40
41 There are two triggers to handle this extension. One is the trigger to
42 invoke this extension, which is used to handle the timing of accumulating
43 the results. It is set to ``1, 'iteration'`` by default. The other is the
44 trigger to determine when to emit the result. When this trigger returns
45 True, this extension appends the summary of accumulated values to the list
46 of past summaries, and writes the list to the log file. Then, this
47 extension makes a new fresh summary object which is used until the next
48 time that the trigger fires.
49
50 It also adds ``'epoch'`` and ``'iteration'`` entries to each result
51 dictionary, which are the epoch and iteration counts at the output.
52
53 Args:
54 y_keys (iterable of strs): Keys of values regarded as y. If this is
55 None, nothing is output to the graph.
56 x_key (str): Keys of values regarded as x. The default value is
57 'iteration'.
58 trigger: Trigger that decides when to aggregate the result and output
59 the values. This is distinct from the trigger of this extension
60 itself. If it is a tuple in the form ``<int>, 'epoch'`` or ``<int>,
61 'iteration'``, it is passed to :class:`IntervalTrigger`.
62 postprocess: Callback to postprocess the result dictionaries. Figure
63 object, Axes object, and all plot data are passed to this callback
64 in this order. This callback can modify the figure.
65 file_name (str): Name of the figure file under the output directory.
66 It can be a format string.
67 marker (str): The marker used to plot the graph. Default is ``'x'``. If
68 ``None`` is given, it draws with no markers.
69 grid (bool): Set the axis grid on if True. Default is True.
70
71 """
72
73 def __init__(self, y_keys, x_key='iteration', trigger=(1, 'epoch'),
74 postprocess=None, file_name='plot.png', marker='x',
75 grid=True):
76
77 _check_available()
78
79 if not _available:
80 return
81
82 self._x_key = x_key
83 if isinstance(y_keys, str):
84 y_keys = (y_keys,)
85
86 self._y_keys = y_keys
87 self._trigger = trigger_module.get_trigger(trigger)
88 self._file_name = file_name
89 self._marker = marker
90 self._grid = grid
91 self._postprocess = postprocess
92 self._init_summary()
93 self._data = {k: [] for k in y_keys}
94
95 def __call__(self, trainer):
96 if not _available:
97 return
98
99 keys = self._y_keys
100 observation = trainer.observation
101 summary = self._summary
102
103 if keys is None:
104 summary.add(observation)
105 else:
106 summary.add({k: observation[k] for k in keys if k in observation})
107
108 if self._trigger(trainer):
109 stats = self._summary.compute_mean()
110 stats_cpu = {}
111 for name, value in six.iteritems(stats):
112 stats_cpu[name] = float(value) # copy to CPU
113
114 updater = trainer.updater
115 stats_cpu['epoch'] = updater.epoch
116 stats_cpu['iteration'] = updater.iteration
117 x = stats_cpu[self._x_key]
118 data = self._data
119
120 for k in keys:
121 if k in stats_cpu:
122 data[k].append((x, stats_cpu[k]))
123
124 f = plot.figure()
125 a = f.add_subplot(111)
126 a.set_xlabel(self._x_key)
127 if self._grid:
128 a.grid()
129
130 for k in keys:
131 xy = data[k]
132 if len(xy) == 0:
133 continue
134
135 xy = numpy.array(xy)
136 a.plot(xy[:, 0], xy[:, 1], marker=self._marker, label=k)
137
138 if a.has_data():
139 if self._postprocess is not None:
140 self._postprocess(f, a, summary)
141 l = a.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
142 f.savefig(path.join(trainer.out, self._file_name),
143 bbox_extra_artists=(l,), bbox_inches='tight')
144
145 plot.close()
146 self._init_summary()
147
148 def serialize(self, serializer):
149 if isinstance(serializer, serializer_module.Serializer):
150 serializer('_plot_{}'.format(self._file_name),
151 json.dumps(self._data))
152
153 else:
154 self._data = json.loads(
155 serializer('_plot_{}'.format(self._file_name), ''))
156
157 def _init_summary(self):
158 self._summary = reporter.DictSummary()
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainer/training/extensions/plot_report.py b/chainer/training/extensions/plot_report.py
--- a/chainer/training/extensions/plot_report.py
+++ b/chainer/training/extensions/plot_report.py
@@ -11,9 +11,6 @@
import chainer.training.trigger as trigger_module
try:
- import matplotlib
-
- matplotlib.use('Agg')
from matplotlib import pyplot as plot
_available = True
@@ -50,6 +47,25 @@
It also adds ``'epoch'`` and ``'iteration'`` entries to each result
dictionary, which are the epoch and iteration counts at the output.
+ .. warning::
+
+ If your environment needs to specify a backend of matplotlib
+ explicitly, please call ``matplotlib.use`` before importing Chainer.
+ For example:
+
+ .. code-block:: python
+
+ import matplotlib
+ matplotlib.use('Agg')
+
+ import chainer
+
+ Then, once ``chainer.training.extensions`` is imported,
+ ``matplotlib.use`` will have no effect.
+
+ For the details, please see here:
+ http://matplotlib.org/faq/usage_faq.html#what-is-a-backend
+
Args:
y_keys (iterable of strs): Keys of values regarded as y. If this is
None, nothing is output to the graph.
|
{"golden_diff": "diff --git a/chainer/training/extensions/plot_report.py b/chainer/training/extensions/plot_report.py\n--- a/chainer/training/extensions/plot_report.py\n+++ b/chainer/training/extensions/plot_report.py\n@@ -11,9 +11,6 @@\n import chainer.training.trigger as trigger_module\n \n try:\n- import matplotlib\n-\n- matplotlib.use('Agg')\n from matplotlib import pyplot as plot\n \n _available = True\n@@ -50,6 +47,25 @@\n It also adds ``'epoch'`` and ``'iteration'`` entries to each result\n dictionary, which are the epoch and iteration counts at the output.\n \n+ .. warning::\n+\n+ If your environment needs to specify a backend of matplotlib\n+ explicitly, please call ``matplotlib.use`` before importing Chainer.\n+ For example:\n+\n+ .. code-block:: python\n+\n+ import matplotlib\n+ matplotlib.use('Agg')\n+\n+ import chainer\n+\n+ Then, once ``chainer.training.extensions`` is imported,\n+ ``matplotlib.use`` will have no effect.\n+\n+ For the details, please see here:\n+ http://matplotlib.org/faq/usage_faq.html#what-is-a-backend\n+\n Args:\n y_keys (iterable of strs): Keys of values regarded as y. If this is\n None, nothing is output to the graph.\n", "issue": "matplotlib.use('Agg') conflicts with user code\nIn this line in `chainer/chainer/training/extensions/plot_report.py` the `matplotlib` backend is changed [Source](https://github.com/pfnet/chainer/blob/master/chainer/training/extensions/plot_report.py#L16):\r\n\r\n matplotlib.use('Agg')\r\n\r\nUnfortunately, this can interfere with users code. For example, when the user sets the backend himself anywhere, it is not known, whether his setting or the Chainer settings wins (is imported first).\r\n\r\nThe `plot_report` gets imported, when `extensions` is imported. For now, I just removed from the corresponding `__init__` file locally, which is definitely not a clean solution.\n", "before_files": [{"content": "import json\nfrom os import path\nimport warnings\n\nimport numpy\nimport six\n\nfrom chainer import reporter\nimport chainer.serializer as serializer_module\nfrom chainer.training import extension\nimport chainer.training.trigger as trigger_module\n\ntry:\n import matplotlib\n\n matplotlib.use('Agg')\n from matplotlib import pyplot as plot\n\n _available = True\n\nexcept ImportError:\n _available = False\n\n\ndef _check_available():\n if not _available:\n warnings.warn('matplotlib is not installed on your environment, '\n 'so nothing will be plotted at this time. '\n 'Please install matplotlib to plot figures.\\n\\n'\n ' $ pip install matplotlib\\n')\n\n\nclass PlotReport(extension.Extension):\n\n \"\"\"Trainer extension to output plots.\n\n This extension accumulates the observations of the trainer to\n :class:`~chainer.DictSummary` at a regular interval specified by a supplied\n trigger, and plot a graph with using them.\n\n There are two triggers to handle this extension. One is the trigger to\n invoke this extension, which is used to handle the timing of accumulating\n the results. It is set to ``1, 'iteration'`` by default. The other is the\n trigger to determine when to emit the result. When this trigger returns\n True, this extension appends the summary of accumulated values to the list\n of past summaries, and writes the list to the log file. 
Then, this\n extension makes a new fresh summary object which is used until the next\n time that the trigger fires.\n\n It also adds ``'epoch'`` and ``'iteration'`` entries to each result\n dictionary, which are the epoch and iteration counts at the output.\n\n Args:\n y_keys (iterable of strs): Keys of values regarded as y. If this is\n None, nothing is output to the graph.\n x_key (str): Keys of values regarded as x. The default value is\n 'iteration'.\n trigger: Trigger that decides when to aggregate the result and output\n the values. This is distinct from the trigger of this extension\n itself. If it is a tuple in the form ``<int>, 'epoch'`` or ``<int>,\n 'iteration'``, it is passed to :class:`IntervalTrigger`.\n postprocess: Callback to postprocess the result dictionaries. Figure\n object, Axes object, and all plot data are passed to this callback\n in this order. This callback can modify the figure.\n file_name (str): Name of the figure file under the output directory.\n It can be a format string.\n marker (str): The marker used to plot the graph. Default is ``'x'``. If\n ``None`` is given, it draws with no markers.\n grid (bool): Set the axis grid on if True. Default is True.\n\n \"\"\"\n\n def __init__(self, y_keys, x_key='iteration', trigger=(1, 'epoch'),\n postprocess=None, file_name='plot.png', marker='x',\n grid=True):\n\n _check_available()\n\n if not _available:\n return\n\n self._x_key = x_key\n if isinstance(y_keys, str):\n y_keys = (y_keys,)\n\n self._y_keys = y_keys\n self._trigger = trigger_module.get_trigger(trigger)\n self._file_name = file_name\n self._marker = marker\n self._grid = grid\n self._postprocess = postprocess\n self._init_summary()\n self._data = {k: [] for k in y_keys}\n\n def __call__(self, trainer):\n if not _available:\n return\n\n keys = self._y_keys\n observation = trainer.observation\n summary = self._summary\n\n if keys is None:\n summary.add(observation)\n else:\n summary.add({k: observation[k] for k in keys if k in observation})\n\n if self._trigger(trainer):\n stats = self._summary.compute_mean()\n stats_cpu = {}\n for name, value in six.iteritems(stats):\n stats_cpu[name] = float(value) # copy to CPU\n\n updater = trainer.updater\n stats_cpu['epoch'] = updater.epoch\n stats_cpu['iteration'] = updater.iteration\n x = stats_cpu[self._x_key]\n data = self._data\n\n for k in keys:\n if k in stats_cpu:\n data[k].append((x, stats_cpu[k]))\n\n f = plot.figure()\n a = f.add_subplot(111)\n a.set_xlabel(self._x_key)\n if self._grid:\n a.grid()\n\n for k in keys:\n xy = data[k]\n if len(xy) == 0:\n continue\n\n xy = numpy.array(xy)\n a.plot(xy[:, 0], xy[:, 1], marker=self._marker, label=k)\n\n if a.has_data():\n if self._postprocess is not None:\n self._postprocess(f, a, summary)\n l = a.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n f.savefig(path.join(trainer.out, self._file_name),\n bbox_extra_artists=(l,), bbox_inches='tight')\n\n plot.close()\n self._init_summary()\n\n def serialize(self, serializer):\n if isinstance(serializer, serializer_module.Serializer):\n serializer('_plot_{}'.format(self._file_name),\n json.dumps(self._data))\n\n else:\n self._data = json.loads(\n serializer('_plot_{}'.format(self._file_name), ''))\n\n def _init_summary(self):\n self._summary = reporter.DictSummary()\n", "path": "chainer/training/extensions/plot_report.py"}], "after_files": [{"content": "import json\nfrom os import path\nimport warnings\n\nimport numpy\nimport six\n\nfrom chainer import reporter\nimport chainer.serializer as 
serializer_module\nfrom chainer.training import extension\nimport chainer.training.trigger as trigger_module\n\ntry:\n from matplotlib import pyplot as plot\n\n _available = True\n\nexcept ImportError:\n _available = False\n\n\ndef _check_available():\n if not _available:\n warnings.warn('matplotlib is not installed on your environment, '\n 'so nothing will be plotted at this time. '\n 'Please install matplotlib to plot figures.\\n\\n'\n ' $ pip install matplotlib\\n')\n\n\nclass PlotReport(extension.Extension):\n\n \"\"\"Trainer extension to output plots.\n\n This extension accumulates the observations of the trainer to\n :class:`~chainer.DictSummary` at a regular interval specified by a supplied\n trigger, and plot a graph with using them.\n\n There are two triggers to handle this extension. One is the trigger to\n invoke this extension, which is used to handle the timing of accumulating\n the results. It is set to ``1, 'iteration'`` by default. The other is the\n trigger to determine when to emit the result. When this trigger returns\n True, this extension appends the summary of accumulated values to the list\n of past summaries, and writes the list to the log file. Then, this\n extension makes a new fresh summary object which is used until the next\n time that the trigger fires.\n\n It also adds ``'epoch'`` and ``'iteration'`` entries to each result\n dictionary, which are the epoch and iteration counts at the output.\n\n .. warning::\n\n If your environment needs to specify a backend of matplotlib\n explicitly, please call ``matplotlib.use`` before importing Chainer.\n For example:\n\n .. code-block:: python\n\n import matplotlib\n matplotlib.use('Agg')\n\n import chainer\n\n Then, once ``chainer.training.extensions`` is imported,\n ``matplotlib.use`` will have no effect.\n\n For the details, please see here:\n http://matplotlib.org/faq/usage_faq.html#what-is-a-backend\n\n Args:\n y_keys (iterable of strs): Keys of values regarded as y. If this is\n None, nothing is output to the graph.\n x_key (str): Keys of values regarded as x. The default value is\n 'iteration'.\n trigger: Trigger that decides when to aggregate the result and output\n the values. This is distinct from the trigger of this extension\n itself. If it is a tuple in the form ``<int>, 'epoch'`` or ``<int>,\n 'iteration'``, it is passed to :class:`IntervalTrigger`.\n postprocess: Callback to postprocess the result dictionaries. Figure\n object, Axes object, and all plot data are passed to this callback\n in this order. This callback can modify the figure.\n file_name (str): Name of the figure file under the output directory.\n It can be a format string.\n marker (str): The marker used to plot the graph. Default is ``'x'``. If\n ``None`` is given, it draws with no markers.\n grid (bool): Set the axis grid on if True. 
Default is True.\n\n \"\"\"\n\n def __init__(self, y_keys, x_key='iteration', trigger=(1, 'epoch'),\n postprocess=None, file_name='plot.png', marker='x',\n grid=True):\n\n _check_available()\n\n if not _available:\n return\n\n self._x_key = x_key\n if isinstance(y_keys, str):\n y_keys = (y_keys,)\n\n self._y_keys = y_keys\n self._trigger = trigger_module.get_trigger(trigger)\n self._file_name = file_name\n self._marker = marker\n self._grid = grid\n self._postprocess = postprocess\n self._init_summary()\n self._data = {k: [] for k in y_keys}\n\n def __call__(self, trainer):\n if not _available:\n return\n\n keys = self._y_keys\n observation = trainer.observation\n summary = self._summary\n\n if keys is None:\n summary.add(observation)\n else:\n summary.add({k: observation[k] for k in keys if k in observation})\n\n if self._trigger(trainer):\n stats = self._summary.compute_mean()\n stats_cpu = {}\n for name, value in six.iteritems(stats):\n stats_cpu[name] = float(value) # copy to CPU\n\n updater = trainer.updater\n stats_cpu['epoch'] = updater.epoch\n stats_cpu['iteration'] = updater.iteration\n x = stats_cpu[self._x_key]\n data = self._data\n\n for k in keys:\n if k in stats_cpu:\n data[k].append((x, stats_cpu[k]))\n\n f = plot.figure()\n a = f.add_subplot(111)\n a.set_xlabel(self._x_key)\n if self._grid:\n a.grid()\n\n for k in keys:\n xy = data[k]\n if len(xy) == 0:\n continue\n\n xy = numpy.array(xy)\n a.plot(xy[:, 0], xy[:, 1], marker=self._marker, label=k)\n\n if a.has_data():\n if self._postprocess is not None:\n self._postprocess(f, a, summary)\n l = a.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n f.savefig(path.join(trainer.out, self._file_name),\n bbox_extra_artists=(l,), bbox_inches='tight')\n\n plot.close()\n self._init_summary()\n\n def serialize(self, serializer):\n if isinstance(serializer, serializer_module.Serializer):\n serializer('_plot_{}'.format(self._file_name),\n json.dumps(self._data))\n\n else:\n self._data = json.loads(\n serializer('_plot_{}'.format(self._file_name), ''))\n\n def _init_summary(self):\n self._summary = reporter.DictSummary()\n", "path": "chainer/training/extensions/plot_report.py"}]}
num_tokens: 2,014 | num_tokens_diff: 306

problem_id: gh_patches_debug_30451 | source: rasdani/github-patches | task_type: git_diff | in_source_id: bids-standard__pybids-447
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update prep_zenodo.py to only count commits in grabbit up to 0.2.6
With #369, we're dropping the grabbit dependency, so changes there will no longer contribute to pybids.
--- END ISSUE ---
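For context, a small sketch of what "counting commits up to 0.2.6" amounts to. The clone path is an assumption for the example; the `git shortlog -ns <ref>` invocation is the same one the script already uses, just pointed at a fixed tag instead of the latest one.

```python
# Sketch: count per-author commits in grabbit only up to the 0.2.6 tag.
from subprocess import run, PIPE

repo_dir = '/tmp/grabbit'   # assumed clone location for this example
ref = '0.2.6'               # fixed release tag instead of "latest tag"

shortlog = run(['git', '-C', repo_dir, 'shortlog', '-ns', ref], stdout=PIPE)
for line in shortlog.stdout.decode().splitlines():
    if line:
        commits, author = line.split('\t', 1)
        print(author, commits.strip())
```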
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/prep_zenodo.py`
Content:
```
1 #!/usr/bin/env python3
2 import git
3 import json
4 from subprocess import run, PIPE, CalledProcessError
5 from pathlib import Path
6 from tempfile import TemporaryDirectory
7
8
9 def decommify(name):
10 return ' '.join(name.split(', ')[::-1])
11
12
13 # List of repositories whose commits should be counted as contributions
14 codependents = ['https://github.com/grabbles/grabbit.git']
15
16 # Last shablona commit
17 origin_commit = 'd72caaf5933907ed699d57faddaec7bfc836ce6f'
18
19 git_root = Path(git.Repo('.', search_parent_directories=True).working_dir)
20 zenodo_file = git_root / '.zenodo.json'
21
22 zenodo = json.loads(zenodo_file.read_text()) if zenodo_file.exists() else {}
23
24 orig_creators = zenodo.get('creators', [])
25 creator_map = {decommify(creator['name']): creator
26 for creator in orig_creators}
27
28 shortlog = run(['git', 'shortlog', '-ns', f'{origin_commit}..'], stdout=PIPE)
29 counts = [line.split('\t', 1)[::-1]
30 for line in shortlog.stdout.decode().split('\n') if line]
31
32 # Get additional commit counts from dependencies
33 with TemporaryDirectory() as tmpdir:
34 tmppath = Path(tmpdir)
35 for repo in codependents:
36 repo_dir = str(tmppath / repo.rsplit('/', 1)[1].split('.', 1)[0])
37 try:
38 clone = run(['git', 'clone', repo, repo_dir], check=True)
39 except CalledProcessError as err:
40 raise RuntimeError("Could not clone {}".format(repo)) from err
41 tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)
42 latest_tag = tag.stdout.decode().strip().rsplit('\n', 1)[1]
43 dep_shortlog = run(
44 ['git', '-C', repo_dir, 'shortlog', '-ns', latest_tag],
45 stdout=PIPE)
46 counts.extend(line.split('\t', 1)[::-1]
47 for line in dep_shortlog.stdout.decode().split('\n')
48 if line)
49
50 commit_counts = {}
51 for committer, commits in counts:
52 commit_counts[committer] = commit_counts.get(committer, 0) + int(commits)
53
54 # Stable sort:
55 # Number of commits in reverse order
56 # Ties broken by alphabetical order of first name
57 committers = [committer
58 for committer, _ in sorted(commit_counts.items(),
59 key=lambda x: (-x[1], x[0]))]
60
61 # Tal to the top
62 first_author = 'Tal Yarkoni'
63 if committers[0] != first_author:
64 committers.remove(first_author)
65 committers.insert(0, first_author)
66
67 creators = [
68 creator_map.get(committer, {'name': committer})
69 for committer in committers
70 ]
71
72 zenodo['creators'] = creators
73 zenodo_file.write_text(json.dumps(zenodo, indent=2, sort_keys=True) + '\n')
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tools/prep_zenodo.py b/tools/prep_zenodo.py
--- a/tools/prep_zenodo.py
+++ b/tools/prep_zenodo.py
@@ -11,7 +11,7 @@
# List of repositories whose commits should be counted as contributions
-codependents = ['https://github.com/grabbles/grabbit.git']
+codependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]
# Last shablona commit
origin_commit = 'd72caaf5933907ed699d57faddaec7bfc836ce6f'
@@ -33,15 +33,23 @@
with TemporaryDirectory() as tmpdir:
tmppath = Path(tmpdir)
for repo in codependents:
+ try:
+ repo, ref = repo
+ except (TypeError, ValueError):
+ ref = None
repo_dir = str(tmppath / repo.rsplit('/', 1)[1].split('.', 1)[0])
try:
- clone = run(['git', 'clone', repo, repo_dir], check=True)
+ clone = run(['git', 'clone', '-q', repo, repo_dir], check=True)
except CalledProcessError as err:
raise RuntimeError("Could not clone {}".format(repo)) from err
- tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)
- latest_tag = tag.stdout.decode().strip().rsplit('\n', 1)[1]
+
+ if ref is None:
+ tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)
+ # latest tag
+ ref = tag.stdout.decode().strip().rsplit('\n', 1)[1]
+
dep_shortlog = run(
- ['git', '-C', repo_dir, 'shortlog', '-ns', latest_tag],
+ ['git', '-C', repo_dir, 'shortlog', '-ns', ref],
stdout=PIPE)
counts.extend(line.split('\t', 1)[::-1]
for line in dep_shortlog.stdout.decode().split('\n')
|
{"golden_diff": "diff --git a/tools/prep_zenodo.py b/tools/prep_zenodo.py\n--- a/tools/prep_zenodo.py\n+++ b/tools/prep_zenodo.py\n@@ -11,7 +11,7 @@\n \n \n # List of repositories whose commits should be counted as contributions\n-codependents = ['https://github.com/grabbles/grabbit.git']\n+codependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]\n \n # Last shablona commit\n origin_commit = 'd72caaf5933907ed699d57faddaec7bfc836ce6f'\n@@ -33,15 +33,23 @@\n with TemporaryDirectory() as tmpdir:\n tmppath = Path(tmpdir)\n for repo in codependents:\n+ try:\n+ repo, ref = repo\n+ except (TypeError, ValueError):\n+ ref = None\n repo_dir = str(tmppath / repo.rsplit('/', 1)[1].split('.', 1)[0])\n try:\n- clone = run(['git', 'clone', repo, repo_dir], check=True)\n+ clone = run(['git', 'clone', '-q', repo, repo_dir], check=True)\n except CalledProcessError as err:\n raise RuntimeError(\"Could not clone {}\".format(repo)) from err\n- tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)\n- latest_tag = tag.stdout.decode().strip().rsplit('\\n', 1)[1]\n+\n+ if ref is None:\n+ tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)\n+ # latest tag\n+ ref = tag.stdout.decode().strip().rsplit('\\n', 1)[1]\n+\n dep_shortlog = run(\n- ['git', '-C', repo_dir, 'shortlog', '-ns', latest_tag],\n+ ['git', '-C', repo_dir, 'shortlog', '-ns', ref],\n stdout=PIPE)\n counts.extend(line.split('\\t', 1)[::-1]\n for line in dep_shortlog.stdout.decode().split('\\n')\n", "issue": "Update prep_zenodo.py to only count commits in grabbit up to 0.2.6\nWith #369, we're dropping the grabbit dependency, so changes there will no longer contribute to pybids.\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport git\nimport json\nfrom subprocess import run, PIPE, CalledProcessError\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\n\n\ndef decommify(name):\n return ' '.join(name.split(', ')[::-1])\n\n\n# List of repositories whose commits should be counted as contributions\ncodependents = ['https://github.com/grabbles/grabbit.git']\n\n# Last shablona commit\norigin_commit = 'd72caaf5933907ed699d57faddaec7bfc836ce6f'\n\ngit_root = Path(git.Repo('.', search_parent_directories=True).working_dir)\nzenodo_file = git_root / '.zenodo.json'\n\nzenodo = json.loads(zenodo_file.read_text()) if zenodo_file.exists() else {}\n\norig_creators = zenodo.get('creators', [])\ncreator_map = {decommify(creator['name']): creator\n for creator in orig_creators}\n\nshortlog = run(['git', 'shortlog', '-ns', f'{origin_commit}..'], stdout=PIPE)\ncounts = [line.split('\\t', 1)[::-1]\n for line in shortlog.stdout.decode().split('\\n') if line]\n\n# Get additional commit counts from dependencies\nwith TemporaryDirectory() as tmpdir:\n tmppath = Path(tmpdir)\n for repo in codependents:\n repo_dir = str(tmppath / repo.rsplit('/', 1)[1].split('.', 1)[0])\n try:\n clone = run(['git', 'clone', repo, repo_dir], check=True)\n except CalledProcessError as err:\n raise RuntimeError(\"Could not clone {}\".format(repo)) from err\n tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)\n latest_tag = tag.stdout.decode().strip().rsplit('\\n', 1)[1]\n dep_shortlog = run(\n ['git', '-C', repo_dir, 'shortlog', '-ns', latest_tag],\n stdout=PIPE)\n counts.extend(line.split('\\t', 1)[::-1]\n for line in dep_shortlog.stdout.decode().split('\\n')\n if line)\n\ncommit_counts = {}\nfor committer, commits in counts:\n commit_counts[committer] = commit_counts.get(committer, 0) + int(commits)\n\n# Stable sort:\n# Number of commits in reverse 
order\n# Ties broken by alphabetical order of first name\ncommitters = [committer\n for committer, _ in sorted(commit_counts.items(),\n key=lambda x: (-x[1], x[0]))]\n\n# Tal to the top\nfirst_author = 'Tal Yarkoni'\nif committers[0] != first_author:\n committers.remove(first_author)\n committers.insert(0, first_author)\n\ncreators = [\n creator_map.get(committer, {'name': committer})\n for committer in committers\n ]\n\nzenodo['creators'] = creators\nzenodo_file.write_text(json.dumps(zenodo, indent=2, sort_keys=True) + '\\n')\n", "path": "tools/prep_zenodo.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nimport git\nimport json\nfrom subprocess import run, PIPE, CalledProcessError\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\n\n\ndef decommify(name):\n return ' '.join(name.split(', ')[::-1])\n\n\n# List of repositories whose commits should be counted as contributions\ncodependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]\n\n# Last shablona commit\norigin_commit = 'd72caaf5933907ed699d57faddaec7bfc836ce6f'\n\ngit_root = Path(git.Repo('.', search_parent_directories=True).working_dir)\nzenodo_file = git_root / '.zenodo.json'\n\nzenodo = json.loads(zenodo_file.read_text()) if zenodo_file.exists() else {}\n\norig_creators = zenodo.get('creators', [])\ncreator_map = {decommify(creator['name']): creator\n for creator in orig_creators}\n\nshortlog = run(['git', 'shortlog', '-ns', f'{origin_commit}..'], stdout=PIPE)\ncounts = [line.split('\\t', 1)[::-1]\n for line in shortlog.stdout.decode().split('\\n') if line]\n\n# Get additional commit counts from dependencies\nwith TemporaryDirectory() as tmpdir:\n tmppath = Path(tmpdir)\n for repo in codependents:\n try:\n repo, ref = repo\n except (TypeError, ValueError):\n ref = None\n repo_dir = str(tmppath / repo.rsplit('/', 1)[1].split('.', 1)[0])\n try:\n clone = run(['git', 'clone', '-q', repo, repo_dir], check=True)\n except CalledProcessError as err:\n raise RuntimeError(\"Could not clone {}\".format(repo)) from err\n\n if ref is None:\n tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)\n # latest tag\n ref = tag.stdout.decode().strip().rsplit('\\n', 1)[1]\n\n dep_shortlog = run(\n ['git', '-C', repo_dir, 'shortlog', '-ns', ref],\n stdout=PIPE)\n counts.extend(line.split('\\t', 1)[::-1]\n for line in dep_shortlog.stdout.decode().split('\\n')\n if line)\n\ncommit_counts = {}\nfor committer, commits in counts:\n commit_counts[committer] = commit_counts.get(committer, 0) + int(commits)\n\n# Stable sort:\n# Number of commits in reverse order\n# Ties broken by alphabetical order of first name\ncommitters = [committer\n for committer, _ in sorted(commit_counts.items(),\n key=lambda x: (-x[1], x[0]))]\n\n# Tal to the top\nfirst_author = 'Tal Yarkoni'\nif committers[0] != first_author:\n committers.remove(first_author)\n committers.insert(0, first_author)\n\ncreators = [\n creator_map.get(committer, {'name': committer})\n for committer in committers\n ]\n\nzenodo['creators'] = creators\nzenodo_file.write_text(json.dumps(zenodo, indent=2, sort_keys=True) + '\\n')\n", "path": "tools/prep_zenodo.py"}]}
num_tokens: 1,124 | num_tokens_diff: 484

problem_id: gh_patches_debug_33722 | source: rasdani/github-patches | task_type: git_diff | in_source_id: DataDog__dd-trace-py-1225
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
redis-py-cluster new API 2.0.0
### Which version of dd-trace-py are you using?
lastest 0.34.0
### Which version of the libraries are you using?
redis-py-cluster 2.0.0
### How can we reproduce your problem?
change https://github.com/DataDog/dd-trace-py/blob/master/tox.ini redis-py-cluster version
### What is the result that you get?
ERROR. It's no more StrictRedisCluster but just RedisCluster
### What is result that you expected?
moving to new api
--- END ISSUE ---
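For illustration, a sketch of the kind of version guard the rename calls for. The attribute fallback mirrors how redis-py-cluster exposes its version (a `VERSION` tuple in 2.x, a tuple `__version__` in 1.x, as noted in the patch below); treat the exact attributes as assumptions for this example.

```python
# Sketch: pick the cluster client / pipeline names by library version.
import rediscluster

# 2.x exposes VERSION as a tuple; 1.x only has __version__ (also a tuple).
REDISCLUSTER_VERSION = getattr(rediscluster, 'VERSION', rediscluster.__version__)

if REDISCLUSTER_VERSION >= (2, 0, 0):
    cluster_cls = rediscluster.RedisCluster         # renamed in 2.0.0
    pipeline_cls = rediscluster.ClusterPipeline
else:
    cluster_cls = rediscluster.StrictRedisCluster   # 1.x names
    pipeline_cls = rediscluster.StrictClusterPipeline
```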
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/rediscluster/patch.py`
Content:
```
1 # 3p
2 import rediscluster
3 from ddtrace.vendor import wrapt
4
5 # project
6 from ddtrace import config
7 from ...constants import ANALYTICS_SAMPLE_RATE_KEY, SPAN_MEASURED_KEY
8 from ...pin import Pin
9 from ...ext import SpanTypes, redis as redisx
10 from ...utils.wrappers import unwrap
11 from ..redis.patch import traced_execute_command, traced_pipeline
12 from ..redis.util import format_command_args
13
14
15 def patch():
16 """Patch the instrumented methods
17 """
18 if getattr(rediscluster, '_datadog_patch', False):
19 return
20 setattr(rediscluster, '_datadog_patch', True)
21
22 _w = wrapt.wrap_function_wrapper
23 _w('rediscluster', 'StrictRedisCluster.execute_command', traced_execute_command)
24 _w('rediscluster', 'StrictRedisCluster.pipeline', traced_pipeline)
25 _w('rediscluster', 'StrictClusterPipeline.execute', traced_execute_pipeline)
26 Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(rediscluster.StrictRedisCluster)
27
28
29 def unpatch():
30 if getattr(rediscluster, '_datadog_patch', False):
31 setattr(rediscluster, '_datadog_patch', False)
32 unwrap(rediscluster.StrictRedisCluster, 'execute_command')
33 unwrap(rediscluster.StrictRedisCluster, 'pipeline')
34 unwrap(rediscluster.StrictClusterPipeline, 'execute')
35
36
37 #
38 # tracing functions
39 #
40
41 def traced_execute_pipeline(func, instance, args, kwargs):
42 pin = Pin.get_from(instance)
43 if not pin or not pin.enabled():
44 return func(*args, **kwargs)
45
46 cmds = [format_command_args(c.args) for c in instance.command_stack]
47 resource = '\n'.join(cmds)
48 tracer = pin.tracer
49 with tracer.trace(redisx.CMD, resource=resource, service=pin.service, span_type=SpanTypes.REDIS) as s:
50 s.set_tag(SPAN_MEASURED_KEY)
51 s.set_tag(redisx.RAWCMD, resource)
52 s.set_metric(redisx.PIPELINE_LEN, len(instance.command_stack))
53
54 # set analytics sample rate if enabled
55 s.set_tag(
56 ANALYTICS_SAMPLE_RATE_KEY,
57 config.rediscluster.get_analytics_sample_rate()
58 )
59
60 return func(*args, **kwargs)
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddtrace/contrib/rediscluster/patch.py b/ddtrace/contrib/rediscluster/patch.py
--- a/ddtrace/contrib/rediscluster/patch.py
+++ b/ddtrace/contrib/rediscluster/patch.py
@@ -12,6 +12,11 @@
from ..redis.util import format_command_args
+# DEV: In `2.0.0` `__version__` is a string and `VERSION` is a tuple,
+# but in `1.x.x` `__version__` is a tuple annd `VERSION` does not exist
+REDISCLUSTER_VERSION = getattr(rediscluster, 'VERSION', rediscluster.__version__)
+
+
def patch():
"""Patch the instrumented methods
"""
@@ -20,18 +25,30 @@
setattr(rediscluster, '_datadog_patch', True)
_w = wrapt.wrap_function_wrapper
- _w('rediscluster', 'StrictRedisCluster.execute_command', traced_execute_command)
- _w('rediscluster', 'StrictRedisCluster.pipeline', traced_pipeline)
- _w('rediscluster', 'StrictClusterPipeline.execute', traced_execute_pipeline)
- Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(rediscluster.StrictRedisCluster)
+ if REDISCLUSTER_VERSION >= (2, 0, 0):
+ _w('rediscluster', 'RedisCluster.execute_command', traced_execute_command)
+ _w('rediscluster', 'RedisCluster.pipeline', traced_pipeline)
+ _w('rediscluster', 'ClusterPipeline.execute', traced_execute_pipeline)
+ Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(rediscluster.RedisCluster)
+ else:
+ _w('rediscluster', 'StrictRedisCluster.execute_command', traced_execute_command)
+ _w('rediscluster', 'StrictRedisCluster.pipeline', traced_pipeline)
+ _w('rediscluster', 'StrictClusterPipeline.execute', traced_execute_pipeline)
+ Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(rediscluster.StrictRedisCluster)
def unpatch():
if getattr(rediscluster, '_datadog_patch', False):
setattr(rediscluster, '_datadog_patch', False)
- unwrap(rediscluster.StrictRedisCluster, 'execute_command')
- unwrap(rediscluster.StrictRedisCluster, 'pipeline')
- unwrap(rediscluster.StrictClusterPipeline, 'execute')
+
+ if REDISCLUSTER_VERSION >= (2, 0, 0):
+ unwrap(rediscluster.RedisCluster, 'execute_command')
+ unwrap(rediscluster.RedisCluster, 'pipeline')
+ unwrap(rediscluster.ClusterPipeline, 'execute')
+ else:
+ unwrap(rediscluster.StrictRedisCluster, 'execute_command')
+ unwrap(rediscluster.StrictRedisCluster, 'pipeline')
+ unwrap(rediscluster.StrictClusterPipeline, 'execute')
#
|
{"golden_diff": "diff --git a/ddtrace/contrib/rediscluster/patch.py b/ddtrace/contrib/rediscluster/patch.py\n--- a/ddtrace/contrib/rediscluster/patch.py\n+++ b/ddtrace/contrib/rediscluster/patch.py\n@@ -12,6 +12,11 @@\n from ..redis.util import format_command_args\n \n \n+# DEV: In `2.0.0` `__version__` is a string and `VERSION` is a tuple,\n+# but in `1.x.x` `__version__` is a tuple annd `VERSION` does not exist\n+REDISCLUSTER_VERSION = getattr(rediscluster, 'VERSION', rediscluster.__version__)\n+\n+\n def patch():\n \"\"\"Patch the instrumented methods\n \"\"\"\n@@ -20,18 +25,30 @@\n setattr(rediscluster, '_datadog_patch', True)\n \n _w = wrapt.wrap_function_wrapper\n- _w('rediscluster', 'StrictRedisCluster.execute_command', traced_execute_command)\n- _w('rediscluster', 'StrictRedisCluster.pipeline', traced_pipeline)\n- _w('rediscluster', 'StrictClusterPipeline.execute', traced_execute_pipeline)\n- Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(rediscluster.StrictRedisCluster)\n+ if REDISCLUSTER_VERSION >= (2, 0, 0):\n+ _w('rediscluster', 'RedisCluster.execute_command', traced_execute_command)\n+ _w('rediscluster', 'RedisCluster.pipeline', traced_pipeline)\n+ _w('rediscluster', 'ClusterPipeline.execute', traced_execute_pipeline)\n+ Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(rediscluster.RedisCluster)\n+ else:\n+ _w('rediscluster', 'StrictRedisCluster.execute_command', traced_execute_command)\n+ _w('rediscluster', 'StrictRedisCluster.pipeline', traced_pipeline)\n+ _w('rediscluster', 'StrictClusterPipeline.execute', traced_execute_pipeline)\n+ Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(rediscluster.StrictRedisCluster)\n \n \n def unpatch():\n if getattr(rediscluster, '_datadog_patch', False):\n setattr(rediscluster, '_datadog_patch', False)\n- unwrap(rediscluster.StrictRedisCluster, 'execute_command')\n- unwrap(rediscluster.StrictRedisCluster, 'pipeline')\n- unwrap(rediscluster.StrictClusterPipeline, 'execute')\n+\n+ if REDISCLUSTER_VERSION >= (2, 0, 0):\n+ unwrap(rediscluster.RedisCluster, 'execute_command')\n+ unwrap(rediscluster.RedisCluster, 'pipeline')\n+ unwrap(rediscluster.ClusterPipeline, 'execute')\n+ else:\n+ unwrap(rediscluster.StrictRedisCluster, 'execute_command')\n+ unwrap(rediscluster.StrictRedisCluster, 'pipeline')\n+ unwrap(rediscluster.StrictClusterPipeline, 'execute')\n \n \n #\n", "issue": "redis-py-cluster new API 2.0.0\n### Which version of dd-trace-py are you using?\r\n lastest 0.34.0\r\n\r\n### Which version of the libraries are you using?\r\n\r\nredis-py-cluster 2.0.0\r\n\r\n### How can we reproduce your problem?\r\n\r\nchange https://github.com/DataDog/dd-trace-py/blob/master/tox.ini redis-py-cluster version\r\n\r\n### What is the result that you get?\r\n\r\nERROR. 
It's no more StrictRedisCluster but just RedisCluster\r\n\r\n### What is result that you expected?\r\n\r\nmoving to new api\r\n\n", "before_files": [{"content": "# 3p\nimport rediscluster\nfrom ddtrace.vendor import wrapt\n\n# project\nfrom ddtrace import config\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY, SPAN_MEASURED_KEY\nfrom ...pin import Pin\nfrom ...ext import SpanTypes, redis as redisx\nfrom ...utils.wrappers import unwrap\nfrom ..redis.patch import traced_execute_command, traced_pipeline\nfrom ..redis.util import format_command_args\n\n\ndef patch():\n \"\"\"Patch the instrumented methods\n \"\"\"\n if getattr(rediscluster, '_datadog_patch', False):\n return\n setattr(rediscluster, '_datadog_patch', True)\n\n _w = wrapt.wrap_function_wrapper\n _w('rediscluster', 'StrictRedisCluster.execute_command', traced_execute_command)\n _w('rediscluster', 'StrictRedisCluster.pipeline', traced_pipeline)\n _w('rediscluster', 'StrictClusterPipeline.execute', traced_execute_pipeline)\n Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(rediscluster.StrictRedisCluster)\n\n\ndef unpatch():\n if getattr(rediscluster, '_datadog_patch', False):\n setattr(rediscluster, '_datadog_patch', False)\n unwrap(rediscluster.StrictRedisCluster, 'execute_command')\n unwrap(rediscluster.StrictRedisCluster, 'pipeline')\n unwrap(rediscluster.StrictClusterPipeline, 'execute')\n\n\n#\n# tracing functions\n#\n\ndef traced_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n cmds = [format_command_args(c.args) for c in instance.command_stack]\n resource = '\\n'.join(cmds)\n tracer = pin.tracer\n with tracer.trace(redisx.CMD, resource=resource, service=pin.service, span_type=SpanTypes.REDIS) as s:\n s.set_tag(SPAN_MEASURED_KEY)\n s.set_tag(redisx.RAWCMD, resource)\n s.set_metric(redisx.PIPELINE_LEN, len(instance.command_stack))\n\n # set analytics sample rate if enabled\n s.set_tag(\n ANALYTICS_SAMPLE_RATE_KEY,\n config.rediscluster.get_analytics_sample_rate()\n )\n\n return func(*args, **kwargs)\n", "path": "ddtrace/contrib/rediscluster/patch.py"}], "after_files": [{"content": "# 3p\nimport rediscluster\nfrom ddtrace.vendor import wrapt\n\n# project\nfrom ddtrace import config\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY, SPAN_MEASURED_KEY\nfrom ...pin import Pin\nfrom ...ext import SpanTypes, redis as redisx\nfrom ...utils.wrappers import unwrap\nfrom ..redis.patch import traced_execute_command, traced_pipeline\nfrom ..redis.util import format_command_args\n\n\n# DEV: In `2.0.0` `__version__` is a string and `VERSION` is a tuple,\n# but in `1.x.x` `__version__` is a tuple annd `VERSION` does not exist\nREDISCLUSTER_VERSION = getattr(rediscluster, 'VERSION', rediscluster.__version__)\n\n\ndef patch():\n \"\"\"Patch the instrumented methods\n \"\"\"\n if getattr(rediscluster, '_datadog_patch', False):\n return\n setattr(rediscluster, '_datadog_patch', True)\n\n _w = wrapt.wrap_function_wrapper\n if REDISCLUSTER_VERSION >= (2, 0, 0):\n _w('rediscluster', 'RedisCluster.execute_command', traced_execute_command)\n _w('rediscluster', 'RedisCluster.pipeline', traced_pipeline)\n _w('rediscluster', 'ClusterPipeline.execute', traced_execute_pipeline)\n Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(rediscluster.RedisCluster)\n else:\n _w('rediscluster', 'StrictRedisCluster.execute_command', traced_execute_command)\n _w('rediscluster', 'StrictRedisCluster.pipeline', traced_pipeline)\n _w('rediscluster', 
'StrictClusterPipeline.execute', traced_execute_pipeline)\n Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(rediscluster.StrictRedisCluster)\n\n\ndef unpatch():\n if getattr(rediscluster, '_datadog_patch', False):\n setattr(rediscluster, '_datadog_patch', False)\n\n if REDISCLUSTER_VERSION >= (2, 0, 0):\n unwrap(rediscluster.RedisCluster, 'execute_command')\n unwrap(rediscluster.RedisCluster, 'pipeline')\n unwrap(rediscluster.ClusterPipeline, 'execute')\n else:\n unwrap(rediscluster.StrictRedisCluster, 'execute_command')\n unwrap(rediscluster.StrictRedisCluster, 'pipeline')\n unwrap(rediscluster.StrictClusterPipeline, 'execute')\n\n\n#\n# tracing functions\n#\n\ndef traced_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n cmds = [format_command_args(c.args) for c in instance.command_stack]\n resource = '\\n'.join(cmds)\n tracer = pin.tracer\n with tracer.trace(redisx.CMD, resource=resource, service=pin.service, span_type=SpanTypes.REDIS) as s:\n s.set_tag(SPAN_MEASURED_KEY)\n s.set_tag(redisx.RAWCMD, resource)\n s.set_metric(redisx.PIPELINE_LEN, len(instance.command_stack))\n\n # set analytics sample rate if enabled\n s.set_tag(\n ANALYTICS_SAMPLE_RATE_KEY,\n config.rediscluster.get_analytics_sample_rate()\n )\n\n return func(*args, **kwargs)\n", "path": "ddtrace/contrib/rediscluster/patch.py"}]}
num_tokens: 990 | num_tokens_diff: 613

problem_id: gh_patches_debug_34609 | source: rasdani/github-patches | task_type: git_diff | in_source_id: microsoft__botbuilder-python-285
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for Message Reactions to ActivityHandler
ActivityHandler should be extended to include MessageReactions. This has now been added to the C# and The JavaScript.
Here is a pointer to the JavaScript implementation:
https://github.com/microsoft/botbuilder-js/pull/1038
--- END ISSUE ---
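To make the feature concrete, here is a sketch of what a bot subclass could look like once the reaction hooks exist. The hook names follow the JavaScript/C# `ActivityHandler` additions referenced above, and the reply text is made up for the example.

```python
# Sketch: overriding the message-reaction hooks in a bot (names assumed to
# match the JS/C# ActivityHandler additions).
from typing import List

from botbuilder.core import ActivityHandler, TurnContext
from botbuilder.schema import MessageReaction


class ReactionBot(ActivityHandler):
    async def on_reactions_added(
        self, message_reactions: List[MessageReaction], turn_context: TurnContext
    ):
        for reaction in message_reactions:
            await turn_context.send_activity(f"Added reaction: {reaction.type}")

    async def on_reactions_removed(
        self, message_reactions: List[MessageReaction], turn_context: TurnContext
    ):
        for reaction in message_reactions:
            await turn_context.send_activity(f"Removed reaction: {reaction.type}")
```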
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botbuilder-core/botbuilder/core/activity_handler.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from botbuilder.schema import ActivityTypes, ChannelAccount
5 from .turn_context import TurnContext
6
7
8 class ActivityHandler:
9 async def on_turn(self, turn_context: TurnContext):
10 if turn_context is None:
11 raise TypeError("ActivityHandler.on_turn(): turn_context cannot be None.")
12
13 if hasattr(turn_context, "activity") and turn_context.activity is None:
14 raise TypeError(
15 "ActivityHandler.on_turn(): turn_context must have a non-None activity."
16 )
17
18 if (
19 hasattr(turn_context.activity, "type")
20 and turn_context.activity.type is None
21 ):
22 raise TypeError(
23 "ActivityHandler.on_turn(): turn_context activity must have a non-None type."
24 )
25
26 if turn_context.activity.type == ActivityTypes.message:
27 await self.on_message_activity(turn_context)
28 elif turn_context.activity.type == ActivityTypes.conversation_update:
29 await self.on_conversation_update_activity(turn_context)
30 elif turn_context.activity.type == ActivityTypes.event:
31 await self.on_event_activity(turn_context)
32 else:
33 await self.on_unrecognized_activity_type(turn_context)
34
35 async def on_message_activity( # pylint: disable=unused-argument
36 self, turn_context: TurnContext
37 ):
38 return
39
40 async def on_conversation_update_activity(self, turn_context: TurnContext):
41 if (
42 turn_context.activity.members_added is not None
43 and turn_context.activity.members_added
44 ):
45 return await self.on_members_added_activity(
46 turn_context.activity.members_added, turn_context
47 )
48 if (
49 turn_context.activity.members_removed is not None
50 and turn_context.activity.members_removed
51 ):
52 return await self.on_members_removed_activity(
53 turn_context.activity.members_removed, turn_context
54 )
55 return
56
57 async def on_members_added_activity(
58 self, members_added: ChannelAccount, turn_context: TurnContext
59 ): # pylint: disable=unused-argument
60 return
61
62 async def on_members_removed_activity(
63 self, members_removed: ChannelAccount, turn_context: TurnContext
64 ): # pylint: disable=unused-argument
65 return
66
67 async def on_event_activity(self, turn_context: TurnContext):
68 if turn_context.activity.name == "tokens/response":
69 return await self.on_token_response_event(turn_context)
70
71 return await self.on_event(turn_context)
72
73 async def on_token_response_event( # pylint: disable=unused-argument
74 self, turn_context: TurnContext
75 ):
76 return
77
78 async def on_event( # pylint: disable=unused-argument
79 self, turn_context: TurnContext
80 ):
81 return
82
83 async def on_unrecognized_activity_type( # pylint: disable=unused-argument
84 self, turn_context: TurnContext
85 ):
86 return
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/libraries/botbuilder-core/botbuilder/core/activity_handler.py b/libraries/botbuilder-core/botbuilder/core/activity_handler.py
--- a/libraries/botbuilder-core/botbuilder/core/activity_handler.py
+++ b/libraries/botbuilder-core/botbuilder/core/activity_handler.py
@@ -1,7 +1,8 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
+from typing import List
-from botbuilder.schema import ActivityTypes, ChannelAccount
+from botbuilder.schema import ActivityTypes, ChannelAccount, MessageReaction
from .turn_context import TurnContext
@@ -27,6 +28,8 @@
await self.on_message_activity(turn_context)
elif turn_context.activity.type == ActivityTypes.conversation_update:
await self.on_conversation_update_activity(turn_context)
+ elif turn_context.activity.type == ActivityTypes.message_reaction:
+ await self.on_message_reaction_activity(turn_context)
elif turn_context.activity.type == ActivityTypes.event:
await self.on_event_activity(turn_context)
else:
@@ -64,6 +67,27 @@
): # pylint: disable=unused-argument
return
+ async def on_message_reaction_activity(self, turn_context: TurnContext):
+ if turn_context.activity.reactions_added is not None:
+ await self.on_reactions_added(
+ turn_context.activity.reactions_added, turn_context
+ )
+
+ if turn_context.activity.reactions_removed is not None:
+ await self.on_reactions_removed(
+ turn_context.activity.reactions_removed, turn_context
+ )
+
+ async def on_reactions_added( # pylint: disable=unused-argument
+ self, message_reactions: List[MessageReaction], turn_context: TurnContext
+ ):
+ return
+
+ async def on_reactions_removed( # pylint: disable=unused-argument
+ self, message_reactions: List[MessageReaction], turn_context: TurnContext
+ ):
+ return
+
async def on_event_activity(self, turn_context: TurnContext):
if turn_context.activity.name == "tokens/response":
return await self.on_token_response_event(turn_context)
|
{"golden_diff": "diff --git a/libraries/botbuilder-core/botbuilder/core/activity_handler.py b/libraries/botbuilder-core/botbuilder/core/activity_handler.py\n--- a/libraries/botbuilder-core/botbuilder/core/activity_handler.py\n+++ b/libraries/botbuilder-core/botbuilder/core/activity_handler.py\n@@ -1,7 +1,8 @@\n # Copyright (c) Microsoft Corporation. All rights reserved.\n # Licensed under the MIT License.\n+from typing import List\n \n-from botbuilder.schema import ActivityTypes, ChannelAccount\n+from botbuilder.schema import ActivityTypes, ChannelAccount, MessageReaction\n from .turn_context import TurnContext\n \n \n@@ -27,6 +28,8 @@\n await self.on_message_activity(turn_context)\n elif turn_context.activity.type == ActivityTypes.conversation_update:\n await self.on_conversation_update_activity(turn_context)\n+ elif turn_context.activity.type == ActivityTypes.message_reaction:\n+ await self.on_message_reaction_activity(turn_context)\n elif turn_context.activity.type == ActivityTypes.event:\n await self.on_event_activity(turn_context)\n else:\n@@ -64,6 +67,27 @@\n ): # pylint: disable=unused-argument\n return\n \n+ async def on_message_reaction_activity(self, turn_context: TurnContext):\n+ if turn_context.activity.reactions_added is not None:\n+ await self.on_reactions_added(\n+ turn_context.activity.reactions_added, turn_context\n+ )\n+\n+ if turn_context.activity.reactions_removed is not None:\n+ await self.on_reactions_removed(\n+ turn_context.activity.reactions_removed, turn_context\n+ )\n+\n+ async def on_reactions_added( # pylint: disable=unused-argument\n+ self, message_reactions: List[MessageReaction], turn_context: TurnContext\n+ ):\n+ return\n+\n+ async def on_reactions_removed( # pylint: disable=unused-argument\n+ self, message_reactions: List[MessageReaction], turn_context: TurnContext\n+ ):\n+ return\n+\n async def on_event_activity(self, turn_context: TurnContext):\n if turn_context.activity.name == \"tokens/response\":\n return await self.on_token_response_event(turn_context)\n", "issue": "Add support for Message Reactions to ActivityHandler \nActivityHandler should be extended to include MessageReactions. This has now been added to the C# and The JavaScript.\r\n\r\nHere is a pointer to the JavaScript implementation:\r\n\r\nhttps://github.com/microsoft/botbuilder-js/pull/1038\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nfrom botbuilder.schema import ActivityTypes, ChannelAccount\nfrom .turn_context import TurnContext\n\n\nclass ActivityHandler:\n async def on_turn(self, turn_context: TurnContext):\n if turn_context is None:\n raise TypeError(\"ActivityHandler.on_turn(): turn_context cannot be None.\")\n\n if hasattr(turn_context, \"activity\") and turn_context.activity is None:\n raise TypeError(\n \"ActivityHandler.on_turn(): turn_context must have a non-None activity.\"\n )\n\n if (\n hasattr(turn_context.activity, \"type\")\n and turn_context.activity.type is None\n ):\n raise TypeError(\n \"ActivityHandler.on_turn(): turn_context activity must have a non-None type.\"\n )\n\n if turn_context.activity.type == ActivityTypes.message:\n await self.on_message_activity(turn_context)\n elif turn_context.activity.type == ActivityTypes.conversation_update:\n await self.on_conversation_update_activity(turn_context)\n elif turn_context.activity.type == ActivityTypes.event:\n await self.on_event_activity(turn_context)\n else:\n await self.on_unrecognized_activity_type(turn_context)\n\n async def on_message_activity( # pylint: disable=unused-argument\n self, turn_context: TurnContext\n ):\n return\n\n async def on_conversation_update_activity(self, turn_context: TurnContext):\n if (\n turn_context.activity.members_added is not None\n and turn_context.activity.members_added\n ):\n return await self.on_members_added_activity(\n turn_context.activity.members_added, turn_context\n )\n if (\n turn_context.activity.members_removed is not None\n and turn_context.activity.members_removed\n ):\n return await self.on_members_removed_activity(\n turn_context.activity.members_removed, turn_context\n )\n return\n\n async def on_members_added_activity(\n self, members_added: ChannelAccount, turn_context: TurnContext\n ): # pylint: disable=unused-argument\n return\n\n async def on_members_removed_activity(\n self, members_removed: ChannelAccount, turn_context: TurnContext\n ): # pylint: disable=unused-argument\n return\n\n async def on_event_activity(self, turn_context: TurnContext):\n if turn_context.activity.name == \"tokens/response\":\n return await self.on_token_response_event(turn_context)\n\n return await self.on_event(turn_context)\n\n async def on_token_response_event( # pylint: disable=unused-argument\n self, turn_context: TurnContext\n ):\n return\n\n async def on_event( # pylint: disable=unused-argument\n self, turn_context: TurnContext\n ):\n return\n\n async def on_unrecognized_activity_type( # pylint: disable=unused-argument\n self, turn_context: TurnContext\n ):\n return\n", "path": "libraries/botbuilder-core/botbuilder/core/activity_handler.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\nfrom typing import List\n\nfrom botbuilder.schema import ActivityTypes, ChannelAccount, MessageReaction\nfrom .turn_context import TurnContext\n\n\nclass ActivityHandler:\n async def on_turn(self, turn_context: TurnContext):\n if turn_context is None:\n raise TypeError(\"ActivityHandler.on_turn(): turn_context cannot be None.\")\n\n if hasattr(turn_context, \"activity\") and turn_context.activity is None:\n raise TypeError(\n \"ActivityHandler.on_turn(): turn_context must have a non-None activity.\"\n )\n\n if (\n hasattr(turn_context.activity, \"type\")\n and turn_context.activity.type is None\n ):\n raise TypeError(\n \"ActivityHandler.on_turn(): turn_context activity must have a non-None type.\"\n )\n\n if turn_context.activity.type == ActivityTypes.message:\n await self.on_message_activity(turn_context)\n elif turn_context.activity.type == ActivityTypes.conversation_update:\n await self.on_conversation_update_activity(turn_context)\n elif turn_context.activity.type == ActivityTypes.message_reaction:\n await self.on_message_reaction_activity(turn_context)\n elif turn_context.activity.type == ActivityTypes.event:\n await self.on_event_activity(turn_context)\n else:\n await self.on_unrecognized_activity_type(turn_context)\n\n async def on_message_activity( # pylint: disable=unused-argument\n self, turn_context: TurnContext\n ):\n return\n\n async def on_conversation_update_activity(self, turn_context: TurnContext):\n if (\n turn_context.activity.members_added is not None\n and turn_context.activity.members_added\n ):\n return await self.on_members_added_activity(\n turn_context.activity.members_added, turn_context\n )\n if (\n turn_context.activity.members_removed is not None\n and turn_context.activity.members_removed\n ):\n return await self.on_members_removed_activity(\n turn_context.activity.members_removed, turn_context\n )\n return\n\n async def on_members_added_activity(\n self, members_added: ChannelAccount, turn_context: TurnContext\n ): # pylint: disable=unused-argument\n return\n\n async def on_members_removed_activity(\n self, members_removed: ChannelAccount, turn_context: TurnContext\n ): # pylint: disable=unused-argument\n return\n\n async def on_message_reaction_activity(self, turn_context: TurnContext):\n if turn_context.activity.reactions_added is not None:\n await self.on_reactions_added(\n turn_context.activity.reactions_added, turn_context\n )\n\n if turn_context.activity.reactions_removed is not None:\n await self.on_reactions_removed(\n turn_context.activity.reactions_removed, turn_context\n )\n\n async def on_reactions_added( # pylint: disable=unused-argument\n self, message_reactions: List[MessageReaction], turn_context: TurnContext\n ):\n return\n\n async def on_reactions_removed( # pylint: disable=unused-argument\n self, message_reactions: List[MessageReaction], turn_context: TurnContext\n ):\n return\n\n async def on_event_activity(self, turn_context: TurnContext):\n if turn_context.activity.name == \"tokens/response\":\n return await self.on_token_response_event(turn_context)\n\n return await self.on_event(turn_context)\n\n async def on_token_response_event( # pylint: disable=unused-argument\n self, turn_context: TurnContext\n ):\n return\n\n async def on_event( # pylint: disable=unused-argument\n self, turn_context: TurnContext\n ):\n return\n\n async def on_unrecognized_activity_type( # pylint: disable=unused-argument\n self, turn_context: TurnContext\n ):\n return\n", "path": 
"libraries/botbuilder-core/botbuilder/core/activity_handler.py"}]}
| 1,103 | 474 |
gh_patches_debug_4811
|
rasdani/github-patches
|
git_diff
|
pytorch__text-254
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use getattr rather than __dict__ in Batch (adds support for __slots__ in Example subclasses)
This is a proposal to change [one line of code](https://github.com/pytorch/text/blob/c839a7934930819be7e240ea972e4d600966afdc/torchtext/data/batch.py#L27) in Batch.py
I suggest `[x.__dict__[name] for x in data]` should become `[getattr(x, name) for x in data]`
A major advantage to doing this is compatibility with `__slots__`. A class that is going to be instantiated for every data point is an ideal use-case for `__slots__`, which reduces per-instance memory overhead. It makes sense for specific projects to subclass Example using `__slots__` with the known fields of the project. If you do, the instances will have empty `__dicts__` but the slots can be accessed via `getattr`.
I don't _think_ this change would break anything...
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchtext/data/batch.py`
Content:
```
1 from torch import typename
2 from torch.tensor import _TensorBase
3
4
5 class Batch(object):
6 """Defines a batch of examples along with its Fields.
7
8 Attributes:
9 batch_size: Number of examples in the batch.
10 dataset: A reference to the dataset object the examples come from
11 (which itself contains the dataset's Field objects).
12 train: Whether the batch is from a training set.
13
14 Also stores the Variable for each column in the batch as an attribute.
15 """
16
17 def __init__(self, data=None, dataset=None, device=None, train=True):
18 """Create a Batch from a list of examples."""
19 if data is not None:
20 self.batch_size = len(data)
21 self.dataset = dataset
22 self.train = train
23 self.fields = dataset.fields.keys() # copy field names
24
25 for (name, field) in dataset.fields.items():
26 if field is not None:
27 batch = [x.__dict__[name] for x in data]
28 setattr(self, name, field.process(batch, device=device, train=train))
29
30 @classmethod
31 def fromvars(cls, dataset, batch_size, train=True, **kwargs):
32 """Create a Batch directly from a number of Variables."""
33 batch = cls()
34 batch.batch_size = batch_size
35 batch.dataset = dataset
36 batch.train = train
37 for k, v in kwargs.items():
38 setattr(batch, k, v)
39 return batch
40
41 def __repr__(self):
42 return str(self)
43
44 def __str__(self):
45 if not self.__dict__:
46 return 'Empty {} instance'.format(typename(self))
47
48 var_strs = '\n'.join(['\t[.' + name + ']' + ":" + _short_str(getattr(self, name))
49 for name in self.fields if hasattr(self, name)])
50
51 data_str = (' from {}'.format(self.dataset.name.upper())
52 if hasattr(self.dataset, 'name') and
53 isinstance(self.dataset.name, str) else '')
54
55 strt = '[{} of size {}{}]\n{}'.format(typename(self),
56 self.batch_size, data_str, var_strs)
57 return '\n' + strt
58
59 def __len__(self):
60 return self.batch_size
61
62
63 def _short_str(tensor):
64 # unwrap variable to tensor
65 if hasattr(tensor, 'data'):
66 tensor = tensor.data
67
68 # fallback in case of wrong argument type
69 if issubclass(type(tensor), _TensorBase) is False:
70 return str(tensor)
71
72 # copied from torch _tensor_str
73 size_str = 'x'.join(str(size) for size in tensor.size())
74 device_str = '' if not tensor.is_cuda else \
75 ' (GPU {})'.format(tensor.get_device())
76 strt = '[{} of size {}{}]'.format(typename(tensor),
77 size_str, device_str)
78 return strt
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchtext/data/batch.py b/torchtext/data/batch.py
--- a/torchtext/data/batch.py
+++ b/torchtext/data/batch.py
@@ -24,7 +24,7 @@
for (name, field) in dataset.fields.items():
if field is not None:
- batch = [x.__dict__[name] for x in data]
+ batch = [getattr(x, name) for x in data]
setattr(self, name, field.process(batch, device=device, train=train))
@classmethod
|
{"golden_diff": "diff --git a/torchtext/data/batch.py b/torchtext/data/batch.py\n--- a/torchtext/data/batch.py\n+++ b/torchtext/data/batch.py\n@@ -24,7 +24,7 @@\n \n for (name, field) in dataset.fields.items():\n if field is not None:\n- batch = [x.__dict__[name] for x in data]\n+ batch = [getattr(x, name) for x in data]\n setattr(self, name, field.process(batch, device=device, train=train))\n \n @classmethod\n", "issue": "Use getattr rather than __dict__ in Batch (adds support for __slots__ in Example subclasses)\nThis is a proposal to change [one line of code](https://github.com/pytorch/text/blob/c839a7934930819be7e240ea972e4d600966afdc/torchtext/data/batch.py#L27) in Batch.py\r\n\r\nI suggest `[x.__dict__[name] for x in data]` should become `[getattr(x, name) for x in data]`\r\n\r\nA major advantage to doing this is compatibility with `__slots__`. A class that is going to be instantiated for every data point is an ideal use-case for `__slots__`, which reduces per-instance memory overhead. It makes sense for specific projects to subclass Example using `__slots__` with the known fields of the project. If you do, the instances will have empty `__dicts__` but the slots can be accessed via `getattr`.\r\n\r\nI don't _think_ this change would break anything...\n", "before_files": [{"content": "from torch import typename\nfrom torch.tensor import _TensorBase\n\n\nclass Batch(object):\n \"\"\"Defines a batch of examples along with its Fields.\n\n Attributes:\n batch_size: Number of examples in the batch.\n dataset: A reference to the dataset object the examples come from\n (which itself contains the dataset's Field objects).\n train: Whether the batch is from a training set.\n\n Also stores the Variable for each column in the batch as an attribute.\n \"\"\"\n\n def __init__(self, data=None, dataset=None, device=None, train=True):\n \"\"\"Create a Batch from a list of examples.\"\"\"\n if data is not None:\n self.batch_size = len(data)\n self.dataset = dataset\n self.train = train\n self.fields = dataset.fields.keys() # copy field names\n\n for (name, field) in dataset.fields.items():\n if field is not None:\n batch = [x.__dict__[name] for x in data]\n setattr(self, name, field.process(batch, device=device, train=train))\n\n @classmethod\n def fromvars(cls, dataset, batch_size, train=True, **kwargs):\n \"\"\"Create a Batch directly from a number of Variables.\"\"\"\n batch = cls()\n batch.batch_size = batch_size\n batch.dataset = dataset\n batch.train = train\n for k, v in kwargs.items():\n setattr(batch, k, v)\n return batch\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n if not self.__dict__:\n return 'Empty {} instance'.format(typename(self))\n\n var_strs = '\\n'.join(['\\t[.' 
+ name + ']' + \":\" + _short_str(getattr(self, name))\n for name in self.fields if hasattr(self, name)])\n\n data_str = (' from {}'.format(self.dataset.name.upper())\n if hasattr(self.dataset, 'name') and\n isinstance(self.dataset.name, str) else '')\n\n strt = '[{} of size {}{}]\\n{}'.format(typename(self),\n self.batch_size, data_str, var_strs)\n return '\\n' + strt\n\n def __len__(self):\n return self.batch_size\n\n\ndef _short_str(tensor):\n # unwrap variable to tensor\n if hasattr(tensor, 'data'):\n tensor = tensor.data\n\n # fallback in case of wrong argument type\n if issubclass(type(tensor), _TensorBase) is False:\n return str(tensor)\n\n # copied from torch _tensor_str\n size_str = 'x'.join(str(size) for size in tensor.size())\n device_str = '' if not tensor.is_cuda else \\\n ' (GPU {})'.format(tensor.get_device())\n strt = '[{} of size {}{}]'.format(typename(tensor),\n size_str, device_str)\n return strt\n", "path": "torchtext/data/batch.py"}], "after_files": [{"content": "from torch import typename\nfrom torch.tensor import _TensorBase\n\n\nclass Batch(object):\n \"\"\"Defines a batch of examples along with its Fields.\n\n Attributes:\n batch_size: Number of examples in the batch.\n dataset: A reference to the dataset object the examples come from\n (which itself contains the dataset's Field objects).\n train: Whether the batch is from a training set.\n\n Also stores the Variable for each column in the batch as an attribute.\n \"\"\"\n\n def __init__(self, data=None, dataset=None, device=None, train=True):\n \"\"\"Create a Batch from a list of examples.\"\"\"\n if data is not None:\n self.batch_size = len(data)\n self.dataset = dataset\n self.train = train\n self.fields = dataset.fields.keys() # copy field names\n\n for (name, field) in dataset.fields.items():\n if field is not None:\n batch = [getattr(x, name) for x in data]\n setattr(self, name, field.process(batch, device=device, train=train))\n\n @classmethod\n def fromvars(cls, dataset, batch_size, train=True, **kwargs):\n \"\"\"Create a Batch directly from a number of Variables.\"\"\"\n batch = cls()\n batch.batch_size = batch_size\n batch.dataset = dataset\n batch.train = train\n for k, v in kwargs.items():\n setattr(batch, k, v)\n return batch\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n if not self.__dict__:\n return 'Empty {} instance'.format(typename(self))\n\n var_strs = '\\n'.join(['\\t[.' + name + ']' + \":\" + _short_str(getattr(self, name))\n for name in self.fields if hasattr(self, name)])\n\n data_str = (' from {}'.format(self.dataset.name.upper())\n if hasattr(self.dataset, 'name') and\n isinstance(self.dataset.name, str) else '')\n\n strt = '[{} of size {}{}]\\n{}'.format(typename(self),\n self.batch_size, data_str, var_strs)\n return '\\n' + strt\n\n def __len__(self):\n return self.batch_size\n\n\ndef _short_str(tensor):\n # unwrap variable to tensor\n if hasattr(tensor, 'data'):\n tensor = tensor.data\n\n # fallback in case of wrong argument type\n if issubclass(type(tensor), _TensorBase) is False:\n return str(tensor)\n\n # copied from torch _tensor_str\n size_str = 'x'.join(str(size) for size in tensor.size())\n device_str = '' if not tensor.is_cuda else \\\n ' (GPU {})'.format(tensor.get_device())\n strt = '[{} of size {}{}]'.format(typename(tensor),\n size_str, device_str)\n return strt\n", "path": "torchtext/data/batch.py"}]}
| 1,247 | 124 |
gh_patches_debug_4743
|
rasdani/github-patches
|
git_diff
|
netket__netket-1112
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Upgrade `flakehell` in the pre-commit hook
It seems that `flakehell` is not actively maintained, and it is incompatible with `flake8 4.x` released in last October (see flakehell/flakehell#22). That issue is not resolved after a few months. If a new developer of NetKet runs `pre-commit install-hooks`, it will just fail.
We may use [this fix](https://github.com/flakehell/flakehell/pull/23#issuecomment-985879201), or change it to [flakeheaven](https://github.com/flakeheaven/flakeheaven) which seems more actively maintained.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2
3 DEV_DEPENDENCIES = [
4 "pytest>=6",
5 "pytest-xdist>=2",
6 "coverage>=5",
7 "pytest-cov>=2.10.1",
8 "networkx~=2.4",
9 "flaky>=3.7",
10 "pre-commit",
11 "black==22.1.0",
12 "flakehell>=0.9",
13 ]
14 MPI_DEPENDENCIES = ["mpi4py>=3.0.1, <4", "mpi4jax~=0.3.1"]
15 EXTRA_DEPENDENCIES = ["tensorboardx>=2.0.0", "openfermion>=1.0.0"]
16 BASE_DEPENDENCIES = [
17 "numpy~=1.18",
18 "scipy>=1.5.3, <2",
19 "tqdm~=4.60",
20 "plum-dispatch~=1.5.1",
21 "numba>=0.52, <0.56",
22 "igraph~=0.9.8",
23 "jax>=0.2.23, <0.4",
24 "jaxlib>=0.1.69",
25 "flax>=0.3.5, <0.5",
26 "orjson~=3.4",
27 "optax>=0.1.1, <0.2",
28 "numba4jax>=0.0.3, <0.1",
29 ]
30
31 setup(
32 name="netket",
33 author="Giuseppe Carleo et al.",
34 url="http://github.com/netket/netket",
35 author_email="[email protected]",
36 license="Apache 2.0",
37 description="Netket : Machine Learning techniques for many-body quantum systems.",
38 long_description="""NetKet is an open-source project delivering cutting-edge
39 methods for the study of many-body quantum systems with artificial
40 neural networks and machine learning techniques.""",
41 classifiers=[
42 "Programming Language :: Python :: 3",
43 "Development Status :: 5 - Production/Stable",
44 "Intended Audience :: Science/Research",
45 "License :: OSI Approved :: Apache Software License",
46 "Operating System :: MacOS :: MacOS X",
47 "Operating System :: POSIX :: Linux",
48 "Operating System :: Unix",
49 "Topic :: Scientific/Engineering :: Physics",
50 ],
51 packages=find_packages(),
52 install_requires=BASE_DEPENDENCIES,
53 python_requires=">=3.7",
54 extras_require={
55 "dev": DEV_DEPENDENCIES,
56 "mpi": MPI_DEPENDENCIES,
57 "extra": EXTRA_DEPENDENCIES,
58 "all": MPI_DEPENDENCIES + DEV_DEPENDENCIES + EXTRA_DEPENDENCIES,
59 },
60 )
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,10 +6,9 @@
"coverage>=5",
"pytest-cov>=2.10.1",
"networkx~=2.4",
- "flaky>=3.7",
- "pre-commit",
+ "pre-commit>=2.7",
"black==22.1.0",
- "flakehell>=0.9",
+ "flake8==4.0.1",
]
MPI_DEPENDENCIES = ["mpi4py>=3.0.1, <4", "mpi4jax~=0.3.1"]
EXTRA_DEPENDENCIES = ["tensorboardx>=2.0.0", "openfermion>=1.0.0"]
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -6,10 +6,9 @@\n \"coverage>=5\",\n \"pytest-cov>=2.10.1\",\n \"networkx~=2.4\",\n- \"flaky>=3.7\",\n- \"pre-commit\",\n+ \"pre-commit>=2.7\",\n \"black==22.1.0\",\n- \"flakehell>=0.9\",\n+ \"flake8==4.0.1\",\n ]\n MPI_DEPENDENCIES = [\"mpi4py>=3.0.1, <4\", \"mpi4jax~=0.3.1\"]\n EXTRA_DEPENDENCIES = [\"tensorboardx>=2.0.0\", \"openfermion>=1.0.0\"]\n", "issue": "Upgrade `flakehell` in the pre-commit hook\nIt seems that `flakehell` is not actively maintained, and it is incompatible with `flake8 4.x` released in last October (see flakehell/flakehell#22). That issue is not resolved after a few months. If a new developer of NetKet runs `pre-commit install-hooks`, it will just fail.\r\n\r\nWe may use [this fix](https://github.com/flakehell/flakehell/pull/23#issuecomment-985879201), or change it to [flakeheaven](https://github.com/flakeheaven/flakeheaven) which seems more actively maintained.\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nDEV_DEPENDENCIES = [\n \"pytest>=6\",\n \"pytest-xdist>=2\",\n \"coverage>=5\",\n \"pytest-cov>=2.10.1\",\n \"networkx~=2.4\",\n \"flaky>=3.7\",\n \"pre-commit\",\n \"black==22.1.0\",\n \"flakehell>=0.9\",\n]\nMPI_DEPENDENCIES = [\"mpi4py>=3.0.1, <4\", \"mpi4jax~=0.3.1\"]\nEXTRA_DEPENDENCIES = [\"tensorboardx>=2.0.0\", \"openfermion>=1.0.0\"]\nBASE_DEPENDENCIES = [\n \"numpy~=1.18\",\n \"scipy>=1.5.3, <2\",\n \"tqdm~=4.60\",\n \"plum-dispatch~=1.5.1\",\n \"numba>=0.52, <0.56\",\n \"igraph~=0.9.8\",\n \"jax>=0.2.23, <0.4\",\n \"jaxlib>=0.1.69\",\n \"flax>=0.3.5, <0.5\",\n \"orjson~=3.4\",\n \"optax>=0.1.1, <0.2\",\n \"numba4jax>=0.0.3, <0.1\",\n]\n\nsetup(\n name=\"netket\",\n author=\"Giuseppe Carleo et al.\",\n url=\"http://github.com/netket/netket\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n description=\"Netket : Machine Learning techniques for many-body quantum systems.\",\n long_description=\"\"\"NetKet is an open-source project delivering cutting-edge\n methods for the study of many-body quantum systems with artificial\n neural networks and machine learning techniques.\"\"\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Unix\",\n \"Topic :: Scientific/Engineering :: Physics\",\n ],\n packages=find_packages(),\n install_requires=BASE_DEPENDENCIES,\n python_requires=\">=3.7\",\n extras_require={\n \"dev\": DEV_DEPENDENCIES,\n \"mpi\": MPI_DEPENDENCIES,\n \"extra\": EXTRA_DEPENDENCIES,\n \"all\": MPI_DEPENDENCIES + DEV_DEPENDENCIES + EXTRA_DEPENDENCIES,\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\n\nDEV_DEPENDENCIES = [\n \"pytest>=6\",\n \"pytest-xdist>=2\",\n \"coverage>=5\",\n \"pytest-cov>=2.10.1\",\n \"networkx~=2.4\",\n \"pre-commit>=2.7\",\n \"black==22.1.0\",\n \"flake8==4.0.1\",\n]\nMPI_DEPENDENCIES = [\"mpi4py>=3.0.1, <4\", \"mpi4jax~=0.3.1\"]\nEXTRA_DEPENDENCIES = [\"tensorboardx>=2.0.0\", \"openfermion>=1.0.0\"]\nBASE_DEPENDENCIES = [\n \"numpy~=1.18\",\n \"scipy>=1.5.3, <2\",\n \"tqdm~=4.60\",\n \"plum-dispatch~=1.5.1\",\n \"numba>=0.52, <0.56\",\n \"igraph~=0.9.8\",\n \"jax>=0.2.23, <0.4\",\n \"jaxlib>=0.1.69\",\n \"flax>=0.3.5, <0.5\",\n \"orjson~=3.4\",\n 
\"optax>=0.1.1, <0.2\",\n \"numba4jax>=0.0.3, <0.1\",\n]\n\nsetup(\n name=\"netket\",\n author=\"Giuseppe Carleo et al.\",\n url=\"http://github.com/netket/netket\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n description=\"Netket : Machine Learning techniques for many-body quantum systems.\",\n long_description=\"\"\"NetKet is an open-source project delivering cutting-edge\n methods for the study of many-body quantum systems with artificial\n neural networks and machine learning techniques.\"\"\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Unix\",\n \"Topic :: Scientific/Engineering :: Physics\",\n ],\n packages=find_packages(),\n install_requires=BASE_DEPENDENCIES,\n python_requires=\">=3.7\",\n extras_require={\n \"dev\": DEV_DEPENDENCIES,\n \"mpi\": MPI_DEPENDENCIES,\n \"extra\": EXTRA_DEPENDENCIES,\n \"all\": MPI_DEPENDENCIES + DEV_DEPENDENCIES + EXTRA_DEPENDENCIES,\n },\n)\n", "path": "setup.py"}]}
| 1,084 | 175 |
gh_patches_debug_27640
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-6301
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wagtail should not change month name translations
### Issue Summary
Wagtail translations overrides month name translations (at least for Slovenian language) which changes how dates are formatted.
### Steps to Reproduce
With wagtail installed:
```python
>>> from django.utils.translation import activate
>>> activate("sl")
>>> from django.utils import formats
...
>>> from datetime import date
>>> formats.date_format(date.today())
'5. Avgust 2020'
```
It should be (and without wagtail installed it is) `5. avgust 2020`.
* I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: (yes / no)
yes
### Technical details
* Python version: Run `python --version`.
Python 3.7.1
Django version: Look in your requirements.txt, or run `pip show django | grep Version`.
Version: 2.2.14
* Wagtail version: Look at the bottom of the Settings menu in the Wagtail admin, or run `pip show wagtail | grep Version:`.
Version: 2.9.2
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/admin/localization.py`
Content:
```
1 import pytz
2
3 from django.conf import settings
4 from django.utils.translation import gettext as _
5 from django.utils.translation import gettext_lazy
6
7
8 # Wagtail languages with >=90% coverage
9 # This list is manually maintained
10 WAGTAILADMIN_PROVIDED_LANGUAGES = [
11 ('ar', gettext_lazy('Arabic')),
12 ('ca', gettext_lazy('Catalan')),
13 ('cs', gettext_lazy('Czech')),
14 ('de', gettext_lazy('German')),
15 ('el', gettext_lazy('Greek')),
16 ('en', gettext_lazy('English')),
17 ('es', gettext_lazy('Spanish')),
18 ('fi', gettext_lazy('Finnish')),
19 ('fr', gettext_lazy('French')),
20 ('gl', gettext_lazy('Galician')),
21 ('hu', gettext_lazy('Hungarian')),
22 ('id-id', gettext_lazy('Indonesian')),
23 ('is-is', gettext_lazy('Icelandic')),
24 ('it', gettext_lazy('Italian')),
25 ('ja', gettext_lazy('Japanese')),
26 ('ko', gettext_lazy('Korean')),
27 ('lt', gettext_lazy('Lithuanian')),
28 ('mn', gettext_lazy('Mongolian')),
29 ('nb', gettext_lazy('Norwegian Bokmål')),
30 ('nl-nl', gettext_lazy('Netherlands Dutch')),
31 ('fa', gettext_lazy('Persian')),
32 ('pl', gettext_lazy('Polish')),
33 ('pt-br', gettext_lazy('Brazilian Portuguese')),
34 ('pt-pt', gettext_lazy('Portuguese')),
35 ('ro', gettext_lazy('Romanian')),
36 ('ru', gettext_lazy('Russian')),
37 ('sv', gettext_lazy('Swedish')),
38 ('sk-sk', gettext_lazy('Slovak')),
39 ('th', gettext_lazy('Thai')),
40 ('tr', gettext_lazy('Turkish')),
41 ('tr-tr', gettext_lazy('Turkish (Turkey)')),
42 ('uk', gettext_lazy('Ukrainian')),
43 ('zh-hans', gettext_lazy('Chinese (Simplified)')),
44 ('zh-hant', gettext_lazy('Chinese (Traditional)')),
45 ]
46
47
48 # Translatable strings to be made available to JavaScript code
49 # as the wagtailConfig.STRINGS object
50 def get_js_translation_strings():
51 return {
52 'DELETE': _('Delete'),
53 'EDIT': _('Edit'),
54 'PAGE': _('Page'),
55 'PAGES': _('Pages'),
56 'LOADING': _('Loading…'),
57 'NO_RESULTS': _('No results'),
58 'SERVER_ERROR': _('Server Error'),
59 'SEE_ALL': _('See all'),
60 'CLOSE_EXPLORER': _('Close explorer'),
61 'ALT_TEXT': _('Alt text'),
62 'WRITE_HERE': _('Write here…'),
63 'HORIZONTAL_LINE': _('Horizontal line'),
64 'LINE_BREAK': _('Line break'),
65 'UNDO': _('Undo'),
66 'REDO': _('Redo'),
67 'RELOAD_PAGE': _('Reload the page'),
68 'RELOAD_EDITOR': _('Reload saved content'),
69 'SHOW_LATEST_CONTENT': _('Show latest content'),
70 'SHOW_ERROR': _('Show error'),
71 'EDITOR_CRASH': _('The editor just crashed. Content has been reset to the last saved version.'),
72 'BROKEN_LINK': _('Broken link'),
73 'MISSING_DOCUMENT': _('Missing document'),
74 'CLOSE': _('Close'),
75 'EDIT_PAGE': _('Edit \'{title}\''),
76 'VIEW_CHILD_PAGES_OF_PAGE': _('View child pages of \'{title}\''),
77 'PAGE_EXPLORER': _('Page explorer'),
78
79 'MONTHS': [
80 _('January'),
81 _('February'),
82 _('March'),
83 _('April'),
84 _('May'),
85 _('June'),
86 _('July'),
87 _('August'),
88 _('September'),
89 _('October'),
90 _('November'),
91 _('December')
92 ],
93 'WEEKDAYS': [
94 _('Sunday'),
95 _('Monday'),
96 _('Tuesday'),
97 _('Wednesday'),
98 _('Thursday'),
99 _('Friday'),
100 _('Saturday')
101 ],
102 'WEEKDAYS_SHORT': [
103 _('Sun'),
104 _('Mon'),
105 _('Tue'),
106 _('Wed'),
107 _('Thu'),
108 _('Fri'),
109 _('Sat')
110 ]
111 }
112
113
114 def get_available_admin_languages():
115 return getattr(settings, 'WAGTAILADMIN_PERMITTED_LANGUAGES', WAGTAILADMIN_PROVIDED_LANGUAGES)
116
117
118 def get_available_admin_time_zones():
119 return getattr(settings, 'WAGTAIL_USER_TIME_ZONES', pytz.common_timezones)
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wagtail/admin/localization.py b/wagtail/admin/localization.py
--- a/wagtail/admin/localization.py
+++ b/wagtail/admin/localization.py
@@ -1,6 +1,7 @@
import pytz
from django.conf import settings
+from django.utils.dates import MONTHS, WEEKDAYS, WEEKDAYS_ABBR
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy
@@ -76,38 +77,12 @@
'VIEW_CHILD_PAGES_OF_PAGE': _('View child pages of \'{title}\''),
'PAGE_EXPLORER': _('Page explorer'),
- 'MONTHS': [
- _('January'),
- _('February'),
- _('March'),
- _('April'),
- _('May'),
- _('June'),
- _('July'),
- _('August'),
- _('September'),
- _('October'),
- _('November'),
- _('December')
- ],
- 'WEEKDAYS': [
- _('Sunday'),
- _('Monday'),
- _('Tuesday'),
- _('Wednesday'),
- _('Thursday'),
- _('Friday'),
- _('Saturday')
- ],
- 'WEEKDAYS_SHORT': [
- _('Sun'),
- _('Mon'),
- _('Tue'),
- _('Wed'),
- _('Thu'),
- _('Fri'),
- _('Sat')
- ]
+ 'MONTHS': [str(m) for m in MONTHS.values()],
+
+ # Django's WEEKDAYS list begins on Monday, but ours should start on Sunday, so start
+ # counting from -1 and use modulo 7 to get an array index
+ 'WEEKDAYS': [str(WEEKDAYS[d % 7]) for d in range(-1, 6)],
+ 'WEEKDAYS_SHORT': [str(WEEKDAYS_ABBR[d % 7]) for d in range(-1, 6)],
}
|
{"golden_diff": "diff --git a/wagtail/admin/localization.py b/wagtail/admin/localization.py\n--- a/wagtail/admin/localization.py\n+++ b/wagtail/admin/localization.py\n@@ -1,6 +1,7 @@\n import pytz\n \n from django.conf import settings\n+from django.utils.dates import MONTHS, WEEKDAYS, WEEKDAYS_ABBR\n from django.utils.translation import gettext as _\n from django.utils.translation import gettext_lazy\n \n@@ -76,38 +77,12 @@\n 'VIEW_CHILD_PAGES_OF_PAGE': _('View child pages of \\'{title}\\''),\n 'PAGE_EXPLORER': _('Page explorer'),\n \n- 'MONTHS': [\n- _('January'),\n- _('February'),\n- _('March'),\n- _('April'),\n- _('May'),\n- _('June'),\n- _('July'),\n- _('August'),\n- _('September'),\n- _('October'),\n- _('November'),\n- _('December')\n- ],\n- 'WEEKDAYS': [\n- _('Sunday'),\n- _('Monday'),\n- _('Tuesday'),\n- _('Wednesday'),\n- _('Thursday'),\n- _('Friday'),\n- _('Saturday')\n- ],\n- 'WEEKDAYS_SHORT': [\n- _('Sun'),\n- _('Mon'),\n- _('Tue'),\n- _('Wed'),\n- _('Thu'),\n- _('Fri'),\n- _('Sat')\n- ]\n+ 'MONTHS': [str(m) for m in MONTHS.values()],\n+\n+ # Django's WEEKDAYS list begins on Monday, but ours should start on Sunday, so start\n+ # counting from -1 and use modulo 7 to get an array index\n+ 'WEEKDAYS': [str(WEEKDAYS[d % 7]) for d in range(-1, 6)],\n+ 'WEEKDAYS_SHORT': [str(WEEKDAYS_ABBR[d % 7]) for d in range(-1, 6)],\n }\n", "issue": "Wagtail should not change month name translations\n### Issue Summary\r\n\r\nWagtail translations overrides month name translations (at least for Slovenian language) which changes how dates are formatted.\r\n\r\n### Steps to Reproduce\r\n\r\nWith wagtail installed:\r\n\r\n```python\r\n>>> from django.utils.translation import activate \r\n>>> activate(\"sl\") \r\n>>> from django.utils import formats \r\n... \r\n>>> from datetime import date \r\n>>> formats.date_format(date.today()) \r\n'5. Avgust 2020'\r\n```\r\n\r\nIt should be (and without wagtail installed it is) `5. 
avgust 2020`.\r\n\r\n* I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: (yes / no)\r\n\r\nyes\r\n\r\n### Technical details\r\n\r\n* Python version: Run `python --version`.\r\n\r\nPython 3.7.1\r\n\r\nDjango version: Look in your requirements.txt, or run `pip show django | grep Version`.\r\n\r\nVersion: 2.2.14\r\n\r\n* Wagtail version: Look at the bottom of the Settings menu in the Wagtail admin, or run `pip show wagtail | grep Version:`.\r\n\r\nVersion: 2.9.2\r\n\n", "before_files": [{"content": "import pytz\n\nfrom django.conf import settings\nfrom django.utils.translation import gettext as _\nfrom django.utils.translation import gettext_lazy\n\n\n# Wagtail languages with >=90% coverage\n# This list is manually maintained\nWAGTAILADMIN_PROVIDED_LANGUAGES = [\n ('ar', gettext_lazy('Arabic')),\n ('ca', gettext_lazy('Catalan')),\n ('cs', gettext_lazy('Czech')),\n ('de', gettext_lazy('German')),\n ('el', gettext_lazy('Greek')),\n ('en', gettext_lazy('English')),\n ('es', gettext_lazy('Spanish')),\n ('fi', gettext_lazy('Finnish')),\n ('fr', gettext_lazy('French')),\n ('gl', gettext_lazy('Galician')),\n ('hu', gettext_lazy('Hungarian')),\n ('id-id', gettext_lazy('Indonesian')),\n ('is-is', gettext_lazy('Icelandic')),\n ('it', gettext_lazy('Italian')),\n ('ja', gettext_lazy('Japanese')),\n ('ko', gettext_lazy('Korean')),\n ('lt', gettext_lazy('Lithuanian')),\n ('mn', gettext_lazy('Mongolian')),\n ('nb', gettext_lazy('Norwegian Bokm\u00e5l')),\n ('nl-nl', gettext_lazy('Netherlands Dutch')),\n ('fa', gettext_lazy('Persian')),\n ('pl', gettext_lazy('Polish')),\n ('pt-br', gettext_lazy('Brazilian Portuguese')),\n ('pt-pt', gettext_lazy('Portuguese')),\n ('ro', gettext_lazy('Romanian')),\n ('ru', gettext_lazy('Russian')),\n ('sv', gettext_lazy('Swedish')),\n ('sk-sk', gettext_lazy('Slovak')),\n ('th', gettext_lazy('Thai')),\n ('tr', gettext_lazy('Turkish')),\n ('tr-tr', gettext_lazy('Turkish (Turkey)')),\n ('uk', gettext_lazy('Ukrainian')),\n ('zh-hans', gettext_lazy('Chinese (Simplified)')),\n ('zh-hant', gettext_lazy('Chinese (Traditional)')),\n]\n\n\n# Translatable strings to be made available to JavaScript code\n# as the wagtailConfig.STRINGS object\ndef get_js_translation_strings():\n return {\n 'DELETE': _('Delete'),\n 'EDIT': _('Edit'),\n 'PAGE': _('Page'),\n 'PAGES': _('Pages'),\n 'LOADING': _('Loading\u2026'),\n 'NO_RESULTS': _('No results'),\n 'SERVER_ERROR': _('Server Error'),\n 'SEE_ALL': _('See all'),\n 'CLOSE_EXPLORER': _('Close explorer'),\n 'ALT_TEXT': _('Alt text'),\n 'WRITE_HERE': _('Write here\u2026'),\n 'HORIZONTAL_LINE': _('Horizontal line'),\n 'LINE_BREAK': _('Line break'),\n 'UNDO': _('Undo'),\n 'REDO': _('Redo'),\n 'RELOAD_PAGE': _('Reload the page'),\n 'RELOAD_EDITOR': _('Reload saved content'),\n 'SHOW_LATEST_CONTENT': _('Show latest content'),\n 'SHOW_ERROR': _('Show error'),\n 'EDITOR_CRASH': _('The editor just crashed. 
Content has been reset to the last saved version.'),\n 'BROKEN_LINK': _('Broken link'),\n 'MISSING_DOCUMENT': _('Missing document'),\n 'CLOSE': _('Close'),\n 'EDIT_PAGE': _('Edit \\'{title}\\''),\n 'VIEW_CHILD_PAGES_OF_PAGE': _('View child pages of \\'{title}\\''),\n 'PAGE_EXPLORER': _('Page explorer'),\n\n 'MONTHS': [\n _('January'),\n _('February'),\n _('March'),\n _('April'),\n _('May'),\n _('June'),\n _('July'),\n _('August'),\n _('September'),\n _('October'),\n _('November'),\n _('December')\n ],\n 'WEEKDAYS': [\n _('Sunday'),\n _('Monday'),\n _('Tuesday'),\n _('Wednesday'),\n _('Thursday'),\n _('Friday'),\n _('Saturday')\n ],\n 'WEEKDAYS_SHORT': [\n _('Sun'),\n _('Mon'),\n _('Tue'),\n _('Wed'),\n _('Thu'),\n _('Fri'),\n _('Sat')\n ]\n }\n\n\ndef get_available_admin_languages():\n return getattr(settings, 'WAGTAILADMIN_PERMITTED_LANGUAGES', WAGTAILADMIN_PROVIDED_LANGUAGES)\n\n\ndef get_available_admin_time_zones():\n return getattr(settings, 'WAGTAIL_USER_TIME_ZONES', pytz.common_timezones)\n", "path": "wagtail/admin/localization.py"}], "after_files": [{"content": "import pytz\n\nfrom django.conf import settings\nfrom django.utils.dates import MONTHS, WEEKDAYS, WEEKDAYS_ABBR\nfrom django.utils.translation import gettext as _\nfrom django.utils.translation import gettext_lazy\n\n\n# Wagtail languages with >=90% coverage\n# This list is manually maintained\nWAGTAILADMIN_PROVIDED_LANGUAGES = [\n ('ar', gettext_lazy('Arabic')),\n ('ca', gettext_lazy('Catalan')),\n ('cs', gettext_lazy('Czech')),\n ('de', gettext_lazy('German')),\n ('el', gettext_lazy('Greek')),\n ('en', gettext_lazy('English')),\n ('es', gettext_lazy('Spanish')),\n ('fi', gettext_lazy('Finnish')),\n ('fr', gettext_lazy('French')),\n ('gl', gettext_lazy('Galician')),\n ('hu', gettext_lazy('Hungarian')),\n ('id-id', gettext_lazy('Indonesian')),\n ('is-is', gettext_lazy('Icelandic')),\n ('it', gettext_lazy('Italian')),\n ('ja', gettext_lazy('Japanese')),\n ('ko', gettext_lazy('Korean')),\n ('lt', gettext_lazy('Lithuanian')),\n ('mn', gettext_lazy('Mongolian')),\n ('nb', gettext_lazy('Norwegian Bokm\u00e5l')),\n ('nl-nl', gettext_lazy('Netherlands Dutch')),\n ('fa', gettext_lazy('Persian')),\n ('pl', gettext_lazy('Polish')),\n ('pt-br', gettext_lazy('Brazilian Portuguese')),\n ('pt-pt', gettext_lazy('Portuguese')),\n ('ro', gettext_lazy('Romanian')),\n ('ru', gettext_lazy('Russian')),\n ('sv', gettext_lazy('Swedish')),\n ('sk-sk', gettext_lazy('Slovak')),\n ('th', gettext_lazy('Thai')),\n ('tr', gettext_lazy('Turkish')),\n ('tr-tr', gettext_lazy('Turkish (Turkey)')),\n ('uk', gettext_lazy('Ukrainian')),\n ('zh-hans', gettext_lazy('Chinese (Simplified)')),\n ('zh-hant', gettext_lazy('Chinese (Traditional)')),\n]\n\n\n# Translatable strings to be made available to JavaScript code\n# as the wagtailConfig.STRINGS object\ndef get_js_translation_strings():\n return {\n 'DELETE': _('Delete'),\n 'EDIT': _('Edit'),\n 'PAGE': _('Page'),\n 'PAGES': _('Pages'),\n 'LOADING': _('Loading\u2026'),\n 'NO_RESULTS': _('No results'),\n 'SERVER_ERROR': _('Server Error'),\n 'SEE_ALL': _('See all'),\n 'CLOSE_EXPLORER': _('Close explorer'),\n 'ALT_TEXT': _('Alt text'),\n 'WRITE_HERE': _('Write here\u2026'),\n 'HORIZONTAL_LINE': _('Horizontal line'),\n 'LINE_BREAK': _('Line break'),\n 'UNDO': _('Undo'),\n 'REDO': _('Redo'),\n 'RELOAD_PAGE': _('Reload the page'),\n 'RELOAD_EDITOR': _('Reload saved content'),\n 'SHOW_LATEST_CONTENT': _('Show latest content'),\n 'SHOW_ERROR': _('Show error'),\n 'EDITOR_CRASH': _('The editor just crashed. 
Content has been reset to the last saved version.'),\n 'BROKEN_LINK': _('Broken link'),\n 'MISSING_DOCUMENT': _('Missing document'),\n 'CLOSE': _('Close'),\n 'EDIT_PAGE': _('Edit \\'{title}\\''),\n 'VIEW_CHILD_PAGES_OF_PAGE': _('View child pages of \\'{title}\\''),\n 'PAGE_EXPLORER': _('Page explorer'),\n\n 'MONTHS': [str(m) for m in MONTHS.values()],\n\n # Django's WEEKDAYS list begins on Monday, but ours should start on Sunday, so start\n # counting from -1 and use modulo 7 to get an array index\n 'WEEKDAYS': [str(WEEKDAYS[d % 7]) for d in range(-1, 6)],\n 'WEEKDAYS_SHORT': [str(WEEKDAYS_ABBR[d % 7]) for d in range(-1, 6)],\n }\n\n\ndef get_available_admin_languages():\n return getattr(settings, 'WAGTAILADMIN_PERMITTED_LANGUAGES', WAGTAILADMIN_PROVIDED_LANGUAGES)\n\n\ndef get_available_admin_time_zones():\n return getattr(settings, 'WAGTAIL_USER_TIME_ZONES', pytz.common_timezones)\n", "path": "wagtail/admin/localization.py"}]}
| 1,696 | 431 |
gh_patches_debug_29474
|
rasdani/github-patches
|
git_diff
|
borgbackup__borg-1193
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
borgbackup build failure when using OpenSSL 1.1.0
https://groups.google.com/d/msg/linux.debian.devel/53fq9S-Qpp4/V_0pPtdzBQAJ
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `borg/testsuite/crypto.py`
Content:
```
1 from binascii import hexlify
2
3 from ..crypto import AES, bytes_to_long, bytes_to_int, long_to_bytes
4 from . import BaseTestCase
5
6
7 class CryptoTestCase(BaseTestCase):
8
9 def test_bytes_to_int(self):
10 self.assert_equal(bytes_to_int(b'\0\0\0\1'), 1)
11
12 def test_bytes_to_long(self):
13 self.assert_equal(bytes_to_long(b'\0\0\0\0\0\0\0\1'), 1)
14 self.assert_equal(long_to_bytes(1), b'\0\0\0\0\0\0\0\1')
15
16 def test_aes(self):
17 key = b'X' * 32
18 data = b'foo' * 10
19 # encrypt
20 aes = AES(is_encrypt=True, key=key)
21 self.assert_equal(bytes_to_long(aes.iv, 8), 0)
22 cdata = aes.encrypt(data)
23 self.assert_equal(hexlify(cdata), b'c6efb702de12498f34a2c2bbc8149e759996d08bf6dc5c610aefc0c3a466')
24 self.assert_equal(bytes_to_long(aes.iv, 8), 2)
25 # decrypt
26 aes = AES(is_encrypt=False, key=key)
27 self.assert_equal(bytes_to_long(aes.iv, 8), 0)
28 pdata = aes.decrypt(cdata)
29 self.assert_equal(data, pdata)
30 self.assert_equal(bytes_to_long(aes.iv, 8), 2)
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/borg/testsuite/crypto.py b/borg/testsuite/crypto.py
--- a/borg/testsuite/crypto.py
+++ b/borg/testsuite/crypto.py
@@ -1,6 +1,7 @@
from binascii import hexlify
from ..crypto import AES, bytes_to_long, bytes_to_int, long_to_bytes
+from ..crypto import increment_iv, bytes16_to_int, int_to_bytes16
from . import BaseTestCase
@@ -13,6 +14,27 @@
self.assert_equal(bytes_to_long(b'\0\0\0\0\0\0\0\1'), 1)
self.assert_equal(long_to_bytes(1), b'\0\0\0\0\0\0\0\1')
+ def test_bytes16_to_int(self):
+ self.assert_equal(bytes16_to_int(b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1'), 1)
+ self.assert_equal(int_to_bytes16(1), b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1')
+ self.assert_equal(bytes16_to_int(b'\0\0\0\0\0\0\0\1\0\0\0\0\0\0\0\0'), 2 ** 64)
+ self.assert_equal(int_to_bytes16(2 ** 64), b'\0\0\0\0\0\0\0\1\0\0\0\0\0\0\0\0')
+
+ def test_increment_iv(self):
+ iv0 = b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'
+ iv1 = b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1'
+ iv2 = b'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\2'
+ self.assert_equal(increment_iv(iv0, 0), iv0)
+ self.assert_equal(increment_iv(iv0, 1), iv1)
+ self.assert_equal(increment_iv(iv0, 2), iv2)
+ iva = b'\0\0\0\0\0\0\0\0\xff\xff\xff\xff\xff\xff\xff\xff'
+ ivb = b'\0\0\0\0\0\0\0\1\x00\x00\x00\x00\x00\x00\x00\x00'
+ ivc = b'\0\0\0\0\0\0\0\1\x00\x00\x00\x00\x00\x00\x00\x01'
+ self.assert_equal(increment_iv(iva, 0), iva)
+ self.assert_equal(increment_iv(iva, 1), ivb)
+ self.assert_equal(increment_iv(iva, 2), ivc)
+ self.assert_equal(increment_iv(iv0, 2**64), ivb)
+
def test_aes(self):
key = b'X' * 32
data = b'foo' * 10
|
{"golden_diff": "diff --git a/borg/testsuite/crypto.py b/borg/testsuite/crypto.py\n--- a/borg/testsuite/crypto.py\n+++ b/borg/testsuite/crypto.py\n@@ -1,6 +1,7 @@\n from binascii import hexlify\n \n from ..crypto import AES, bytes_to_long, bytes_to_int, long_to_bytes\n+from ..crypto import increment_iv, bytes16_to_int, int_to_bytes16\n from . import BaseTestCase\n \n \n@@ -13,6 +14,27 @@\n self.assert_equal(bytes_to_long(b'\\0\\0\\0\\0\\0\\0\\0\\1'), 1)\n self.assert_equal(long_to_bytes(1), b'\\0\\0\\0\\0\\0\\0\\0\\1')\n \n+ def test_bytes16_to_int(self):\n+ self.assert_equal(bytes16_to_int(b'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\1'), 1)\n+ self.assert_equal(int_to_bytes16(1), b'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\1')\n+ self.assert_equal(bytes16_to_int(b'\\0\\0\\0\\0\\0\\0\\0\\1\\0\\0\\0\\0\\0\\0\\0\\0'), 2 ** 64)\n+ self.assert_equal(int_to_bytes16(2 ** 64), b'\\0\\0\\0\\0\\0\\0\\0\\1\\0\\0\\0\\0\\0\\0\\0\\0')\n+\n+ def test_increment_iv(self):\n+ iv0 = b'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0'\n+ iv1 = b'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\1'\n+ iv2 = b'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\2'\n+ self.assert_equal(increment_iv(iv0, 0), iv0)\n+ self.assert_equal(increment_iv(iv0, 1), iv1)\n+ self.assert_equal(increment_iv(iv0, 2), iv2)\n+ iva = b'\\0\\0\\0\\0\\0\\0\\0\\0\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff'\n+ ivb = b'\\0\\0\\0\\0\\0\\0\\0\\1\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n+ ivc = b'\\0\\0\\0\\0\\0\\0\\0\\1\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01'\n+ self.assert_equal(increment_iv(iva, 0), iva)\n+ self.assert_equal(increment_iv(iva, 1), ivb)\n+ self.assert_equal(increment_iv(iva, 2), ivc)\n+ self.assert_equal(increment_iv(iv0, 2**64), ivb)\n+\n def test_aes(self):\n key = b'X' * 32\n data = b'foo' * 10\n", "issue": "borgbackup build failure when using OpenSSL 1.1.0\nhttps://groups.google.com/d/msg/linux.debian.devel/53fq9S-Qpp4/V_0pPtdzBQAJ\n\n", "before_files": [{"content": "from binascii import hexlify\n\nfrom ..crypto import AES, bytes_to_long, bytes_to_int, long_to_bytes\nfrom . import BaseTestCase\n\n\nclass CryptoTestCase(BaseTestCase):\n\n def test_bytes_to_int(self):\n self.assert_equal(bytes_to_int(b'\\0\\0\\0\\1'), 1)\n\n def test_bytes_to_long(self):\n self.assert_equal(bytes_to_long(b'\\0\\0\\0\\0\\0\\0\\0\\1'), 1)\n self.assert_equal(long_to_bytes(1), b'\\0\\0\\0\\0\\0\\0\\0\\1')\n\n def test_aes(self):\n key = b'X' * 32\n data = b'foo' * 10\n # encrypt\n aes = AES(is_encrypt=True, key=key)\n self.assert_equal(bytes_to_long(aes.iv, 8), 0)\n cdata = aes.encrypt(data)\n self.assert_equal(hexlify(cdata), b'c6efb702de12498f34a2c2bbc8149e759996d08bf6dc5c610aefc0c3a466')\n self.assert_equal(bytes_to_long(aes.iv, 8), 2)\n # decrypt\n aes = AES(is_encrypt=False, key=key)\n self.assert_equal(bytes_to_long(aes.iv, 8), 0)\n pdata = aes.decrypt(cdata)\n self.assert_equal(data, pdata)\n self.assert_equal(bytes_to_long(aes.iv, 8), 2)\n", "path": "borg/testsuite/crypto.py"}], "after_files": [{"content": "from binascii import hexlify\n\nfrom ..crypto import AES, bytes_to_long, bytes_to_int, long_to_bytes\nfrom ..crypto import increment_iv, bytes16_to_int, int_to_bytes16\nfrom . 
import BaseTestCase\n\n\nclass CryptoTestCase(BaseTestCase):\n\n def test_bytes_to_int(self):\n self.assert_equal(bytes_to_int(b'\\0\\0\\0\\1'), 1)\n\n def test_bytes_to_long(self):\n self.assert_equal(bytes_to_long(b'\\0\\0\\0\\0\\0\\0\\0\\1'), 1)\n self.assert_equal(long_to_bytes(1), b'\\0\\0\\0\\0\\0\\0\\0\\1')\n\n def test_bytes16_to_int(self):\n self.assert_equal(bytes16_to_int(b'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\1'), 1)\n self.assert_equal(int_to_bytes16(1), b'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\1')\n self.assert_equal(bytes16_to_int(b'\\0\\0\\0\\0\\0\\0\\0\\1\\0\\0\\0\\0\\0\\0\\0\\0'), 2 ** 64)\n self.assert_equal(int_to_bytes16(2 ** 64), b'\\0\\0\\0\\0\\0\\0\\0\\1\\0\\0\\0\\0\\0\\0\\0\\0')\n\n def test_increment_iv(self):\n iv0 = b'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0'\n iv1 = b'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\1'\n iv2 = b'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\2'\n self.assert_equal(increment_iv(iv0, 0), iv0)\n self.assert_equal(increment_iv(iv0, 1), iv1)\n self.assert_equal(increment_iv(iv0, 2), iv2)\n iva = b'\\0\\0\\0\\0\\0\\0\\0\\0\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff'\n ivb = b'\\0\\0\\0\\0\\0\\0\\0\\1\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n ivc = b'\\0\\0\\0\\0\\0\\0\\0\\1\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01'\n self.assert_equal(increment_iv(iva, 0), iva)\n self.assert_equal(increment_iv(iva, 1), ivb)\n self.assert_equal(increment_iv(iva, 2), ivc)\n self.assert_equal(increment_iv(iv0, 2**64), ivb)\n\n def test_aes(self):\n key = b'X' * 32\n data = b'foo' * 10\n # encrypt\n aes = AES(is_encrypt=True, key=key)\n self.assert_equal(bytes_to_long(aes.iv, 8), 0)\n cdata = aes.encrypt(data)\n self.assert_equal(hexlify(cdata), b'c6efb702de12498f34a2c2bbc8149e759996d08bf6dc5c610aefc0c3a466')\n self.assert_equal(bytes_to_long(aes.iv, 8), 2)\n # decrypt\n aes = AES(is_encrypt=False, key=key)\n self.assert_equal(bytes_to_long(aes.iv, 8), 0)\n pdata = aes.decrypt(cdata)\n self.assert_equal(data, pdata)\n self.assert_equal(bytes_to_long(aes.iv, 8), 2)\n", "path": "borg/testsuite/crypto.py"}]}
| 709 | 779 |
gh_patches_debug_7561
|
rasdani/github-patches
|
git_diff
|
mdn__kuma-6693
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
T - TypeError 'count.toLocaleString' in SSR
https://sentry.prod.mozaws.net/operations/mdn-prod/issues/7090931/
```
TypeError: Cannot read property 'toLocaleString' of undefined
File "/app/kuma/javascript/dist/ssr.js", line 22, in kt
'{snip} ocuments found for "%(query)s" in %(locale)s.',a),{count:a.toLocaleString(),locale:n,query:c})," ",t))}function jt(e){var t=e.locale;return( {snip}
File "/app/kuma/javascript/dist/ssr.js", line 22, in a
'{snip} .state);null!=d&&(f.state=r({},f.state,d))}}else if(D={},f=a(o.props,i,s),null==(f=G(a,o.props,f,i))||null==f.render)return void he(e=f,a);i {snip}
File "/app/kuma/javascript/dist/ssr.js", line 22, in ye
'{snip} lement(e);){var i=e,c=i.type;if("function"!=typeof c)break;a(i,c)}return{child:e,context:t}}var ve=function(){function e(t,n){if(!(this inst {snip}
File "/app/kuma/javascript/dist/ssr.js", line 22, in e.render
'{snip} -- --\x3e"+I(n):(this.previousWasTextNode=!0,I(n));if(e=(t=ye(e,t,this.threadID)).child,t=t.context,null===e||!1===e)return"";if(!o.isValidE {snip}
File "/app/kuma/javascript/dist/ssr.js", line 22, in e.read
'{snip} +=c}else{var f=i.children[i.childIndex++],m="";try{m+=this.render(f,i.context,i.domNamespace)}catch(e){throw e}r.length<=this.suspenseDepth& {snip}
...
(5 additional frame(s) were not displayed)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/search/views.py`
Content:
```
1 from django.shortcuts import render
2 from django.urls import reverse_lazy
3 from django.views.decorators.cache import never_cache
4 from django.views.decorators.http import require_GET
5 from django.views.generic import RedirectView
6 from ratelimit.decorators import ratelimit
7
8 from kuma.api.v1.views import search as search_api
9 from kuma.core.decorators import shared_cache_control
10 from kuma.core.utils import is_wiki
11
12 from .search import SearchView
13
14 # Since the search endpoint accepts user input (via query parameters) and its
15 # response is compressed, use rate limiting to mitigate the BREACH attack
16 # (see http://breachattack.com/). It still needs to allow a user to click
17 # the filter switches (bug 1426968).
18 # Alternate: forbid gzip by setting Content-Encoding: identity
19 @never_cache
20 @require_GET
21 @ratelimit(key="user_or_ip", rate="25/m", block=True)
22 def search(request, *args, **kwargs):
23 """
24 The search view.
25 """
26 if is_wiki(request):
27 return wiki_search(request, *args, **kwargs)
28
29 results = search_api(request, *args, **kwargs).data
30
31 # Determine if there were validation errors
32 error = results.get("error") or results.get("q")
33 # If q is returned in the data, there was a validation error for that field,
34 # so return 400 status.
35 status = 200 if results.get("q") is None else 400
36
37 context = {"results": {"results": None if error else results, "error": error}}
38
39 return render(request, "search/react.html", context, status=status)
40
41
42 wiki_search = SearchView.as_view()
43
44
45 class SearchRedirectView(RedirectView):
46 permanent = True
47
48 def get_redirect_url(self, *args, **kwargs):
49 query_string = self.request.META.get("QUERY_STRING")
50 url = reverse_lazy(
51 "api.v1.search", kwargs={"locale": self.request.LANGUAGE_CODE}
52 )
53 if query_string:
54 url += "?" + query_string
55 return url
56
57
58 @shared_cache_control(s_maxage=60 * 60 * 24 * 7)
59 def plugin(request):
60 """Render an OpenSearch Plugin."""
61 return render(
62 request,
63 "search/plugin.html",
64 {"locale": request.LANGUAGE_CODE},
65 content_type="application/opensearchdescription+xml",
66 )
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kuma/search/views.py b/kuma/search/views.py
--- a/kuma/search/views.py
+++ b/kuma/search/views.py
@@ -33,6 +33,10 @@
# If q is returned in the data, there was a validation error for that field,
# so return 400 status.
status = 200 if results.get("q") is None else 400
+ # If there was an error with the pagination you'll get...
+ if results.get("detail"):
+ error = str(results["detail"])
+ status = 400
context = {"results": {"results": None if error else results, "error": error}}
|
{"golden_diff": "diff --git a/kuma/search/views.py b/kuma/search/views.py\n--- a/kuma/search/views.py\n+++ b/kuma/search/views.py\n@@ -33,6 +33,10 @@\n # If q is returned in the data, there was a validation error for that field,\n # so return 400 status.\n status = 200 if results.get(\"q\") is None else 400\n+ # If there was an error with the pagination you'll get...\n+ if results.get(\"detail\"):\n+ error = str(results[\"detail\"])\n+ status = 400\n \n context = {\"results\": {\"results\": None if error else results, \"error\": error}}\n", "issue": "T - TypeError 'count.toLocaleString' in SSR\nhttps://sentry.prod.mozaws.net/operations/mdn-prod/issues/7090931/\n\n```\nTypeError: Cannot read property 'toLocaleString' of undefined\n File \"/app/kuma/javascript/dist/ssr.js\", line 22, in kt\n '{snip} ocuments found for \"%(query)s\" in %(locale)s.',a),{count:a.toLocaleString(),locale:n,query:c}),\" \",t))}function jt(e){var t=e.locale;return( {snip}\n File \"/app/kuma/javascript/dist/ssr.js\", line 22, in a\n '{snip} .state);null!=d&&(f.state=r({},f.state,d))}}else if(D={},f=a(o.props,i,s),null==(f=G(a,o.props,f,i))||null==f.render)return void he(e=f,a);i {snip}\n File \"/app/kuma/javascript/dist/ssr.js\", line 22, in ye\n '{snip} lement(e);){var i=e,c=i.type;if(\"function\"!=typeof c)break;a(i,c)}return{child:e,context:t}}var ve=function(){function e(t,n){if(!(this inst {snip}\n File \"/app/kuma/javascript/dist/ssr.js\", line 22, in e.render\n '{snip} -- --\\x3e\"+I(n):(this.previousWasTextNode=!0,I(n));if(e=(t=ye(e,t,this.threadID)).child,t=t.context,null===e||!1===e)return\"\";if(!o.isValidE {snip}\n File \"/app/kuma/javascript/dist/ssr.js\", line 22, in e.read\n '{snip} +=c}else{var f=i.children[i.childIndex++],m=\"\";try{m+=this.render(f,i.context,i.domNamespace)}catch(e){throw e}r.length<=this.suspenseDepth& {snip}\n...\n(5 additional frame(s) were not displayed)\n```\n", "before_files": [{"content": "from django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.http import require_GET\nfrom django.views.generic import RedirectView\nfrom ratelimit.decorators import ratelimit\n\nfrom kuma.api.v1.views import search as search_api\nfrom kuma.core.decorators import shared_cache_control\nfrom kuma.core.utils import is_wiki\n\nfrom .search import SearchView\n\n# Since the search endpoint accepts user input (via query parameters) and its\n# response is compressed, use rate limiting to mitigate the BREACH attack\n# (see http://breachattack.com/). 
It still needs to allow a user to click\n# the filter switches (bug 1426968).\n# Alternate: forbid gzip by setting Content-Encoding: identity\n@never_cache\n@require_GET\n@ratelimit(key=\"user_or_ip\", rate=\"25/m\", block=True)\ndef search(request, *args, **kwargs):\n \"\"\"\n The search view.\n \"\"\"\n if is_wiki(request):\n return wiki_search(request, *args, **kwargs)\n\n results = search_api(request, *args, **kwargs).data\n\n # Determine if there were validation errors\n error = results.get(\"error\") or results.get(\"q\")\n # If q is returned in the data, there was a validation error for that field,\n # so return 400 status.\n status = 200 if results.get(\"q\") is None else 400\n\n context = {\"results\": {\"results\": None if error else results, \"error\": error}}\n\n return render(request, \"search/react.html\", context, status=status)\n\n\nwiki_search = SearchView.as_view()\n\n\nclass SearchRedirectView(RedirectView):\n permanent = True\n\n def get_redirect_url(self, *args, **kwargs):\n query_string = self.request.META.get(\"QUERY_STRING\")\n url = reverse_lazy(\n \"api.v1.search\", kwargs={\"locale\": self.request.LANGUAGE_CODE}\n )\n if query_string:\n url += \"?\" + query_string\n return url\n\n\n@shared_cache_control(s_maxage=60 * 60 * 24 * 7)\ndef plugin(request):\n \"\"\"Render an OpenSearch Plugin.\"\"\"\n return render(\n request,\n \"search/plugin.html\",\n {\"locale\": request.LANGUAGE_CODE},\n content_type=\"application/opensearchdescription+xml\",\n )\n", "path": "kuma/search/views.py"}], "after_files": [{"content": "from django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.http import require_GET\nfrom django.views.generic import RedirectView\nfrom ratelimit.decorators import ratelimit\n\nfrom kuma.api.v1.views import search as search_api\nfrom kuma.core.decorators import shared_cache_control\nfrom kuma.core.utils import is_wiki\n\nfrom .search import SearchView\n\n# Since the search endpoint accepts user input (via query parameters) and its\n# response is compressed, use rate limiting to mitigate the BREACH attack\n# (see http://breachattack.com/). 
It still needs to allow a user to click\n# the filter switches (bug 1426968).\n# Alternate: forbid gzip by setting Content-Encoding: identity\n@never_cache\n@require_GET\n@ratelimit(key=\"user_or_ip\", rate=\"25/m\", block=True)\ndef search(request, *args, **kwargs):\n \"\"\"\n The search view.\n \"\"\"\n if is_wiki(request):\n return wiki_search(request, *args, **kwargs)\n\n results = search_api(request, *args, **kwargs).data\n\n # Determine if there were validation errors\n error = results.get(\"error\") or results.get(\"q\")\n # If q is returned in the data, there was a validation error for that field,\n # so return 400 status.\n status = 200 if results.get(\"q\") is None else 400\n # If there was an error with the pagination you'll get...\n if results.get(\"detail\"):\n error = str(results[\"detail\"])\n status = 400\n\n context = {\"results\": {\"results\": None if error else results, \"error\": error}}\n\n return render(request, \"search/react.html\", context, status=status)\n\n\nwiki_search = SearchView.as_view()\n\n\nclass SearchRedirectView(RedirectView):\n permanent = True\n\n def get_redirect_url(self, *args, **kwargs):\n query_string = self.request.META.get(\"QUERY_STRING\")\n url = reverse_lazy(\n \"api.v1.search\", kwargs={\"locale\": self.request.LANGUAGE_CODE}\n )\n if query_string:\n url += \"?\" + query_string\n return url\n\n\n@shared_cache_control(s_maxage=60 * 60 * 24 * 7)\ndef plugin(request):\n \"\"\"Render an OpenSearch Plugin.\"\"\"\n return render(\n request,\n \"search/plugin.html\",\n {\"locale\": request.LANGUAGE_CODE},\n content_type=\"application/opensearchdescription+xml\",\n )\n", "path": "kuma/search/views.py"}]}
| 1,357 | 156 |
gh_patches_debug_40463
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-1371
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Django Celery trace.id integration
**Is your feature request related to a problem? Please describe.**
As of now, it is impossible to keep the same trace id between a Django view and a Celery task launched from the same view.
**Describe the solution you'd like**
Provide a way to easily pass a trace parent string to the Celery task, preferably via its headers field (introduced in Celery 3.1).
### What it would look like
**User side code (Django view)**
```python
def get(self, request):
transaction = execution_context.get_transaction()
trace_parent = transaction.trace_parent
trace_parent_string = trace_parent.to_string()
my_celery_task.apply_async(headers={"elasticapm": {"trace_parent_string": trace_parent_string} })
```
**Library side code (`elasticapm.contrib.celery.__init__.py`), rewrite of `begin_transaction()`, naïve implementation**
```python
def begin_transaction(*args, **kwargs):
trace_parent = None
try:
trace_parent_string = kwargs["task"].request.headers["elasticapm"]["trace_parent_string"]
trace_parent = TraceParent.from_string(trace_parent_string)
except:
pass
client.begin_transaction("celery", trace_parent=trace_parent)
```
- **Why use the Celery headers field?** It seems the most unobtrusive way of doing it.
- **Why use a nested field (["elasticapm"]["trace_parent_string"])?** It seems future proof: useful future fields for elasticapm could be added under the "elasticapm" key. Users of the API shouldn't see their code break, since they know that with this library the Celery headers field has a reserved "elasticapm" key used for this integration.
**Additional context**
**For anyone wanting to try it, BEWARE !!** There is a Celery [bug](https://github.com/celery/celery/issues/4875) concerning it's headers field.
You might have to do this:
```python
my_celery_task.apply_async(headers={"headers": {"elasticapm": {"trace_parent_string": trace_parent_string} } })
```
Edits: fixed code error/typos
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/contrib/celery/__init__.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
4 # Copyright (c) 2019, Elasticsearch BV
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
31
32 from celery import signals, states
33
34 import elasticapm
35 from elasticapm.conf import constants
36 from elasticapm.utils import get_name_from_func
37
38
39 class CeleryFilter(object):
40 def filter(self, record):
41 if record.funcName in ("_log_error",):
42 return 0
43 else:
44 return 1
45
46
47 def register_exception_tracking(client):
48 dispatch_uid = "elasticapm-exc-tracking"
49
50 def process_failure_signal(sender, task_id, exception, args, kwargs, traceback, einfo, **kw):
51 client.capture_exception(
52 extra={"task_id": task_id, "task": sender, "args": args, "kwargs": kwargs}, handled=False
53 )
54
55 signals.task_failure.disconnect(process_failure_signal, dispatch_uid=dispatch_uid)
56 signals.task_failure.connect(process_failure_signal, weak=False, dispatch_uid=dispatch_uid)
57 _register_worker_signals(client)
58
59
60 def register_instrumentation(client):
61 def begin_transaction(*args, **kwargs):
62 client.begin_transaction("celery")
63
64 def end_transaction(task_id, task, *args, **kwargs):
65 name = get_name_from_func(task)
66 state = kwargs.get("state", "None")
67 if state == states.SUCCESS:
68 outcome = constants.OUTCOME.SUCCESS
69 elif state in states.EXCEPTION_STATES:
70 outcome = constants.OUTCOME.FAILURE
71 else:
72 outcome = constants.OUTCOME.UNKNOWN
73 elasticapm.set_transaction_outcome(outcome, override=False)
74 client.end_transaction(name, state)
75
76 dispatch_uid = "elasticapm-tracing-%s"
77
78 # unregister any existing clients
79 signals.task_prerun.disconnect(begin_transaction, dispatch_uid=dispatch_uid % "prerun")
80 signals.task_postrun.disconnect(end_transaction, dispatch_uid=dispatch_uid % "postrun")
81
82 # register for this client
83 signals.task_prerun.connect(begin_transaction, dispatch_uid=dispatch_uid % "prerun", weak=False)
84 signals.task_postrun.connect(end_transaction, weak=False, dispatch_uid=dispatch_uid % "postrun")
85 _register_worker_signals(client)
86
87
88 def _register_worker_signals(client):
89 def worker_shutdown(*args, **kwargs):
90 client.close()
91
92 def connect_worker_process_init(*args, **kwargs):
93 signals.worker_process_shutdown.connect(worker_shutdown, dispatch_uid="elasticapm-shutdown-worker", weak=False)
94
95 signals.worker_init.connect(
96 connect_worker_process_init, dispatch_uid="elasticapm-connect-start-threads", weak=False
97 )
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/elasticapm/contrib/celery/__init__.py b/elasticapm/contrib/celery/__init__.py
--- a/elasticapm/contrib/celery/__init__.py
+++ b/elasticapm/contrib/celery/__init__.py
@@ -27,13 +27,15 @@
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-
+from contextlib import suppress
from celery import signals, states
import elasticapm
from elasticapm.conf import constants
+from elasticapm.traces import execution_context
from elasticapm.utils import get_name_from_func
+from elasticapm.utils.disttracing import TraceParent
class CeleryFilter(object):
@@ -57,9 +59,41 @@
_register_worker_signals(client)
+def set_celery_headers(headers=None, **kwargs):
+ """
+ Add elasticapm specific information to celery headers
+ """
+ headers = {} if headers is None else headers
+
+ transaction = execution_context.get_transaction()
+ if transaction is not None:
+ trace_parent = transaction.trace_parent
+ trace_parent_string = trace_parent.to_string()
+
+ headers.update({"elasticapm": {"trace_parent_string": trace_parent_string}})
+
+
+def get_trace_parent(celery_task):
+ """
+ Return a trace parent contained in the request headers of a Celery Task object or None
+ """
+ trace_parent = None
+ with suppress(AttributeError, KeyError, TypeError):
+ if celery_task.request.headers is not None:
+ trace_parent_string = celery_task.request.headers["elasticapm"]["trace_parent_string"]
+ trace_parent = TraceParent.from_string(trace_parent_string)
+ else:
+ trace_parent_string = celery_task.request.elasticapm["trace_parent_string"]
+ trace_parent = TraceParent.from_string(trace_parent_string)
+ return trace_parent
+
+
def register_instrumentation(client):
def begin_transaction(*args, **kwargs):
- client.begin_transaction("celery")
+ task = kwargs["task"]
+
+ trace_parent = get_trace_parent(task)
+ client.begin_transaction("celery", trace_parent=trace_parent)
def end_transaction(task_id, task, *args, **kwargs):
name = get_name_from_func(task)
@@ -76,10 +110,12 @@
dispatch_uid = "elasticapm-tracing-%s"
# unregister any existing clients
+ signals.before_task_publish.disconnect(set_celery_headers, dispatch_uid=dispatch_uid % "before-publish")
signals.task_prerun.disconnect(begin_transaction, dispatch_uid=dispatch_uid % "prerun")
signals.task_postrun.disconnect(end_transaction, dispatch_uid=dispatch_uid % "postrun")
# register for this client
+ signals.before_task_publish.connect(set_celery_headers, dispatch_uid=dispatch_uid % "before-publish")
signals.task_prerun.connect(begin_transaction, dispatch_uid=dispatch_uid % "prerun", weak=False)
signals.task_postrun.connect(end_transaction, weak=False, dispatch_uid=dispatch_uid % "postrun")
_register_worker_signals(client)
|
{"golden_diff": "diff --git a/elasticapm/contrib/celery/__init__.py b/elasticapm/contrib/celery/__init__.py\n--- a/elasticapm/contrib/celery/__init__.py\n+++ b/elasticapm/contrib/celery/__init__.py\n@@ -27,13 +27,15 @@\n # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n-\n+from contextlib import suppress\n \n from celery import signals, states\n \n import elasticapm\n from elasticapm.conf import constants\n+from elasticapm.traces import execution_context\n from elasticapm.utils import get_name_from_func\n+from elasticapm.utils.disttracing import TraceParent\n \n \n class CeleryFilter(object):\n@@ -57,9 +59,41 @@\n _register_worker_signals(client)\n \n \n+def set_celery_headers(headers=None, **kwargs):\n+ \"\"\"\n+ Add elasticapm specific information to celery headers\n+ \"\"\"\n+ headers = {} if headers is None else headers\n+\n+ transaction = execution_context.get_transaction()\n+ if transaction is not None:\n+ trace_parent = transaction.trace_parent\n+ trace_parent_string = trace_parent.to_string()\n+\n+ headers.update({\"elasticapm\": {\"trace_parent_string\": trace_parent_string}})\n+\n+\n+def get_trace_parent(celery_task):\n+ \"\"\"\n+ Return a trace parent contained in the request headers of a Celery Task object or None\n+ \"\"\"\n+ trace_parent = None\n+ with suppress(AttributeError, KeyError, TypeError):\n+ if celery_task.request.headers is not None:\n+ trace_parent_string = celery_task.request.headers[\"elasticapm\"][\"trace_parent_string\"]\n+ trace_parent = TraceParent.from_string(trace_parent_string)\n+ else:\n+ trace_parent_string = celery_task.request.elasticapm[\"trace_parent_string\"]\n+ trace_parent = TraceParent.from_string(trace_parent_string)\n+ return trace_parent\n+\n+\n def register_instrumentation(client):\n def begin_transaction(*args, **kwargs):\n- client.begin_transaction(\"celery\")\n+ task = kwargs[\"task\"]\n+\n+ trace_parent = get_trace_parent(task)\n+ client.begin_transaction(\"celery\", trace_parent=trace_parent)\n \n def end_transaction(task_id, task, *args, **kwargs):\n name = get_name_from_func(task)\n@@ -76,10 +110,12 @@\n dispatch_uid = \"elasticapm-tracing-%s\"\n \n # unregister any existing clients\n+ signals.before_task_publish.disconnect(set_celery_headers, dispatch_uid=dispatch_uid % \"before-publish\")\n signals.task_prerun.disconnect(begin_transaction, dispatch_uid=dispatch_uid % \"prerun\")\n signals.task_postrun.disconnect(end_transaction, dispatch_uid=dispatch_uid % \"postrun\")\n \n # register for this client\n+ signals.before_task_publish.connect(set_celery_headers, dispatch_uid=dispatch_uid % \"before-publish\")\n signals.task_prerun.connect(begin_transaction, dispatch_uid=dispatch_uid % \"prerun\", weak=False)\n signals.task_postrun.connect(end_transaction, weak=False, dispatch_uid=dispatch_uid % \"postrun\")\n _register_worker_signals(client)\n", "issue": "Django Celery trace.id integration\n**Is your feature request related to a problem? Please describe.**\r\nAs of now, it is impossible to keep the same trace id between a Django view and a Celery task launched from the same view.\r\n\r\n**Describe the solution you'd like**\r\nProvide a way to easily pass a trace parent string to the Celery task. 
Preferably via it's headers field (introduced in Celery 3.1).\r\n\r\n### What would it looks likes\r\n**User side code (Django view)**\r\n```python\r\ndef get(self, request):\r\n transaction = execution_context.get_transaction()\r\n trace_parent = transaction.trace_parent\r\n trace_parent_string = trace_parent.to_string()\r\n my_celery_task.apply_async(headers={\"elasticapm\": {\"trace_parent_string\": trace_parent_string} })\r\n```\r\n\r\n**Library side code (`elasticapm.contrib.celery.__init__.py`), rewrite of `begin_transaction()`, na\u00efve implementation**\r\n```python\r\ndef begin_transaction(*args, **kwargs):\r\n trace_parent = None\r\n try:\r\n trace_parent_string = kwargs[\"task\"].request.headers[\"elasticapm\"][\"trace_parent_string\"]\r\n trace_parent = TraceParent.from_string(trace_parent_string)\r\n except:\r\n pass\r\n client.begin_transaction(\"celery\", trace_parent=trace_parent)\r\n```\r\n\r\n- **Why using Celery headers field ?** It seems the most unobstrusive way of doing it.\r\n- **Why using a nested field ([\"elasticapm\"][\"trace_parent_string\"]) ?** Seems \"future proof\", usefull future fields for elasticapm could be added under the \"elasticapm\" key. Users of the API shouldn't see their code break as they are aware that using this library, the headers Celery field has a reserved key \"elasticapm\" used for this integration.\r\n\r\n**Additional context**\r\n**For anyone wanting to try it, BEWARE !!** There is a Celery [bug](https://github.com/celery/celery/issues/4875) concerning it's headers field.\r\nYou might have to do this:\r\n```python\r\nmy_celery_task.apply_async(headers={\"headers\": {\"elasticapm\": {\"trace_parent_string\": trace_parent_string} } })\r\n```\r\n\r\nEdits: fixed code error/typos\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nfrom celery import signals, states\n\nimport elasticapm\nfrom elasticapm.conf import constants\nfrom elasticapm.utils import get_name_from_func\n\n\nclass CeleryFilter(object):\n def filter(self, record):\n if record.funcName in (\"_log_error\",):\n return 0\n else:\n return 1\n\n\ndef register_exception_tracking(client):\n dispatch_uid = \"elasticapm-exc-tracking\"\n\n def process_failure_signal(sender, task_id, exception, args, kwargs, traceback, einfo, **kw):\n client.capture_exception(\n extra={\"task_id\": task_id, \"task\": sender, \"args\": args, \"kwargs\": kwargs}, handled=False\n )\n\n signals.task_failure.disconnect(process_failure_signal, dispatch_uid=dispatch_uid)\n signals.task_failure.connect(process_failure_signal, weak=False, dispatch_uid=dispatch_uid)\n _register_worker_signals(client)\n\n\ndef register_instrumentation(client):\n def begin_transaction(*args, **kwargs):\n client.begin_transaction(\"celery\")\n\n def end_transaction(task_id, task, *args, **kwargs):\n name = get_name_from_func(task)\n state = kwargs.get(\"state\", \"None\")\n if state == states.SUCCESS:\n outcome = constants.OUTCOME.SUCCESS\n elif state in states.EXCEPTION_STATES:\n outcome = constants.OUTCOME.FAILURE\n else:\n outcome = constants.OUTCOME.UNKNOWN\n elasticapm.set_transaction_outcome(outcome, override=False)\n client.end_transaction(name, state)\n\n dispatch_uid = \"elasticapm-tracing-%s\"\n\n # unregister any existing clients\n signals.task_prerun.disconnect(begin_transaction, dispatch_uid=dispatch_uid % \"prerun\")\n signals.task_postrun.disconnect(end_transaction, dispatch_uid=dispatch_uid % \"postrun\")\n\n # register for this client\n signals.task_prerun.connect(begin_transaction, dispatch_uid=dispatch_uid % \"prerun\", weak=False)\n signals.task_postrun.connect(end_transaction, weak=False, dispatch_uid=dispatch_uid % \"postrun\")\n _register_worker_signals(client)\n\n\ndef _register_worker_signals(client):\n def worker_shutdown(*args, **kwargs):\n client.close()\n\n def connect_worker_process_init(*args, **kwargs):\n signals.worker_process_shutdown.connect(worker_shutdown, dispatch_uid=\"elasticapm-shutdown-worker\", weak=False)\n\n signals.worker_init.connect(\n connect_worker_process_init, dispatch_uid=\"elasticapm-connect-start-threads\", weak=False\n )\n", "path": "elasticapm/contrib/celery/__init__.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the 
copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nfrom contextlib import suppress\n\nfrom celery import signals, states\n\nimport elasticapm\nfrom elasticapm.conf import constants\nfrom elasticapm.traces import execution_context\nfrom elasticapm.utils import get_name_from_func\nfrom elasticapm.utils.disttracing import TraceParent\n\n\nclass CeleryFilter(object):\n def filter(self, record):\n if record.funcName in (\"_log_error\",):\n return 0\n else:\n return 1\n\n\ndef register_exception_tracking(client):\n dispatch_uid = \"elasticapm-exc-tracking\"\n\n def process_failure_signal(sender, task_id, exception, args, kwargs, traceback, einfo, **kw):\n client.capture_exception(\n extra={\"task_id\": task_id, \"task\": sender, \"args\": args, \"kwargs\": kwargs}, handled=False\n )\n\n signals.task_failure.disconnect(process_failure_signal, dispatch_uid=dispatch_uid)\n signals.task_failure.connect(process_failure_signal, weak=False, dispatch_uid=dispatch_uid)\n _register_worker_signals(client)\n\n\ndef set_celery_headers(headers=None, **kwargs):\n \"\"\"\n Add elasticapm specific information to celery headers\n \"\"\"\n headers = {} if headers is None else headers\n\n transaction = execution_context.get_transaction()\n if transaction is not None:\n trace_parent = transaction.trace_parent\n trace_parent_string = trace_parent.to_string()\n\n headers.update({\"elasticapm\": {\"trace_parent_string\": trace_parent_string}})\n\n\ndef get_trace_parent(celery_task):\n \"\"\"\n Return a trace parent contained in the request headers of a Celery Task object or None\n \"\"\"\n trace_parent = None\n with suppress(AttributeError, KeyError, TypeError):\n if celery_task.request.headers is not None:\n trace_parent_string = celery_task.request.headers[\"elasticapm\"][\"trace_parent_string\"]\n trace_parent = TraceParent.from_string(trace_parent_string)\n else:\n trace_parent_string = celery_task.request.elasticapm[\"trace_parent_string\"]\n trace_parent = TraceParent.from_string(trace_parent_string)\n return trace_parent\n\n\ndef register_instrumentation(client):\n def begin_transaction(*args, **kwargs):\n task = kwargs[\"task\"]\n\n trace_parent = get_trace_parent(task)\n client.begin_transaction(\"celery\", trace_parent=trace_parent)\n\n def end_transaction(task_id, task, *args, **kwargs):\n name = get_name_from_func(task)\n state = kwargs.get(\"state\", \"None\")\n if state == states.SUCCESS:\n outcome = constants.OUTCOME.SUCCESS\n elif state in states.EXCEPTION_STATES:\n outcome = constants.OUTCOME.FAILURE\n else:\n outcome = constants.OUTCOME.UNKNOWN\n elasticapm.set_transaction_outcome(outcome, override=False)\n client.end_transaction(name, state)\n\n dispatch_uid = 
\"elasticapm-tracing-%s\"\n\n # unregister any existing clients\n signals.before_task_publish.disconnect(set_celery_headers, dispatch_uid=dispatch_uid % \"before-publish\")\n signals.task_prerun.disconnect(begin_transaction, dispatch_uid=dispatch_uid % \"prerun\")\n signals.task_postrun.disconnect(end_transaction, dispatch_uid=dispatch_uid % \"postrun\")\n\n # register for this client\n signals.before_task_publish.connect(set_celery_headers, dispatch_uid=dispatch_uid % \"before-publish\")\n signals.task_prerun.connect(begin_transaction, dispatch_uid=dispatch_uid % \"prerun\", weak=False)\n signals.task_postrun.connect(end_transaction, weak=False, dispatch_uid=dispatch_uid % \"postrun\")\n _register_worker_signals(client)\n\n\ndef _register_worker_signals(client):\n def worker_shutdown(*args, **kwargs):\n client.close()\n\n def connect_worker_process_init(*args, **kwargs):\n signals.worker_process_shutdown.connect(worker_shutdown, dispatch_uid=\"elasticapm-shutdown-worker\", weak=False)\n\n signals.worker_init.connect(\n connect_worker_process_init, dispatch_uid=\"elasticapm-connect-start-threads\", weak=False\n )\n", "path": "elasticapm/contrib/celery/__init__.py"}]}
| 1,809 | 738 |
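The essence of the patch is a header round-trip: the publisher stores the trace parent under a reserved "elasticapm" key, and the worker reads it back defensively before starting its transaction. The snippet below sketches that round-trip with plain dicts, without Celery or the agent installed; the helper names are illustrative, and the real integration goes through the `before_task_publish`/`task_prerun` signals and `TraceParent.from_string` as shown in the diff.

```python
def attach_trace_headers(headers, trace_parent_string):
    """Publisher side: stash the trace parent under a reserved key."""
    headers = {} if headers is None else dict(headers)
    headers.setdefault("elasticapm", {})["trace_parent_string"] = trace_parent_string
    return headers


def extract_trace_parent_string(headers):
    """Worker side: read the trace parent back, tolerating missing headers."""
    try:
        return headers["elasticapm"]["trace_parent_string"]
    except (KeyError, TypeError):
        return None


if __name__ == "__main__":
    tp = "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01"
    sent = attach_trace_headers(None, tp)
    assert extract_trace_parent_string(sent) == tp
    assert extract_trace_parent_string({}) is None
    assert extract_trace_parent_string(None) is None
```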
gh_patches_debug_66285
|
rasdani/github-patches
|
git_diff
|
python-poetry__poetry-578
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Poetry run: ModuleOrPackageNotFound with implicit namespace packages (PEP420)
<!-- Checked checkbox should look like this: [x] -->
- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.
- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
- **OS version and name**: Arch Linux 4.18.16
- **Poetry version**: 0.12.5
- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: https://gist.github.com/Kazy/692963f6a41c64d38f38ac9a3f95619a
## Issue
I'm using implicit namespace packages to organize the packages at work, which works well by specifying the `packages` value in the `pyproject.toml` like that:
```toml
packages = [
{ include = "org" }
]
```
The file structure is like that:
```
├── org
│ └── subpackage
│ ├── __init__.py
│ └── command
│ └── cli.py
└── pyproject.toml
```
The issue is when you try to do `poetry run my-command`, you get:
```
[ModuleOrPackageNotFound]
No file/folder found for package org-subpackage-command
```
I already found how to fix it and will follow with a PR, but I wanted to create the issue in case my fix isn't the right one, and to make organization easier on your side as well.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `poetry/console/commands/run.py`
Content:
```
1 from .env_command import EnvCommand
2
3
4 class RunCommand(EnvCommand):
5 """
6 Runs a command in the appropriate environment.
7
8 run
9 { args* : The command and arguments/options to run. }
10 """
11
12 def handle(self):
13 args = self.argument("args")
14 script = args[0]
15 scripts = self.poetry.local_config.get("scripts")
16
17 if scripts and script in scripts:
18 return self.run_script(scripts[script], args)
19
20 return self.env.execute(*args)
21
22 def run_script(self, script, args):
23 if isinstance(script, dict):
24 script = script["callable"]
25
26 module, callable_ = script.split(":")
27
28 src_in_sys_path = "sys.path.append('src'); " if self._module.is_in_src() else ""
29
30 cmd = ["python", "-c"]
31
32 cmd += [
33 '"import sys; '
34 "from importlib import import_module; "
35 "sys.argv = {!r}; {}"
36 "import_module('{}').{}()\"".format(
37 args, src_in_sys_path, module, callable_
38 )
39 ]
40
41 return self.env.run(*cmd, shell=True, call=True)
42
43 @property
44 def _module(self):
45 from ...masonry.utils.module import Module
46
47 poetry = self.poetry
48 package = poetry.package
49 path = poetry.file.parent
50 module = Module(package.name, path.as_posix())
51 return module
52
53 def merge_application_definition(self, merge_args=True):
54 if self._application is None or (
55 self._application_definition_merged
56 and (self._application_definition_merged_with_args or not merge_args)
57 ):
58 return
59
60 if merge_args:
61 current_arguments = self._definition.get_arguments()
62 self._definition.set_arguments(
63 self._application.get_definition().get_arguments()
64 )
65 self._definition.add_arguments(current_arguments)
66
67 self._application_definition_merged = True
68 if merge_args:
69 self._application_definition_merged_with_args = True
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/poetry/console/commands/run.py b/poetry/console/commands/run.py
--- a/poetry/console/commands/run.py
+++ b/poetry/console/commands/run.py
@@ -47,7 +47,7 @@
poetry = self.poetry
package = poetry.package
path = poetry.file.parent
- module = Module(package.name, path.as_posix())
+ module = Module(package.name, path.as_posix(), package.packages)
return module
def merge_application_definition(self, merge_args=True):
|
{"golden_diff": "diff --git a/poetry/console/commands/run.py b/poetry/console/commands/run.py\n--- a/poetry/console/commands/run.py\n+++ b/poetry/console/commands/run.py\n@@ -47,7 +47,7 @@\n poetry = self.poetry\n package = poetry.package\n path = poetry.file.parent\n- module = Module(package.name, path.as_posix())\n+ module = Module(package.name, path.as_posix(), package.packages)\n return module\n \n def merge_application_definition(self, merge_args=True):\n", "issue": "Poetry run: ModuleOrPackageNotFound with implicit namespace packages (PEP420)\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n- **OS version and name**: Arch Linux 4.18.16\r\n- **Poetry version**: 0.12.5\r\n- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: https://gist.github.com/Kazy/692963f6a41c64d38f38ac9a3f95619a\r\n\r\n## Issue\r\nI'm using implicit namespace packages to organize the packages at work, which works well by specifying the `packages` value in the `pyproject.toml` like that:\r\n```toml\r\npackages = [\r\n { include = \"org\" }\r\n]\r\n```\r\nThe file structure is like that:\r\n```\r\n\u251c\u2500\u2500 org\r\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 subpackage\r\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 __init__.py\r\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 command\r\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 cli.py\r\n\u2514\u2500\u2500 pyproject.toml\r\n```\r\n\r\nThe issue is when you try to do `poetry run my-command`, you get:\r\n```\r\n[ModuleOrPackageNotFound]\r\nNo file/folder found for package org-subpackage-command\r\n```\r\n\r\nI already found how to fix it and will follow with a PR, but I wanted to create the issue in case my fix isn't the right one, and to make organization easier on your side as well.\r\n\n", "before_files": [{"content": "from .env_command import EnvCommand\n\n\nclass RunCommand(EnvCommand):\n \"\"\"\n Runs a command in the appropriate environment.\n\n run\n { args* : The command and arguments/options to run. 
}\n \"\"\"\n\n def handle(self):\n args = self.argument(\"args\")\n script = args[0]\n scripts = self.poetry.local_config.get(\"scripts\")\n\n if scripts and script in scripts:\n return self.run_script(scripts[script], args)\n\n return self.env.execute(*args)\n\n def run_script(self, script, args):\n if isinstance(script, dict):\n script = script[\"callable\"]\n\n module, callable_ = script.split(\":\")\n\n src_in_sys_path = \"sys.path.append('src'); \" if self._module.is_in_src() else \"\"\n\n cmd = [\"python\", \"-c\"]\n\n cmd += [\n '\"import sys; '\n \"from importlib import import_module; \"\n \"sys.argv = {!r}; {}\"\n \"import_module('{}').{}()\\\"\".format(\n args, src_in_sys_path, module, callable_\n )\n ]\n\n return self.env.run(*cmd, shell=True, call=True)\n\n @property\n def _module(self):\n from ...masonry.utils.module import Module\n\n poetry = self.poetry\n package = poetry.package\n path = poetry.file.parent\n module = Module(package.name, path.as_posix())\n return module\n\n def merge_application_definition(self, merge_args=True):\n if self._application is None or (\n self._application_definition_merged\n and (self._application_definition_merged_with_args or not merge_args)\n ):\n return\n\n if merge_args:\n current_arguments = self._definition.get_arguments()\n self._definition.set_arguments(\n self._application.get_definition().get_arguments()\n )\n self._definition.add_arguments(current_arguments)\n\n self._application_definition_merged = True\n if merge_args:\n self._application_definition_merged_with_args = True\n", "path": "poetry/console/commands/run.py"}], "after_files": [{"content": "from .env_command import EnvCommand\n\n\nclass RunCommand(EnvCommand):\n \"\"\"\n Runs a command in the appropriate environment.\n\n run\n { args* : The command and arguments/options to run. }\n \"\"\"\n\n def handle(self):\n args = self.argument(\"args\")\n script = args[0]\n scripts = self.poetry.local_config.get(\"scripts\")\n\n if scripts and script in scripts:\n return self.run_script(scripts[script], args)\n\n return self.env.execute(*args)\n\n def run_script(self, script, args):\n if isinstance(script, dict):\n script = script[\"callable\"]\n\n module, callable_ = script.split(\":\")\n\n src_in_sys_path = \"sys.path.append('src'); \" if self._module.is_in_src() else \"\"\n\n cmd = [\"python\", \"-c\"]\n\n cmd += [\n '\"import sys; '\n \"from importlib import import_module; \"\n \"sys.argv = {!r}; {}\"\n \"import_module('{}').{}()\\\"\".format(\n args, src_in_sys_path, module, callable_\n )\n ]\n\n return self.env.run(*cmd, shell=True, call=True)\n\n @property\n def _module(self):\n from ...masonry.utils.module import Module\n\n poetry = self.poetry\n package = poetry.package\n path = poetry.file.parent\n module = Module(package.name, path.as_posix(), package.packages)\n return module\n\n def merge_application_definition(self, merge_args=True):\n if self._application is None or (\n self._application_definition_merged\n and (self._application_definition_merged_with_args or not merge_args)\n ):\n return\n\n if merge_args:\n current_arguments = self._definition.get_arguments()\n self._definition.set_arguments(\n self._application.get_definition().get_arguments()\n )\n self._definition.add_arguments(current_arguments)\n\n self._application_definition_merged = True\n if merge_args:\n self._application_definition_merged_with_args = True\n", "path": "poetry/console/commands/run.py"}]}
| 1,238 | 121 |
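The one-line fix works because `Module` can only fall back to the `packages` includes when it is actually given them. Below is a simplified, stand-alone sketch of that lookup; Poetry's real `Module` class also handles `src/` layouts and single-file modules, and the function name here is illustrative.

```python
from pathlib import Path


def find_package_dir(project_root, distribution_name, packages=None):
    """Resolve the top-level package directory for a distribution.

    When the conventional "<name>/__init__.py" layout is missing, fall
    back to the "packages" includes from pyproject.toml, which is what a
    PEP 420 implicit-namespace layout such as "org/subpackage" needs.
    """
    root = Path(project_root)
    conventional = root / distribution_name.replace("-", "_")
    if (conventional / "__init__.py").is_file():
        return conventional
    for include in packages or []:
        candidate = root / include["include"]
        if candidate.is_dir():
            return candidate
    raise ModuleNotFoundError(
        "No file/folder found for package {}".format(distribution_name)
    )
```

With `packages=[{"include": "org"}]` this returns `<project_root>/org` even though `org/` contains no `__init__.py`.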
gh_patches_debug_25695
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-6665
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
export_csv example under 0.12.7dev11 doesn't resize table
Running example export_csv from https://github.com/bokeh/bokeh/tree/master/examples/app/export_csv
under 0.12.7dev11 the table doesn't resize and extra rows are filled with 'undefined', '$NaN'.
The number of rows is 248 and doesn't change when moving the slider.
The rows after 248 are not shown.
Under 0.12.6 everything works perfectly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/app/export_csv/main.py`
Content:
```
1 from os.path import dirname, join
2
3 import pandas as pd
4
5 from bokeh.layouts import row, widgetbox
6 from bokeh.models import ColumnDataSource, CustomJS
7 from bokeh.models.widgets import Slider, Button, DataTable, TableColumn, NumberFormatter
8 from bokeh.io import curdoc
9
10 df = pd.read_csv(join(dirname(__file__), 'salary_data.csv'))
11
12 source = ColumnDataSource(data=dict())
13
14 def update():
15 current = df[df['salary'] <= slider.value].dropna()
16 source.data = {
17 'name' : current.name,
18 'salary' : current.salary,
19 'years_experience' : current.years_experience,
20 }
21
22 slider = Slider(title="Max Salary", start=10000, end=250000, value=150000, step=1000)
23 slider.on_change('value', lambda attr, old, new: update())
24
25 button = Button(label="Download", button_type="success")
26 button.callback = CustomJS(args=dict(source=source),
27 code=open(join(dirname(__file__), "download.js")).read())
28
29 columns = [
30 TableColumn(field="name", title="Employee Name"),
31 TableColumn(field="salary", title="Income", formatter=NumberFormatter(format="$0,0.00")),
32 TableColumn(field="years_experience", title="Experience (years)")
33 ]
34
35 data_table = DataTable(source=source, columns=columns, width=800)
36
37 controls = widgetbox(slider, button)
38 table = widgetbox(data_table)
39
40 curdoc().add_root(row(controls, table))
41 curdoc().title = "Export CSV"
42
43 update()
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/app/export_csv/main.py b/examples/app/export_csv/main.py
--- a/examples/app/export_csv/main.py
+++ b/examples/app/export_csv/main.py
@@ -4,7 +4,7 @@
from bokeh.layouts import row, widgetbox
from bokeh.models import ColumnDataSource, CustomJS
-from bokeh.models.widgets import Slider, Button, DataTable, TableColumn, NumberFormatter
+from bokeh.models.widgets import RangeSlider, Button, DataTable, TableColumn, NumberFormatter
from bokeh.io import curdoc
df = pd.read_csv(join(dirname(__file__), 'salary_data.csv'))
@@ -12,14 +12,14 @@
source = ColumnDataSource(data=dict())
def update():
- current = df[df['salary'] <= slider.value].dropna()
+ current = df[(df['salary'] >= slider.value[0]) & (df['salary'] <= slider.value[1])].dropna()
source.data = {
'name' : current.name,
'salary' : current.salary,
'years_experience' : current.years_experience,
}
-slider = Slider(title="Max Salary", start=10000, end=250000, value=150000, step=1000)
+slider = RangeSlider(title="Max Salary", start=10000, end=110000, value=(10000, 50000), step=1000, format="0,0")
slider.on_change('value', lambda attr, old, new: update())
button = Button(label="Download", button_type="success")
|
{"golden_diff": "diff --git a/examples/app/export_csv/main.py b/examples/app/export_csv/main.py\n--- a/examples/app/export_csv/main.py\n+++ b/examples/app/export_csv/main.py\n@@ -4,7 +4,7 @@\n \n from bokeh.layouts import row, widgetbox\n from bokeh.models import ColumnDataSource, CustomJS\n-from bokeh.models.widgets import Slider, Button, DataTable, TableColumn, NumberFormatter\n+from bokeh.models.widgets import RangeSlider, Button, DataTable, TableColumn, NumberFormatter\n from bokeh.io import curdoc\n \n df = pd.read_csv(join(dirname(__file__), 'salary_data.csv'))\n@@ -12,14 +12,14 @@\n source = ColumnDataSource(data=dict())\n \n def update():\n- current = df[df['salary'] <= slider.value].dropna()\n+ current = df[(df['salary'] >= slider.value[0]) & (df['salary'] <= slider.value[1])].dropna()\n source.data = {\n 'name' : current.name,\n 'salary' : current.salary,\n 'years_experience' : current.years_experience,\n }\n \n-slider = Slider(title=\"Max Salary\", start=10000, end=250000, value=150000, step=1000)\n+slider = RangeSlider(title=\"Max Salary\", start=10000, end=110000, value=(10000, 50000), step=1000, format=\"0,0\")\n slider.on_change('value', lambda attr, old, new: update())\n \n button = Button(label=\"Download\", button_type=\"success\")\n", "issue": "export_csv example under 0.12.7dev11 doesn't resize table\nRunning example export_csv from https://github.com/bokeh/bokeh/tree/master/examples/app/export_csv\r\n\r\nunder 0.12.7dev11 the table doesn't resize and extra rows are filled with 'undefined', '$NaN'.\r\nThe number of rows is 248 and doesn't change when moving the slider.\r\nThe rows after 248 are not shown.\r\n\r\nUnder 0.12.6 everything works perfect.\n", "before_files": [{"content": "from os.path import dirname, join\n\nimport pandas as pd\n\nfrom bokeh.layouts import row, widgetbox\nfrom bokeh.models import ColumnDataSource, CustomJS\nfrom bokeh.models.widgets import Slider, Button, DataTable, TableColumn, NumberFormatter\nfrom bokeh.io import curdoc\n\ndf = pd.read_csv(join(dirname(__file__), 'salary_data.csv'))\n\nsource = ColumnDataSource(data=dict())\n\ndef update():\n current = df[df['salary'] <= slider.value].dropna()\n source.data = {\n 'name' : current.name,\n 'salary' : current.salary,\n 'years_experience' : current.years_experience,\n }\n\nslider = Slider(title=\"Max Salary\", start=10000, end=250000, value=150000, step=1000)\nslider.on_change('value', lambda attr, old, new: update())\n\nbutton = Button(label=\"Download\", button_type=\"success\")\nbutton.callback = CustomJS(args=dict(source=source),\n code=open(join(dirname(__file__), \"download.js\")).read())\n\ncolumns = [\n TableColumn(field=\"name\", title=\"Employee Name\"),\n TableColumn(field=\"salary\", title=\"Income\", formatter=NumberFormatter(format=\"$0,0.00\")),\n TableColumn(field=\"years_experience\", title=\"Experience (years)\")\n]\n\ndata_table = DataTable(source=source, columns=columns, width=800)\n\ncontrols = widgetbox(slider, button)\ntable = widgetbox(data_table)\n\ncurdoc().add_root(row(controls, table))\ncurdoc().title = \"Export CSV\"\n\nupdate()\n", "path": "examples/app/export_csv/main.py"}], "after_files": [{"content": "from os.path import dirname, join\n\nimport pandas as pd\n\nfrom bokeh.layouts import row, widgetbox\nfrom bokeh.models import ColumnDataSource, CustomJS\nfrom bokeh.models.widgets import RangeSlider, Button, DataTable, TableColumn, NumberFormatter\nfrom bokeh.io import curdoc\n\ndf = pd.read_csv(join(dirname(__file__), 'salary_data.csv'))\n\nsource = 
ColumnDataSource(data=dict())\n\ndef update():\n current = df[(df['salary'] >= slider.value[0]) & (df['salary'] <= slider.value[1])].dropna()\n source.data = {\n 'name' : current.name,\n 'salary' : current.salary,\n 'years_experience' : current.years_experience,\n }\n\nslider = RangeSlider(title=\"Max Salary\", start=10000, end=110000, value=(10000, 50000), step=1000, format=\"0,0\")\nslider.on_change('value', lambda attr, old, new: update())\n\nbutton = Button(label=\"Download\", button_type=\"success\")\nbutton.callback = CustomJS(args=dict(source=source),\n code=open(join(dirname(__file__), \"download.js\")).read())\n\ncolumns = [\n TableColumn(field=\"name\", title=\"Employee Name\"),\n TableColumn(field=\"salary\", title=\"Income\", formatter=NumberFormatter(format=\"$0,0.00\")),\n TableColumn(field=\"years_experience\", title=\"Experience (years)\")\n]\n\ndata_table = DataTable(source=source, columns=columns, width=800)\n\ncontrols = widgetbox(slider, button)\ntable = widgetbox(data_table)\n\ncurdoc().add_root(row(controls, table))\ncurdoc().title = \"Export CSV\"\n\nupdate()\n", "path": "examples/app/export_csv/main.py"}]}
| 797 | 361 |
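The behavioural change in the patch is that `RangeSlider.value` is a `(low, high)` tuple, so the filter has to bound the column on both sides instead of only from above. A small pandas-only sketch of that predicate, independent of the Bokeh server machinery (the function name is illustrative):

```python
import pandas as pd


def filter_by_salary(df, salary_range):
    """Keep rows whose salary lies inside the inclusive (low, high) range."""
    low, high = salary_range
    return df[(df["salary"] >= low) & (df["salary"] <= high)].dropna()


if __name__ == "__main__":
    df = pd.DataFrame({"name": ["a", "b", "c"], "salary": [20000, 60000, 120000]})
    print(filter_by_salary(df, (10000, 50000)))  # only the 20000 row survives
```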
gh_patches_debug_21709
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__pytorch-lightning-706
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TensorBoardLogger and ModelCheckpoint are not using the same folder by default
## 🐛 Bug
(master branch)
By default, the TensorBoardLogger writes logs into `lightning_logs/0` but ModelCheckpoint writes checkpoint into `lightning_logs/version_0`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_lightning/logging/tensorboard.py`
Content:
```
1 import os
2 from warnings import warn
3 from argparse import Namespace
4 from pkg_resources import parse_version
5
6 import torch
7 import pandas as pd
8 from torch.utils.tensorboard import SummaryWriter
9
10 from .base import LightningLoggerBase, rank_zero_only
11
12
13 class TensorBoardLogger(LightningLoggerBase):
14 r"""
15
16 Log to local file system in TensorBoard format
17
18 Implemented using :class:`torch.utils.tensorboard.SummaryWriter`. Logs are saved to
19 `os.path.join(save_dir, name, version)`
20
21 Example
22 --------
23
24 .. code-block:: python
25
26 logger = TensorBoardLogger("tb_logs", name="my_model")
27 trainer = Trainer(logger=logger)
28 trainer.train(model)
29
30 Args:
31 save_dir (str): Save directory
32 name (str): Experiment name. Defaults to "default".
33 version (int): Experiment version. If version is not specified the logger inspects the save
34 directory for existing versions, then automatically assigns the next available version.
35 \**kwargs (dict): Other arguments are passed directly to the :class:`SummaryWriter` constructor.
36
37 """
38 NAME_CSV_TAGS = 'meta_tags.csv'
39
40 def __init__(self, save_dir, name="default", version=None, **kwargs):
41 super().__init__()
42 self.save_dir = save_dir
43 self._name = name
44 self._version = version
45
46 self._experiment = None
47 self.tags = {}
48 self.kwargs = kwargs
49
50 @property
51 def experiment(self):
52 r"""
53
54 Actual tensorboard object. To use tensorboard features do the following.
55
56 Example::
57
58 self.logger.experiment.some_tensorboard_function()
59
60 """
61 if self._experiment is not None:
62 return self._experiment
63
64 root_dir = os.path.join(self.save_dir, self.name)
65 os.makedirs(root_dir, exist_ok=True)
66 log_dir = os.path.join(root_dir, str(self.version))
67 self._experiment = SummaryWriter(log_dir=log_dir, **self.kwargs)
68 return self._experiment
69
70 @rank_zero_only
71 def log_hyperparams(self, params):
72 if params is None:
73 return
74
75 # in case converting from namespace
76 if isinstance(params, Namespace):
77 params = vars(params)
78 params = dict(params)
79
80 if parse_version(torch.__version__) < parse_version("1.3.0"):
81 warn(
82 f"Hyperparameter logging is not available for Torch version {torch.__version__}."
83 " Skipping log_hyperparams. Upgrade to Torch 1.3.0 or above to enable"
84 " hyperparameter logging."
85 )
86 else:
87 # `add_hparams` requires both - hparams and metric
88 self.experiment.add_hparams(hparam_dict=params, metric_dict={})
89 # some alternative should be added
90 self.tags.update(params)
91
92 @rank_zero_only
93 def log_metrics(self, metrics, step=None):
94 for k, v in metrics.items():
95 if isinstance(v, torch.Tensor):
96 v = v.item()
97 self.experiment.add_scalar(k, v, step)
98
99 @rank_zero_only
100 def save(self):
101 try:
102 self.experiment.flush()
103 except AttributeError:
104 # you are using PT version (<v1.2) which does not have implemented flush
105 self.experiment._get_file_writer().flush()
106
107 # create a preudo standard path ala test-tube
108 dir_path = os.path.join(self.save_dir, self.name, 'version_%s' % self.version)
109 if not os.path.isdir(dir_path):
110 dir_path = self.save_dir
111 # prepare the file path
112 meta_tags_path = os.path.join(dir_path, self.NAME_CSV_TAGS)
113 # save the metatags file
114 df = pd.DataFrame({'key': list(self.tags.keys()),
115 'value': list(self.tags.values())})
116 df.to_csv(meta_tags_path, index=False)
117
118 @rank_zero_only
119 def finalize(self, status):
120 self.save()
121
122 @property
123 def name(self):
124 return self._name
125
126 @property
127 def version(self):
128 if self._version is None:
129 self._version = self._get_next_version()
130 return self._version
131
132 def _get_next_version(self):
133 root_dir = os.path.join(self.save_dir, self.name)
134 existing_versions = [
135 int(d) for d in os.listdir(root_dir) if os.path.isdir(os.path.join(root_dir, d)) and d.isdigit()
136 ]
137 if len(existing_versions) == 0:
138 return 0
139 else:
140 return max(existing_versions) + 1
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pytorch_lightning/logging/tensorboard.py b/pytorch_lightning/logging/tensorboard.py
--- a/pytorch_lightning/logging/tensorboard.py
+++ b/pytorch_lightning/logging/tensorboard.py
@@ -63,7 +63,7 @@
root_dir = os.path.join(self.save_dir, self.name)
os.makedirs(root_dir, exist_ok=True)
- log_dir = os.path.join(root_dir, str(self.version))
+ log_dir = os.path.join(root_dir, "version_" + str(self.version))
self._experiment = SummaryWriter(log_dir=log_dir, **self.kwargs)
return self._experiment
@@ -131,9 +131,11 @@
def _get_next_version(self):
root_dir = os.path.join(self.save_dir, self.name)
- existing_versions = [
- int(d) for d in os.listdir(root_dir) if os.path.isdir(os.path.join(root_dir, d)) and d.isdigit()
- ]
+ existing_versions = []
+ for d in os.listdir(root_dir):
+ if os.path.isdir(os.path.join(root_dir, d)) and d.startswith("version_"):
+ existing_versions.append(int(d.split("_")[1]))
+
if len(existing_versions) == 0:
return 0
else:
|
{"golden_diff": "diff --git a/pytorch_lightning/logging/tensorboard.py b/pytorch_lightning/logging/tensorboard.py\n--- a/pytorch_lightning/logging/tensorboard.py\n+++ b/pytorch_lightning/logging/tensorboard.py\n@@ -63,7 +63,7 @@\n \n root_dir = os.path.join(self.save_dir, self.name)\n os.makedirs(root_dir, exist_ok=True)\n- log_dir = os.path.join(root_dir, str(self.version))\n+ log_dir = os.path.join(root_dir, \"version_\" + str(self.version))\n self._experiment = SummaryWriter(log_dir=log_dir, **self.kwargs)\n return self._experiment\n \n@@ -131,9 +131,11 @@\n \n def _get_next_version(self):\n root_dir = os.path.join(self.save_dir, self.name)\n- existing_versions = [\n- int(d) for d in os.listdir(root_dir) if os.path.isdir(os.path.join(root_dir, d)) and d.isdigit()\n- ]\n+ existing_versions = []\n+ for d in os.listdir(root_dir):\n+ if os.path.isdir(os.path.join(root_dir, d)) and d.startswith(\"version_\"):\n+ existing_versions.append(int(d.split(\"_\")[1]))\n+\n if len(existing_versions) == 0:\n return 0\n else:\n", "issue": "TensorBoardLogger and ModelCheckpoint are not using the same folder by default\n## \ud83d\udc1b Bug\r\n(master branch)\r\nBy default, the TensorBoardLogger writes logs into `lightning_logs/0` but ModelCheckpoint writes checkpoint into `lightning_logs/version_0`.\n", "before_files": [{"content": "import os\nfrom warnings import warn\nfrom argparse import Namespace\nfrom pkg_resources import parse_version\n\nimport torch\nimport pandas as pd\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom .base import LightningLoggerBase, rank_zero_only\n\n\nclass TensorBoardLogger(LightningLoggerBase):\n r\"\"\"\n\n Log to local file system in TensorBoard format\n\n Implemented using :class:`torch.utils.tensorboard.SummaryWriter`. Logs are saved to\n `os.path.join(save_dir, name, version)`\n\n Example\n --------\n\n .. code-block:: python\n\n logger = TensorBoardLogger(\"tb_logs\", name=\"my_model\")\n trainer = Trainer(logger=logger)\n trainer.train(model)\n\n Args:\n save_dir (str): Save directory\n name (str): Experiment name. Defaults to \"default\".\n version (int): Experiment version. If version is not specified the logger inspects the save\n directory for existing versions, then automatically assigns the next available version.\n \\**kwargs (dict): Other arguments are passed directly to the :class:`SummaryWriter` constructor.\n\n \"\"\"\n NAME_CSV_TAGS = 'meta_tags.csv'\n\n def __init__(self, save_dir, name=\"default\", version=None, **kwargs):\n super().__init__()\n self.save_dir = save_dir\n self._name = name\n self._version = version\n\n self._experiment = None\n self.tags = {}\n self.kwargs = kwargs\n\n @property\n def experiment(self):\n r\"\"\"\n\n Actual tensorboard object. 
To use tensorboard features do the following.\n\n Example::\n\n self.logger.experiment.some_tensorboard_function()\n\n \"\"\"\n if self._experiment is not None:\n return self._experiment\n\n root_dir = os.path.join(self.save_dir, self.name)\n os.makedirs(root_dir, exist_ok=True)\n log_dir = os.path.join(root_dir, str(self.version))\n self._experiment = SummaryWriter(log_dir=log_dir, **self.kwargs)\n return self._experiment\n\n @rank_zero_only\n def log_hyperparams(self, params):\n if params is None:\n return\n\n # in case converting from namespace\n if isinstance(params, Namespace):\n params = vars(params)\n params = dict(params)\n\n if parse_version(torch.__version__) < parse_version(\"1.3.0\"):\n warn(\n f\"Hyperparameter logging is not available for Torch version {torch.__version__}.\"\n \" Skipping log_hyperparams. Upgrade to Torch 1.3.0 or above to enable\"\n \" hyperparameter logging.\"\n )\n else:\n # `add_hparams` requires both - hparams and metric\n self.experiment.add_hparams(hparam_dict=params, metric_dict={})\n # some alternative should be added\n self.tags.update(params)\n\n @rank_zero_only\n def log_metrics(self, metrics, step=None):\n for k, v in metrics.items():\n if isinstance(v, torch.Tensor):\n v = v.item()\n self.experiment.add_scalar(k, v, step)\n\n @rank_zero_only\n def save(self):\n try:\n self.experiment.flush()\n except AttributeError:\n # you are using PT version (<v1.2) which does not have implemented flush\n self.experiment._get_file_writer().flush()\n\n # create a preudo standard path ala test-tube\n dir_path = os.path.join(self.save_dir, self.name, 'version_%s' % self.version)\n if not os.path.isdir(dir_path):\n dir_path = self.save_dir\n # prepare the file path\n meta_tags_path = os.path.join(dir_path, self.NAME_CSV_TAGS)\n # save the metatags file\n df = pd.DataFrame({'key': list(self.tags.keys()),\n 'value': list(self.tags.values())})\n df.to_csv(meta_tags_path, index=False)\n\n @rank_zero_only\n def finalize(self, status):\n self.save()\n\n @property\n def name(self):\n return self._name\n\n @property\n def version(self):\n if self._version is None:\n self._version = self._get_next_version()\n return self._version\n\n def _get_next_version(self):\n root_dir = os.path.join(self.save_dir, self.name)\n existing_versions = [\n int(d) for d in os.listdir(root_dir) if os.path.isdir(os.path.join(root_dir, d)) and d.isdigit()\n ]\n if len(existing_versions) == 0:\n return 0\n else:\n return max(existing_versions) + 1\n", "path": "pytorch_lightning/logging/tensorboard.py"}], "after_files": [{"content": "import os\nfrom warnings import warn\nfrom argparse import Namespace\nfrom pkg_resources import parse_version\n\nimport torch\nimport pandas as pd\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom .base import LightningLoggerBase, rank_zero_only\n\n\nclass TensorBoardLogger(LightningLoggerBase):\n r\"\"\"\n\n Log to local file system in TensorBoard format\n\n Implemented using :class:`torch.utils.tensorboard.SummaryWriter`. Logs are saved to\n `os.path.join(save_dir, name, version)`\n\n Example\n --------\n\n .. code-block:: python\n\n logger = TensorBoardLogger(\"tb_logs\", name=\"my_model\")\n trainer = Trainer(logger=logger)\n trainer.train(model)\n\n Args:\n save_dir (str): Save directory\n name (str): Experiment name. Defaults to \"default\".\n version (int): Experiment version. 
If version is not specified the logger inspects the save\n directory for existing versions, then automatically assigns the next available version.\n \\**kwargs (dict): Other arguments are passed directly to the :class:`SummaryWriter` constructor.\n\n \"\"\"\n NAME_CSV_TAGS = 'meta_tags.csv'\n\n def __init__(self, save_dir, name=\"default\", version=None, **kwargs):\n super().__init__()\n self.save_dir = save_dir\n self._name = name\n self._version = version\n\n self._experiment = None\n self.tags = {}\n self.kwargs = kwargs\n\n @property\n def experiment(self):\n r\"\"\"\n\n Actual tensorboard object. To use tensorboard features do the following.\n\n Example::\n\n self.logger.experiment.some_tensorboard_function()\n\n \"\"\"\n if self._experiment is not None:\n return self._experiment\n\n root_dir = os.path.join(self.save_dir, self.name)\n os.makedirs(root_dir, exist_ok=True)\n log_dir = os.path.join(root_dir, \"version_\" + str(self.version))\n self._experiment = SummaryWriter(log_dir=log_dir, **self.kwargs)\n return self._experiment\n\n @rank_zero_only\n def log_hyperparams(self, params):\n if params is None:\n return\n\n # in case converting from namespace\n if isinstance(params, Namespace):\n params = vars(params)\n params = dict(params)\n\n if parse_version(torch.__version__) < parse_version(\"1.3.0\"):\n warn(\n f\"Hyperparameter logging is not available for Torch version {torch.__version__}.\"\n \" Skipping log_hyperparams. Upgrade to Torch 1.3.0 or above to enable\"\n \" hyperparameter logging.\"\n )\n else:\n # `add_hparams` requires both - hparams and metric\n self.experiment.add_hparams(hparam_dict=params, metric_dict={})\n # some alternative should be added\n self.tags.update(params)\n\n @rank_zero_only\n def log_metrics(self, metrics, step=None):\n for k, v in metrics.items():\n if isinstance(v, torch.Tensor):\n v = v.item()\n self.experiment.add_scalar(k, v, step)\n\n @rank_zero_only\n def save(self):\n try:\n self.experiment.flush()\n except AttributeError:\n # you are using PT version (<v1.2) which does not have implemented flush\n self.experiment._get_file_writer().flush()\n\n # create a preudo standard path ala test-tube\n dir_path = os.path.join(self.save_dir, self.name, 'version_%s' % self.version)\n if not os.path.isdir(dir_path):\n dir_path = self.save_dir\n # prepare the file path\n meta_tags_path = os.path.join(dir_path, self.NAME_CSV_TAGS)\n # save the metatags file\n df = pd.DataFrame({'key': list(self.tags.keys()),\n 'value': list(self.tags.values())})\n df.to_csv(meta_tags_path, index=False)\n\n @rank_zero_only\n def finalize(self, status):\n self.save()\n\n @property\n def name(self):\n return self._name\n\n @property\n def version(self):\n if self._version is None:\n self._version = self._get_next_version()\n return self._version\n\n def _get_next_version(self):\n root_dir = os.path.join(self.save_dir, self.name)\n existing_versions = []\n for d in os.listdir(root_dir):\n if os.path.isdir(os.path.join(root_dir, d)) and d.startswith(\"version_\"):\n existing_versions.append(int(d.split(\"_\")[1]))\n\n if len(existing_versions) == 0:\n return 0\n else:\n return max(existing_versions) + 1\n", "path": "pytorch_lightning/logging/tensorboard.py"}]}
| 1,627 | 285 |
gh_patches_debug_17184
|
rasdani/github-patches
|
git_diff
|
comic__grand-challenge.org-33
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Do not print page title above each page
Having an h1 HOME on your home page looks stupid. Either remove this completely and show the currently selected page in the menu, or put the page title at the top of the content by default, so it is there by default but can be edited away if needed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/comicsite/views.py`
Content:
```
1 '''
2 Created on Jun 18, 2012
3
4 Testing views. Each of these views is referenced in urls.py
5
6 @author: Sjoerd
7 '''
8
9 from django.http import HttpResponse
10 from django.http import Http404
11 from django.shortcuts import render_to_response
12 from django.template import RequestContext
13
14 from comicsite.models import ComicSite,Page,ComicSiteException
15 from dataproviders import FileSystemDataProvider
16
17
18 def index(request):
19 return HttpResponse("ComicSite index page.",context_instance=RequestContext(request))
20
21
22 def site(request, site_short_name):
23 """ show a single COMIC site, default start page """
24 #TODO: Is it bad to use site name here, which is not the specified key?
25
26 site = getSite(site_short_name)
27
28 pages = getPages(site_short_name)
29
30 return render_to_response('page.html', {'site': site, 'page': pages[0], "pages":pages },context_instance=RequestContext(request))
31
32
33 def page(request, site_short_name, page_title):
34 """ show a single page on a site """
35
36 try:
37 p = Page.objects.get(ComicSite__short_name=site_short_name, title=page_title)
38 except Page.DoesNotExist:
39 raise Http404
40 pages = getPages(site_short_name)
41
42 return render_to_response('page.html', {'site': p.ComicSite, 'page': p, "pages":pages },context_instance=RequestContext(request))
43
44
45
46
47 def dataPage(request):
48 """ test function for data provider. Just get some files from provider and show them as list"""
49 #= r"D:\userdata\Sjoerd\Aptana Studio 3 Workspace\comic-django\django\static\files"
50
51 path = r"D:\userdata\Sjoerd\Aptana Studio 3 Workspace\comic-django\django\static\files"
52 dp = FileSystemDataProvider.FileSystemDataProvider(path)
53 images = dp.getImages()
54
55 htmlOut = "available files:"+", ".join(images)
56 p = createTestPage(html=htmlOut)
57 pages = [p]
58
59 return render_to_response('page.html', {'site': p.ComicSite, 'page': p, "pages":pages },context_instance=RequestContext(request))
60
61 # ======================================== not called directly from urls.py =========================================
62
63 def getSite(site_short_name):
64 try:
65 site = ComicSite.objects.get(short_name=site_short_name)
66 except ComicSite.DoesNotExist:
67 raise Http404
68 return site
69
70
71 def getPages(site_short_name):
72 """ get all pages of the given site from db"""
73 try:
74 pages = Page.objects.filter(ComicSite__short_name=site_short_name)
75 except Page.DoesNotExist:
76 raise Http404
77 return pages
78
79 # trying to follow pep 0008 here, finally.
80 def site_exists(site_short_name):
81 try:
82 site = ComicSite.objects.get(short_name=site_short_name)
83 return True
84 except ComicSite.DoesNotExist:
85 return False
86
87
88 # ====================================================== debug and test ==================================================
89 def createTestPage(title="testPage",html=""):
90 """ Create a quick mockup on the ComicSite 'Test'"""
91
92 if site_exists("test"):
93 #TODO log a warning here, no exception.
94 raise ComicSiteException("I am creating a spoof ComicSite called 'test' on the fly, by a project called 'test' was already defined in DB. This message should be a warning instead of an exception")
95
96 # if no site exists by that name, create it on the fly.
97 site = ComicSite()
98 site.short_name = "test"
99 site.name = "Test Page"
100 site.skin = ""
101
102 return Page(ComicSite=site,title=title,html=html)
103
104
105 def givePageHTML(page):
106 return "<h1>%s</h1> <p>%s</p>" %(page.title ,page.html)
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/django/comicsite/views.py b/django/comicsite/views.py
--- a/django/comicsite/views.py
+++ b/django/comicsite/views.py
@@ -27,7 +27,7 @@
pages = getPages(site_short_name)
- return render_to_response('page.html', {'site': site, 'page': pages[0], "pages":pages },context_instance=RequestContext(request))
+ return render_to_response('page.html', {'site': site, 'currentpage': pages[0], "pages":pages },context_instance=RequestContext(request))
def page(request, site_short_name, page_title):
@@ -39,7 +39,7 @@
raise Http404
pages = getPages(site_short_name)
- return render_to_response('page.html', {'site': p.ComicSite, 'page': p, "pages":pages },context_instance=RequestContext(request))
+ return render_to_response('page.html', {'site': p.ComicSite, 'currentpage': p, "pages":pages },context_instance=RequestContext(request))
|
{"golden_diff": "diff --git a/django/comicsite/views.py b/django/comicsite/views.py\n--- a/django/comicsite/views.py\n+++ b/django/comicsite/views.py\n@@ -27,7 +27,7 @@\n \n pages = getPages(site_short_name)\n \n- return render_to_response('page.html', {'site': site, 'page': pages[0], \"pages\":pages },context_instance=RequestContext(request))\n+ return render_to_response('page.html', {'site': site, 'currentpage': pages[0], \"pages\":pages },context_instance=RequestContext(request))\n \n \n def page(request, site_short_name, page_title):\n@@ -39,7 +39,7 @@\n raise Http404\n pages = getPages(site_short_name)\n \n- return render_to_response('page.html', {'site': p.ComicSite, 'page': p, \"pages\":pages },context_instance=RequestContext(request))\n+ return render_to_response('page.html', {'site': p.ComicSite, 'currentpage': p, \"pages\":pages },context_instance=RequestContext(request))\n", "issue": "Do not print page title above each page\nHaving a h1 HOME on your home page looks stupid. Either remove this completely and show currently selected page in menu, or put page title at top of content by default, so it is there by default but can be edited away if needed \n\n", "before_files": [{"content": "'''\nCreated on Jun 18, 2012\n\nTesting views. Each of these views is referenced in urls.py \n\n@author: Sjoerd\n'''\n\nfrom django.http import HttpResponse\nfrom django.http import Http404\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\n\nfrom comicsite.models import ComicSite,Page,ComicSiteException\nfrom dataproviders import FileSystemDataProvider\n\n\ndef index(request):\n return HttpResponse(\"ComicSite index page.\",context_instance=RequestContext(request))\n\n\ndef site(request, site_short_name):\n \"\"\" show a single COMIC site, default start page \"\"\"\n #TODO: Is it bad to use site name here, which is not the specified key?\n \n site = getSite(site_short_name)\n \n pages = getPages(site_short_name)\n \n return render_to_response('page.html', {'site': site, 'page': pages[0], \"pages\":pages },context_instance=RequestContext(request))\n \n\ndef page(request, site_short_name, page_title):\n \"\"\" show a single page on a site \"\"\"\n \n try:\n p = Page.objects.get(ComicSite__short_name=site_short_name, title=page_title)\n except Page.DoesNotExist: \n raise Http404\n pages = getPages(site_short_name)\n \n return render_to_response('page.html', {'site': p.ComicSite, 'page': p, \"pages\":pages },context_instance=RequestContext(request))\n \n \n \n\ndef dataPage(request):\n \"\"\" test function for data provider. 
Just get some files from provider and show them as list\"\"\"\n #= r\"D:\\userdata\\Sjoerd\\Aptana Studio 3 Workspace\\comic-django\\django\\static\\files\"\n \n path = r\"D:\\userdata\\Sjoerd\\Aptana Studio 3 Workspace\\comic-django\\django\\static\\files\"\n dp = FileSystemDataProvider.FileSystemDataProvider(path)\n images = dp.getImages()\n \n htmlOut = \"available files:\"+\", \".join(images)\n p = createTestPage(html=htmlOut)\n pages = [p]\n \n return render_to_response('page.html', {'site': p.ComicSite, 'page': p, \"pages\":pages },context_instance=RequestContext(request))\n\n# ======================================== not called directly from urls.py =========================================\n\ndef getSite(site_short_name):\n try:\n site = ComicSite.objects.get(short_name=site_short_name)\n except ComicSite.DoesNotExist: \n raise Http404 \n return site \n \n \ndef getPages(site_short_name):\n \"\"\" get all pages of the given site from db\"\"\"\n try:\n pages = Page.objects.filter(ComicSite__short_name=site_short_name)\n except Page.DoesNotExist: \n raise Http404\n return pages\n\n# trying to follow pep 0008 here, finally.\ndef site_exists(site_short_name):\n try:\n site = ComicSite.objects.get(short_name=site_short_name)\n return True\n except ComicSite.DoesNotExist: \n return False\n \n \n# ====================================================== debug and test ==================================================\ndef createTestPage(title=\"testPage\",html=\"\"):\n \"\"\" Create a quick mockup on the ComicSite 'Test'\"\"\"\n \n if site_exists(\"test\"):\n #TODO log a warning here, no exception.\n raise ComicSiteException(\"I am creating a spoof ComicSite called 'test' on the fly, by a project called 'test' was already defined in DB. This message should be a warning instead of an exception\") \n \n # if no site exists by that name, create it on the fly.\n site = ComicSite()\n site.short_name = \"test\"\n site.name = \"Test Page\"\n site.skin = \"\"\n \n return Page(ComicSite=site,title=title,html=html)\n \n\ndef givePageHTML(page):\n return \"<h1>%s</h1> <p>%s</p>\" %(page.title ,page.html)", "path": "django/comicsite/views.py"}], "after_files": [{"content": "'''\nCreated on Jun 18, 2012\n\nTesting views. 
Each of these views is referenced in urls.py \n\n@author: Sjoerd\n'''\n\nfrom django.http import HttpResponse\nfrom django.http import Http404\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\n\nfrom comicsite.models import ComicSite,Page,ComicSiteException\nfrom dataproviders import FileSystemDataProvider\n\n\ndef index(request):\n return HttpResponse(\"ComicSite index page.\",context_instance=RequestContext(request))\n\n\ndef site(request, site_short_name):\n \"\"\" show a single COMIC site, default start page \"\"\"\n #TODO: Is it bad to use site name here, which is not the specified key?\n \n site = getSite(site_short_name)\n \n pages = getPages(site_short_name)\n \n return render_to_response('page.html', {'site': site, 'currentpage': pages[0], \"pages\":pages },context_instance=RequestContext(request))\n \n\ndef page(request, site_short_name, page_title):\n \"\"\" show a single page on a site \"\"\"\n \n try:\n p = Page.objects.get(ComicSite__short_name=site_short_name, title=page_title)\n except Page.DoesNotExist: \n raise Http404\n pages = getPages(site_short_name)\n \n return render_to_response('page.html', {'site': p.ComicSite, 'currentpage': p, \"pages\":pages },context_instance=RequestContext(request))\n \n \n \n\ndef dataPage(request):\n \"\"\" test function for data provider. Just get some files from provider and show them as list\"\"\"\n #= r\"D:\\userdata\\Sjoerd\\Aptana Studio 3 Workspace\\comic-django\\django\\static\\files\"\n \n path = r\"D:\\userdata\\Sjoerd\\Aptana Studio 3 Workspace\\comic-django\\django\\static\\files\"\n dp = FileSystemDataProvider.FileSystemDataProvider(path)\n images = dp.getImages()\n \n htmlOut = \"available files:\"+\", \".join(images)\n p = createTestPage(html=htmlOut)\n pages = [p]\n \n return render_to_response('page.html', {'site': p.ComicSite, 'page': p, \"pages\":pages },context_instance=RequestContext(request))\n\n# ======================================== not called directly from urls.py =========================================\n\ndef getSite(site_short_name):\n try:\n site = ComicSite.objects.get(short_name=site_short_name)\n except ComicSite.DoesNotExist: \n raise Http404 \n return site \n \n \ndef getPages(site_short_name):\n \"\"\" get all pages of the given site from db\"\"\"\n try:\n pages = Page.objects.filter(ComicSite__short_name=site_short_name)\n except Page.DoesNotExist: \n raise Http404\n return pages\n\n# trying to follow pep 0008 here, finally.\ndef site_exists(site_short_name):\n try:\n site = ComicSite.objects.get(short_name=site_short_name)\n return True\n except ComicSite.DoesNotExist: \n return False\n \n \n# ====================================================== debug and test ==================================================\ndef createTestPage(title=\"testPage\",html=\"\"):\n \"\"\" Create a quick mockup on the ComicSite 'Test'\"\"\"\n \n if site_exists(\"test\"):\n #TODO log a warning here, no exception.\n raise ComicSiteException(\"I am creating a spoof ComicSite called 'test' on the fly, by a project called 'test' was already defined in DB. This message should be a warning instead of an exception\") \n \n # if no site exists by that name, create it on the fly.\n site = ComicSite()\n site.short_name = \"test\"\n site.name = \"Test Page\"\n site.skin = \"\"\n \n return Page(ComicSite=site,title=title,html=html)\n \n\ndef givePageHTML(page):\n return \"<h1>%s</h1> <p>%s</p>\" %(page.title ,page.html)", "path": "django/comicsite/views.py"}]}
| 1,377 | 245 |
gh_patches_debug_19590
|
rasdani/github-patches
|
git_diff
|
comic__grand-challenge.org-1913
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change order of the blog posts
Currently all blog posts that are published on grand-challenge are sorted based on the date the post was initially created. We would like to change this to the date the post was published, such that the most recent posts are shown on top. I had contact with @jmsmkn on Slack, after which he suggested a few changes I could make in the code to change this. After discussing this with Kiran, we thought it might be best to first create an issue here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/blogs/models.py`
Content:
```
1 from django.conf import settings
2 from django.contrib.auth import get_user_model
3 from django.db import models
4 from django_extensions.db.fields import AutoSlugField
5 from simple_history.models import HistoricalRecords
6 from stdimage import JPEGField
7
8 from grandchallenge.core.storage import get_logo_path, public_s3_storage
9 from grandchallenge.subdomains.utils import reverse
10
11
12 class Tag(models.Model):
13 name = models.CharField(max_length=200, unique=True)
14 slug = AutoSlugField(populate_from="name", max_length=200)
15
16 def __str__(self):
17 return self.name
18
19
20 class Post(models.Model):
21 created = models.DateTimeField(auto_now_add=True)
22 modified = models.DateTimeField(auto_now=True)
23
24 title = models.CharField(max_length=1024)
25 slug = AutoSlugField(populate_from="title", max_length=1024)
26 description = models.TextField()
27 content = models.TextField()
28
29 authors = models.ManyToManyField(
30 to=get_user_model(), related_name="blog_authors"
31 )
32
33 logo = JPEGField(
34 upload_to=get_logo_path,
35 storage=public_s3_storage,
36 variations=settings.STDIMAGE_SOCIAL_VARIATIONS,
37 )
38
39 tags = models.ManyToManyField(to=Tag, blank=True, related_name="posts")
40
41 published = models.BooleanField(default=False)
42
43 history = HistoricalRecords()
44
45 class Meta:
46 ordering = ("-created",)
47
48 def __str__(self):
49 return self.title
50
51 def get_absolute_url(self):
52 return reverse("blogs:detail", kwargs={"slug": self.slug})
53
54 @property
55 def public(self):
56 return self.published
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/grandchallenge/blogs/models.py b/app/grandchallenge/blogs/models.py
--- a/app/grandchallenge/blogs/models.py
+++ b/app/grandchallenge/blogs/models.py
@@ -1,6 +1,7 @@
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
+from django.utils import timezone
from django_extensions.db.fields import AutoSlugField
from simple_history.models import HistoricalRecords
from stdimage import JPEGField
@@ -48,6 +49,16 @@
def __str__(self):
return self.title
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._published_orig = self.published
+
+ def save(self, *args, **kwargs):
+ if self._published_orig is False and self.published is True:
+ self.created = timezone.now()
+
+ super().save(*args, **kwargs)
+
def get_absolute_url(self):
return reverse("blogs:detail", kwargs={"slug": self.slug})
|
{"golden_diff": "diff --git a/app/grandchallenge/blogs/models.py b/app/grandchallenge/blogs/models.py\n--- a/app/grandchallenge/blogs/models.py\n+++ b/app/grandchallenge/blogs/models.py\n@@ -1,6 +1,7 @@\n from django.conf import settings\n from django.contrib.auth import get_user_model\n from django.db import models\n+from django.utils import timezone\n from django_extensions.db.fields import AutoSlugField\n from simple_history.models import HistoricalRecords\n from stdimage import JPEGField\n@@ -48,6 +49,16 @@\n def __str__(self):\n return self.title\n \n+ def __init__(self, *args, **kwargs):\n+ super().__init__(*args, **kwargs)\n+ self._published_orig = self.published\n+\n+ def save(self, *args, **kwargs):\n+ if self._published_orig is False and self.published is True:\n+ self.created = timezone.now()\n+\n+ super().save(*args, **kwargs)\n+\n def get_absolute_url(self):\n return reverse(\"blogs:detail\", kwargs={\"slug\": self.slug})\n", "issue": "Change order of the blog posts\nCurrently all blog posts that are published on grand-challenge are sorted based on the date the post was initially created. We would like to change this to the date the post was published such that the most recent post are shown on top. I had contact with @jmsmkn on slack after which he suggested a few changes I could make in the code to change this. After discussing this with Kiran we thought it might be best to first create an issue here. \r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\nfrom django_extensions.db.fields import AutoSlugField\nfrom simple_history.models import HistoricalRecords\nfrom stdimage import JPEGField\n\nfrom grandchallenge.core.storage import get_logo_path, public_s3_storage\nfrom grandchallenge.subdomains.utils import reverse\n\n\nclass Tag(models.Model):\n name = models.CharField(max_length=200, unique=True)\n slug = AutoSlugField(populate_from=\"name\", max_length=200)\n\n def __str__(self):\n return self.name\n\n\nclass Post(models.Model):\n created = models.DateTimeField(auto_now_add=True)\n modified = models.DateTimeField(auto_now=True)\n\n title = models.CharField(max_length=1024)\n slug = AutoSlugField(populate_from=\"title\", max_length=1024)\n description = models.TextField()\n content = models.TextField()\n\n authors = models.ManyToManyField(\n to=get_user_model(), related_name=\"blog_authors\"\n )\n\n logo = JPEGField(\n upload_to=get_logo_path,\n storage=public_s3_storage,\n variations=settings.STDIMAGE_SOCIAL_VARIATIONS,\n )\n\n tags = models.ManyToManyField(to=Tag, blank=True, related_name=\"posts\")\n\n published = models.BooleanField(default=False)\n\n history = HistoricalRecords()\n\n class Meta:\n ordering = (\"-created\",)\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse(\"blogs:detail\", kwargs={\"slug\": self.slug})\n\n @property\n def public(self):\n return self.published\n", "path": "app/grandchallenge/blogs/models.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\nfrom django.utils import timezone\nfrom django_extensions.db.fields import AutoSlugField\nfrom simple_history.models import HistoricalRecords\nfrom stdimage import JPEGField\n\nfrom grandchallenge.core.storage import get_logo_path, public_s3_storage\nfrom grandchallenge.subdomains.utils import reverse\n\n\nclass Tag(models.Model):\n name = 
models.CharField(max_length=200, unique=True)\n slug = AutoSlugField(populate_from=\"name\", max_length=200)\n\n def __str__(self):\n return self.name\n\n\nclass Post(models.Model):\n created = models.DateTimeField(auto_now_add=True)\n modified = models.DateTimeField(auto_now=True)\n\n title = models.CharField(max_length=1024)\n slug = AutoSlugField(populate_from=\"title\", max_length=1024)\n description = models.TextField()\n content = models.TextField()\n\n authors = models.ManyToManyField(\n to=get_user_model(), related_name=\"blog_authors\"\n )\n\n logo = JPEGField(\n upload_to=get_logo_path,\n storage=public_s3_storage,\n variations=settings.STDIMAGE_SOCIAL_VARIATIONS,\n )\n\n tags = models.ManyToManyField(to=Tag, blank=True, related_name=\"posts\")\n\n published = models.BooleanField(default=False)\n\n history = HistoricalRecords()\n\n class Meta:\n ordering = (\"-created\",)\n\n def __str__(self):\n return self.title\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._published_orig = self.published\n\n def save(self, *args, **kwargs):\n if self._published_orig is False and self.published is True:\n self.created = timezone.now()\n\n super().save(*args, **kwargs)\n\n def get_absolute_url(self):\n return reverse(\"blogs:detail\", kwargs={\"slug\": self.slug})\n\n @property\n def public(self):\n return self.published\n", "path": "app/grandchallenge/blogs/models.py"}]}
| 823 | 238 |
gh_patches_debug_16154
|
rasdani/github-patches
|
git_diff
|
bornhack__bornhack-website-378
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
backoffice: show quantity of products ordered after scanning qr code
backoffice: show quantity of products ordered after scanning qr code
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/tickets/models.py`
Content:
```
1 import io
2 import hashlib
3 import base64
4 import qrcode
5 from django.conf import settings
6 from django.urls import reverse_lazy
7 from django.utils.translation import ugettext_lazy as _
8
9 from shop.models import OrderProductRelation
10 from utils.models import UUIDModel, CampRelatedModel
11 from utils.pdf import generate_pdf_letter
12 from django.db import models
13 import logging
14
15 logger = logging.getLogger("bornhack.%s" % __name__)
16
17
18 # TicketType can be full week, one day. etc.
19 class TicketType(CampRelatedModel, UUIDModel):
20 name = models.TextField()
21 camp = models.ForeignKey("camps.Camp", on_delete=models.PROTECT)
22 includes_badge = models.BooleanField(default=False)
23 single_ticket_per_product = models.BooleanField(
24 default=False,
25 help_text=(
26 "Only create one ticket for a product/order pair no matter the quantity. "
27 "Useful for products which are bought in larger quantity (ie. village chairs)"
28 ),
29 )
30
31 def __str__(self):
32 return "{} ({})".format(self.name, self.camp.title)
33
34
35 def create_ticket_token(string):
36 return hashlib.sha256(string).hexdigest()
37
38
39 def qr_code_base64(token):
40 qr = qrcode.make(
41 token, version=1, error_correction=qrcode.constants.ERROR_CORRECT_H
42 ).resize((250, 250))
43 file_like = io.BytesIO()
44 qr.save(file_like, format="png")
45 qrcode_base64 = base64.b64encode(file_like.getvalue())
46 return qrcode_base64
47
48
49 class BaseTicket(CampRelatedModel, UUIDModel):
50 ticket_type = models.ForeignKey("TicketType", on_delete=models.PROTECT)
51 used = models.BooleanField(default=False)
52 badge_handed_out = models.BooleanField(default=False)
53 token = models.CharField(max_length=64, blank=True)
54 badge_token = models.CharField(max_length=64, blank=True)
55
56 class Meta:
57 abstract = True
58
59 @property
60 def camp(self):
61 return self.ticket_type.camp
62
63 def save(self, **kwargs):
64 self.token = self._get_token()
65 self.badge_token = self._get_badge_token()
66 super().save(**kwargs)
67
68 def _get_token(self):
69 return create_ticket_token(
70 "{_id}{secret_key}".format(
71 _id=self.uuid, secret_key=settings.SECRET_KEY
72 ).encode("utf-8")
73 )
74
75 def _get_badge_token(self):
76 return create_ticket_token(
77 "{_id}{secret_key}-badge".format(
78 _id=self.uuid, secret_key=settings.SECRET_KEY
79 ).encode("utf-8")
80 )
81
82 def get_qr_code_url(self):
83 return "data:image/png;base64,{}".format(
84 qr_code_base64(self._get_token()).decode("utf-8")
85 )
86
87 def get_qr_badge_code_url(self):
88 return "data:image/png;base64,{}".format(
89 qr_code_base64(self._get_badge_token()).decode("utf-8")
90 )
91
92 def generate_pdf(self):
93 formatdict = {"ticket": self}
94
95 if self.ticket_type.single_ticket_per_product and self.shortname == "shop":
96 orp = self.get_orp()
97 formatdict["quantity"] = orp.quantity
98
99 return generate_pdf_letter(
100 filename="{}_ticket_{}.pdf".format(self.shortname, self.pk),
101 formatdict=formatdict,
102 template="pdf/ticket.html",
103 )
104
105
106 class SponsorTicket(BaseTicket):
107 sponsor = models.ForeignKey("sponsors.Sponsor", on_delete=models.PROTECT)
108
109 def __str__(self):
110 return "SponsorTicket: {}".format(self.pk)
111
112 @property
113 def shortname(self):
114 return "sponsor"
115
116
117 class DiscountTicket(BaseTicket):
118 price = models.IntegerField(
119 help_text=_("Price of the discounted ticket (in DKK, including VAT).")
120 )
121
122 def __str__(self):
123 return "DiscountTicket: {}".format(self.pk)
124
125 @property
126 def shortname(self):
127 return "discount"
128
129
130 class ShopTicket(BaseTicket):
131 order = models.ForeignKey(
132 "shop.Order", related_name="shoptickets", on_delete=models.PROTECT
133 )
134 product = models.ForeignKey("shop.Product", on_delete=models.PROTECT)
135
136 name = models.CharField(
137 max_length=100,
138 help_text=(
139 "Name of the person this ticket belongs to. "
140 "This can be different from the buying user."
141 ),
142 null=True,
143 blank=True,
144 )
145
146 email = models.EmailField(null=True, blank=True)
147
148 # overwrite the _get_token method because old tickets use the user_id
149 def _get_token(self):
150 return hashlib.sha256(
151 "{_id}{user_id}{secret_key}".format(
152 _id=self.pk, user_id=self.order.user.pk, secret_key=settings.SECRET_KEY
153 ).encode("utf-8")
154 ).hexdigest()
155
156 def __str__(self):
157 return "Ticket {user} {product}".format(
158 user=self.order.user, product=self.product
159 )
160
161 def get_absolute_url(self):
162 return str(reverse_lazy("tickets:shopticket_edit", kwargs={"pk": self.pk}))
163
164 @property
165 def shortname(self):
166 return "shop"
167
168 def get_orp(self):
169 return OrderProductRelation.objects.get(product=self.product, order=self.order)
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/tickets/models.py b/src/tickets/models.py
--- a/src/tickets/models.py
+++ b/src/tickets/models.py
@@ -93,8 +93,7 @@
formatdict = {"ticket": self}
if self.ticket_type.single_ticket_per_product and self.shortname == "shop":
- orp = self.get_orp()
- formatdict["quantity"] = orp.quantity
+ formatdict["quantity"] = self.orp.quantity
return generate_pdf_letter(
filename="{}_ticket_{}.pdf".format(self.shortname, self.pk),
@@ -165,5 +164,6 @@
def shortname(self):
return "shop"
- def get_orp(self):
+ @property
+ def orp(self):
return OrderProductRelation.objects.get(product=self.product, order=self.order)
|
{"golden_diff": "diff --git a/src/tickets/models.py b/src/tickets/models.py\n--- a/src/tickets/models.py\n+++ b/src/tickets/models.py\n@@ -93,8 +93,7 @@\n formatdict = {\"ticket\": self}\n \n if self.ticket_type.single_ticket_per_product and self.shortname == \"shop\":\n- orp = self.get_orp()\n- formatdict[\"quantity\"] = orp.quantity\n+ formatdict[\"quantity\"] = self.orp.quantity\n \n return generate_pdf_letter(\n filename=\"{}_ticket_{}.pdf\".format(self.shortname, self.pk),\n@@ -165,5 +164,6 @@\n def shortname(self):\n return \"shop\"\n \n- def get_orp(self):\n+ @property\n+ def orp(self):\n return OrderProductRelation.objects.get(product=self.product, order=self.order)\n", "issue": "backoffice: show quantity of products ordered after scanning qr code\n\nbackoffice: show quantity of products ordered after scanning qr code\n\n", "before_files": [{"content": "import io\nimport hashlib\nimport base64\nimport qrcode\nfrom django.conf import settings\nfrom django.urls import reverse_lazy\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom shop.models import OrderProductRelation\nfrom utils.models import UUIDModel, CampRelatedModel\nfrom utils.pdf import generate_pdf_letter\nfrom django.db import models\nimport logging\n\nlogger = logging.getLogger(\"bornhack.%s\" % __name__)\n\n\n# TicketType can be full week, one day. etc.\nclass TicketType(CampRelatedModel, UUIDModel):\n name = models.TextField()\n camp = models.ForeignKey(\"camps.Camp\", on_delete=models.PROTECT)\n includes_badge = models.BooleanField(default=False)\n single_ticket_per_product = models.BooleanField(\n default=False,\n help_text=(\n \"Only create one ticket for a product/order pair no matter the quantity. \"\n \"Useful for products which are bought in larger quantity (ie. 
village chairs)\"\n ),\n )\n\n def __str__(self):\n return \"{} ({})\".format(self.name, self.camp.title)\n\n\ndef create_ticket_token(string):\n return hashlib.sha256(string).hexdigest()\n\n\ndef qr_code_base64(token):\n qr = qrcode.make(\n token, version=1, error_correction=qrcode.constants.ERROR_CORRECT_H\n ).resize((250, 250))\n file_like = io.BytesIO()\n qr.save(file_like, format=\"png\")\n qrcode_base64 = base64.b64encode(file_like.getvalue())\n return qrcode_base64\n\n\nclass BaseTicket(CampRelatedModel, UUIDModel):\n ticket_type = models.ForeignKey(\"TicketType\", on_delete=models.PROTECT)\n used = models.BooleanField(default=False)\n badge_handed_out = models.BooleanField(default=False)\n token = models.CharField(max_length=64, blank=True)\n badge_token = models.CharField(max_length=64, blank=True)\n\n class Meta:\n abstract = True\n\n @property\n def camp(self):\n return self.ticket_type.camp\n\n def save(self, **kwargs):\n self.token = self._get_token()\n self.badge_token = self._get_badge_token()\n super().save(**kwargs)\n\n def _get_token(self):\n return create_ticket_token(\n \"{_id}{secret_key}\".format(\n _id=self.uuid, secret_key=settings.SECRET_KEY\n ).encode(\"utf-8\")\n )\n\n def _get_badge_token(self):\n return create_ticket_token(\n \"{_id}{secret_key}-badge\".format(\n _id=self.uuid, secret_key=settings.SECRET_KEY\n ).encode(\"utf-8\")\n )\n\n def get_qr_code_url(self):\n return \"data:image/png;base64,{}\".format(\n qr_code_base64(self._get_token()).decode(\"utf-8\")\n )\n\n def get_qr_badge_code_url(self):\n return \"data:image/png;base64,{}\".format(\n qr_code_base64(self._get_badge_token()).decode(\"utf-8\")\n )\n\n def generate_pdf(self):\n formatdict = {\"ticket\": self}\n\n if self.ticket_type.single_ticket_per_product and self.shortname == \"shop\":\n orp = self.get_orp()\n formatdict[\"quantity\"] = orp.quantity\n\n return generate_pdf_letter(\n filename=\"{}_ticket_{}.pdf\".format(self.shortname, self.pk),\n formatdict=formatdict,\n template=\"pdf/ticket.html\",\n )\n\n\nclass SponsorTicket(BaseTicket):\n sponsor = models.ForeignKey(\"sponsors.Sponsor\", on_delete=models.PROTECT)\n\n def __str__(self):\n return \"SponsorTicket: {}\".format(self.pk)\n\n @property\n def shortname(self):\n return \"sponsor\"\n\n\nclass DiscountTicket(BaseTicket):\n price = models.IntegerField(\n help_text=_(\"Price of the discounted ticket (in DKK, including VAT).\")\n )\n\n def __str__(self):\n return \"DiscountTicket: {}\".format(self.pk)\n\n @property\n def shortname(self):\n return \"discount\"\n\n\nclass ShopTicket(BaseTicket):\n order = models.ForeignKey(\n \"shop.Order\", related_name=\"shoptickets\", on_delete=models.PROTECT\n )\n product = models.ForeignKey(\"shop.Product\", on_delete=models.PROTECT)\n\n name = models.CharField(\n max_length=100,\n help_text=(\n \"Name of the person this ticket belongs to. 
\"\n \"This can be different from the buying user.\"\n ),\n null=True,\n blank=True,\n )\n\n email = models.EmailField(null=True, blank=True)\n\n # overwrite the _get_token method because old tickets use the user_id\n def _get_token(self):\n return hashlib.sha256(\n \"{_id}{user_id}{secret_key}\".format(\n _id=self.pk, user_id=self.order.user.pk, secret_key=settings.SECRET_KEY\n ).encode(\"utf-8\")\n ).hexdigest()\n\n def __str__(self):\n return \"Ticket {user} {product}\".format(\n user=self.order.user, product=self.product\n )\n\n def get_absolute_url(self):\n return str(reverse_lazy(\"tickets:shopticket_edit\", kwargs={\"pk\": self.pk}))\n\n @property\n def shortname(self):\n return \"shop\"\n\n def get_orp(self):\n return OrderProductRelation.objects.get(product=self.product, order=self.order)\n", "path": "src/tickets/models.py"}], "after_files": [{"content": "import io\nimport hashlib\nimport base64\nimport qrcode\nfrom django.conf import settings\nfrom django.urls import reverse_lazy\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom shop.models import OrderProductRelation\nfrom utils.models import UUIDModel, CampRelatedModel\nfrom utils.pdf import generate_pdf_letter\nfrom django.db import models\nimport logging\n\nlogger = logging.getLogger(\"bornhack.%s\" % __name__)\n\n\n# TicketType can be full week, one day. etc.\nclass TicketType(CampRelatedModel, UUIDModel):\n name = models.TextField()\n camp = models.ForeignKey(\"camps.Camp\", on_delete=models.PROTECT)\n includes_badge = models.BooleanField(default=False)\n single_ticket_per_product = models.BooleanField(\n default=False,\n help_text=(\n \"Only create one ticket for a product/order pair no matter the quantity. \"\n \"Useful for products which are bought in larger quantity (ie. 
village chairs)\"\n ),\n )\n\n def __str__(self):\n return \"{} ({})\".format(self.name, self.camp.title)\n\n\ndef create_ticket_token(string):\n return hashlib.sha256(string).hexdigest()\n\n\ndef qr_code_base64(token):\n qr = qrcode.make(\n token, version=1, error_correction=qrcode.constants.ERROR_CORRECT_H\n ).resize((250, 250))\n file_like = io.BytesIO()\n qr.save(file_like, format=\"png\")\n qrcode_base64 = base64.b64encode(file_like.getvalue())\n return qrcode_base64\n\n\nclass BaseTicket(CampRelatedModel, UUIDModel):\n ticket_type = models.ForeignKey(\"TicketType\", on_delete=models.PROTECT)\n used = models.BooleanField(default=False)\n badge_handed_out = models.BooleanField(default=False)\n token = models.CharField(max_length=64, blank=True)\n badge_token = models.CharField(max_length=64, blank=True)\n\n class Meta:\n abstract = True\n\n @property\n def camp(self):\n return self.ticket_type.camp\n\n def save(self, **kwargs):\n self.token = self._get_token()\n self.badge_token = self._get_badge_token()\n super().save(**kwargs)\n\n def _get_token(self):\n return create_ticket_token(\n \"{_id}{secret_key}\".format(\n _id=self.uuid, secret_key=settings.SECRET_KEY\n ).encode(\"utf-8\")\n )\n\n def _get_badge_token(self):\n return create_ticket_token(\n \"{_id}{secret_key}-badge\".format(\n _id=self.uuid, secret_key=settings.SECRET_KEY\n ).encode(\"utf-8\")\n )\n\n def get_qr_code_url(self):\n return \"data:image/png;base64,{}\".format(\n qr_code_base64(self._get_token()).decode(\"utf-8\")\n )\n\n def get_qr_badge_code_url(self):\n return \"data:image/png;base64,{}\".format(\n qr_code_base64(self._get_badge_token()).decode(\"utf-8\")\n )\n\n def generate_pdf(self):\n formatdict = {\"ticket\": self}\n\n if self.ticket_type.single_ticket_per_product and self.shortname == \"shop\":\n formatdict[\"quantity\"] = self.orp.quantity\n\n return generate_pdf_letter(\n filename=\"{}_ticket_{}.pdf\".format(self.shortname, self.pk),\n formatdict=formatdict,\n template=\"pdf/ticket.html\",\n )\n\n\nclass SponsorTicket(BaseTicket):\n sponsor = models.ForeignKey(\"sponsors.Sponsor\", on_delete=models.PROTECT)\n\n def __str__(self):\n return \"SponsorTicket: {}\".format(self.pk)\n\n @property\n def shortname(self):\n return \"sponsor\"\n\n\nclass DiscountTicket(BaseTicket):\n price = models.IntegerField(\n help_text=_(\"Price of the discounted ticket (in DKK, including VAT).\")\n )\n\n def __str__(self):\n return \"DiscountTicket: {}\".format(self.pk)\n\n @property\n def shortname(self):\n return \"discount\"\n\n\nclass ShopTicket(BaseTicket):\n order = models.ForeignKey(\n \"shop.Order\", related_name=\"shoptickets\", on_delete=models.PROTECT\n )\n product = models.ForeignKey(\"shop.Product\", on_delete=models.PROTECT)\n\n name = models.CharField(\n max_length=100,\n help_text=(\n \"Name of the person this ticket belongs to. 
\"\n \"This can be different from the buying user.\"\n ),\n null=True,\n blank=True,\n )\n\n email = models.EmailField(null=True, blank=True)\n\n # overwrite the _get_token method because old tickets use the user_id\n def _get_token(self):\n return hashlib.sha256(\n \"{_id}{user_id}{secret_key}\".format(\n _id=self.pk, user_id=self.order.user.pk, secret_key=settings.SECRET_KEY\n ).encode(\"utf-8\")\n ).hexdigest()\n\n def __str__(self):\n return \"Ticket {user} {product}\".format(\n user=self.order.user, product=self.product\n )\n\n def get_absolute_url(self):\n return str(reverse_lazy(\"tickets:shopticket_edit\", kwargs={\"pk\": self.pk}))\n\n @property\n def shortname(self):\n return \"shop\"\n\n @property\n def orp(self):\n return OrderProductRelation.objects.get(product=self.product, order=self.order)\n", "path": "src/tickets/models.py"}]}
| 1,873 | 190 |
gh_patches_debug_643
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-1925
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.107
On the docket:
+ [x] `git` username replaced with `****` redaction in lockfile for `git+ssh` direct references #1918
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.106"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.106"
+__version__ = "2.1.107"
|
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.106\"\n+__version__ = \"2.1.107\"\n", "issue": "Release 2.1.107\nOn the docket:\r\n+ [x] `git` username replaced with `****` redaction in lockfile for `git+ssh` direct references #1918\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.106\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.107\"\n", "path": "pex/version.py"}]}
| 355 | 98 |
gh_patches_debug_1877
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-2921
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
local cache inconsistent after enabling short_paths in a recipe
To help us debug your issue please explain:
- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).
- [x] I've specified the Conan version, operating system version and any tool that can be relevant.
- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
Conan Version 1.3.3
Windows 10
With a package in the local cache whose recipe does NOT have `short_paths=True`, modify the recipe in the normal development folder, set `short_paths=True`, and run conan create.
Folders in the local cache become inconsistent, showing both the folders from the previous conan create run and .conan_link files pointing to the short-paths folders.
This seems to not affect conan tool behavior when running commands, and works well if `short_paths` is removed once again.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/util/windows.py`
Content:
```
1 import os
2 import subprocess
3
4 from conans.util.files import load, mkdir, save, rmdir
5 import tempfile
6
7
8 CONAN_LINK = ".conan_link"
9
10
11 def conan_expand_user(path):
12 """ wrapper to the original expanduser function, to workaround python returning
13 verbatim %USERPROFILE% when some other app (git for windows) sets HOME envvar
14 """
15 # In win these variables should exist and point to user directory, which
16 # must exist. Using context to avoid permanent modification of os.environ
17 old_env = dict(os.environ)
18 try:
19 home = os.environ.get("HOME")
20 # Problematic cases of wrong HOME variable
21 # - HOME = %USERPROFILE% verbatim, as messed by some other tools
22 # - MSYS console, that defines a different user home in /c/mingw/msys/users/xxx
23 # In these cases, it is safe to remove it and rely on USERPROFILE directly
24 if home and (not os.path.exists(home) or
25 (os.getenv("MSYSTEM") and os.getenv("USERPROFILE"))):
26 del os.environ["HOME"]
27 result = os.path.expanduser(path)
28 finally:
29 os.environ.clear()
30 os.environ.update(old_env)
31 return result
32
33
34 def path_shortener(path, short_paths):
35 """ short_paths is 4-state:
36 False: Never shorten the path
37 True: Always shorten the path, create link if not existing
38 None: Use shorten path only if already exists, not create
39 """
40 if short_paths is False or os.getenv("CONAN_USER_HOME_SHORT") == "None":
41 return path
42 link = os.path.join(path, CONAN_LINK)
43 if os.path.exists(link):
44 return load(link)
45 elif short_paths is None:
46 return path
47
48 short_home = os.getenv("CONAN_USER_HOME_SHORT")
49 if not short_home:
50 drive = os.path.splitdrive(path)[0]
51 short_home = drive + "/.conan"
52 mkdir(short_home)
53
54 # Workaround for short_home living in NTFS file systems. Give full control permission to current user to avoid
55 # access problems in cygwin/msys2 windows subsystems when using short_home folder
56 try:
57 username = os.getenv("USERDOMAIN")
58 domainname = "%s\%s" % (username, os.environ["USERNAME"]) if username else os.environ["USERNAME"]
59 cmd = r'cacls %s /E /G "%s":F' % (short_home, domainname)
60 subprocess.check_output(cmd, stderr=subprocess.STDOUT) # Ignoring any returned output, make command quiet
61 except subprocess.CalledProcessError:
62 # cmd can fail if trying to set ACL in non NTFS drives, ignoring it.
63 pass
64
65 redirect = tempfile.mkdtemp(dir=short_home, prefix="")
66 # This "1" is the way to have a non-existing directory, so commands like
67 # shutil.copytree() to it, works. It can be removed without compromising the
68 # temp folder generator and conan-links consistency
69 redirect = os.path.join(redirect, "1")
70 save(link, redirect)
71 return redirect
72
73
74 def ignore_long_path_files(src_folder, build_folder, output):
75 def _filter(src, files):
76 filtered_files = []
77 for the_file in files:
78 source_path = os.path.join(src, the_file)
79 # Without storage path, just relative
80 rel_path = os.path.relpath(source_path, src_folder)
81 dest_path = os.path.normpath(os.path.join(build_folder, rel_path))
82 # it is NOT that "/" is counted as "\\" so it counts double
83 # seems a bug in python, overflows paths near the limit of 260,
84 if len(dest_path) >= 249:
85 filtered_files.append(the_file)
86 output.warn("Filename too long, file excluded: %s" % dest_path)
87 return filtered_files
88 return _filter
89
90
91 def rm_conandir(path):
92 """removal of a directory that might contain a link to a short path"""
93 link = os.path.join(path, CONAN_LINK)
94 if os.path.exists(link):
95 short_path = load(link)
96 rmdir(os.path.dirname(short_path))
97 rmdir(path)
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conans/util/windows.py b/conans/util/windows.py
--- a/conans/util/windows.py
+++ b/conans/util/windows.py
@@ -45,6 +45,9 @@
elif short_paths is None:
return path
+ if os.path.exists(path):
+ rmdir(path)
+
short_home = os.getenv("CONAN_USER_HOME_SHORT")
if not short_home:
drive = os.path.splitdrive(path)[0]
|
{"golden_diff": "diff --git a/conans/util/windows.py b/conans/util/windows.py\n--- a/conans/util/windows.py\n+++ b/conans/util/windows.py\n@@ -45,6 +45,9 @@\n elif short_paths is None:\n return path\n \n+ if os.path.exists(path):\n+ rmdir(path)\n+\n short_home = os.getenv(\"CONAN_USER_HOME_SHORT\")\n if not short_home:\n drive = os.path.splitdrive(path)[0]\n", "issue": "local cache inconsistent after enabling short_paths in a recipe\nTo help us debug your issue please explain:\r\n\r\n- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).\r\n- [x] I've specified the Conan version, operating system version and any tool that can be relevant.\r\n- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.\r\n\r\nConan Version 1.3.3\r\nWindows 10\r\n\r\nWith a package in local cache whose recipe does NOT have `short_paths=True`, modify in normal development folder the recipe and set `short_paths=True` and run conan create.\r\n\r\nFolders in local cache become inconsistent showing both folders from previous conan create run and .conan_link files pointing to the short paths folders.\r\n\r\nThis seems no not affect conan tool behavior when running commands and works well if `short_paths` is removed once again.\r\n\r\n\n", "before_files": [{"content": "import os\nimport subprocess\n\nfrom conans.util.files import load, mkdir, save, rmdir\nimport tempfile\n\n\nCONAN_LINK = \".conan_link\"\n\n\ndef conan_expand_user(path):\n \"\"\" wrapper to the original expanduser function, to workaround python returning\n verbatim %USERPROFILE% when some other app (git for windows) sets HOME envvar\n \"\"\"\n # In win these variables should exist and point to user directory, which\n # must exist. Using context to avoid permanent modification of os.environ\n old_env = dict(os.environ)\n try:\n home = os.environ.get(\"HOME\")\n # Problematic cases of wrong HOME variable\n # - HOME = %USERPROFILE% verbatim, as messed by some other tools\n # - MSYS console, that defines a different user home in /c/mingw/msys/users/xxx\n # In these cases, it is safe to remove it and rely on USERPROFILE directly\n if home and (not os.path.exists(home) or\n (os.getenv(\"MSYSTEM\") and os.getenv(\"USERPROFILE\"))):\n del os.environ[\"HOME\"]\n result = os.path.expanduser(path)\n finally:\n os.environ.clear()\n os.environ.update(old_env)\n return result\n\n\ndef path_shortener(path, short_paths):\n \"\"\" short_paths is 4-state:\n False: Never shorten the path\n True: Always shorten the path, create link if not existing\n None: Use shorten path only if already exists, not create\n \"\"\"\n if short_paths is False or os.getenv(\"CONAN_USER_HOME_SHORT\") == \"None\":\n return path\n link = os.path.join(path, CONAN_LINK)\n if os.path.exists(link):\n return load(link)\n elif short_paths is None:\n return path\n\n short_home = os.getenv(\"CONAN_USER_HOME_SHORT\")\n if not short_home:\n drive = os.path.splitdrive(path)[0]\n short_home = drive + \"/.conan\"\n mkdir(short_home)\n\n # Workaround for short_home living in NTFS file systems. 
Give full control permission to current user to avoid\n # access problems in cygwin/msys2 windows subsystems when using short_home folder\n try:\n username = os.getenv(\"USERDOMAIN\")\n domainname = \"%s\\%s\" % (username, os.environ[\"USERNAME\"]) if username else os.environ[\"USERNAME\"]\n cmd = r'cacls %s /E /G \"%s\":F' % (short_home, domainname)\n subprocess.check_output(cmd, stderr=subprocess.STDOUT) # Ignoring any returned output, make command quiet\n except subprocess.CalledProcessError:\n # cmd can fail if trying to set ACL in non NTFS drives, ignoring it.\n pass\n\n redirect = tempfile.mkdtemp(dir=short_home, prefix=\"\")\n # This \"1\" is the way to have a non-existing directory, so commands like\n # shutil.copytree() to it, works. It can be removed without compromising the\n # temp folder generator and conan-links consistency\n redirect = os.path.join(redirect, \"1\")\n save(link, redirect)\n return redirect\n\n\ndef ignore_long_path_files(src_folder, build_folder, output):\n def _filter(src, files):\n filtered_files = []\n for the_file in files:\n source_path = os.path.join(src, the_file)\n # Without storage path, just relative\n rel_path = os.path.relpath(source_path, src_folder)\n dest_path = os.path.normpath(os.path.join(build_folder, rel_path))\n # it is NOT that \"/\" is counted as \"\\\\\" so it counts double\n # seems a bug in python, overflows paths near the limit of 260,\n if len(dest_path) >= 249:\n filtered_files.append(the_file)\n output.warn(\"Filename too long, file excluded: %s\" % dest_path)\n return filtered_files\n return _filter\n\n\ndef rm_conandir(path):\n \"\"\"removal of a directory that might contain a link to a short path\"\"\"\n link = os.path.join(path, CONAN_LINK)\n if os.path.exists(link):\n short_path = load(link)\n rmdir(os.path.dirname(short_path))\n rmdir(path)\n", "path": "conans/util/windows.py"}], "after_files": [{"content": "import os\nimport subprocess\n\nfrom conans.util.files import load, mkdir, save, rmdir\nimport tempfile\n\n\nCONAN_LINK = \".conan_link\"\n\n\ndef conan_expand_user(path):\n \"\"\" wrapper to the original expanduser function, to workaround python returning\n verbatim %USERPROFILE% when some other app (git for windows) sets HOME envvar\n \"\"\"\n # In win these variables should exist and point to user directory, which\n # must exist. 
Using context to avoid permanent modification of os.environ\n old_env = dict(os.environ)\n try:\n home = os.environ.get(\"HOME\")\n # Problematic cases of wrong HOME variable\n # - HOME = %USERPROFILE% verbatim, as messed by some other tools\n # - MSYS console, that defines a different user home in /c/mingw/msys/users/xxx\n # In these cases, it is safe to remove it and rely on USERPROFILE directly\n if home and (not os.path.exists(home) or\n (os.getenv(\"MSYSTEM\") and os.getenv(\"USERPROFILE\"))):\n del os.environ[\"HOME\"]\n result = os.path.expanduser(path)\n finally:\n os.environ.clear()\n os.environ.update(old_env)\n return result\n\n\ndef path_shortener(path, short_paths):\n \"\"\" short_paths is 4-state:\n False: Never shorten the path\n True: Always shorten the path, create link if not existing\n None: Use shorten path only if already exists, not create\n \"\"\"\n if short_paths is False or os.getenv(\"CONAN_USER_HOME_SHORT\") == \"None\":\n return path\n link = os.path.join(path, CONAN_LINK)\n if os.path.exists(link):\n return load(link)\n elif short_paths is None:\n return path\n\n if os.path.exists(path):\n rmdir(path)\n\n short_home = os.getenv(\"CONAN_USER_HOME_SHORT\")\n if not short_home:\n drive = os.path.splitdrive(path)[0]\n short_home = drive + \"/.conan\"\n mkdir(short_home)\n\n # Workaround for short_home living in NTFS file systems. Give full control permission to current user to avoid\n # access problems in cygwin/msys2 windows subsystems when using short_home folder\n try:\n username = os.getenv(\"USERDOMAIN\")\n domainname = \"%s\\%s\" % (username, os.environ[\"USERNAME\"]) if username else os.environ[\"USERNAME\"]\n cmd = r'cacls %s /E /G \"%s\":F' % (short_home, domainname)\n subprocess.check_output(cmd, stderr=subprocess.STDOUT) # Ignoring any returned output, make command quiet\n except subprocess.CalledProcessError:\n # cmd can fail if trying to set ACL in non NTFS drives, ignoring it.\n pass\n\n redirect = tempfile.mkdtemp(dir=short_home, prefix=\"\")\n # This \"1\" is the way to have a non-existing directory, so commands like\n # shutil.copytree() to it, works. It can be removed without compromising the\n # temp folder generator and conan-links consistency\n redirect = os.path.join(redirect, \"1\")\n save(link, redirect)\n return redirect\n\n\ndef ignore_long_path_files(src_folder, build_folder, output):\n def _filter(src, files):\n filtered_files = []\n for the_file in files:\n source_path = os.path.join(src, the_file)\n # Without storage path, just relative\n rel_path = os.path.relpath(source_path, src_folder)\n dest_path = os.path.normpath(os.path.join(build_folder, rel_path))\n # it is NOT that \"/\" is counted as \"\\\\\" so it counts double\n # seems a bug in python, overflows paths near the limit of 260,\n if len(dest_path) >= 249:\n filtered_files.append(the_file)\n output.warn(\"Filename too long, file excluded: %s\" % dest_path)\n return filtered_files\n return _filter\n\n\ndef rm_conandir(path):\n \"\"\"removal of a directory that might contain a link to a short path\"\"\"\n link = os.path.join(path, CONAN_LINK)\n if os.path.exists(link):\n short_path = load(link)\n rmdir(os.path.dirname(short_path))\n rmdir(path)\n", "path": "conans/util/windows.py"}]}
| 1,576 | 100 |
gh_patches_debug_28256
|
rasdani/github-patches
|
git_diff
|
meltano__meltano-8355
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
feature: Install multiple plugins of any type
### Feature scope
CLI (options, error messages, logging, etc.)
### Description
Currently, you can only leverage `meltano install` in parallel for all plugin types or all plugins of a specific type:
```sh
# all plugins
meltano install
# all extractors
meltano install [extractor|extractors]
# all loaders
meltano install [loader|loaders]
```
It would be great if you could install multiple plugins of any type - something like:
```sh
meltano install <extractor> <loader> <transformer> <utility>
```
This change would remove the need to specify a plugin type at all, since a plugin name is already unique to a Meltano project. This is currently not possible without a breaking change, however, since a plugin type is required as the first argument to `meltano install` when specifying plugin names. #8228 introduced the `--from-file` option for `meltano config <plugin> set`, which accepts the special character `-` to refer to stdin; `meltano install` could reuse this or a similar concept to skip the plugin type argument and leverage parallel install:
```sh
meltano install - <extractor> <loader> <transformer> <utility>
```
Once a convention is established, this feature could be extended to `meltano remove` as well.
I have a POC of this working for `meltano install` locally, so happy to open a PR.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/meltano/cli/install.py`
Content:
```
1 """CLI command `meltano install`."""
2
3 from __future__ import annotations
4
5 import typing as t
6
7 import click
8 import structlog
9
10 from meltano.cli.params import pass_project
11 from meltano.cli.utils import CliError, PartialInstrumentedCmd, install_plugins
12 from meltano.core.block.parser import BlockParser
13 from meltano.core.plugin import PluginType
14 from meltano.core.schedule_service import ScheduleService
15 from meltano.core.tracking.contexts import CliEvent, PluginsTrackingContext
16
17 if t.TYPE_CHECKING:
18 from meltano.core.project import Project
19 from meltano.core.tracking import Tracker
20
21 logger = structlog.getLogger(__name__)
22
23
24 @click.command(cls=PartialInstrumentedCmd, short_help="Install project dependencies.")
25 @click.argument(
26 "plugin_type",
27 type=click.Choice(PluginType.cli_arguments()),
28 required=False,
29 )
30 @click.argument("plugin_name", nargs=-1, required=False)
31 @click.option(
32 "--clean",
33 is_flag=True,
34 help="Completely reinstall a plugin rather than simply upgrading if necessary.",
35 )
36 @click.option(
37 "--parallelism",
38 "-p",
39 type=click.INT,
40 default=None,
41 help=(
42 "Limit the number of plugins to install in parallel. "
43 "Defaults to the number of cores."
44 ),
45 )
46 @click.option(
47 "--force",
48 "-f",
49 is_flag=True,
50 help="Ignore the required Python version declared by the plugins.",
51 )
52 @click.option(
53 "--schedule",
54 "-s",
55 "schedule_name",
56 help="Install all plugins from the given schedule.",
57 )
58 @click.pass_context
59 @pass_project(migrate=True)
60 def install( # noqa: C901
61 project: Project,
62 ctx: click.Context,
63 plugin_type: str,
64 plugin_name: str,
65 clean: bool,
66 parallelism: int,
67 force: bool,
68 schedule_name: str,
69 ):
70 """
71 Install all the dependencies of your project based on the meltano.yml file.
72
73 \b\nRead more at https://docs.meltano.com/reference/command-line-interface#install
74 """
75 tracker: Tracker = ctx.obj["tracker"]
76 try:
77 if plugin_type:
78 plugin_type = PluginType.from_cli_argument(plugin_type)
79 plugins = project.plugins.get_plugins_of_type(plugin_type)
80 if plugin_name:
81 plugins = [plugin for plugin in plugins if plugin.name in plugin_name]
82 else:
83 plugins = list(project.plugins.plugins())
84
85 if schedule_name:
86 schedule_plugins = _get_schedule_plugins(
87 ctx.obj["project"],
88 schedule_name,
89 )
90 plugins = list(set(plugins) & set(schedule_plugins))
91 except Exception:
92 tracker.track_command_event(CliEvent.aborted)
93 raise
94
95 click.echo(f"Installing {len(plugins)} plugins...")
96 tracker.add_contexts(
97 PluginsTrackingContext([(candidate, None) for candidate in plugins]),
98 )
99 tracker.track_command_event(CliEvent.inflight)
100
101 success = install_plugins(
102 project,
103 plugins,
104 parallelism=parallelism,
105 clean=clean,
106 force=force,
107 )
108 if not success:
109 tracker.track_command_event(CliEvent.failed)
110 raise CliError("Failed to install plugin(s)") # noqa: EM101
111 tracker.track_command_event(CliEvent.completed)
112
113
114 def _get_schedule_plugins(project: Project, schedule_name: str):
115 schedule_service = ScheduleService(project)
116 schedule_obj = schedule_service.find_schedule(schedule_name)
117 schedule_plugins = set()
118 if schedule_obj.elt_schedule:
119 for plugin_name in (schedule_obj.extractor, schedule_obj.loader):
120 schedule_plugins.add(project.plugins.find_plugin(plugin_name))
121 else:
122 task_sets = schedule_service.task_sets_service.get(schedule_obj.job)
123 for blocks in task_sets.flat_args_per_set:
124 parser = BlockParser(logger, project, blocks)
125 for plugin in parser.plugins:
126 schedule_plugins.add(
127 project.plugins.find_plugin(plugin.info.get("name"))
128 if plugin.type == PluginType.MAPPERS
129 else plugin,
130 )
131 return schedule_plugins
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/meltano/cli/install.py b/src/meltano/cli/install.py
--- a/src/meltano/cli/install.py
+++ b/src/meltano/cli/install.py
@@ -18,13 +18,15 @@
from meltano.core.project import Project
from meltano.core.tracking import Tracker
+ANY = "-"
+
logger = structlog.getLogger(__name__)
@click.command(cls=PartialInstrumentedCmd, short_help="Install project dependencies.")
@click.argument(
"plugin_type",
- type=click.Choice(PluginType.cli_arguments()),
+ type=click.Choice((*PluginType.cli_arguments(), ANY)),
required=False,
)
@click.argument("plugin_name", nargs=-1, required=False)
@@ -74,14 +76,15 @@
"""
tracker: Tracker = ctx.obj["tracker"]
try:
- if plugin_type:
+ if plugin_type and plugin_type != ANY:
plugin_type = PluginType.from_cli_argument(plugin_type)
plugins = project.plugins.get_plugins_of_type(plugin_type)
- if plugin_name:
- plugins = [plugin for plugin in plugins if plugin.name in plugin_name]
else:
plugins = list(project.plugins.plugins())
+ if plugin_name:
+ plugins = [plugin for plugin in plugins if plugin.name in plugin_name]
+
if schedule_name:
schedule_plugins = _get_schedule_plugins(
ctx.obj["project"],
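The heart of the fix is the `ANY = "-"` sentinel: once the first argument is `-`, name filtering alone decides what gets installed. The snippet below is a minimal, self-contained Click sketch of that pattern; the command, registry and file name are hypothetical and are not Meltano's actual internals.

```python
# Minimal sketch of the "-" sentinel pattern from the diff above. The registry,
# command, and file name are hypothetical; only the argument handling mirrors the fix.
import click

ANY = "-"
PLUGIN_TYPES = ("extractors", "loaders", "utilities")

# Hypothetical registry: plugin name -> plugin type (names are unique project-wide).
PLUGINS = {
    "tap-github": "extractors",
    "target-postgres": "loaders",
    "dbt": "utilities",
}


@click.command()
@click.argument("plugin_type", type=click.Choice((*PLUGIN_TYPES, ANY)), required=False)
@click.argument("plugin_name", nargs=-1, required=False)
def install(plugin_type, plugin_name):
    """Resolve which plugins would be installed."""
    if plugin_type and plugin_type != ANY:
        # A concrete type restricts the candidate set, as before.
        names = [name for name, kind in PLUGINS.items() if kind == plugin_type]
    else:
        # No type, or the "-" sentinel: consider every plugin in the project.
        names = list(PLUGINS)

    if plugin_name:
        # Names are unique across types, so filtering by name alone is enough.
        names = [name for name in names if name in plugin_name]

    click.echo(f"Would install {len(names)} plugin(s): {', '.join(names)}")


if __name__ == "__main__":
    install()
```

Invoked as `python install_sketch.py - tap-github dbt`, it selects the two named plugins regardless of their type, which is the behaviour the issue asks for; reusing `-` also stays consistent with the `--from-file` convention mentioned in the issue.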
|
{"golden_diff": "diff --git a/src/meltano/cli/install.py b/src/meltano/cli/install.py\n--- a/src/meltano/cli/install.py\n+++ b/src/meltano/cli/install.py\n@@ -18,13 +18,15 @@\n from meltano.core.project import Project\n from meltano.core.tracking import Tracker\n \n+ANY = \"-\"\n+\n logger = structlog.getLogger(__name__)\n \n \n @click.command(cls=PartialInstrumentedCmd, short_help=\"Install project dependencies.\")\n @click.argument(\n \"plugin_type\",\n- type=click.Choice(PluginType.cli_arguments()),\n+ type=click.Choice((*PluginType.cli_arguments(), ANY)),\n required=False,\n )\n @click.argument(\"plugin_name\", nargs=-1, required=False)\n@@ -74,14 +76,15 @@\n \"\"\"\n tracker: Tracker = ctx.obj[\"tracker\"]\n try:\n- if plugin_type:\n+ if plugin_type and plugin_type != ANY:\n plugin_type = PluginType.from_cli_argument(plugin_type)\n plugins = project.plugins.get_plugins_of_type(plugin_type)\n- if plugin_name:\n- plugins = [plugin for plugin in plugins if plugin.name in plugin_name]\n else:\n plugins = list(project.plugins.plugins())\n \n+ if plugin_name:\n+ plugins = [plugin for plugin in plugins if plugin.name in plugin_name]\n+\n if schedule_name:\n schedule_plugins = _get_schedule_plugins(\n ctx.obj[\"project\"],\n", "issue": "feature: Install multiple plugins of any type\n### Feature scope\r\n\r\nCLI (options, error messages, logging, etc.)\r\n\r\n### Description\r\n\r\nCurrently, you can only leverage `meltano install` in parallel for all plugin types or all plugins of a specific type:\r\n\r\n```sh\r\n# all plugins\r\nmeltano install\r\n\r\n# all extractors\r\nmeltano install [extractor|extractors]\r\n\r\n# all loaders\r\nmeltano install [loader|loaders]\r\n```\r\n\r\nIt would be great if you could install multiple plugins of any type - something like:\r\n\r\n```sh\r\nmeltano install <extractor> <loader> <transformer> <utility>\r\n```\r\n\r\nThis change would remove the need to specify a plugin type at all, since a plugin name is already unique to a Meltano project. This is currently not possible without a breaking change however, since a plugin type is required as the first argument to `meltano install` when specifying plugin names. 
#8228 introduced the `--from-file` option for `meltano config <plugin> set`, which accepts a special character `-` to refer to stdin - `meltano install` could reuse this or a similar concept to skip the plugin type argument and leverage parallel install:\r\n\r\n```sh\r\nmeltano install - <extractor> <loader> <transformer> <utility>\r\n```\r\n\r\nOnce a convention is established, this feature could be extended to `meltano remove` as well.\r\n\r\nI have a POC of this working for `meltano install` locally, so happy to open a PR.\n", "before_files": [{"content": "\"\"\"CLI command `meltano install`.\"\"\"\n\nfrom __future__ import annotations\n\nimport typing as t\n\nimport click\nimport structlog\n\nfrom meltano.cli.params import pass_project\nfrom meltano.cli.utils import CliError, PartialInstrumentedCmd, install_plugins\nfrom meltano.core.block.parser import BlockParser\nfrom meltano.core.plugin import PluginType\nfrom meltano.core.schedule_service import ScheduleService\nfrom meltano.core.tracking.contexts import CliEvent, PluginsTrackingContext\n\nif t.TYPE_CHECKING:\n from meltano.core.project import Project\n from meltano.core.tracking import Tracker\n\nlogger = structlog.getLogger(__name__)\n\n\[email protected](cls=PartialInstrumentedCmd, short_help=\"Install project dependencies.\")\[email protected](\n \"plugin_type\",\n type=click.Choice(PluginType.cli_arguments()),\n required=False,\n)\[email protected](\"plugin_name\", nargs=-1, required=False)\[email protected](\n \"--clean\",\n is_flag=True,\n help=\"Completely reinstall a plugin rather than simply upgrading if necessary.\",\n)\[email protected](\n \"--parallelism\",\n \"-p\",\n type=click.INT,\n default=None,\n help=(\n \"Limit the number of plugins to install in parallel. \"\n \"Defaults to the number of cores.\"\n ),\n)\[email protected](\n \"--force\",\n \"-f\",\n is_flag=True,\n help=\"Ignore the required Python version declared by the plugins.\",\n)\[email protected](\n \"--schedule\",\n \"-s\",\n \"schedule_name\",\n help=\"Install all plugins from the given schedule.\",\n)\[email protected]_context\n@pass_project(migrate=True)\ndef install( # noqa: C901\n project: Project,\n ctx: click.Context,\n plugin_type: str,\n plugin_name: str,\n clean: bool,\n parallelism: int,\n force: bool,\n schedule_name: str,\n):\n \"\"\"\n Install all the dependencies of your project based on the meltano.yml file.\n\n \\b\\nRead more at https://docs.meltano.com/reference/command-line-interface#install\n \"\"\"\n tracker: Tracker = ctx.obj[\"tracker\"]\n try:\n if plugin_type:\n plugin_type = PluginType.from_cli_argument(plugin_type)\n plugins = project.plugins.get_plugins_of_type(plugin_type)\n if plugin_name:\n plugins = [plugin for plugin in plugins if plugin.name in plugin_name]\n else:\n plugins = list(project.plugins.plugins())\n\n if schedule_name:\n schedule_plugins = _get_schedule_plugins(\n ctx.obj[\"project\"],\n schedule_name,\n )\n plugins = list(set(plugins) & set(schedule_plugins))\n except Exception:\n tracker.track_command_event(CliEvent.aborted)\n raise\n\n click.echo(f\"Installing {len(plugins)} plugins...\")\n tracker.add_contexts(\n PluginsTrackingContext([(candidate, None) for candidate in plugins]),\n )\n tracker.track_command_event(CliEvent.inflight)\n\n success = install_plugins(\n project,\n plugins,\n parallelism=parallelism,\n clean=clean,\n force=force,\n )\n if not success:\n tracker.track_command_event(CliEvent.failed)\n raise CliError(\"Failed to install plugin(s)\") # noqa: EM101\n 
tracker.track_command_event(CliEvent.completed)\n\n\ndef _get_schedule_plugins(project: Project, schedule_name: str):\n schedule_service = ScheduleService(project)\n schedule_obj = schedule_service.find_schedule(schedule_name)\n schedule_plugins = set()\n if schedule_obj.elt_schedule:\n for plugin_name in (schedule_obj.extractor, schedule_obj.loader):\n schedule_plugins.add(project.plugins.find_plugin(plugin_name))\n else:\n task_sets = schedule_service.task_sets_service.get(schedule_obj.job)\n for blocks in task_sets.flat_args_per_set:\n parser = BlockParser(logger, project, blocks)\n for plugin in parser.plugins:\n schedule_plugins.add(\n project.plugins.find_plugin(plugin.info.get(\"name\"))\n if plugin.type == PluginType.MAPPERS\n else plugin,\n )\n return schedule_plugins\n", "path": "src/meltano/cli/install.py"}], "after_files": [{"content": "\"\"\"CLI command `meltano install`.\"\"\"\n\nfrom __future__ import annotations\n\nimport typing as t\n\nimport click\nimport structlog\n\nfrom meltano.cli.params import pass_project\nfrom meltano.cli.utils import CliError, PartialInstrumentedCmd, install_plugins\nfrom meltano.core.block.parser import BlockParser\nfrom meltano.core.plugin import PluginType\nfrom meltano.core.schedule_service import ScheduleService\nfrom meltano.core.tracking.contexts import CliEvent, PluginsTrackingContext\n\nif t.TYPE_CHECKING:\n from meltano.core.project import Project\n from meltano.core.tracking import Tracker\n\nANY = \"-\"\n\nlogger = structlog.getLogger(__name__)\n\n\[email protected](cls=PartialInstrumentedCmd, short_help=\"Install project dependencies.\")\[email protected](\n \"plugin_type\",\n type=click.Choice((*PluginType.cli_arguments(), ANY)),\n required=False,\n)\[email protected](\"plugin_name\", nargs=-1, required=False)\[email protected](\n \"--clean\",\n is_flag=True,\n help=\"Completely reinstall a plugin rather than simply upgrading if necessary.\",\n)\[email protected](\n \"--parallelism\",\n \"-p\",\n type=click.INT,\n default=None,\n help=(\n \"Limit the number of plugins to install in parallel. 
\"\n \"Defaults to the number of cores.\"\n ),\n)\[email protected](\n \"--force\",\n \"-f\",\n is_flag=True,\n help=\"Ignore the required Python version declared by the plugins.\",\n)\[email protected](\n \"--schedule\",\n \"-s\",\n \"schedule_name\",\n help=\"Install all plugins from the given schedule.\",\n)\[email protected]_context\n@pass_project(migrate=True)\ndef install( # noqa: C901\n project: Project,\n ctx: click.Context,\n plugin_type: str,\n plugin_name: str,\n clean: bool,\n parallelism: int,\n force: bool,\n schedule_name: str,\n):\n \"\"\"\n Install all the dependencies of your project based on the meltano.yml file.\n\n \\b\\nRead more at https://docs.meltano.com/reference/command-line-interface#install\n \"\"\"\n tracker: Tracker = ctx.obj[\"tracker\"]\n try:\n if plugin_type and plugin_type != ANY:\n plugin_type = PluginType.from_cli_argument(plugin_type)\n plugins = project.plugins.get_plugins_of_type(plugin_type)\n else:\n plugins = list(project.plugins.plugins())\n\n if plugin_name:\n plugins = [plugin for plugin in plugins if plugin.name in plugin_name]\n\n if schedule_name:\n schedule_plugins = _get_schedule_plugins(\n ctx.obj[\"project\"],\n schedule_name,\n )\n plugins = list(set(plugins) & set(schedule_plugins))\n except Exception:\n tracker.track_command_event(CliEvent.aborted)\n raise\n\n click.echo(f\"Installing {len(plugins)} plugins...\")\n tracker.add_contexts(\n PluginsTrackingContext([(candidate, None) for candidate in plugins]),\n )\n tracker.track_command_event(CliEvent.inflight)\n\n success = install_plugins(\n project,\n plugins,\n parallelism=parallelism,\n clean=clean,\n force=force,\n )\n if not success:\n tracker.track_command_event(CliEvent.failed)\n raise CliError(\"Failed to install plugin(s)\") # noqa: EM101\n tracker.track_command_event(CliEvent.completed)\n\n\ndef _get_schedule_plugins(project: Project, schedule_name: str):\n schedule_service = ScheduleService(project)\n schedule_obj = schedule_service.find_schedule(schedule_name)\n schedule_plugins = set()\n if schedule_obj.elt_schedule:\n for plugin_name in (schedule_obj.extractor, schedule_obj.loader):\n schedule_plugins.add(project.plugins.find_plugin(plugin_name))\n else:\n task_sets = schedule_service.task_sets_service.get(schedule_obj.job)\n for blocks in task_sets.flat_args_per_set:\n parser = BlockParser(logger, project, blocks)\n for plugin in parser.plugins:\n schedule_plugins.add(\n project.plugins.find_plugin(plugin.info.get(\"name\"))\n if plugin.type == PluginType.MAPPERS\n else plugin,\n )\n return schedule_plugins\n", "path": "src/meltano/cli/install.py"}]}
| 1,742 | 309 |
gh_patches_debug_3785
|
rasdani/github-patches
|
git_diff
|
Mailu__Mailu-744
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Why a print statement in this code?
@kaiyou, according to git blame this is yours. I wonder if it was included for debugging purposes and never taken out?
https://github.com/Mailu/Mailu/blob/5c2439011413a114c470862f95206f8487fa4568/core/admin/mailu/internal/views/dovecot.py#L18
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/admin/mailu/internal/views/dovecot.py`
Content:
```
1 from mailu import models
2 from mailu.internal import internal
3 from flask import current_app as app
4
5 import flask
6 import socket
7 import os
8
9 @internal.route("/dovecot/passdb/<user_email>")
10 def dovecot_passdb_dict(user_email):
11 user = models.User.query.get(user_email) or flask.abort(404)
12 allow_nets = []
13 allow_nets.append(app.config["SUBNET"])
14 if app.config["POD_ADDRESS_RANGE"]:
15 allow_nets.append(app.config["POD_ADDRESS_RANGE"])
16 print(allow_nets)
17 return flask.jsonify({
18 "password": None,
19 "nopassword": "Y",
20 "allow_nets": ",".join(allow_nets)
21 })
22
23
24 @internal.route("/dovecot/userdb/<user_email>")
25 def dovecot_userdb_dict(user_email):
26 user = models.User.query.get(user_email) or flask.abort(404)
27 return flask.jsonify({
28 "quota_rule": "*:bytes={}".format(user.quota_bytes)
29 })
30
31
32 @internal.route("/dovecot/quota/<ns>/<user_email>", methods=["POST"])
33 def dovecot_quota(ns, user_email):
34 user = models.User.query.get(user_email) or flask.abort(404)
35 if ns == "storage":
36 user.quota_bytes_used = flask.request.get_json()
37 models.db.session.commit()
38 return flask.jsonify(None)
39
40
41 @internal.route("/dovecot/sieve/name/<script>/<user_email>")
42 def dovecot_sieve_name(script, user_email):
43 return flask.jsonify(script)
44
45
46 @internal.route("/dovecot/sieve/data/default/<user_email>")
47 def dovecot_sieve_data(user_email):
48 user = models.User.query.get(user_email) or flask.abort(404)
49 return flask.jsonify(flask.render_template("default.sieve", user=user))
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/core/admin/mailu/internal/views/dovecot.py b/core/admin/mailu/internal/views/dovecot.py
--- a/core/admin/mailu/internal/views/dovecot.py
+++ b/core/admin/mailu/internal/views/dovecot.py
@@ -13,7 +13,6 @@
allow_nets.append(app.config["SUBNET"])
if app.config["POD_ADDRESS_RANGE"]:
allow_nets.append(app.config["POD_ADDRESS_RANGE"])
- print(allow_nets)
return flask.jsonify({
"password": None,
"nopassword": "Y",
|
{"golden_diff": "diff --git a/core/admin/mailu/internal/views/dovecot.py b/core/admin/mailu/internal/views/dovecot.py\n--- a/core/admin/mailu/internal/views/dovecot.py\n+++ b/core/admin/mailu/internal/views/dovecot.py\n@@ -13,7 +13,6 @@\n allow_nets.append(app.config[\"SUBNET\"])\n if app.config[\"POD_ADDRESS_RANGE\"]:\n allow_nets.append(app.config[\"POD_ADDRESS_RANGE\"])\n- print(allow_nets)\n return flask.jsonify({\n \"password\": None,\n \"nopassword\": \"Y\",\n", "issue": "Why a print statement in this code?\n@kaiyou, according git blame this is yours. I wonder if it was included for debugging purposes and not taken out anymore?\r\n\r\nhttps://github.com/Mailu/Mailu/blob/5c2439011413a114c470862f95206f8487fa4568/core/admin/mailu/internal/views/dovecot.py#L18\n", "before_files": [{"content": "from mailu import models\nfrom mailu.internal import internal\nfrom flask import current_app as app\n\nimport flask\nimport socket\nimport os\n\[email protected](\"/dovecot/passdb/<user_email>\")\ndef dovecot_passdb_dict(user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n allow_nets = []\n allow_nets.append(app.config[\"SUBNET\"])\n if app.config[\"POD_ADDRESS_RANGE\"]:\n allow_nets.append(app.config[\"POD_ADDRESS_RANGE\"])\n print(allow_nets)\n return flask.jsonify({\n \"password\": None,\n \"nopassword\": \"Y\",\n \"allow_nets\": \",\".join(allow_nets)\n })\n\n\[email protected](\"/dovecot/userdb/<user_email>\")\ndef dovecot_userdb_dict(user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n return flask.jsonify({\n \"quota_rule\": \"*:bytes={}\".format(user.quota_bytes)\n })\n\n\[email protected](\"/dovecot/quota/<ns>/<user_email>\", methods=[\"POST\"])\ndef dovecot_quota(ns, user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n if ns == \"storage\":\n user.quota_bytes_used = flask.request.get_json()\n models.db.session.commit()\n return flask.jsonify(None)\n\n\[email protected](\"/dovecot/sieve/name/<script>/<user_email>\")\ndef dovecot_sieve_name(script, user_email):\n return flask.jsonify(script)\n\n\[email protected](\"/dovecot/sieve/data/default/<user_email>\")\ndef dovecot_sieve_data(user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n return flask.jsonify(flask.render_template(\"default.sieve\", user=user))\n", "path": "core/admin/mailu/internal/views/dovecot.py"}], "after_files": [{"content": "from mailu import models\nfrom mailu.internal import internal\nfrom flask import current_app as app\n\nimport flask\nimport socket\nimport os\n\[email protected](\"/dovecot/passdb/<user_email>\")\ndef dovecot_passdb_dict(user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n allow_nets = []\n allow_nets.append(app.config[\"SUBNET\"])\n if app.config[\"POD_ADDRESS_RANGE\"]:\n allow_nets.append(app.config[\"POD_ADDRESS_RANGE\"])\n return flask.jsonify({\n \"password\": None,\n \"nopassword\": \"Y\",\n \"allow_nets\": \",\".join(allow_nets)\n })\n\n\[email protected](\"/dovecot/userdb/<user_email>\")\ndef dovecot_userdb_dict(user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n return flask.jsonify({\n \"quota_rule\": \"*:bytes={}\".format(user.quota_bytes)\n })\n\n\[email protected](\"/dovecot/quota/<ns>/<user_email>\", methods=[\"POST\"])\ndef dovecot_quota(ns, user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n if ns == \"storage\":\n user.quota_bytes_used = flask.request.get_json()\n models.db.session.commit()\n return 
flask.jsonify(None)\n\n\[email protected](\"/dovecot/sieve/name/<script>/<user_email>\")\ndef dovecot_sieve_name(script, user_email):\n return flask.jsonify(script)\n\n\[email protected](\"/dovecot/sieve/data/default/<user_email>\")\ndef dovecot_sieve_data(user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n return flask.jsonify(flask.render_template(\"default.sieve\", user=user))\n", "path": "core/admin/mailu/internal/views/dovecot.py"}]}
| 860 | 126 |
gh_patches_debug_23963
|
rasdani/github-patches
|
git_diff
|
optuna__optuna-3182
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve visualization tutorial
<!-- Please write a clear and concise description of what content in https://optuna.readthedocs.io/ is an issue. -->
I suggest updating the [visualization tutorial](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/005_visualization.html) as follows
- Add missing [`visualization.plot_pareto_front`](https://optuna.readthedocs.io/en/stable/reference/visualization/generated/optuna.visualization.plot_pareto_front.html#optuna.visualization.plot_pareto_front) example; since this function needs multi-objective function unlike other visualization examples, we might need to define such an objective function after the other examples. If adding such an example is not appropriate, at least we need to mention the existence of `visualization.plot_pareto_front`.
- Mention the availability of the Matplotlib version in the first paragraph.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tutorial/10_key_features/005_visualization.py`
Content:
```
1 """
2 .. _visualization:
3
4 Quick Visualization for Hyperparameter Optimization Analysis
5 ============================================================
6
7 Optuna provides various visualization features in :mod:`optuna.visualization` to analyze optimization results visually.
8
9 This tutorial walks you through this module by visualizing the history of lightgbm model for breast cancer dataset.
10 """
11
12 ###################################################################################################
13 import lightgbm as lgb
14 import numpy as np
15 import sklearn.datasets
16 import sklearn.metrics
17 from sklearn.model_selection import train_test_split
18
19 import optuna
20 from optuna.visualization import plot_contour
21 from optuna.visualization import plot_edf
22 from optuna.visualization import plot_intermediate_values
23 from optuna.visualization import plot_optimization_history
24 from optuna.visualization import plot_parallel_coordinate
25 from optuna.visualization import plot_param_importances
26 from optuna.visualization import plot_slice
27
28 SEED = 42
29
30 np.random.seed(SEED)
31
32
33 ###################################################################################################
34 # Define the objective function.
35 def objective(trial):
36 data, target = sklearn.datasets.load_breast_cancer(return_X_y=True)
37 train_x, valid_x, train_y, valid_y = train_test_split(data, target, test_size=0.25)
38 dtrain = lgb.Dataset(train_x, label=train_y)
39 dvalid = lgb.Dataset(valid_x, label=valid_y)
40
41 param = {
42 "objective": "binary",
43 "metric": "auc",
44 "verbosity": -1,
45 "boosting_type": "gbdt",
46 "bagging_fraction": trial.suggest_float("bagging_fraction", 0.4, 1.0),
47 "bagging_freq": trial.suggest_int("bagging_freq", 1, 7),
48 "min_child_samples": trial.suggest_int("min_child_samples", 5, 100),
49 }
50
51 # Add a callback for pruning.
52 pruning_callback = optuna.integration.LightGBMPruningCallback(trial, "auc")
53 gbm = lgb.train(
54 param, dtrain, valid_sets=[dvalid], verbose_eval=False, callbacks=[pruning_callback]
55 )
56
57 preds = gbm.predict(valid_x)
58 pred_labels = np.rint(preds)
59 accuracy = sklearn.metrics.accuracy_score(valid_y, pred_labels)
60 return accuracy
61
62
63 ###################################################################################################
64 study = optuna.create_study(
65 direction="maximize",
66 sampler=optuna.samplers.TPESampler(seed=SEED),
67 pruner=optuna.pruners.MedianPruner(n_warmup_steps=10),
68 )
69 study.optimize(objective, n_trials=100, timeout=600)
70
71 ###################################################################################################
72 # Plot functions
73 # --------------
74 # Visualize the optimization history. See :func:`~optuna.visualization.plot_optimization_history` for the details.
75 plot_optimization_history(study)
76
77 ###################################################################################################
78 # Visualize the learning curves of the trials. See :func:`~optuna.visualization.plot_intermediate_values` for the details.
79 plot_intermediate_values(study)
80
81 ###################################################################################################
82 # Visualize high-dimensional parameter relationships. See :func:`~optuna.visualization.plot_parallel_coordinate` for the details.
83 plot_parallel_coordinate(study)
84
85 ###################################################################################################
86 # Select parameters to visualize.
87 plot_parallel_coordinate(study, params=["bagging_freq", "bagging_fraction"])
88
89 ###################################################################################################
90 # Visualize hyperparameter relationships. See :func:`~optuna.visualization.plot_contour` for the details.
91 plot_contour(study)
92
93 ###################################################################################################
94 # Select parameters to visualize.
95 plot_contour(study, params=["bagging_freq", "bagging_fraction"])
96
97 ###################################################################################################
98 # Visualize individual hyperparameters as slice plot. See :func:`~optuna.visualization.plot_slice` for the details.
99 plot_slice(study)
100
101 ###################################################################################################
102 # Select parameters to visualize.
103 plot_slice(study, params=["bagging_freq", "bagging_fraction"])
104
105 ###################################################################################################
106 # Visualize parameter importances. See :func:`~optuna.visualization.plot_param_importances` for the details.
107 plot_param_importances(study)
108
109 ###################################################################################################
110 # Learn which hyperparameters are affecting the trial duration with hyperparameter importance.
111 optuna.visualization.plot_param_importances(
112 study, target=lambda t: t.duration.total_seconds(), target_name="duration"
113 )
114
115 ###################################################################################################
116 # Visualize empirical distribution function. See :func:`~optuna.visualization.plot_edf` for the details.
117 plot_edf(study)
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tutorial/10_key_features/005_visualization.py b/tutorial/10_key_features/005_visualization.py
--- a/tutorial/10_key_features/005_visualization.py
+++ b/tutorial/10_key_features/005_visualization.py
@@ -7,6 +7,9 @@
Optuna provides various visualization features in :mod:`optuna.visualization` to analyze optimization results visually.
This tutorial walks you through this module by visualizing the history of lightgbm model for breast cancer dataset.
+
+For visualizing multi-objective optimization (i.e., the usage of :func:`optuna.visualization.plot_pareto_front`),
+please refer to the tutorial of :ref:`multi_objective`.
"""
###################################################################################################
@@ -17,6 +20,9 @@
from sklearn.model_selection import train_test_split
import optuna
+
+# You can use Matplotlib instead of Plotly for visualization by simply replacing `optuna.visualization` with
+# `optuna.visualization.matplotlib` in the following examples.
from optuna.visualization import plot_contour
from optuna.visualization import plot_edf
from optuna.visualization import plot_intermediate_values
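To cover the two points raised in the issue, a multi-objective example is the natural companion to the note added above. The sketch below uses a toy two-objective function (made up for illustration) and shows both the Plotly call and its Matplotlib counterpart; it assumes a reasonably recent Optuna with `plotly` and `matplotlib` installed.

```python
# Toy multi-objective study to illustrate plot_pareto_front and the
# optuna.visualization.matplotlib backend mentioned in the diff above.
import optuna
from optuna.visualization import plot_pareto_front
from optuna.visualization.matplotlib import plot_pareto_front as plot_pareto_front_mpl


def objective(trial):
    x = trial.suggest_float("x", 0.0, 5.0)
    y = trial.suggest_float("y", 0.0, 3.0)
    # Two competing objectives, both minimized.
    return x**2 + y, (x - 2) ** 2 + (y - 1) ** 2


study = optuna.create_study(directions=["minimize", "minimize"])
study.optimize(objective, n_trials=50)

fig = plot_pareto_front(study)        # Plotly figure
ax = plot_pareto_front_mpl(study)     # Matplotlib Axes, same data
```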
|
{"golden_diff": "diff --git a/tutorial/10_key_features/005_visualization.py b/tutorial/10_key_features/005_visualization.py\n--- a/tutorial/10_key_features/005_visualization.py\n+++ b/tutorial/10_key_features/005_visualization.py\n@@ -7,6 +7,9 @@\n Optuna provides various visualization features in :mod:`optuna.visualization` to analyze optimization results visually.\n \n This tutorial walks you through this module by visualizing the history of lightgbm model for breast cancer dataset.\n+\n+For visualizing multi-objective optimization (i.e., the usage of :func:`optuna.visualization.plot_pareto_front`),\n+please refer to the tutorial of :ref:`multi_objective`.\n \"\"\"\n \n ###################################################################################################\n@@ -17,6 +20,9 @@\n from sklearn.model_selection import train_test_split\n \n import optuna\n+\n+# You can use Matplotlib instead of Plotly for visualization by simply replacing `optuna.visualization` with\n+# `optuna.visualization.matplotlib` in the following examples.\n from optuna.visualization import plot_contour\n from optuna.visualization import plot_edf\n from optuna.visualization import plot_intermediate_values\n", "issue": "Improve visualization tutorial\n<!-- Please write a clear and concise description of what content in https://optuna.readthedocs.io/ is an issue. -->\r\n\r\nI suggest updating the [visualization tutorial](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/005_visualization.html) as follows\r\n\r\n- Add missing [`visualization.plot_pareto_front`](https://optuna.readthedocs.io/en/stable/reference/visualization/generated/optuna.visualization.plot_pareto_front.html#optuna.visualization.plot_pareto_front) example; since this function needs multi-objective function unlike other visualization examples, we might need to define such an objective function after the other examples. If adding such an example is not appropriate, at least we need to mention the existence of `visualization.plot_pareto_front`.\r\n- Mention the availability of matplotlib version in the first paragraph. \r\n\n", "before_files": [{"content": "\"\"\"\n.. 
_visualization:\n\nQuick Visualization for Hyperparameter Optimization Analysis\n============================================================\n\nOptuna provides various visualization features in :mod:`optuna.visualization` to analyze optimization results visually.\n\nThis tutorial walks you through this module by visualizing the history of lightgbm model for breast cancer dataset.\n\"\"\"\n\n###################################################################################################\nimport lightgbm as lgb\nimport numpy as np\nimport sklearn.datasets\nimport sklearn.metrics\nfrom sklearn.model_selection import train_test_split\n\nimport optuna\nfrom optuna.visualization import plot_contour\nfrom optuna.visualization import plot_edf\nfrom optuna.visualization import plot_intermediate_values\nfrom optuna.visualization import plot_optimization_history\nfrom optuna.visualization import plot_parallel_coordinate\nfrom optuna.visualization import plot_param_importances\nfrom optuna.visualization import plot_slice\n\nSEED = 42\n\nnp.random.seed(SEED)\n\n\n###################################################################################################\n# Define the objective function.\ndef objective(trial):\n data, target = sklearn.datasets.load_breast_cancer(return_X_y=True)\n train_x, valid_x, train_y, valid_y = train_test_split(data, target, test_size=0.25)\n dtrain = lgb.Dataset(train_x, label=train_y)\n dvalid = lgb.Dataset(valid_x, label=valid_y)\n\n param = {\n \"objective\": \"binary\",\n \"metric\": \"auc\",\n \"verbosity\": -1,\n \"boosting_type\": \"gbdt\",\n \"bagging_fraction\": trial.suggest_float(\"bagging_fraction\", 0.4, 1.0),\n \"bagging_freq\": trial.suggest_int(\"bagging_freq\", 1, 7),\n \"min_child_samples\": trial.suggest_int(\"min_child_samples\", 5, 100),\n }\n\n # Add a callback for pruning.\n pruning_callback = optuna.integration.LightGBMPruningCallback(trial, \"auc\")\n gbm = lgb.train(\n param, dtrain, valid_sets=[dvalid], verbose_eval=False, callbacks=[pruning_callback]\n )\n\n preds = gbm.predict(valid_x)\n pred_labels = np.rint(preds)\n accuracy = sklearn.metrics.accuracy_score(valid_y, pred_labels)\n return accuracy\n\n\n###################################################################################################\nstudy = optuna.create_study(\n direction=\"maximize\",\n sampler=optuna.samplers.TPESampler(seed=SEED),\n pruner=optuna.pruners.MedianPruner(n_warmup_steps=10),\n)\nstudy.optimize(objective, n_trials=100, timeout=600)\n\n###################################################################################################\n# Plot functions\n# --------------\n# Visualize the optimization history. See :func:`~optuna.visualization.plot_optimization_history` for the details.\nplot_optimization_history(study)\n\n###################################################################################################\n# Visualize the learning curves of the trials. See :func:`~optuna.visualization.plot_intermediate_values` for the details.\nplot_intermediate_values(study)\n\n###################################################################################################\n# Visualize high-dimensional parameter relationships. 
See :func:`~optuna.visualization.plot_parallel_coordinate` for the details.\nplot_parallel_coordinate(study)\n\n###################################################################################################\n# Select parameters to visualize.\nplot_parallel_coordinate(study, params=[\"bagging_freq\", \"bagging_fraction\"])\n\n###################################################################################################\n# Visualize hyperparameter relationships. See :func:`~optuna.visualization.plot_contour` for the details.\nplot_contour(study)\n\n###################################################################################################\n# Select parameters to visualize.\nplot_contour(study, params=[\"bagging_freq\", \"bagging_fraction\"])\n\n###################################################################################################\n# Visualize individual hyperparameters as slice plot. See :func:`~optuna.visualization.plot_slice` for the details.\nplot_slice(study)\n\n###################################################################################################\n# Select parameters to visualize.\nplot_slice(study, params=[\"bagging_freq\", \"bagging_fraction\"])\n\n###################################################################################################\n# Visualize parameter importances. See :func:`~optuna.visualization.plot_param_importances` for the details.\nplot_param_importances(study)\n\n###################################################################################################\n# Learn which hyperparameters are affecting the trial duration with hyperparameter importance.\noptuna.visualization.plot_param_importances(\n study, target=lambda t: t.duration.total_seconds(), target_name=\"duration\"\n)\n\n###################################################################################################\n# Visualize empirical distribution function. See :func:`~optuna.visualization.plot_edf` for the details.\nplot_edf(study)\n", "path": "tutorial/10_key_features/005_visualization.py"}], "after_files": [{"content": "\"\"\"\n.. 
_visualization:\n\nQuick Visualization for Hyperparameter Optimization Analysis\n============================================================\n\nOptuna provides various visualization features in :mod:`optuna.visualization` to analyze optimization results visually.\n\nThis tutorial walks you through this module by visualizing the history of lightgbm model for breast cancer dataset.\n\nFor visualizing multi-objective optimization (i.e., the usage of :func:`optuna.visualization.plot_pareto_front`),\nplease refer to the tutorial of :ref:`multi_objective`.\n\"\"\"\n\n###################################################################################################\nimport lightgbm as lgb\nimport numpy as np\nimport sklearn.datasets\nimport sklearn.metrics\nfrom sklearn.model_selection import train_test_split\n\nimport optuna\n\n# You can use Matplotlib instead of Plotly for visualization by simply replacing `optuna.visualization` with\n# `optuna.visualization.matplotlib` in the following examples.\nfrom optuna.visualization import plot_contour\nfrom optuna.visualization import plot_edf\nfrom optuna.visualization import plot_intermediate_values\nfrom optuna.visualization import plot_optimization_history\nfrom optuna.visualization import plot_parallel_coordinate\nfrom optuna.visualization import plot_param_importances\nfrom optuna.visualization import plot_slice\n\nSEED = 42\n\nnp.random.seed(SEED)\n\n\n###################################################################################################\n# Define the objective function.\ndef objective(trial):\n data, target = sklearn.datasets.load_breast_cancer(return_X_y=True)\n train_x, valid_x, train_y, valid_y = train_test_split(data, target, test_size=0.25)\n dtrain = lgb.Dataset(train_x, label=train_y)\n dvalid = lgb.Dataset(valid_x, label=valid_y)\n\n param = {\n \"objective\": \"binary\",\n \"metric\": \"auc\",\n \"verbosity\": -1,\n \"boosting_type\": \"gbdt\",\n \"bagging_fraction\": trial.suggest_float(\"bagging_fraction\", 0.4, 1.0),\n \"bagging_freq\": trial.suggest_int(\"bagging_freq\", 1, 7),\n \"min_child_samples\": trial.suggest_int(\"min_child_samples\", 5, 100),\n }\n\n # Add a callback for pruning.\n pruning_callback = optuna.integration.LightGBMPruningCallback(trial, \"auc\")\n gbm = lgb.train(\n param, dtrain, valid_sets=[dvalid], verbose_eval=False, callbacks=[pruning_callback]\n )\n\n preds = gbm.predict(valid_x)\n pred_labels = np.rint(preds)\n accuracy = sklearn.metrics.accuracy_score(valid_y, pred_labels)\n return accuracy\n\n\n###################################################################################################\nstudy = optuna.create_study(\n direction=\"maximize\",\n sampler=optuna.samplers.TPESampler(seed=SEED),\n pruner=optuna.pruners.MedianPruner(n_warmup_steps=10),\n)\nstudy.optimize(objective, n_trials=100, timeout=600)\n\n###################################################################################################\n# Plot functions\n# --------------\n# Visualize the optimization history. See :func:`~optuna.visualization.plot_optimization_history` for the details.\nplot_optimization_history(study)\n\n###################################################################################################\n# Visualize the learning curves of the trials. 
See :func:`~optuna.visualization.plot_intermediate_values` for the details.\nplot_intermediate_values(study)\n\n###################################################################################################\n# Visualize high-dimensional parameter relationships. See :func:`~optuna.visualization.plot_parallel_coordinate` for the details.\nplot_parallel_coordinate(study)\n\n###################################################################################################\n# Select parameters to visualize.\nplot_parallel_coordinate(study, params=[\"bagging_freq\", \"bagging_fraction\"])\n\n###################################################################################################\n# Visualize hyperparameter relationships. See :func:`~optuna.visualization.plot_contour` for the details.\nplot_contour(study)\n\n###################################################################################################\n# Select parameters to visualize.\nplot_contour(study, params=[\"bagging_freq\", \"bagging_fraction\"])\n\n###################################################################################################\n# Visualize individual hyperparameters as slice plot. See :func:`~optuna.visualization.plot_slice` for the details.\nplot_slice(study)\n\n###################################################################################################\n# Select parameters to visualize.\nplot_slice(study, params=[\"bagging_freq\", \"bagging_fraction\"])\n\n###################################################################################################\n# Visualize parameter importances. See :func:`~optuna.visualization.plot_param_importances` for the details.\nplot_param_importances(study)\n\n###################################################################################################\n# Learn which hyperparameters are affecting the trial duration with hyperparameter importance.\noptuna.visualization.plot_param_importances(\n study, target=lambda t: t.duration.total_seconds(), target_name=\"duration\"\n)\n\n###################################################################################################\n# Visualize empirical distribution function. See :func:`~optuna.visualization.plot_edf` for the details.\nplot_edf(study)\n", "path": "tutorial/10_key_features/005_visualization.py"}]}
| 1,613 | 253 |
gh_patches_debug_58736
|
rasdani/github-patches
|
git_diff
|
goauthentik__authentik-6081
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Not sure that OAuth2 client source should use authorization header
I've been testing authentik using an Auth0 OIDC source as well as a Google source. I have gotten both to work, but Auth0 was not working before a fix to prevent adding the access token to the Authorization Bearer headers. Google auth works fine with or without this fix.
https://auth0.com/blog/id-token-access-token-what-is-the-difference/ suggests that many endpoints should not be given the access token. Not sure this is relevant.
I think Auth0 is less permissive and prefers the access_token param rather than the Authorization Bearer token.
In `sources/oauth/clients/oauth2.py`:
```
class OAuth2Client(BaseOAuthClient):
"""OAuth2 Client"""
...
def do_request(self, method: str, url: str, **kwargs) -> Response:
"""Build remote url request. Constructs necessary auth."""
if "token" in kwargs:
token = kwargs.pop("token")
params = kwargs.get("params", {})
params["access_token"] = token["access_token"]
kwargs["params"] = params
headers = kwargs.get("headers", {})
# Note this fix
# headers["Authorization"] = f"{token['token_type']} {token['access_token']}"
kwargs["headers"] = headers
return super().do_request(method, url, **kwargs)
```
--- END ISSUE ---
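For readers less familiar with the two transport options the issue contrasts, the generic `requests` sketch below shows what each one looks like on the wire. The endpoint URL and token are placeholders; this is not authentik's client code.

```python
# Generic illustration of the two ways an OAuth2 access token can be sent to a
# userinfo-style endpoint. URL and token values are placeholders.
import requests

token = {"token_type": "Bearer", "access_token": "example-access-token"}
url = "https://provider.example.com/userinfo"

# Option 1: token as a query parameter (what the issue says Auth0 prefers).
resp_params = requests.get(url, params={"access_token": token["access_token"]})

# Option 2: token in the Authorization header (the line the issue comments out).
resp_header = requests.get(
    url,
    headers={"Authorization": f"{token['token_type']} {token['access_token']}"},
)
```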
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/sources/oauth/types/oidc.py`
Content:
```
1 """OpenID Connect OAuth Views"""
2 from typing import Any
3
4 from authentik.sources.oauth.clients.oauth2 import UserprofileHeaderAuthClient
5 from authentik.sources.oauth.models import OAuthSource
6 from authentik.sources.oauth.types.registry import SourceType, registry
7 from authentik.sources.oauth.views.callback import OAuthCallback
8 from authentik.sources.oauth.views.redirect import OAuthRedirect
9
10
11 class OpenIDConnectOAuthRedirect(OAuthRedirect):
12 """OpenIDConnect OAuth2 Redirect"""
13
14 def get_additional_parameters(self, source: OAuthSource): # pragma: no cover
15 return {
16 "scope": ["openid", "email", "profile"],
17 }
18
19
20 class OpenIDConnectOAuth2Callback(OAuthCallback):
21 """OpenIDConnect OAuth2 Callback"""
22
23 client_class: UserprofileHeaderAuthClient
24
25 def get_user_id(self, info: dict[str, str]) -> str:
26 return info.get("sub", "")
27
28 def get_user_enroll_context(
29 self,
30 info: dict[str, Any],
31 ) -> dict[str, Any]:
32 return {
33 "username": info.get("nickname", info.get("preferred_username")),
34 "email": info.get("email"),
35 "name": info.get("name"),
36 }
37
38
39 @registry.register()
40 class OpenIDConnectType(SourceType):
41 """OpenIDConnect Type definition"""
42
43 callback_view = OpenIDConnectOAuth2Callback
44 redirect_view = OpenIDConnectOAuthRedirect
45 name = "OpenID Connect"
46 slug = "openidconnect"
47
48 urls_customizable = True
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/authentik/sources/oauth/types/oidc.py b/authentik/sources/oauth/types/oidc.py
--- a/authentik/sources/oauth/types/oidc.py
+++ b/authentik/sources/oauth/types/oidc.py
@@ -20,7 +20,7 @@
class OpenIDConnectOAuth2Callback(OAuthCallback):
"""OpenIDConnect OAuth2 Callback"""
- client_class: UserprofileHeaderAuthClient
+ client_class = UserprofileHeaderAuthClient
def get_user_id(self, info: dict[str, str]) -> str:
return info.get("sub", "")
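The whole fix is one character of punctuation: the original line was a bare class-level annotation, which declares a type but assigns nothing, so the callback silently kept the parent class's client. A small stand-alone sketch (with stand-in names, not authentik's classes) shows the difference.

```python
# Annotation vs. assignment at class level; names are stand-ins for illustration.
class BaseCallback:
    client_class = "OAuth2Client"  # parent default


class AnnotatedCallback(BaseCallback):
    client_class: str  # annotation only -- nothing is assigned


class AssignedCallback(BaseCallback):
    client_class = "UserprofileHeaderAuthClient"  # real override


print(AnnotatedCallback.client_class)  # "OAuth2Client" -- parent value still wins
print(AssignedCallback.client_class)   # "UserprofileHeaderAuthClient"
```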
|
{"golden_diff": "diff --git a/authentik/sources/oauth/types/oidc.py b/authentik/sources/oauth/types/oidc.py\n--- a/authentik/sources/oauth/types/oidc.py\n+++ b/authentik/sources/oauth/types/oidc.py\n@@ -20,7 +20,7 @@\n class OpenIDConnectOAuth2Callback(OAuthCallback):\n \"\"\"OpenIDConnect OAuth2 Callback\"\"\"\n \n- client_class: UserprofileHeaderAuthClient\n+ client_class = UserprofileHeaderAuthClient\n \n def get_user_id(self, info: dict[str, str]) -> str:\n return info.get(\"sub\", \"\")\n", "issue": "Not sure that OAuth2 client source should use authorization header\nI've been testing authentik using an Auth0 openIDC source as well as a google source. I have gotten both to work, but Auth0 was not working before a fix to prevent adding the access token to the authorizaton Bearer headers. Google auth works fine with or without this fix. \r\n\r\nhttps://auth0.com/blog/id-token-access-token-what-is-the-difference/ suggests that many endpoints should not be given the access token. Not sure this is relevant.\r\n\r\nI think Auth0 is less permissive and prefers the access_token param rather than the Authorization Bearer token\r\n\r\nin sources/oauth/clients/oauth2.py\r\n```\r\nclass OAuth2Client(BaseOAuthClient):\r\n \"\"\"OAuth2 Client\"\"\"\r\n\r\n ...\r\n\r\n def do_request(self, method: str, url: str, **kwargs) -> Response:\r\n \"\"\"Build remote url request. Constructs necessary auth.\"\"\"\r\n if \"token\" in kwargs:\r\n token = kwargs.pop(\"token\")\r\n\r\n params = kwargs.get(\"params\", {})\r\n params[\"access_token\"] = token[\"access_token\"]\r\n kwargs[\"params\"] = params\r\n\r\n headers = kwargs.get(\"headers\", {})\r\n # Note this fix\r\n # headers[\"Authorization\"] = f\"{token['token_type']} {token['access_token']}\"\r\n kwargs[\"headers\"] = headers\r\n return super().do_request(method, url, **kwargs)\r\n```\r\n\r\n\n", "before_files": [{"content": "\"\"\"OpenID Connect OAuth Views\"\"\"\nfrom typing import Any\n\nfrom authentik.sources.oauth.clients.oauth2 import UserprofileHeaderAuthClient\nfrom authentik.sources.oauth.models import OAuthSource\nfrom authentik.sources.oauth.types.registry import SourceType, registry\nfrom authentik.sources.oauth.views.callback import OAuthCallback\nfrom authentik.sources.oauth.views.redirect import OAuthRedirect\n\n\nclass OpenIDConnectOAuthRedirect(OAuthRedirect):\n \"\"\"OpenIDConnect OAuth2 Redirect\"\"\"\n\n def get_additional_parameters(self, source: OAuthSource): # pragma: no cover\n return {\n \"scope\": [\"openid\", \"email\", \"profile\"],\n }\n\n\nclass OpenIDConnectOAuth2Callback(OAuthCallback):\n \"\"\"OpenIDConnect OAuth2 Callback\"\"\"\n\n client_class: UserprofileHeaderAuthClient\n\n def get_user_id(self, info: dict[str, str]) -> str:\n return info.get(\"sub\", \"\")\n\n def get_user_enroll_context(\n self,\n info: dict[str, Any],\n ) -> dict[str, Any]:\n return {\n \"username\": info.get(\"nickname\", info.get(\"preferred_username\")),\n \"email\": info.get(\"email\"),\n \"name\": info.get(\"name\"),\n }\n\n\[email protected]()\nclass OpenIDConnectType(SourceType):\n \"\"\"OpenIDConnect Type definition\"\"\"\n\n callback_view = OpenIDConnectOAuth2Callback\n redirect_view = OpenIDConnectOAuthRedirect\n name = \"OpenID Connect\"\n slug = \"openidconnect\"\n\n urls_customizable = True\n", "path": "authentik/sources/oauth/types/oidc.py"}], "after_files": [{"content": "\"\"\"OpenID Connect OAuth Views\"\"\"\nfrom typing import Any\n\nfrom authentik.sources.oauth.clients.oauth2 import UserprofileHeaderAuthClient\nfrom 
authentik.sources.oauth.models import OAuthSource\nfrom authentik.sources.oauth.types.registry import SourceType, registry\nfrom authentik.sources.oauth.views.callback import OAuthCallback\nfrom authentik.sources.oauth.views.redirect import OAuthRedirect\n\n\nclass OpenIDConnectOAuthRedirect(OAuthRedirect):\n \"\"\"OpenIDConnect OAuth2 Redirect\"\"\"\n\n def get_additional_parameters(self, source: OAuthSource): # pragma: no cover\n return {\n \"scope\": [\"openid\", \"email\", \"profile\"],\n }\n\n\nclass OpenIDConnectOAuth2Callback(OAuthCallback):\n \"\"\"OpenIDConnect OAuth2 Callback\"\"\"\n\n client_class = UserprofileHeaderAuthClient\n\n def get_user_id(self, info: dict[str, str]) -> str:\n return info.get(\"sub\", \"\")\n\n def get_user_enroll_context(\n self,\n info: dict[str, Any],\n ) -> dict[str, Any]:\n return {\n \"username\": info.get(\"nickname\", info.get(\"preferred_username\")),\n \"email\": info.get(\"email\"),\n \"name\": info.get(\"name\"),\n }\n\n\[email protected]()\nclass OpenIDConnectType(SourceType):\n \"\"\"OpenIDConnect Type definition\"\"\"\n\n callback_view = OpenIDConnectOAuth2Callback\n redirect_view = OpenIDConnectOAuthRedirect\n name = \"OpenID Connect\"\n slug = \"openidconnect\"\n\n urls_customizable = True\n", "path": "authentik/sources/oauth/types/oidc.py"}]}
| 983 | 132 |
gh_patches_debug_18320
|
rasdani/github-patches
|
git_diff
|
mkdocs__mkdocs-1453
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tornado 5.0 raises an error on install with older Python versions.
Changing the requirement to `"tornado>=4.1,<5.0"` in setup.py works around it for me.
The unpinned requirement broke installation via pip on older Python versions.
--- END ISSUE ---
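Since Tornado 5.0 only dropped support for older interpreters, a blanket cap is not the only option: PEP 508 environment markers let `install_requires` pick a bound per Python version. The sketch below is purely illustrative; the package metadata is made up and the exact bounds mkdocs settled on may differ.

```python
# Illustrative setup.py fragment using environment markers; bounds and metadata
# are assumptions for the example, not the actual mkdocs fix.
from setuptools import setup

setup(
    name="example-package",
    version="0.1.0",
    install_requires=[
        # Interpreters Tornado 5.x no longer supports stay on the 4.x line...
        'tornado>=4.1,<5.0; python_version < "3.4"',
        # ...while newer interpreters may take 5.x as well.
        'tornado>=4.1; python_version >= "3.4"',
    ],
)
```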
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 from __future__ import print_function
5 from setuptools import setup
6 import re
7 import os
8 import sys
9
10
11 long_description = (
12 "MkDocs is a fast, simple and downright gorgeous static site generator "
13 "that's geared towards building project documentation. Documentation "
14 "source files are written in Markdown, and configured with a single YAML "
15 "configuration file."
16 )
17
18
19 def get_version(package):
20 """Return package version as listed in `__version__` in `init.py`."""
21 init_py = open(os.path.join(package, '__init__.py')).read()
22 return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
23
24
25 def get_packages(package):
26 """Return root package and all sub-packages."""
27 return [dirpath
28 for dirpath, dirnames, filenames in os.walk(package)
29 if os.path.exists(os.path.join(dirpath, '__init__.py'))]
30
31
32 if sys.argv[-1] == 'publish':
33 if os.system("pip freeze | grep wheel"):
34 print("wheel not installed.\nUse `pip install wheel`.\nExiting.")
35 sys.exit()
36 if os.system("pip freeze | grep twine"):
37 print("twine not installed.\nUse `pip install twine`.\nExiting.")
38 sys.exit()
39 os.system("python setup.py sdist bdist_wheel")
40 os.system("twine upload dist/*")
41 print("You probably want to also tag the version now:")
42 print(" git tag -a {0} -m 'version {0}'".format(get_version("mkdocs")))
43 print(" git push --tags")
44 sys.exit()
45
46
47 setup(
48 name="mkdocs",
49 version=get_version("mkdocs"),
50 url='http://www.mkdocs.org',
51 license='BSD',
52 description='Project documentation with Markdown.',
53 long_description=long_description,
54 author='Tom Christie',
55 author_email='[email protected]', # SEE NOTE BELOW (*)
56 packages=get_packages("mkdocs"),
57 include_package_data=True,
58 install_requires=[
59 'click>=3.3',
60 'Jinja2>=2.7.1',
61 'livereload>=2.5.1',
62 'Markdown>=2.3.1',
63 'PyYAML>=3.10',
64 'tornado>=4.1',
65 ],
66 entry_points={
67 'console_scripts': [
68 'mkdocs = mkdocs.__main__:cli',
69 ],
70 'mkdocs.themes': [
71 'mkdocs = mkdocs.themes.mkdocs',
72 'readthedocs = mkdocs.themes.readthedocs',
73 ],
74 'mkdocs.plugins': [
75 'search = mkdocs.contrib.search:SearchPlugin',
76 ],
77 },
78 classifiers=[
79 'Development Status :: 5 - Production/Stable',
80 'Environment :: Console',
81 'Environment :: Web Environment',
82 'Intended Audience :: Developers',
83 'License :: OSI Approved :: BSD License',
84 'Operating System :: OS Independent',
85 'Programming Language :: Python',
86 'Programming Language :: Python :: 2',
87 'Programming Language :: Python :: 2.7',
88 'Programming Language :: Python :: 3',
89 'Programming Language :: Python :: 3.3',
90 'Programming Language :: Python :: 3.4',
91 'Programming Language :: Python :: 3.5',
92 'Programming Language :: Python :: 3.6',
93 "Programming Language :: Python :: Implementation :: CPython",
94 "Programming Language :: Python :: Implementation :: PyPy",
95 'Topic :: Documentation',
96 'Topic :: Text Processing',
97 ],
98 zip_safe=False,
99 )
100
101 # (*) Please direct queries to the discussion group:
102 # https://groups.google.com/forum/#!forum/mkdocs
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -61,8 +61,9 @@
'livereload>=2.5.1',
'Markdown>=2.3.1',
'PyYAML>=3.10',
- 'tornado>=4.1',
+ 'tornado>=5.0',
],
+ python_requires='>=2.7.9,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',
entry_points={
'console_scripts': [
'mkdocs = mkdocs.__main__:cli',
@@ -86,7 +87,6 @@
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,8 +61,9 @@\n 'livereload>=2.5.1',\n 'Markdown>=2.3.1',\n 'PyYAML>=3.10',\n- 'tornado>=4.1',\n+ 'tornado>=5.0',\n ],\n+ python_requires='>=2.7.9,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',\n entry_points={\n 'console_scripts': [\n 'mkdocs = mkdocs.__main__:cli',\n@@ -86,7 +87,6 @@\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n- 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n", "issue": "Tornado 5.0 raises error on install with older Python versions.\nchanged to `\"tornado>=4.1,<5.0\"` in setup.py\r\n\r\nThis broke installation via pip for me. \n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom setuptools import setup\nimport re\nimport os\nimport sys\n\n\nlong_description = (\n \"MkDocs is a fast, simple and downright gorgeous static site generator \"\n \"that's geared towards building project documentation. Documentation \"\n \"source files are written in Markdown, and configured with a single YAML \"\n \"configuration file.\"\n)\n\n\ndef get_version(package):\n \"\"\"Return package version as listed in `__version__` in `init.py`.\"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"Return root package and all sub-packages.\"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\nif sys.argv[-1] == 'publish':\n if os.system(\"pip freeze | grep wheel\"):\n print(\"wheel not installed.\\nUse `pip install wheel`.\\nExiting.\")\n sys.exit()\n if os.system(\"pip freeze | grep twine\"):\n print(\"twine not installed.\\nUse `pip install twine`.\\nExiting.\")\n sys.exit()\n os.system(\"python setup.py sdist bdist_wheel\")\n os.system(\"twine upload dist/*\")\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a {0} -m 'version {0}'\".format(get_version(\"mkdocs\")))\n print(\" git push --tags\")\n sys.exit()\n\n\nsetup(\n name=\"mkdocs\",\n version=get_version(\"mkdocs\"),\n url='http://www.mkdocs.org',\n license='BSD',\n description='Project documentation with Markdown.',\n long_description=long_description,\n author='Tom Christie',\n author_email='[email protected]', # SEE NOTE BELOW (*)\n packages=get_packages(\"mkdocs\"),\n include_package_data=True,\n install_requires=[\n 'click>=3.3',\n 'Jinja2>=2.7.1',\n 'livereload>=2.5.1',\n 'Markdown>=2.3.1',\n 'PyYAML>=3.10',\n 'tornado>=4.1',\n ],\n entry_points={\n 'console_scripts': [\n 'mkdocs = mkdocs.__main__:cli',\n ],\n 'mkdocs.themes': [\n 'mkdocs = mkdocs.themes.mkdocs',\n 'readthedocs = mkdocs.themes.readthedocs',\n ],\n 'mkdocs.plugins': [\n 'search = mkdocs.contrib.search:SearchPlugin',\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 
3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n 'Topic :: Documentation',\n 'Topic :: Text Processing',\n ],\n zip_safe=False,\n)\n\n# (*) Please direct queries to the discussion group:\n# https://groups.google.com/forum/#!forum/mkdocs\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom setuptools import setup\nimport re\nimport os\nimport sys\n\n\nlong_description = (\n \"MkDocs is a fast, simple and downright gorgeous static site generator \"\n \"that's geared towards building project documentation. Documentation \"\n \"source files are written in Markdown, and configured with a single YAML \"\n \"configuration file.\"\n)\n\n\ndef get_version(package):\n \"\"\"Return package version as listed in `__version__` in `init.py`.\"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"Return root package and all sub-packages.\"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\nif sys.argv[-1] == 'publish':\n if os.system(\"pip freeze | grep wheel\"):\n print(\"wheel not installed.\\nUse `pip install wheel`.\\nExiting.\")\n sys.exit()\n if os.system(\"pip freeze | grep twine\"):\n print(\"twine not installed.\\nUse `pip install twine`.\\nExiting.\")\n sys.exit()\n os.system(\"python setup.py sdist bdist_wheel\")\n os.system(\"twine upload dist/*\")\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a {0} -m 'version {0}'\".format(get_version(\"mkdocs\")))\n print(\" git push --tags\")\n sys.exit()\n\n\nsetup(\n name=\"mkdocs\",\n version=get_version(\"mkdocs\"),\n url='http://www.mkdocs.org',\n license='BSD',\n description='Project documentation with Markdown.',\n long_description=long_description,\n author='Tom Christie',\n author_email='[email protected]', # SEE NOTE BELOW (*)\n packages=get_packages(\"mkdocs\"),\n include_package_data=True,\n install_requires=[\n 'click>=3.3',\n 'Jinja2>=2.7.1',\n 'livereload>=2.5.1',\n 'Markdown>=2.3.1',\n 'PyYAML>=3.10',\n 'tornado>=5.0',\n ],\n python_requires='>=2.7.9,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',\n entry_points={\n 'console_scripts': [\n 'mkdocs = mkdocs.__main__:cli',\n ],\n 'mkdocs.themes': [\n 'mkdocs = mkdocs.themes.mkdocs',\n 'readthedocs = mkdocs.themes.readthedocs',\n ],\n 'mkdocs.plugins': [\n 'search = mkdocs.contrib.search:SearchPlugin',\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n 'Topic :: Documentation',\n 'Topic :: Text Processing',\n ],\n 
zip_safe=False,\n)\n\n# (*) Please direct queries to the discussion group:\n# https://groups.google.com/forum/#!forum/mkdocs\n", "path": "setup.py"}]}
| 1,320 | 235 |
gh_patches_debug_38298
|
rasdani/github-patches
|
git_diff
|
jupyterhub__jupyterhub-121
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
redirect loop on invalid single-user auth token
when the single-user server's API request fails with 403, it's handled as failed login (302) causing a redirect loop, when it should be handled as "500: oh noes, I can't do anything!"
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jupyterhub/singleuser.py`
Content:
```
1 #!/usr/bin/env python
2 """Extend regular notebook server to be aware of multiuser things."""
3
4 # Copyright (c) Jupyter Development Team.
5 # Distributed under the terms of the Modified BSD License.
6
7 import os
8
9 import requests
10
11 from tornado import ioloop
12
13 from IPython.utils.traitlets import Unicode
14
15 from IPython.html.notebookapp import NotebookApp
16 from IPython.html.auth.login import LoginHandler
17 from IPython.html.auth.logout import LogoutHandler
18
19 from IPython.html.utils import url_path_join
20
21
22 from distutils.version import LooseVersion as V
23
24 import IPython
25 if V(IPython.__version__) < V('3.0'):
26 raise ImportError("JupyterHub Requires IPython >= 3.0, found %s" % IPython.__version__)
27
28 # Define two methods to attach to AuthenticatedHandler,
29 # which authenticate via the central auth server.
30
31 class JupyterHubLoginHandler(LoginHandler):
32 @staticmethod
33 def login_available(settings):
34 return True
35
36 @staticmethod
37 def verify_token(self, cookie_name, encrypted_cookie):
38 """monkeypatch method for token verification"""
39 cookie_cache = self.settings['cookie_cache']
40 if encrypted_cookie in cookie_cache:
41 # we've seen this token before, don't ask upstream again
42 return cookie_cache[encrypted_cookie]
43
44 hub_api_url = self.settings['hub_api_url']
45 hub_api_key = self.settings['hub_api_key']
46 r = requests.get(url_path_join(
47 hub_api_url, "authorizations/cookie", cookie_name,
48 ),
49 headers = {'Authorization' : 'token %s' % hub_api_key},
50 data=encrypted_cookie,
51 )
52 if r.status_code == 404:
53 data = {'user' : ''}
54 elif r.status_code >= 400:
55 self.log.warn("Failed to check authorization: [%i] %s", r.status_code, r.reason)
56 data = None
57 else:
58 data = r.json()
59 cookie_cache[encrypted_cookie] = data
60 return data
61
62 @staticmethod
63 def get_user(self):
64 """alternative get_current_user to query the central server"""
65 my_user = self.settings['user']
66 encrypted_cookie = self.get_cookie(self.cookie_name)
67 if encrypted_cookie:
68 auth_data = JupyterHubLoginHandler.verify_token(self, self.cookie_name, encrypted_cookie)
69 if not auth_data:
70 # treat invalid token the same as no token
71 return None
72 user = auth_data['user']
73 if user == my_user:
74 return user
75 else:
76 return None
77 else:
78 self.log.debug("No token cookie")
79 return None
80
81
82 class JupyterHubLogoutHandler(LogoutHandler):
83 def get(self):
84 self.redirect(url_path_join(self.settings['hub_prefix'], 'logout'))
85
86
87 # register new hub related command-line aliases
88 aliases = NotebookApp.aliases.get_default_value()
89 aliases.update({
90 'user' : 'SingleUserNotebookApp.user',
91 'cookie-name': 'SingleUserNotebookApp.cookie_name',
92 'hub-prefix': 'SingleUserNotebookApp.hub_prefix',
93 'hub-api-url': 'SingleUserNotebookApp.hub_api_url',
94 'base-url': 'SingleUserNotebookApp.base_url',
95 })
96
97 class SingleUserNotebookApp(NotebookApp):
98 """A Subclass of the regular NotebookApp that is aware of the parent multiuser context."""
99 user = Unicode(config=True)
100 cookie_name = Unicode(config=True)
101 hub_prefix = Unicode(config=True)
102 hub_api_url = Unicode(config=True)
103 aliases = aliases
104 open_browser = False
105 login_handler_class = JupyterHubLoginHandler
106 logout_handler_class = JupyterHubLogoutHandler
107
108 def _confirm_exit(self):
109 # disable the exit confirmation for background notebook processes
110 ioloop.IOLoop.instance().stop()
111
112 def init_webapp(self):
113 # load the hub related settings into the tornado settings dict
114 env = os.environ
115 s = self.tornado_settings
116 s['cookie_cache'] = {}
117 s['user'] = self.user
118 s['hub_api_key'] = env.pop('JPY_API_TOKEN')
119 s['hub_prefix'] = self.hub_prefix
120 s['cookie_name'] = self.cookie_name
121 s['login_url'] = url_path_join(self.hub_prefix, 'login')
122 s['hub_api_url'] = self.hub_api_url
123 super(SingleUserNotebookApp, self).init_webapp()
124
125
126 def main():
127 return SingleUserNotebookApp.launch_instance()
128
129
130 if __name__ == "__main__":
131 main()
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/jupyterhub/singleuser.py b/jupyterhub/singleuser.py
--- a/jupyterhub/singleuser.py
+++ b/jupyterhub/singleuser.py
@@ -9,6 +9,7 @@
import requests
from tornado import ioloop
+from tornado.web import HTTPError
from IPython.utils.traitlets import Unicode
@@ -35,7 +36,7 @@
@staticmethod
def verify_token(self, cookie_name, encrypted_cookie):
- """monkeypatch method for token verification"""
+ """method for token verification"""
cookie_cache = self.settings['cookie_cache']
if encrypted_cookie in cookie_cache:
# we've seen this token before, don't ask upstream again
@@ -51,9 +52,15 @@
)
if r.status_code == 404:
data = {'user' : ''}
+ if r.status_code == 403:
+ self.log.error("I don't have permission to verify cookies, my auth token may have expired: [%i] %s", r.status_code, r.reason)
+ raise HTTPError(500, "Permission failure checking authorization, I may need to be restarted")
+ elif r.status_code >= 500:
+ self.log.error("Upstream failure verifying auth token: [%i] %s", r.status_code, r.reason)
+ raise HTTPError(502, "Failed to check authorization (upstream problem)")
elif r.status_code >= 400:
self.log.warn("Failed to check authorization: [%i] %s", r.status_code, r.reason)
- data = None
+ raise HTTPError(500, "Failed to check authorization")
else:
data = r.json()
cookie_cache[encrypted_cookie] = data
@@ -62,6 +69,13 @@
@staticmethod
def get_user(self):
"""alternative get_current_user to query the central server"""
+ # only allow this to be called once per handler
+ # avoids issues if an error is raised,
+ # since this may be called again when trying to render the error page
+ if hasattr(self, '_cached_user'):
+ return self._cached_user
+
+ self._cached_user = None
my_user = self.settings['user']
encrypted_cookie = self.get_cookie(self.cookie_name)
if encrypted_cookie:
@@ -71,6 +85,7 @@
return None
user = auth_data['user']
if user == my_user:
+ self._cached_user = user
return user
else:
return None
|
{"golden_diff": "diff --git a/jupyterhub/singleuser.py b/jupyterhub/singleuser.py\n--- a/jupyterhub/singleuser.py\n+++ b/jupyterhub/singleuser.py\n@@ -9,6 +9,7 @@\n import requests\n \n from tornado import ioloop\n+from tornado.web import HTTPError\n \n from IPython.utils.traitlets import Unicode\n \n@@ -35,7 +36,7 @@\n \n @staticmethod\n def verify_token(self, cookie_name, encrypted_cookie):\n- \"\"\"monkeypatch method for token verification\"\"\"\n+ \"\"\"method for token verification\"\"\"\n cookie_cache = self.settings['cookie_cache']\n if encrypted_cookie in cookie_cache:\n # we've seen this token before, don't ask upstream again\n@@ -51,9 +52,15 @@\n )\n if r.status_code == 404:\n data = {'user' : ''}\n+ if r.status_code == 403:\n+ self.log.error(\"I don't have permission to verify cookies, my auth token may have expired: [%i] %s\", r.status_code, r.reason)\n+ raise HTTPError(500, \"Permission failure checking authorization, I may need to be restarted\")\n+ elif r.status_code >= 500:\n+ self.log.error(\"Upstream failure verifying auth token: [%i] %s\", r.status_code, r.reason)\n+ raise HTTPError(502, \"Failed to check authorization (upstream problem)\")\n elif r.status_code >= 400:\n self.log.warn(\"Failed to check authorization: [%i] %s\", r.status_code, r.reason)\n- data = None\n+ raise HTTPError(500, \"Failed to check authorization\")\n else:\n data = r.json()\n cookie_cache[encrypted_cookie] = data\n@@ -62,6 +69,13 @@\n @staticmethod\n def get_user(self):\n \"\"\"alternative get_current_user to query the central server\"\"\"\n+ # only allow this to be called once per handler\n+ # avoids issues if an error is raised,\n+ # since this may be called again when trying to render the error page\n+ if hasattr(self, '_cached_user'):\n+ return self._cached_user\n+ \n+ self._cached_user = None\n my_user = self.settings['user']\n encrypted_cookie = self.get_cookie(self.cookie_name)\n if encrypted_cookie:\n@@ -71,6 +85,7 @@\n return None\n user = auth_data['user']\n if user == my_user:\n+ self._cached_user = user\n return user\n else:\n return None\n", "issue": "redirect loop on invalid single-user auth token\nwhen the single-user server's API request fails with 403, it's handled as failed login (302) causing a redirect loop, when it should be handled as \"500: oh noes, I can't do anything!\"\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"Extend regular notebook server to be aware of multiuser things.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport os\n\nimport requests\n\nfrom tornado import ioloop\n\nfrom IPython.utils.traitlets import Unicode\n\nfrom IPython.html.notebookapp import NotebookApp\nfrom IPython.html.auth.login import LoginHandler\nfrom IPython.html.auth.logout import LogoutHandler\n\nfrom IPython.html.utils import url_path_join\n\n\nfrom distutils.version import LooseVersion as V\n\nimport IPython\nif V(IPython.__version__) < V('3.0'):\n raise ImportError(\"JupyterHub Requires IPython >= 3.0, found %s\" % IPython.__version__)\n\n# Define two methods to attach to AuthenticatedHandler,\n# which authenticate via the central auth server.\n\nclass JupyterHubLoginHandler(LoginHandler):\n @staticmethod\n def login_available(settings):\n return True\n \n @staticmethod\n def verify_token(self, cookie_name, encrypted_cookie):\n \"\"\"monkeypatch method for token verification\"\"\"\n cookie_cache = self.settings['cookie_cache']\n if encrypted_cookie in cookie_cache:\n # we've seen this token 
before, don't ask upstream again\n return cookie_cache[encrypted_cookie]\n \n hub_api_url = self.settings['hub_api_url']\n hub_api_key = self.settings['hub_api_key']\n r = requests.get(url_path_join(\n hub_api_url, \"authorizations/cookie\", cookie_name,\n ),\n headers = {'Authorization' : 'token %s' % hub_api_key},\n data=encrypted_cookie,\n )\n if r.status_code == 404:\n data = {'user' : ''}\n elif r.status_code >= 400:\n self.log.warn(\"Failed to check authorization: [%i] %s\", r.status_code, r.reason)\n data = None\n else:\n data = r.json()\n cookie_cache[encrypted_cookie] = data\n return data\n \n @staticmethod\n def get_user(self):\n \"\"\"alternative get_current_user to query the central server\"\"\"\n my_user = self.settings['user']\n encrypted_cookie = self.get_cookie(self.cookie_name)\n if encrypted_cookie:\n auth_data = JupyterHubLoginHandler.verify_token(self, self.cookie_name, encrypted_cookie)\n if not auth_data:\n # treat invalid token the same as no token\n return None\n user = auth_data['user']\n if user == my_user:\n return user\n else:\n return None\n else:\n self.log.debug(\"No token cookie\")\n return None\n\n\nclass JupyterHubLogoutHandler(LogoutHandler):\n def get(self):\n self.redirect(url_path_join(self.settings['hub_prefix'], 'logout'))\n\n\n# register new hub related command-line aliases\naliases = NotebookApp.aliases.get_default_value()\naliases.update({\n 'user' : 'SingleUserNotebookApp.user',\n 'cookie-name': 'SingleUserNotebookApp.cookie_name',\n 'hub-prefix': 'SingleUserNotebookApp.hub_prefix',\n 'hub-api-url': 'SingleUserNotebookApp.hub_api_url',\n 'base-url': 'SingleUserNotebookApp.base_url',\n})\n\nclass SingleUserNotebookApp(NotebookApp):\n \"\"\"A Subclass of the regular NotebookApp that is aware of the parent multiuser context.\"\"\"\n user = Unicode(config=True)\n cookie_name = Unicode(config=True)\n hub_prefix = Unicode(config=True)\n hub_api_url = Unicode(config=True)\n aliases = aliases\n open_browser = False\n login_handler_class = JupyterHubLoginHandler\n logout_handler_class = JupyterHubLogoutHandler\n \n def _confirm_exit(self):\n # disable the exit confirmation for background notebook processes\n ioloop.IOLoop.instance().stop()\n \n def init_webapp(self):\n # load the hub related settings into the tornado settings dict\n env = os.environ\n s = self.tornado_settings\n s['cookie_cache'] = {}\n s['user'] = self.user\n s['hub_api_key'] = env.pop('JPY_API_TOKEN')\n s['hub_prefix'] = self.hub_prefix\n s['cookie_name'] = self.cookie_name\n s['login_url'] = url_path_join(self.hub_prefix, 'login')\n s['hub_api_url'] = self.hub_api_url\n super(SingleUserNotebookApp, self).init_webapp()\n\n\ndef main():\n return SingleUserNotebookApp.launch_instance()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "jupyterhub/singleuser.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"Extend regular notebook server to be aware of multiuser things.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport os\n\nimport requests\n\nfrom tornado import ioloop\nfrom tornado.web import HTTPError\n\nfrom IPython.utils.traitlets import Unicode\n\nfrom IPython.html.notebookapp import NotebookApp\nfrom IPython.html.auth.login import LoginHandler\nfrom IPython.html.auth.logout import LogoutHandler\n\nfrom IPython.html.utils import url_path_join\n\n\nfrom distutils.version import LooseVersion as V\n\nimport IPython\nif V(IPython.__version__) < V('3.0'):\n raise ImportError(\"JupyterHub Requires 
IPython >= 3.0, found %s\" % IPython.__version__)\n\n# Define two methods to attach to AuthenticatedHandler,\n# which authenticate via the central auth server.\n\nclass JupyterHubLoginHandler(LoginHandler):\n @staticmethod\n def login_available(settings):\n return True\n \n @staticmethod\n def verify_token(self, cookie_name, encrypted_cookie):\n \"\"\"method for token verification\"\"\"\n cookie_cache = self.settings['cookie_cache']\n if encrypted_cookie in cookie_cache:\n # we've seen this token before, don't ask upstream again\n return cookie_cache[encrypted_cookie]\n \n hub_api_url = self.settings['hub_api_url']\n hub_api_key = self.settings['hub_api_key']\n r = requests.get(url_path_join(\n hub_api_url, \"authorizations/cookie\", cookie_name,\n ),\n headers = {'Authorization' : 'token %s' % hub_api_key},\n data=encrypted_cookie,\n )\n if r.status_code == 404:\n data = {'user' : ''}\n if r.status_code == 403:\n self.log.error(\"I don't have permission to verify cookies, my auth token may have expired: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(500, \"Permission failure checking authorization, I may need to be restarted\")\n elif r.status_code >= 500:\n self.log.error(\"Upstream failure verifying auth token: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(502, \"Failed to check authorization (upstream problem)\")\n elif r.status_code >= 400:\n self.log.warn(\"Failed to check authorization: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(500, \"Failed to check authorization\")\n else:\n data = r.json()\n cookie_cache[encrypted_cookie] = data\n return data\n \n @staticmethod\n def get_user(self):\n \"\"\"alternative get_current_user to query the central server\"\"\"\n # only allow this to be called once per handler\n # avoids issues if an error is raised,\n # since this may be called again when trying to render the error page\n if hasattr(self, '_cached_user'):\n return self._cached_user\n \n self._cached_user = None\n my_user = self.settings['user']\n encrypted_cookie = self.get_cookie(self.cookie_name)\n if encrypted_cookie:\n auth_data = JupyterHubLoginHandler.verify_token(self, self.cookie_name, encrypted_cookie)\n if not auth_data:\n # treat invalid token the same as no token\n return None\n user = auth_data['user']\n if user == my_user:\n self._cached_user = user\n return user\n else:\n return None\n else:\n self.log.debug(\"No token cookie\")\n return None\n\n\nclass JupyterHubLogoutHandler(LogoutHandler):\n def get(self):\n self.redirect(url_path_join(self.settings['hub_prefix'], 'logout'))\n\n\n# register new hub related command-line aliases\naliases = NotebookApp.aliases.get_default_value()\naliases.update({\n 'user' : 'SingleUserNotebookApp.user',\n 'cookie-name': 'SingleUserNotebookApp.cookie_name',\n 'hub-prefix': 'SingleUserNotebookApp.hub_prefix',\n 'hub-api-url': 'SingleUserNotebookApp.hub_api_url',\n 'base-url': 'SingleUserNotebookApp.base_url',\n})\n\nclass SingleUserNotebookApp(NotebookApp):\n \"\"\"A Subclass of the regular NotebookApp that is aware of the parent multiuser context.\"\"\"\n user = Unicode(config=True)\n cookie_name = Unicode(config=True)\n hub_prefix = Unicode(config=True)\n hub_api_url = Unicode(config=True)\n aliases = aliases\n open_browser = False\n login_handler_class = JupyterHubLoginHandler\n logout_handler_class = JupyterHubLogoutHandler\n \n def _confirm_exit(self):\n # disable the exit confirmation for background notebook processes\n ioloop.IOLoop.instance().stop()\n \n def init_webapp(self):\n # load the hub related 
settings into the tornado settings dict\n env = os.environ\n s = self.tornado_settings\n s['cookie_cache'] = {}\n s['user'] = self.user\n s['hub_api_key'] = env.pop('JPY_API_TOKEN')\n s['hub_prefix'] = self.hub_prefix\n s['cookie_name'] = self.cookie_name\n s['login_url'] = url_path_join(self.hub_prefix, 'login')\n s['hub_api_url'] = self.hub_api_url\n super(SingleUserNotebookApp, self).init_webapp()\n\n\ndef main():\n return SingleUserNotebookApp.launch_instance()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "jupyterhub/singleuser.py"}]}
| 1,601 | 573 |
gh_patches_debug_40215
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-2869
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider regis_uk is broken
During the global build at 2021-05-26-14-42-23, spider **regis_uk** failed with **33 features** and **35 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/regis_uk.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/regis_uk.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/regis_uk.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/regis_salon_uk.py`
Content:
```
1 import scrapy
2 from locations.items import GeojsonPointItem
3 import re
4
5 regex_am = r"\s?([Aa][Mm])"
6 regex_pm = r"\s?([Pp][Mm])"
7
8
9 class RegisUKSpider(scrapy.Spider):
10 name = "regis_uk"
11 item_attributes = { 'brand': "Regis Salon" }
12 allowed_domains = ["www.regissalons.co.uk"]
13 start_urls = ['https://www.regissalons.co.uk/salon-locator?show-all=yes']
14
15 def convert_hours(self, hours):
16 hours = [x.strip() for x in hours]
17 hours = [x for x in hours if x]
18 for i in range(len(hours)):
19 converted_times = ''
20 if hours[i] != "Closed":
21 from_hr, to_hr = [hr.strip() for hr in hours[i].split('–')]
22 if re.search(regex_am, from_hr):
23 from_hr = re.sub(regex_am, '', from_hr)
24 hour_min = from_hr.split(':')
25 if len(hour_min[0]) < 2:
26 hour_min[0].zfill(2)
27 converted_times += (":".join(hour_min)) + ' - '
28 else:
29 from_hr = re.sub(regex_pm, '', from_hr)
30 hour_min = from_hr.split(':')
31 if int(hour_min[0]) < 12:
32 hour_min[0] = str(12 + int(hour_min[0]))
33 converted_times += (":".join(hour_min)) + ' - '
34
35 if re.search(regex_am, to_hr):
36 to_hr = re.sub(regex_am, '', to_hr)
37 hour_min = to_hr.split(':')
38 if len(hour_min[0]) < 2:
39 hour_min[0].zfill(2)
40 if int(hour_min[0]) == 12:
41 hour_min[0] = '00'
42 converted_times += (":".join(hour_min))
43 else:
44 to_hr = re.sub(regex_pm, '', to_hr)
45 hour_min = to_hr.split(':')
46 if int(hour_min[0]) < 12:
47 hour_min[0] = str(12 + int(hour_min[0]))
48 converted_times += (":".join(hour_min))
49 else:
50 converted_times += "off"
51 hours[i] = converted_times
52 days = ["Mo", "Tu", "We", "Th", "Fr", "Sa", "Su"]
53 hours = ''.join('{} {} '.format(*t) for t in zip(days, hours))
54 return hours
55
56 def parse_store(self, response):
57 phone = response.xpath(
58 '//a[@class="phone-tracked-link"]/text()').extract_first().strip()
59 lat = response.xpath(
60 '//div[@id="map-aside"]/@data-lat').extract_first()
61 lon = response.xpath(
62 '//div[@id="map-aside"]/@data-lng').extract_first()
63 hours = response.xpath(
64 '//div[@class="container"]//p[contains(., "am")'
65 ' or contains(., "Closed")]/text()').extract()
66 hours = self.convert_hours(hours)
67
68 yield GeojsonPointItem(
69 ref=response.url,
70 phone=phone,
71 lat=lat,
72 lon=lon,
73 opening_hours=hours,
74 website=response.url
75 )
76
77 def parse(self, response):
78 stores = response.xpath('//ul[@class="list"]//a/@href').extract()
79 for store in stores:
80 yield scrapy.Request(store, callback=self.parse_store)
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/regis_salon_uk.py b/locations/spiders/regis_salon_uk.py
--- a/locations/spiders/regis_salon_uk.py
+++ b/locations/spiders/regis_salon_uk.py
@@ -11,6 +11,7 @@
item_attributes = { 'brand': "Regis Salon" }
allowed_domains = ["www.regissalons.co.uk"]
start_urls = ['https://www.regissalons.co.uk/salon-locator?show-all=yes']
+ download_delay = 4.0
def convert_hours(self, hours):
hours = [x.strip() for x in hours]
@@ -21,20 +22,20 @@
from_hr, to_hr = [hr.strip() for hr in hours[i].split('–')]
if re.search(regex_am, from_hr):
from_hr = re.sub(regex_am, '', from_hr)
- hour_min = from_hr.split(':')
+ hour_min = re.split('[:.]', from_hr)
if len(hour_min[0]) < 2:
hour_min[0].zfill(2)
converted_times += (":".join(hour_min)) + ' - '
else:
from_hr = re.sub(regex_pm, '', from_hr)
- hour_min = from_hr.split(':')
+ hour_min = re.split('[:.]', from_hr)
if int(hour_min[0]) < 12:
hour_min[0] = str(12 + int(hour_min[0]))
converted_times += (":".join(hour_min)) + ' - '
if re.search(regex_am, to_hr):
to_hr = re.sub(regex_am, '', to_hr)
- hour_min = to_hr.split(':')
+ hour_min = re.split('[:.]', to_hr)
if len(hour_min[0]) < 2:
hour_min[0].zfill(2)
if int(hour_min[0]) == 12:
@@ -42,7 +43,7 @@
converted_times += (":".join(hour_min))
else:
to_hr = re.sub(regex_pm, '', to_hr)
- hour_min = to_hr.split(':')
+ hour_min = re.split('[:.]', to_hr)
if int(hour_min[0]) < 12:
hour_min[0] = str(12 + int(hour_min[0]))
converted_times += (":".join(hour_min))
@@ -77,4 +78,6 @@
def parse(self, response):
stores = response.xpath('//ul[@class="list"]//a/@href').extract()
for store in stores:
+ if '/salon-region/' in store:
+ continue
yield scrapy.Request(store, callback=self.parse_store)
|
{"golden_diff": "diff --git a/locations/spiders/regis_salon_uk.py b/locations/spiders/regis_salon_uk.py\n--- a/locations/spiders/regis_salon_uk.py\n+++ b/locations/spiders/regis_salon_uk.py\n@@ -11,6 +11,7 @@\n item_attributes = { 'brand': \"Regis Salon\" }\n allowed_domains = [\"www.regissalons.co.uk\"]\n start_urls = ['https://www.regissalons.co.uk/salon-locator?show-all=yes']\n+ download_delay = 4.0\n \n def convert_hours(self, hours):\n hours = [x.strip() for x in hours]\n@@ -21,20 +22,20 @@\n from_hr, to_hr = [hr.strip() for hr in hours[i].split('\u2013')]\n if re.search(regex_am, from_hr):\n from_hr = re.sub(regex_am, '', from_hr)\n- hour_min = from_hr.split(':')\n+ hour_min = re.split('[:.]', from_hr)\n if len(hour_min[0]) < 2:\n hour_min[0].zfill(2)\n converted_times += (\":\".join(hour_min)) + ' - '\n else:\n from_hr = re.sub(regex_pm, '', from_hr)\n- hour_min = from_hr.split(':')\n+ hour_min = re.split('[:.]', from_hr)\n if int(hour_min[0]) < 12:\n hour_min[0] = str(12 + int(hour_min[0]))\n converted_times += (\":\".join(hour_min)) + ' - '\n \n if re.search(regex_am, to_hr):\n to_hr = re.sub(regex_am, '', to_hr)\n- hour_min = to_hr.split(':')\n+ hour_min = re.split('[:.]', to_hr)\n if len(hour_min[0]) < 2:\n hour_min[0].zfill(2)\n if int(hour_min[0]) == 12:\n@@ -42,7 +43,7 @@\n converted_times += (\":\".join(hour_min))\n else:\n to_hr = re.sub(regex_pm, '', to_hr)\n- hour_min = to_hr.split(':')\n+ hour_min = re.split('[:.]', to_hr)\n if int(hour_min[0]) < 12:\n hour_min[0] = str(12 + int(hour_min[0]))\n converted_times += (\":\".join(hour_min))\n@@ -77,4 +78,6 @@\n def parse(self, response):\n stores = response.xpath('//ul[@class=\"list\"]//a/@href').extract()\n for store in stores:\n+ if '/salon-region/' in store:\n+ continue\n yield scrapy.Request(store, callback=self.parse_store)\n", "issue": "Spider regis_uk is broken\nDuring the global build at 2021-05-26-14-42-23, spider **regis_uk** failed with **33 features** and **35 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/regis_uk.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/regis_uk.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/regis_uk.geojson))\n", "before_files": [{"content": "import scrapy\nfrom locations.items import GeojsonPointItem\nimport re\n\nregex_am = r\"\\s?([Aa][Mm])\"\nregex_pm = r\"\\s?([Pp][Mm])\"\n\n\nclass RegisUKSpider(scrapy.Spider):\n name = \"regis_uk\"\n item_attributes = { 'brand': \"Regis Salon\" }\n allowed_domains = [\"www.regissalons.co.uk\"]\n start_urls = ['https://www.regissalons.co.uk/salon-locator?show-all=yes']\n\n def convert_hours(self, hours):\n hours = [x.strip() for x in hours]\n hours = [x for x in hours if x]\n for i in range(len(hours)):\n converted_times = ''\n if hours[i] != \"Closed\":\n from_hr, to_hr = [hr.strip() for hr in hours[i].split('\u2013')]\n if re.search(regex_am, from_hr):\n from_hr = re.sub(regex_am, '', from_hr)\n hour_min = from_hr.split(':')\n if len(hour_min[0]) < 2:\n hour_min[0].zfill(2)\n converted_times += (\":\".join(hour_min)) + ' - '\n else:\n from_hr = re.sub(regex_pm, '', from_hr)\n hour_min = from_hr.split(':')\n if int(hour_min[0]) < 12:\n hour_min[0] = str(12 + int(hour_min[0]))\n converted_times += (\":\".join(hour_min)) + ' - '\n\n if re.search(regex_am, to_hr):\n to_hr = re.sub(regex_am, '', to_hr)\n hour_min = to_hr.split(':')\n if len(hour_min[0]) < 2:\n hour_min[0].zfill(2)\n if 
int(hour_min[0]) == 12:\n hour_min[0] = '00'\n converted_times += (\":\".join(hour_min))\n else:\n to_hr = re.sub(regex_pm, '', to_hr)\n hour_min = to_hr.split(':')\n if int(hour_min[0]) < 12:\n hour_min[0] = str(12 + int(hour_min[0]))\n converted_times += (\":\".join(hour_min))\n else:\n converted_times += \"off\"\n hours[i] = converted_times\n days = [\"Mo\", \"Tu\", \"We\", \"Th\", \"Fr\", \"Sa\", \"Su\"]\n hours = ''.join('{} {} '.format(*t) for t in zip(days, hours))\n return hours\n\n def parse_store(self, response):\n phone = response.xpath(\n '//a[@class=\"phone-tracked-link\"]/text()').extract_first().strip()\n lat = response.xpath(\n '//div[@id=\"map-aside\"]/@data-lat').extract_first()\n lon = response.xpath(\n '//div[@id=\"map-aside\"]/@data-lng').extract_first()\n hours = response.xpath(\n '//div[@class=\"container\"]//p[contains(., \"am\")'\n ' or contains(., \"Closed\")]/text()').extract()\n hours = self.convert_hours(hours)\n\n yield GeojsonPointItem(\n ref=response.url,\n phone=phone,\n lat=lat,\n lon=lon,\n opening_hours=hours,\n website=response.url\n )\n\n def parse(self, response):\n stores = response.xpath('//ul[@class=\"list\"]//a/@href').extract()\n for store in stores:\n yield scrapy.Request(store, callback=self.parse_store)\n", "path": "locations/spiders/regis_salon_uk.py"}], "after_files": [{"content": "import scrapy\nfrom locations.items import GeojsonPointItem\nimport re\n\nregex_am = r\"\\s?([Aa][Mm])\"\nregex_pm = r\"\\s?([Pp][Mm])\"\n\n\nclass RegisUKSpider(scrapy.Spider):\n name = \"regis_uk\"\n item_attributes = { 'brand': \"Regis Salon\" }\n allowed_domains = [\"www.regissalons.co.uk\"]\n start_urls = ['https://www.regissalons.co.uk/salon-locator?show-all=yes']\n download_delay = 4.0\n\n def convert_hours(self, hours):\n hours = [x.strip() for x in hours]\n hours = [x for x in hours if x]\n for i in range(len(hours)):\n converted_times = ''\n if hours[i] != \"Closed\":\n from_hr, to_hr = [hr.strip() for hr in hours[i].split('\u2013')]\n if re.search(regex_am, from_hr):\n from_hr = re.sub(regex_am, '', from_hr)\n hour_min = re.split('[:.]', from_hr)\n if len(hour_min[0]) < 2:\n hour_min[0].zfill(2)\n converted_times += (\":\".join(hour_min)) + ' - '\n else:\n from_hr = re.sub(regex_pm, '', from_hr)\n hour_min = re.split('[:.]', from_hr)\n if int(hour_min[0]) < 12:\n hour_min[0] = str(12 + int(hour_min[0]))\n converted_times += (\":\".join(hour_min)) + ' - '\n\n if re.search(regex_am, to_hr):\n to_hr = re.sub(regex_am, '', to_hr)\n hour_min = re.split('[:.]', to_hr)\n if len(hour_min[0]) < 2:\n hour_min[0].zfill(2)\n if int(hour_min[0]) == 12:\n hour_min[0] = '00'\n converted_times += (\":\".join(hour_min))\n else:\n to_hr = re.sub(regex_pm, '', to_hr)\n hour_min = re.split('[:.]', to_hr)\n if int(hour_min[0]) < 12:\n hour_min[0] = str(12 + int(hour_min[0]))\n converted_times += (\":\".join(hour_min))\n else:\n converted_times += \"off\"\n hours[i] = converted_times\n days = [\"Mo\", \"Tu\", \"We\", \"Th\", \"Fr\", \"Sa\", \"Su\"]\n hours = ''.join('{} {} '.format(*t) for t in zip(days, hours))\n return hours\n\n def parse_store(self, response):\n phone = response.xpath(\n '//a[@class=\"phone-tracked-link\"]/text()').extract_first().strip()\n lat = response.xpath(\n '//div[@id=\"map-aside\"]/@data-lat').extract_first()\n lon = response.xpath(\n '//div[@id=\"map-aside\"]/@data-lng').extract_first()\n hours = response.xpath(\n '//div[@class=\"container\"]//p[contains(., \"am\")'\n ' or contains(., \"Closed\")]/text()').extract()\n hours = 
self.convert_hours(hours)\n\n yield GeojsonPointItem(\n ref=response.url,\n phone=phone,\n lat=lat,\n lon=lon,\n opening_hours=hours,\n website=response.url\n )\n\n def parse(self, response):\n stores = response.xpath('//ul[@class=\"list\"]//a/@href').extract()\n for store in stores:\n if '/salon-region/' in store:\n continue\n yield scrapy.Request(store, callback=self.parse_store)\n", "path": "locations/spiders/regis_salon_uk.py"}]}
| 1,379 | 613 |
gh_patches_debug_27291
|
rasdani/github-patches
|
git_diff
|
uccser__cs-unplugged-302
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Django Debug Toolbar doesn't display in local Docker development environment
Created from work in #193.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `csunplugged/config/settings/local.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Django settings for local development environment.
4
5 - Run in Debug mode
6 - Add custom dev application
7 - Add Django Debug Toolbar
8 - Add django-extensions
9 - Use console backend for emails
10 """
11
12 import socket
13 import os
14 from .base import * # noqa: F403
15
16 # DATABASE CONFIGURATION
17 # ----------------------------------------------------------------------------
18 # See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
19 DATABASES = {
20 'default': env.db('DATABASE_URL'), # noqa: F405
21 }
22 DATABASES['default']['ATOMIC_REQUESTS'] = True
23
24 # DEBUG
25 # ----------------------------------------------------------------------------
26 DEBUG = env.bool('DJANGO_DEBUG', default=True) # noqa: F405
27 TEMPLATES[0]['OPTIONS']['debug'] = DEBUG # noqa: F405
28
29 # SECRET CONFIGURATION
30 # ----------------------------------------------------------------------------
31 # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
32 # Note: This key only used for development and testing.
33 SECRET_KEY = env('DJANGO_SECRET_KEY', default='l@@)w&&%&u37+sjz^lsx^+29y_333oid3ygxzucar^8o(axo*f') # noqa: F405
34
35 # Mail settings
36 # ----------------------------------------------------------------------------
37
38 EMAIL_PORT = 1025
39
40 EMAIL_HOST = 'localhost'
41 EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend') # noqa: F405
42
43
44 # CACHING
45 # ----------------------------------------------------------------------------
46 CACHES = {
47 'default': {
48 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
49 'LOCATION': ''
50 }
51 }
52
53 # django-debug-toolbar
54 # ----------------------------------------------------------------------------
55 MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware', ] # noqa: F405
56 INSTALLED_APPS += ['debug_toolbar', ] # noqa: F405
57
58 INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
59 # tricks to have debug toolbar when developing with docker
60 if os.environ.get('USE_DOCKER') == 'yes':
61 ip = socket.gethostbyname(socket.gethostname())
62 INTERNAL_IPS += [ip[:-1] + '1']
63
64 DEBUG_TOOLBAR_CONFIG = {
65 'DISABLE_PANELS': [
66 'debug_toolbar.panels.redirects.RedirectsPanel',
67 ],
68 'SHOW_TEMPLATE_CONTEXT': True,
69 }
70
71 # django-extensions
72 # ----------------------------------------------------------------------------
73 INSTALLED_APPS += ['django_extensions', ]
74
75 # TESTING
76 # ----------------------------------------------------------------------------
77 TEST_RUNNER = 'django.test.runner.DiscoverRunner'
78
79
80 # Your local stuff: Below this line define 3rd party library settings
81 # ----------------------------------------------------------------------------
82 LOCAL_APPS + ['dev.apps.DevConfig'] # noqa: F405
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/csunplugged/config/settings/local.py b/csunplugged/config/settings/local.py
--- a/csunplugged/config/settings/local.py
+++ b/csunplugged/config/settings/local.py
@@ -9,8 +9,6 @@
- Use console backend for emails
"""
-import socket
-import os
from .base import * # noqa: F403
# DATABASE CONFIGURATION
@@ -56,16 +54,23 @@
INSTALLED_APPS += ['debug_toolbar', ] # noqa: F405
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
-# tricks to have debug toolbar when developing with docker
-if os.environ.get('USE_DOCKER') == 'yes':
- ip = socket.gethostbyname(socket.gethostname())
- INTERNAL_IPS += [ip[:-1] + '1']
+
+
+def show_django_debug_toolbar(request):
+ """Show Django Debug Toolbar in every request when running locally.
+
+ Args:
+ request: The request object.
+ """
+ return True
+
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
+ "SHOW_TOOLBAR_CALLBACK": show_django_debug_toolbar,
}
# django-extensions
@@ -79,4 +84,4 @@
# Your local stuff: Below this line define 3rd party library settings
# ----------------------------------------------------------------------------
-LOCAL_APPS + ['dev.apps.DevConfig'] # noqa: F405
+INSTALLED_APPS += ['dev.apps.DevConfig'] # noqa: F405
|
{"golden_diff": "diff --git a/csunplugged/config/settings/local.py b/csunplugged/config/settings/local.py\n--- a/csunplugged/config/settings/local.py\n+++ b/csunplugged/config/settings/local.py\n@@ -9,8 +9,6 @@\n - Use console backend for emails\n \"\"\"\n \n-import socket\n-import os\n from .base import * # noqa: F403\n \n # DATABASE CONFIGURATION\n@@ -56,16 +54,23 @@\n INSTALLED_APPS += ['debug_toolbar', ] # noqa: F405\n \n INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]\n-# tricks to have debug toolbar when developing with docker\n-if os.environ.get('USE_DOCKER') == 'yes':\n- ip = socket.gethostbyname(socket.gethostname())\n- INTERNAL_IPS += [ip[:-1] + '1']\n+\n+\n+def show_django_debug_toolbar(request):\n+ \"\"\"Show Django Debug Toolbar in every request when running locally.\n+\n+ Args:\n+ request: The request object.\n+ \"\"\"\n+ return True\n+\n \n DEBUG_TOOLBAR_CONFIG = {\n 'DISABLE_PANELS': [\n 'debug_toolbar.panels.redirects.RedirectsPanel',\n ],\n 'SHOW_TEMPLATE_CONTEXT': True,\n+ \"SHOW_TOOLBAR_CALLBACK\": show_django_debug_toolbar,\n }\n \n # django-extensions\n@@ -79,4 +84,4 @@\n \n # Your local stuff: Below this line define 3rd party library settings\n # ----------------------------------------------------------------------------\n-LOCAL_APPS + ['dev.apps.DevConfig'] # noqa: F405\n+INSTALLED_APPS += ['dev.apps.DevConfig'] # noqa: F405\n", "issue": "Django Debug Toolbar doesn't display in local Docker development environment\nCreated from work in #193.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDjango settings for local development environment.\n\n- Run in Debug mode\n- Add custom dev application\n- Add Django Debug Toolbar\n- Add django-extensions\n- Use console backend for emails\n\"\"\"\n\nimport socket\nimport os\nfrom .base import * # noqa: F403\n\n# DATABASE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\nDATABASES = {\n 'default': env.db('DATABASE_URL'), # noqa: F405\n}\nDATABASES['default']['ATOMIC_REQUESTS'] = True\n\n# DEBUG\n# ----------------------------------------------------------------------------\nDEBUG = env.bool('DJANGO_DEBUG', default=True) # noqa: F405\nTEMPLATES[0]['OPTIONS']['debug'] = DEBUG # noqa: F405\n\n# SECRET CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\n# Note: This key only used for development and testing.\nSECRET_KEY = env('DJANGO_SECRET_KEY', default='l@@)w&&%&u37+sjz^lsx^+29y_333oid3ygxzucar^8o(axo*f') # noqa: F405\n\n# Mail settings\n# ----------------------------------------------------------------------------\n\nEMAIL_PORT = 1025\n\nEMAIL_HOST = 'localhost'\nEMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend') # noqa: F405\n\n\n# CACHING\n# ----------------------------------------------------------------------------\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': ''\n }\n}\n\n# django-debug-toolbar\n# ----------------------------------------------------------------------------\nMIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware', ] # noqa: F405\nINSTALLED_APPS += ['debug_toolbar', ] # noqa: F405\n\nINTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]\n# tricks to have debug toolbar when developing with docker\nif os.environ.get('USE_DOCKER') == 'yes':\n ip = 
socket.gethostbyname(socket.gethostname())\n INTERNAL_IPS += [ip[:-1] + '1']\n\nDEBUG_TOOLBAR_CONFIG = {\n 'DISABLE_PANELS': [\n 'debug_toolbar.panels.redirects.RedirectsPanel',\n ],\n 'SHOW_TEMPLATE_CONTEXT': True,\n}\n\n# django-extensions\n# ----------------------------------------------------------------------------\nINSTALLED_APPS += ['django_extensions', ]\n\n# TESTING\n# ----------------------------------------------------------------------------\nTEST_RUNNER = 'django.test.runner.DiscoverRunner'\n\n\n# Your local stuff: Below this line define 3rd party library settings\n# ----------------------------------------------------------------------------\nLOCAL_APPS + ['dev.apps.DevConfig'] # noqa: F405\n", "path": "csunplugged/config/settings/local.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDjango settings for local development environment.\n\n- Run in Debug mode\n- Add custom dev application\n- Add Django Debug Toolbar\n- Add django-extensions\n- Use console backend for emails\n\"\"\"\n\nfrom .base import * # noqa: F403\n\n# DATABASE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\nDATABASES = {\n 'default': env.db('DATABASE_URL'), # noqa: F405\n}\nDATABASES['default']['ATOMIC_REQUESTS'] = True\n\n# DEBUG\n# ----------------------------------------------------------------------------\nDEBUG = env.bool('DJANGO_DEBUG', default=True) # noqa: F405\nTEMPLATES[0]['OPTIONS']['debug'] = DEBUG # noqa: F405\n\n# SECRET CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\n# Note: This key only used for development and testing.\nSECRET_KEY = env('DJANGO_SECRET_KEY', default='l@@)w&&%&u37+sjz^lsx^+29y_333oid3ygxzucar^8o(axo*f') # noqa: F405\n\n# Mail settings\n# ----------------------------------------------------------------------------\n\nEMAIL_PORT = 1025\n\nEMAIL_HOST = 'localhost'\nEMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend') # noqa: F405\n\n\n# CACHING\n# ----------------------------------------------------------------------------\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': ''\n }\n}\n\n# django-debug-toolbar\n# ----------------------------------------------------------------------------\nMIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware', ] # noqa: F405\nINSTALLED_APPS += ['debug_toolbar', ] # noqa: F405\n\nINTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]\n\n\ndef show_django_debug_toolbar(request):\n \"\"\"Show Django Debug Toolbar in every request when running locally.\n\n Args:\n request: The request object.\n \"\"\"\n return True\n\n\nDEBUG_TOOLBAR_CONFIG = {\n 'DISABLE_PANELS': [\n 'debug_toolbar.panels.redirects.RedirectsPanel',\n ],\n 'SHOW_TEMPLATE_CONTEXT': True,\n \"SHOW_TOOLBAR_CALLBACK\": show_django_debug_toolbar,\n}\n\n# django-extensions\n# ----------------------------------------------------------------------------\nINSTALLED_APPS += ['django_extensions', ]\n\n# TESTING\n# ----------------------------------------------------------------------------\nTEST_RUNNER = 'django.test.runner.DiscoverRunner'\n\n\n# Your local stuff: Below this line define 3rd party library settings\n# ----------------------------------------------------------------------------\nINSTALLED_APPS += ['dev.apps.DevConfig'] # noqa: 
F405\n", "path": "csunplugged/config/settings/local.py"}]}
| 1,046 | 373 |
gh_patches_debug_12025
|
rasdani/github-patches
|
git_diff
|
Showndarya__Hacktoberfest-435
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Travis test ignore first letter of filename for some reason
I'll try and figure out why, thought about simply renaming every file in the travis script but that requires alot of work and overhead for little gain, it is certainly doable, you have to configure the git on the travis instance and make a new commit etc.
Might as well have a cron job or something to it recursively and periodically over the entirety of the repo and make a single commit...
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `.travis.py`
Content:
```
1 import json
2 import os
3 import re
4 import subprocess
5
6 # Get a diff between master and current.
7 try:
8 commit_range = os.environ["TRAVIS_COMMIT_RANGE"]
9 changed_files = subprocess.check_output(["git", "diff", "--name-only", commit_range])
10 except KeyError:
11 print("🔥 This should be run on Travis. Otherwise make sure TRAVIS_BRANCH is set.")
12 exit(1)
13
14 # Filter JSON files only.
15 changed_files_json = []
16 if changed_files:
17 changed_files = changed_files.decode()
18 for changed_file in changed_files.split('\n'):
19 if re.search(r"\.json$", changed_file):
20 changed_files_json.append(changed_file)
21
22
23 # Iterate over list of changed JSON files.
24 for changed_file_json in changed_files_json:
25 print(f"Checking file {changed_file_json}...")
26 there_was_an_error = False
27
28 if not changed_file_json[0].isupper():
29 there_was_an_error = True
30 print("🔥 File name not capitalized.")
31
32 try:
33 with open(changed_file_json) as data_file:
34 file_content = json.loads(data_file.read())
35 except json.decoder.JSONDecodeError:
36 there_was_an_error = True
37 print("🔥 JSON could not be parsed.")
38
39 if 'word' not in file_content:
40 there_was_an_error = True
41 print("🔥 Key 'word' not found.")
42
43 if not file_content["word"]:
44 there_was_an_error = True
45 print("🔥 Value for 'word' appears to be empty.")
46
47 if 'definitions' not in file_content:
48 there_was_an_error = True
49 print("🔥 Key 'definitions' not found.")
50
51 if not file_content["definitions"]:
52 there_was_an_error = True
53 print("🔥 Value for 'definitions' appears to be empty.")
54
55 if 'parts-of-speech' not in file_content:
56 there_was_an_error = True
57 print("🔥 Key 'parts-of-speech' not found.")
58
59 if not file_content["parts-of-speech"]:
60 there_was_an_error = True
61 print("🔥 Value for 'parts-of-speech' appears to be empty.")
62
63 if there_was_an_error:
64 exit(1)
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/.travis.py b/.travis.py
--- a/.travis.py
+++ b/.travis.py
@@ -19,13 +19,13 @@
if re.search(r"\.json$", changed_file):
changed_files_json.append(changed_file)
-
+
# Iterate over list of changed JSON files.
for changed_file_json in changed_files_json:
print(f"Checking file {changed_file_json}...")
there_was_an_error = False
- if not changed_file_json[0].isupper():
+ if not os.path.basename(changed_file_json)[0].isupper():
there_was_an_error = True
print("🔥 File name not capitalized.")
|
{"golden_diff": "diff --git a/.travis.py b/.travis.py\n--- a/.travis.py\n+++ b/.travis.py\n@@ -19,13 +19,13 @@\n if re.search(r\"\\.json$\", changed_file):\n changed_files_json.append(changed_file)\n \n-\n+ \n # Iterate over list of changed JSON files.\n for changed_file_json in changed_files_json:\n print(f\"Checking file {changed_file_json}...\")\n there_was_an_error = False\n \n- if not changed_file_json[0].isupper():\n+ if not os.path.basename(changed_file_json)[0].isupper():\n there_was_an_error = True\n print(\"\ud83d\udd25 File name not capitalized.\")\n", "issue": "Travis test ignore first letter of filename for some reason\nI'll try and figure out why, thought about simply renaming every file in the travis script but that requires alot of work and overhead for little gain, it is certainly doable, you have to configure the git on the travis instance and make a new commit etc.\r\nMight as well have a cron job or something to it recursively and periodically over the entirety of the repo and make a single commit...\n", "before_files": [{"content": "import json\nimport os\nimport re\nimport subprocess\n\n# Get a diff between master and current.\ntry:\n commit_range = os.environ[\"TRAVIS_COMMIT_RANGE\"]\n changed_files = subprocess.check_output([\"git\", \"diff\", \"--name-only\", commit_range])\nexcept KeyError:\n print(\"\ud83d\udd25 This should be run on Travis. Otherwise make sure TRAVIS_BRANCH is set.\")\n exit(1)\n\n# Filter JSON files only.\nchanged_files_json = []\nif changed_files:\n changed_files = changed_files.decode()\n for changed_file in changed_files.split('\\n'):\n if re.search(r\"\\.json$\", changed_file):\n changed_files_json.append(changed_file)\n\n\n# Iterate over list of changed JSON files.\nfor changed_file_json in changed_files_json:\n print(f\"Checking file {changed_file_json}...\")\n there_was_an_error = False\n\n if not changed_file_json[0].isupper():\n there_was_an_error = True\n print(\"\ud83d\udd25 File name not capitalized.\")\n\n try:\n with open(changed_file_json) as data_file:\n file_content = json.loads(data_file.read())\n except json.decoder.JSONDecodeError:\n there_was_an_error = True\n print(\"\ud83d\udd25 JSON could not be parsed.\")\n\n if 'word' not in file_content:\n there_was_an_error = True\n print(\"\ud83d\udd25 Key 'word' not found.\")\n\n if not file_content[\"word\"]:\n there_was_an_error = True\n print(\"\ud83d\udd25 Value for 'word' appears to be empty.\")\n\n if 'definitions' not in file_content:\n there_was_an_error = True\n print(\"\ud83d\udd25 Key 'definitions' not found.\")\n\n if not file_content[\"definitions\"]:\n there_was_an_error = True\n print(\"\ud83d\udd25 Value for 'definitions' appears to be empty.\")\n\n if 'parts-of-speech' not in file_content:\n there_was_an_error = True\n print(\"\ud83d\udd25 Key 'parts-of-speech' not found.\")\n\n if not file_content[\"parts-of-speech\"]:\n there_was_an_error = True\n print(\"\ud83d\udd25 Value for 'parts-of-speech' appears to be empty.\")\n\n if there_was_an_error:\n exit(1)\n", "path": ".travis.py"}], "after_files": [{"content": "import json\nimport os\nimport re\nimport subprocess\n\n# Get a diff between master and current.\ntry:\n commit_range = os.environ[\"TRAVIS_COMMIT_RANGE\"]\n changed_files = subprocess.check_output([\"git\", \"diff\", \"--name-only\", commit_range])\nexcept KeyError:\n print(\"\ud83d\udd25 This should be run on Travis. 
Otherwise make sure TRAVIS_BRANCH is set.\")\n exit(1)\n\n# Filter JSON files only.\nchanged_files_json = []\nif changed_files:\n changed_files = changed_files.decode()\n for changed_file in changed_files.split('\\n'):\n if re.search(r\"\\.json$\", changed_file):\n changed_files_json.append(changed_file)\n\n \n# Iterate over list of changed JSON files.\nfor changed_file_json in changed_files_json:\n print(f\"Checking file {changed_file_json}...\")\n there_was_an_error = False\n\n if not os.path.basename(changed_file_json)[0].isupper():\n there_was_an_error = True\n print(\"\ud83d\udd25 File name not capitalized.\")\n\n try:\n with open(changed_file_json) as data_file:\n file_content = json.loads(data_file.read())\n except json.decoder.JSONDecodeError:\n there_was_an_error = True\n print(\"\ud83d\udd25 JSON could not be parsed.\")\n\n if 'word' not in file_content:\n there_was_an_error = True\n print(\"\ud83d\udd25 Key 'word' not found.\")\n\n if not file_content[\"word\"]:\n there_was_an_error = True\n print(\"\ud83d\udd25 Value for 'word' appears to be empty.\")\n\n if 'definitions' not in file_content:\n there_was_an_error = True\n print(\"\ud83d\udd25 Key 'definitions' not found.\")\n\n if not file_content[\"definitions\"]:\n there_was_an_error = True\n print(\"\ud83d\udd25 Value for 'definitions' appears to be empty.\")\n\n if 'parts-of-speech' not in file_content:\n there_was_an_error = True\n print(\"\ud83d\udd25 Key 'parts-of-speech' not found.\")\n\n if not file_content[\"parts-of-speech\"]:\n there_was_an_error = True\n print(\"\ud83d\udd25 Value for 'parts-of-speech' appears to be empty.\")\n\n if there_was_an_error:\n exit(1)\n", "path": ".travis.py"}]}
| 942 | 152 |
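The one-line change in this record matters because `git diff --name-only` reports paths relative to the repository root, so `changed_file_json[0]` tests the first letter of the directory, not of the file. A quick sketch of the behaviour (the path is hypothetical):

```python
import os

changed_file_json = "words/Apple.json"   # hypothetical path from `git diff --name-only`

# Old check: inspects 'w' from the directory prefix, so a correctly
# capitalized file is still reported as an error.
print(changed_file_json[0].isupper())                     # False

# Patched check: inspects 'A' from the file name itself.
print(os.path.basename(changed_file_json)[0].isupper())   # True
```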
gh_patches_debug_1870
|
rasdani/github-patches
|
git_diff
|
dbt-labs__dbt-core-1743
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support for Snowflake Secure Views
### Adding support for Secure View in Snowflake
When using the Materialize feature where setting the type of materialization, adding secure-view to the {{ config(materialized='secure-view') }} would be beneficial.
### Current Work-around
Currently the solution for Snowflake secure views is running post-hook events to set the targeted views as secure, example: `alter view sv_mySecureTest set secure;`
This works, and each view that needs to be secured will need to be added to the post-hook event.
### Affects only Snowflake
This feature is specific to the Snowflake Cloud Data warehouse.
[https://docs.snowflake.net/manuals/user-guide/views-secure.html](url)
### This will help DBT Snowflake Developer / Non Developers
When creating a secure view in Snowflake, a developer can use 2 syntax commands
1. CREATE OR REPLACE SECURE VIEW...
2. Alter view <view_name> Set Secure
The first method will allow non-dbt user to render the DDL with the secure declaration as part of the DDL, the second statement is added to the end of the generated DDL however it may be ignored by developers unfamiliar with Snowflake Syntax, causing possible security issues, allowing unauthorized access to the View DDL by Read-Only roles in Snowflake.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/snowflake/dbt/adapters/snowflake/impl.py`
Content:
```
1 from dbt.adapters.sql import SQLAdapter
2 from dbt.adapters.snowflake import SnowflakeConnectionManager
3 from dbt.adapters.snowflake import SnowflakeRelation
4 from dbt.utils import filter_null_values
5
6
7 class SnowflakeAdapter(SQLAdapter):
8 Relation = SnowflakeRelation
9 ConnectionManager = SnowflakeConnectionManager
10
11 AdapterSpecificConfigs = frozenset(
12 {"transient", "cluster_by", "automatic_clustering"}
13 )
14
15 @classmethod
16 def date_function(cls):
17 return "CURRENT_TIMESTAMP()"
18
19 @classmethod
20 def _catalog_filter_table(cls, table, manifest):
21 # On snowflake, users can set QUOTED_IDENTIFIERS_IGNORE_CASE, so force
22 # the column names to their lowercased forms.
23 lowered = table.rename(
24 column_names=[c.lower() for c in table.column_names]
25 )
26 return super()._catalog_filter_table(lowered, manifest)
27
28 def _make_match_kwargs(self, database, schema, identifier):
29 quoting = self.config.quoting
30 if identifier is not None and quoting["identifier"] is False:
31 identifier = identifier.upper()
32
33 if schema is not None and quoting["schema"] is False:
34 schema = schema.upper()
35
36 if database is not None and quoting["database"] is False:
37 database = database.upper()
38
39 return filter_null_values(
40 {"identifier": identifier, "schema": schema, "database": database}
41 )
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugins/snowflake/dbt/adapters/snowflake/impl.py b/plugins/snowflake/dbt/adapters/snowflake/impl.py
--- a/plugins/snowflake/dbt/adapters/snowflake/impl.py
+++ b/plugins/snowflake/dbt/adapters/snowflake/impl.py
@@ -9,7 +9,7 @@
ConnectionManager = SnowflakeConnectionManager
AdapterSpecificConfigs = frozenset(
- {"transient", "cluster_by", "automatic_clustering"}
+ {"transient", "cluster_by", "automatic_clustering", "secure"}
)
@classmethod
|
{"golden_diff": "diff --git a/plugins/snowflake/dbt/adapters/snowflake/impl.py b/plugins/snowflake/dbt/adapters/snowflake/impl.py\n--- a/plugins/snowflake/dbt/adapters/snowflake/impl.py\n+++ b/plugins/snowflake/dbt/adapters/snowflake/impl.py\n@@ -9,7 +9,7 @@\n ConnectionManager = SnowflakeConnectionManager\n \n AdapterSpecificConfigs = frozenset(\n- {\"transient\", \"cluster_by\", \"automatic_clustering\"}\n+ {\"transient\", \"cluster_by\", \"automatic_clustering\", \"secure\"}\n )\n \n @classmethod\n", "issue": "Support for Snowflake Secure Views\n### Adding support for Secure View in Snowflake\r\nWhen using the Materialize feature where setting the type of materialization, adding secure-view to the {{ config(materialized='secure-view') }} would be beneficial.\r\n\r\n### Current Work-around\r\nCurrently the solution for Snowflake secure views is running post-hook events to set the targeted views as secure, example: `alter view sv_mySecureTest set secure;`\r\nThis works, and each view that needs to be secured will need to be added to the post-hook event.\r\n\r\n### Affects only Snowflake\r\nThis feature is specific to the Snowflake Cloud Data warehouse.\r\n[https://docs.snowflake.net/manuals/user-guide/views-secure.html](url)\r\n\r\n### This will help DBT Snowflake Developer / Non Developers\r\nWhen creating a secure view in Snowflake, a developer can use 2 syntax commands\r\n\r\n1. CREATE OR REPLACE SECURE VIEW...\r\n2. Alter view <view_name> Set Secure\r\n\r\nThe first method will allow non-dbt user to render the DDL with the secure declaration as part of the DDL, the second statement is added to the end of the generated DDL however it may be ignored by developers unfamiliar with Snowflake Syntax, causing possible security issues, allowing unauthorized access to the View DDL by Read-Only roles in Snowflake.\n", "before_files": [{"content": "from dbt.adapters.sql import SQLAdapter\nfrom dbt.adapters.snowflake import SnowflakeConnectionManager\nfrom dbt.adapters.snowflake import SnowflakeRelation\nfrom dbt.utils import filter_null_values\n\n\nclass SnowflakeAdapter(SQLAdapter):\n Relation = SnowflakeRelation\n ConnectionManager = SnowflakeConnectionManager\n\n AdapterSpecificConfigs = frozenset(\n {\"transient\", \"cluster_by\", \"automatic_clustering\"}\n )\n\n @classmethod\n def date_function(cls):\n return \"CURRENT_TIMESTAMP()\"\n\n @classmethod\n def _catalog_filter_table(cls, table, manifest):\n # On snowflake, users can set QUOTED_IDENTIFIERS_IGNORE_CASE, so force\n # the column names to their lowercased forms.\n lowered = table.rename(\n column_names=[c.lower() for c in table.column_names]\n )\n return super()._catalog_filter_table(lowered, manifest)\n\n def _make_match_kwargs(self, database, schema, identifier):\n quoting = self.config.quoting\n if identifier is not None and quoting[\"identifier\"] is False:\n identifier = identifier.upper()\n\n if schema is not None and quoting[\"schema\"] is False:\n schema = schema.upper()\n\n if database is not None and quoting[\"database\"] is False:\n database = database.upper()\n\n return filter_null_values(\n {\"identifier\": identifier, \"schema\": schema, \"database\": database}\n )\n", "path": "plugins/snowflake/dbt/adapters/snowflake/impl.py"}], "after_files": [{"content": "from dbt.adapters.sql import SQLAdapter\nfrom dbt.adapters.snowflake import SnowflakeConnectionManager\nfrom dbt.adapters.snowflake import SnowflakeRelation\nfrom dbt.utils import filter_null_values\n\n\nclass SnowflakeAdapter(SQLAdapter):\n Relation = 
SnowflakeRelation\n ConnectionManager = SnowflakeConnectionManager\n\n AdapterSpecificConfigs = frozenset(\n {\"transient\", \"cluster_by\", \"automatic_clustering\", \"secure\"}\n )\n\n @classmethod\n def date_function(cls):\n return \"CURRENT_TIMESTAMP()\"\n\n @classmethod\n def _catalog_filter_table(cls, table, manifest):\n # On snowflake, users can set QUOTED_IDENTIFIERS_IGNORE_CASE, so force\n # the column names to their lowercased forms.\n lowered = table.rename(\n column_names=[c.lower() for c in table.column_names]\n )\n return super()._catalog_filter_table(lowered, manifest)\n\n def _make_match_kwargs(self, database, schema, identifier):\n quoting = self.config.quoting\n if identifier is not None and quoting[\"identifier\"] is False:\n identifier = identifier.upper()\n\n if schema is not None and quoting[\"schema\"] is False:\n schema = schema.upper()\n\n if database is not None and quoting[\"database\"] is False:\n database = database.upper()\n\n return filter_null_values(\n {\"identifier\": identifier, \"schema\": schema, \"database\": database}\n )\n", "path": "plugins/snowflake/dbt/adapters/snowflake/impl.py"}]}
| 921 | 136 |
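Registering `secure` in `AdapterSpecificConfigs` makes the key a recognized model config for the Snowflake adapter, so the view materialization can act on it. The snippet below is a simplified sketch of that whitelist idea, not the actual dbt code path, and the model config values are made up:

```python
# Simplified illustration of an adapter-specific config whitelist.
AdapterSpecificConfigs = frozenset(
    {"transient", "cluster_by", "automatic_clustering", "secure"}
)

# Hypothetical model config: with "secure" whitelisted it survives filtering,
# and the view materialization can render CREATE OR REPLACE SECURE VIEW.
model_config = {"materialized": "view", "secure": True, "tags": ["pii"]}
adapter_config = {key: val for key, val in model_config.items()
                  if key in AdapterSpecificConfigs}
print(adapter_config)   # {'secure': True}
```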
gh_patches_debug_1805
|
rasdani/github-patches
|
git_diff
|
Mailu__Mailu-840
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Document the new setup utility
Title says all
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup/server.py`
Content:
```
1 import flask
2 import flask_bootstrap
3 import redis
4 import json
5 import os
6 import jinja2
7 import uuid
8 import string
9 import random
10 import ipaddress
11 import hashlib
12
13
14 version = os.getenv("this_version")
15 static_url_path = "/" + version + "/static"
16 app = flask.Flask(__name__, static_url_path=static_url_path)
17 flask_bootstrap.Bootstrap(app)
18 db = redis.StrictRedis(host='redis', port=6379, db=0)
19
20
21 def render_flavor(flavor, template, data):
22 return flask.render_template(
23 os.path.join(flavor, template),
24 **data
25 )
26
27
28 @app.add_template_global
29 def secret(length=16):
30 charset = string.ascii_uppercase + string.digits
31 return ''.join(
32 random.SystemRandom().choice(charset)
33 for _ in range(length)
34 )
35
36
37 def build_app(path):
38
39 app.jinja_env.trim_blocks = True
40 app.jinja_env.lstrip_blocks = True
41
42 @app.context_processor
43 def app_context():
44 return dict(versions=os.getenv("VERSIONS","master").split(','))
45
46 prefix_bp = flask.Blueprint(version, __name__)
47 prefix_bp.jinja_loader = jinja2.ChoiceLoader([
48 jinja2.FileSystemLoader(os.path.join(path, "templates")),
49 jinja2.FileSystemLoader(os.path.join(path, "flavors"))
50 ])
51
52 root_bp = flask.Blueprint("root", __name__)
53 root_bp.jinja_loader = jinja2.ChoiceLoader([
54 jinja2.FileSystemLoader(os.path.join(path, "templates")),
55 jinja2.FileSystemLoader(os.path.join(path, "flavors"))
56 ])
57
58 @prefix_bp.context_processor
59 @root_bp.context_processor
60 def bp_context(version=version):
61 return dict(version=version)
62
63 @prefix_bp.route("/")
64 @root_bp.route("/")
65 def wizard():
66 return flask.render_template('wizard.html')
67
68 @prefix_bp.route("/submit_flavor", methods=["POST"])
69 @root_bp.route("/submit_flavor", methods=["POST"])
70 def submit_flavor():
71 data = flask.request.form.copy()
72 steps = sorted(os.listdir(os.path.join(path, "templates", "steps", data["flavor"])))
73 return flask.render_template('wizard.html', flavor=data["flavor"], steps=steps)
74
75 @prefix_bp.route("/submit", methods=["POST"])
76 @root_bp.route("/submit", methods=["POST"])
77 def submit():
78 data = flask.request.form.copy()
79 data['uid'] = str(uuid.uuid4())
80 data['dns'] = str(ipaddress.IPv4Network(data['subnet'])[-2])
81 db.set(data['uid'], json.dumps(data))
82 return flask.redirect(flask.url_for('.setup', uid=data['uid']))
83
84 @prefix_bp.route("/setup/<uid>", methods=["GET"])
85 @root_bp.route("/setup/<uid>", methods=["GET"])
86 def setup(uid):
87 data = json.loads(db.get(uid))
88 flavor = data.get("flavor", "compose")
89 rendered = render_flavor(flavor, "setup.html", data)
90 return flask.render_template("setup.html", contents=rendered)
91
92 @prefix_bp.route("/file/<uid>/<filepath>", methods=["GET"])
93 @root_bp.route("/file/<uid>/<filepath>", methods=["GET"])
94 def file(uid, filepath):
95 data = json.loads(db.get(uid))
96 flavor = data.get("flavor", "compose")
97 return flask.Response(
98 render_flavor(flavor, filepath, data),
99 mimetype="application/text"
100 )
101
102 app.register_blueprint(prefix_bp, url_prefix="/{}".format(version))
103 app.register_blueprint(root_bp)
104
105
106 if __name__ == "__main__":
107 build_app("/tmp/mailutest")
108 app.run(debug=True)
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup/server.py b/setup/server.py
--- a/setup/server.py
+++ b/setup/server.py
@@ -11,7 +11,7 @@
import hashlib
-version = os.getenv("this_version")
+version = os.getenv("this_version", "master")
static_url_path = "/" + version + "/static"
app = flask.Flask(__name__, static_url_path=static_url_path)
flask_bootstrap.Bootstrap(app)
|
{"golden_diff": "diff --git a/setup/server.py b/setup/server.py\n--- a/setup/server.py\n+++ b/setup/server.py\n@@ -11,7 +11,7 @@\n import hashlib\n \n \n-version = os.getenv(\"this_version\")\n+version = os.getenv(\"this_version\", \"master\")\n static_url_path = \"/\" + version + \"/static\"\n app = flask.Flask(__name__, static_url_path=static_url_path)\n flask_bootstrap.Bootstrap(app)\n", "issue": "Document the new setup utility\nTitle says all\n", "before_files": [{"content": "import flask\nimport flask_bootstrap\nimport redis\nimport json\nimport os\nimport jinja2\nimport uuid\nimport string\nimport random\nimport ipaddress\nimport hashlib\n\n\nversion = os.getenv(\"this_version\")\nstatic_url_path = \"/\" + version + \"/static\"\napp = flask.Flask(__name__, static_url_path=static_url_path)\nflask_bootstrap.Bootstrap(app)\ndb = redis.StrictRedis(host='redis', port=6379, db=0)\n\n\ndef render_flavor(flavor, template, data):\n return flask.render_template(\n os.path.join(flavor, template),\n **data\n )\n\n\[email protected]_template_global\ndef secret(length=16):\n charset = string.ascii_uppercase + string.digits\n return ''.join(\n random.SystemRandom().choice(charset)\n for _ in range(length)\n )\n\n\ndef build_app(path):\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n\n @app.context_processor\n def app_context():\n return dict(versions=os.getenv(\"VERSIONS\",\"master\").split(','))\n\n prefix_bp = flask.Blueprint(version, __name__)\n prefix_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n root_bp = flask.Blueprint(\"root\", __name__)\n root_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n @prefix_bp.context_processor\n @root_bp.context_processor\n def bp_context(version=version):\n return dict(version=version)\n\n @prefix_bp.route(\"/\")\n @root_bp.route(\"/\")\n def wizard():\n return flask.render_template('wizard.html')\n\n @prefix_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n @root_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n def submit_flavor():\n data = flask.request.form.copy()\n steps = sorted(os.listdir(os.path.join(path, \"templates\", \"steps\", data[\"flavor\"])))\n return flask.render_template('wizard.html', flavor=data[\"flavor\"], steps=steps)\n\n @prefix_bp.route(\"/submit\", methods=[\"POST\"])\n @root_bp.route(\"/submit\", methods=[\"POST\"])\n def submit():\n data = flask.request.form.copy()\n data['uid'] = str(uuid.uuid4())\n data['dns'] = str(ipaddress.IPv4Network(data['subnet'])[-2])\n db.set(data['uid'], json.dumps(data))\n return flask.redirect(flask.url_for('.setup', uid=data['uid']))\n\n @prefix_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n @root_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n def setup(uid):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n rendered = render_flavor(flavor, \"setup.html\", data)\n return flask.render_template(\"setup.html\", contents=rendered)\n\n @prefix_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n @root_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n def file(uid, filepath):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n return flask.Response(\n render_flavor(flavor, filepath, data),\n mimetype=\"application/text\"\n )\n\n app.register_blueprint(prefix_bp, 
url_prefix=\"/{}\".format(version))\n app.register_blueprint(root_bp)\n\n\nif __name__ == \"__main__\":\n build_app(\"/tmp/mailutest\")\n app.run(debug=True)\n", "path": "setup/server.py"}], "after_files": [{"content": "import flask\nimport flask_bootstrap\nimport redis\nimport json\nimport os\nimport jinja2\nimport uuid\nimport string\nimport random\nimport ipaddress\nimport hashlib\n\n\nversion = os.getenv(\"this_version\", \"master\")\nstatic_url_path = \"/\" + version + \"/static\"\napp = flask.Flask(__name__, static_url_path=static_url_path)\nflask_bootstrap.Bootstrap(app)\ndb = redis.StrictRedis(host='redis', port=6379, db=0)\n\n\ndef render_flavor(flavor, template, data):\n return flask.render_template(\n os.path.join(flavor, template),\n **data\n )\n\n\[email protected]_template_global\ndef secret(length=16):\n charset = string.ascii_uppercase + string.digits\n return ''.join(\n random.SystemRandom().choice(charset)\n for _ in range(length)\n )\n\n\ndef build_app(path):\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n\n @app.context_processor\n def app_context():\n return dict(versions=os.getenv(\"VERSIONS\",\"master\").split(','))\n\n prefix_bp = flask.Blueprint(version, __name__)\n prefix_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n root_bp = flask.Blueprint(\"root\", __name__)\n root_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n @prefix_bp.context_processor\n @root_bp.context_processor\n def bp_context(version=version):\n return dict(version=version)\n\n @prefix_bp.route(\"/\")\n @root_bp.route(\"/\")\n def wizard():\n return flask.render_template('wizard.html')\n\n @prefix_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n @root_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n def submit_flavor():\n data = flask.request.form.copy()\n steps = sorted(os.listdir(os.path.join(path, \"templates\", \"steps\", data[\"flavor\"])))\n return flask.render_template('wizard.html', flavor=data[\"flavor\"], steps=steps)\n\n @prefix_bp.route(\"/submit\", methods=[\"POST\"])\n @root_bp.route(\"/submit\", methods=[\"POST\"])\n def submit():\n data = flask.request.form.copy()\n data['uid'] = str(uuid.uuid4())\n data['dns'] = str(ipaddress.IPv4Network(data['subnet'])[-2])\n db.set(data['uid'], json.dumps(data))\n return flask.redirect(flask.url_for('.setup', uid=data['uid']))\n\n @prefix_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n @root_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n def setup(uid):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n rendered = render_flavor(flavor, \"setup.html\", data)\n return flask.render_template(\"setup.html\", contents=rendered)\n\n @prefix_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n @root_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n def file(uid, filepath):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n return flask.Response(\n render_flavor(flavor, filepath, data),\n mimetype=\"application/text\"\n )\n\n app.register_blueprint(prefix_bp, url_prefix=\"/{}\".format(version))\n app.register_blueprint(root_bp)\n\n\nif __name__ == \"__main__\":\n build_app(\"/tmp/mailutest\")\n app.run(debug=True)\n", "path": "setup/server.py"}]}
| 1,296 | 93 |
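The Mailu fix above is about import-time safety: without a default, `os.getenv("this_version")` returns `None` when the variable is unset, and the `"/" + version + "/static"` concatenation raises `TypeError` before Flask even starts. A small sketch of both behaviours:

```python
import os

os.environ.pop("this_version", None)            # simulate the unset variable

version = os.getenv("this_version")             # old call -> None
try:
    static_url_path = "/" + version + "/static"
except TypeError as exc:
    print("old behaviour:", exc)

version = os.getenv("this_version", "master")   # patched call falls back to "master"
print("new behaviour:", "/" + version + "/static")   # -> /master/static
```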
gh_patches_debug_1293
|
rasdani/github-patches
|
git_diff
|
CTPUG__wafer-643
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for Django 4.0
Currently failing tests (See #632)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from glob import glob
2 import subprocess
3
4 from setuptools import find_packages, setup
5
6 REQUIRES = [
7 'Django>=2.2,<4',
8 'bleach',
9 'bleach-allowlist',
10 'diff-match-patch',
11 'django-bakery>=0.12.0',
12 'django-crispy-forms',
13 'django-markitup>=4.0.0',
14 'django-registration-redux',
15 'django-reversion',
16 'django-select2',
17 'djangorestframework',
18 'drf-extensions>=0.5.0',
19 'icalendar>=4.0',
20 'jsonfield',
21 'markdown>=2.5',
22 'pillow',
23 'py3dns',
24 'pyLibravatar',
25 'pytz',
26 'requests',
27 ]
28
29 SOURCES = []
30
31
32 with open('README.rst', 'r') as f:
33 long_description = f.read()
34
35
36 def compile_translations():
37 try:
38 subprocess.check_call(['./manage.py', 'compilemessages'])
39 except subprocess.CalledProcessError:
40 print("WARNING: cannot compile translations.")
41 return glob('wafer/locale/*/LC_MESSAGES/django.mo')
42
43
44 setup(
45 name="wafer",
46 version="0.13.1a",
47 url='http://github.com/CTPUG/wafer',
48 license='ISC',
49 description="A wafer-thin Django library for running small conferences.",
50 long_description=long_description,
51 long_description_content_type="text/x-rst",
52 author='CTPUG',
53 author_email='[email protected]',
54 packages=find_packages(),
55 include_package_data=True,
56 install_requires=REQUIRES,
57 dependency_links=SOURCES,
58 data_files=[
59 ('locale', compile_translations()),
60 ],
61 setup_requires=[
62 # Add setuptools-git, so we get correct behaviour for
63 # include_package_data
64 'setuptools_git >= 1.0',
65 ],
66 classifiers=[
67 'Development Status :: 4 - Beta',
68 'Intended Audience :: Developers',
69 'License :: OSI Approved :: ISC License (ISCL)',
70 'Operating System :: POSIX',
71 'Programming Language :: Python :: 3',
72 'Programming Language :: Python :: 3.6',
73 'Programming Language :: Python :: 3.7',
74 'Programming Language :: Python :: 3.8',
75 'Framework :: Django',
76 'Topic :: Software Development :: Libraries :: Python Modules',
77 'Topic :: Internet :: WWW/HTTP',
78 ],
79 )
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -8,7 +8,7 @@
'bleach',
'bleach-allowlist',
'diff-match-patch',
- 'django-bakery>=0.12.0',
+ 'django-bakery>=0.13.0',
'django-crispy-forms',
'django-markitup>=4.0.0',
'django-registration-redux',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -8,7 +8,7 @@\n 'bleach',\n 'bleach-allowlist',\n 'diff-match-patch',\n- 'django-bakery>=0.12.0',\n+ 'django-bakery>=0.13.0',\n 'django-crispy-forms',\n 'django-markitup>=4.0.0',\n 'django-registration-redux',\n", "issue": "Add support for Django 4.0\nCurrently failing tests (See #632)\n", "before_files": [{"content": "from glob import glob\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\nREQUIRES = [\n 'Django>=2.2,<4',\n 'bleach',\n 'bleach-allowlist',\n 'diff-match-patch',\n 'django-bakery>=0.12.0',\n 'django-crispy-forms',\n 'django-markitup>=4.0.0',\n 'django-registration-redux',\n 'django-reversion',\n 'django-select2',\n 'djangorestframework',\n 'drf-extensions>=0.5.0',\n 'icalendar>=4.0',\n 'jsonfield',\n 'markdown>=2.5',\n 'pillow',\n 'py3dns',\n 'pyLibravatar',\n 'pytz',\n 'requests',\n]\n\nSOURCES = []\n\n\nwith open('README.rst', 'r') as f:\n long_description = f.read()\n\n\ndef compile_translations():\n try:\n subprocess.check_call(['./manage.py', 'compilemessages'])\n except subprocess.CalledProcessError:\n print(\"WARNING: cannot compile translations.\")\n return glob('wafer/locale/*/LC_MESSAGES/django.mo')\n\n\nsetup(\n name=\"wafer\",\n version=\"0.13.1a\",\n url='http://github.com/CTPUG/wafer',\n license='ISC',\n description=\"A wafer-thin Django library for running small conferences.\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n author='CTPUG',\n author_email='[email protected]',\n packages=find_packages(),\n include_package_data=True,\n install_requires=REQUIRES,\n dependency_links=SOURCES,\n data_files=[\n ('locale', compile_translations()),\n ],\n setup_requires=[\n # Add setuptools-git, so we get correct behaviour for\n # include_package_data\n 'setuptools_git >= 1.0',\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: ISC License (ISCL)',\n 'Operating System :: POSIX',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Framework :: Django',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "from glob import glob\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\nREQUIRES = [\n 'Django>=2.2,<4',\n 'bleach',\n 'bleach-allowlist',\n 'diff-match-patch',\n 'django-bakery>=0.13.0',\n 'django-crispy-forms',\n 'django-markitup>=4.0.0',\n 'django-registration-redux',\n 'django-reversion',\n 'django-select2',\n 'djangorestframework',\n 'drf-extensions>=0.5.0',\n 'icalendar>=4.0',\n 'jsonfield',\n 'markdown>=2.5',\n 'pillow',\n 'py3dns',\n 'pyLibravatar',\n 'pytz',\n 'requests',\n]\n\nSOURCES = []\n\n\nwith open('README.rst', 'r') as f:\n long_description = f.read()\n\n\ndef compile_translations():\n try:\n subprocess.check_call(['./manage.py', 'compilemessages'])\n except subprocess.CalledProcessError:\n print(\"WARNING: cannot compile translations.\")\n return glob('wafer/locale/*/LC_MESSAGES/django.mo')\n\n\nsetup(\n name=\"wafer\",\n version=\"0.13.1a\",\n url='http://github.com/CTPUG/wafer',\n license='ISC',\n description=\"A wafer-thin Django library for running small conferences.\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n author='CTPUG',\n 
author_email='[email protected]',\n packages=find_packages(),\n include_package_data=True,\n install_requires=REQUIRES,\n dependency_links=SOURCES,\n data_files=[\n ('locale', compile_translations()),\n ],\n setup_requires=[\n # Add setuptools-git, so we get correct behaviour for\n # include_package_data\n 'setuptools_git >= 1.0',\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: ISC License (ISCL)',\n 'Operating System :: POSIX',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Framework :: Django',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n)\n", "path": "setup.py"}]}
| 969 | 108 |
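The wafer record only bumps the `django-bakery` floor. As an aside (not part of the patch), the `packaging` library, if installed, offers a quick way to check whether an installed version satisfies the new pin:

```python
from packaging.requirements import Requirement   # assumes `packaging` is available

req = Requirement("django-bakery>=0.13.0")        # the bumped pin from the patch
print(req.specifier.contains("0.12.0"))           # False -- the old floor
print(req.specifier.contains("0.13.0"))           # True
```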
gh_patches_debug_23755
|
rasdani/github-patches
|
git_diff
|
akvo__akvo-rsr-2111
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Budget without value makes IATI export crash
## Test plan
GIVEN the IATI export (bottom right on project page)
WHEN a budget of the project has no value, but does have a value date / currency / label
THEN the `value` tag of the budget item should not be generated
AND no internal server error should be shown
## Issue description
See http://sentry.support.akvo-ops.org/rsr/live/group/832/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/iati/exports/elements/budget.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from lxml import etree
8
9
10 def budget(project):
11 """
12 Generate the budget elements.
13
14 :param project: Project object
15 :return: A list of Etree elements
16 """
17 budget_elements = []
18
19 for budget_item in project.budget_items.all():
20 if budget_item.amount or budget_item.period_start or budget_item.period_end or \
21 budget_item.type or budget_item.status or budget_item.value_date or \
22 budget_item.currency or budget_item.other_extra or budget_item.label:
23 element = etree.Element("budget")
24
25 if budget_item.type:
26 element.attrib['type'] = budget_item.type
27
28 if budget_item.status:
29 element.attrib['status'] = budget_item.status
30
31 if budget_item.period_start:
32 period_start_element = etree.SubElement(element, "period-start")
33 period_start_element.attrib['iso-date'] = str(budget_item.period_start)
34
35 if budget_item.period_end:
36 period_end_element = etree.SubElement(element, "period-end")
37 period_end_element.attrib['iso-date'] = str(budget_item.period_end)
38
39 if budget_item.amount == 0 or budget_item.amount:
40 value_element = etree.SubElement(element, "value")
41 value_element.text = str(budget_item.amount)
42
43 if budget_item.value_date:
44 value_element.attrib['value-date'] = str(budget_item.value_date)
45
46 if budget_item.currency:
47 value_element.attrib['currency'] = budget_item.currency
48
49 if budget_item.other_extra:
50 value_element.attrib['{http://akvo.org/iati-activities}label'] = budget_item.\
51 other_extra
52 elif budget_item.label and budget_item.label.label:
53 value_element.attrib['{http://akvo.org/iati-activities}label'] = budget_item.label.\
54 label
55
56 budget_elements.append(element)
57
58 return budget_elements
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/akvo/iati/exports/elements/budget.py b/akvo/iati/exports/elements/budget.py
--- a/akvo/iati/exports/elements/budget.py
+++ b/akvo/iati/exports/elements/budget.py
@@ -40,18 +40,17 @@
value_element = etree.SubElement(element, "value")
value_element.text = str(budget_item.amount)
- if budget_item.value_date:
- value_element.attrib['value-date'] = str(budget_item.value_date)
-
- if budget_item.currency:
- value_element.attrib['currency'] = budget_item.currency
-
- if budget_item.other_extra:
- value_element.attrib['{http://akvo.org/iati-activities}label'] = budget_item.\
- other_extra
- elif budget_item.label and budget_item.label.label:
- value_element.attrib['{http://akvo.org/iati-activities}label'] = budget_item.label.\
- label
+ if budget_item.value_date:
+ value_element.attrib['value-date'] = str(budget_item.value_date)
+
+ if budget_item.currency:
+ value_element.attrib['currency'] = budget_item.currency
+
+ akvo_label = '{http://akvo.org/iati-activities}label'
+ if budget_item.other_extra:
+ value_element.attrib[akvo_label] = budget_item.other_extra
+ elif budget_item.label and budget_item.label.label:
+ value_element.attrib[akvo_label] = budget_item.label.label
budget_elements.append(element)
|
{"golden_diff": "diff --git a/akvo/iati/exports/elements/budget.py b/akvo/iati/exports/elements/budget.py\n--- a/akvo/iati/exports/elements/budget.py\n+++ b/akvo/iati/exports/elements/budget.py\n@@ -40,18 +40,17 @@\n value_element = etree.SubElement(element, \"value\")\n value_element.text = str(budget_item.amount)\n \n- if budget_item.value_date:\n- value_element.attrib['value-date'] = str(budget_item.value_date)\n-\n- if budget_item.currency:\n- value_element.attrib['currency'] = budget_item.currency\n-\n- if budget_item.other_extra:\n- value_element.attrib['{http://akvo.org/iati-activities}label'] = budget_item.\\\n- other_extra\n- elif budget_item.label and budget_item.label.label:\n- value_element.attrib['{http://akvo.org/iati-activities}label'] = budget_item.label.\\\n- label\n+ if budget_item.value_date:\n+ value_element.attrib['value-date'] = str(budget_item.value_date)\n+\n+ if budget_item.currency:\n+ value_element.attrib['currency'] = budget_item.currency\n+\n+ akvo_label = '{http://akvo.org/iati-activities}label'\n+ if budget_item.other_extra:\n+ value_element.attrib[akvo_label] = budget_item.other_extra\n+ elif budget_item.label and budget_item.label.label:\n+ value_element.attrib[akvo_label] = budget_item.label.label\n \n budget_elements.append(element)\n", "issue": "Budget without value makes IATI export crash\n## Test plan\n\nGIVEN the IATI export (bottom right on project page)\nWHEN a budget of the project has no value, but does have a value date / currency / label\nTHEN the `value` tag of the budget item should not be generated\nAND no internal server error should be shown\n## Issue description\n\nSee http://sentry.support.akvo-ops.org/rsr/live/group/832/\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom lxml import etree\n\n\ndef budget(project):\n \"\"\"\n Generate the budget elements.\n\n :param project: Project object\n :return: A list of Etree elements\n \"\"\"\n budget_elements = []\n\n for budget_item in project.budget_items.all():\n if budget_item.amount or budget_item.period_start or budget_item.period_end or \\\n budget_item.type or budget_item.status or budget_item.value_date or \\\n budget_item.currency or budget_item.other_extra or budget_item.label:\n element = etree.Element(\"budget\")\n\n if budget_item.type:\n element.attrib['type'] = budget_item.type\n\n if budget_item.status:\n element.attrib['status'] = budget_item.status\n\n if budget_item.period_start:\n period_start_element = etree.SubElement(element, \"period-start\")\n period_start_element.attrib['iso-date'] = str(budget_item.period_start)\n\n if budget_item.period_end:\n period_end_element = etree.SubElement(element, \"period-end\")\n period_end_element.attrib['iso-date'] = str(budget_item.period_end)\n\n if budget_item.amount == 0 or budget_item.amount:\n value_element = etree.SubElement(element, \"value\")\n value_element.text = str(budget_item.amount)\n\n if budget_item.value_date:\n value_element.attrib['value-date'] = str(budget_item.value_date)\n\n if budget_item.currency:\n value_element.attrib['currency'] = budget_item.currency\n\n if budget_item.other_extra:\n value_element.attrib['{http://akvo.org/iati-activities}label'] = budget_item.\\\n other_extra\n elif budget_item.label and budget_item.label.label:\n 
value_element.attrib['{http://akvo.org/iati-activities}label'] = budget_item.label.\\\n label\n\n budget_elements.append(element)\n\n return budget_elements\n", "path": "akvo/iati/exports/elements/budget.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom lxml import etree\n\n\ndef budget(project):\n \"\"\"\n Generate the budget elements.\n\n :param project: Project object\n :return: A list of Etree elements\n \"\"\"\n budget_elements = []\n\n for budget_item in project.budget_items.all():\n if budget_item.amount or budget_item.period_start or budget_item.period_end or \\\n budget_item.type or budget_item.status or budget_item.value_date or \\\n budget_item.currency or budget_item.other_extra or budget_item.label:\n element = etree.Element(\"budget\")\n\n if budget_item.type:\n element.attrib['type'] = budget_item.type\n\n if budget_item.status:\n element.attrib['status'] = budget_item.status\n\n if budget_item.period_start:\n period_start_element = etree.SubElement(element, \"period-start\")\n period_start_element.attrib['iso-date'] = str(budget_item.period_start)\n\n if budget_item.period_end:\n period_end_element = etree.SubElement(element, \"period-end\")\n period_end_element.attrib['iso-date'] = str(budget_item.period_end)\n\n if budget_item.amount == 0 or budget_item.amount:\n value_element = etree.SubElement(element, \"value\")\n value_element.text = str(budget_item.amount)\n\n if budget_item.value_date:\n value_element.attrib['value-date'] = str(budget_item.value_date)\n\n if budget_item.currency:\n value_element.attrib['currency'] = budget_item.currency\n\n akvo_label = '{http://akvo.org/iati-activities}label'\n if budget_item.other_extra:\n value_element.attrib[akvo_label] = budget_item.other_extra\n elif budget_item.label and budget_item.label.label:\n value_element.attrib[akvo_label] = budget_item.label.label\n\n budget_elements.append(element)\n\n return budget_elements\n", "path": "akvo/iati/exports/elements/budget.py"}]}
| 930 | 346 |
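In the old akvo code, a budget with a value date, currency or label but no amount never created `value_element`, so the later attribute assignments raised `UnboundLocalError` and the IATI export crashed; the patch simply indents those assignments under the amount check. A self-contained sketch of the fixed behaviour (assuming lxml, with a plain function standing in for the model object):

```python
from lxml import etree

AKVO_LABEL = "{http://akvo.org/iati-activities}label"

def add_value(element, amount, value_date=None, currency=None, label=None):
    # Everything below runs only when an amount exists, so a budget with just
    # a value date / currency / label produces no <value> tag and no crash.
    if amount == 0 or amount:
        value = etree.SubElement(element, "value")
        value.text = str(amount)
        if value_date:
            value.attrib["value-date"] = str(value_date)
        if currency:
            value.attrib["currency"] = currency
        if label:
            value.attrib[AKVO_LABEL] = label

budget = etree.Element("budget")
add_value(budget, None, value_date="2015-01-01", currency="EUR")
print(etree.tostring(budget).decode())   # <budget/> -- no empty <value>, no error
```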
gh_patches_debug_14561
|
rasdani/github-patches
|
git_diff
|
meltano__meltano-6276
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Address warning in Airflow plugin version check
> not super urgent, but as we move into supporting newer Python versions
> (https://github.com/meltano/meltano/pull/6135) and bumping Meltano's dependencies (https://github.com/meltano/meltano/issues/6264), we might break Airflow support.
>
> It's also probably a very quick (< 1 hour) fix by replacing distutils.StrictVersion with the packaging.version equivalent.
>
```
src/meltano/core/plugin/airflow.py:110: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
if StrictVersion(version) < StrictVersion("2.0.0")
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/meltano/core/plugin/airflow.py`
Content:
```
1 """Plugin glue code for Airflow."""
2 import configparser
3 import logging
4 import os
5 import subprocess
6 from distutils.version import StrictVersion
7
8 from meltano.core.behavior.hookable import hook
9 from meltano.core.error import AsyncSubprocessError
10 from meltano.core.plugin_invoker import PluginInvoker
11 from meltano.core.utils import nest
12
13 from . import BasePlugin, PluginType
14
15
16 class AirflowInvoker(PluginInvoker):
17 """Invoker that prepares env for Airflow."""
18
19 def env(self):
20 """Environment variables for Airflow.
21
22 Returns:
23 Dictionary of environment variables.
24 """
25 env = super().env()
26
27 env["AIRFLOW_HOME"] = str(self.plugin_config_service.run_dir)
28 env["AIRFLOW_CONFIG"] = str(self.files["config"])
29
30 return env
31
32
33 class Airflow(BasePlugin):
34 """Plugin glue code for Airflow."""
35
36 __plugin_type__ = PluginType.ORCHESTRATORS
37
38 invoker_class = AirflowInvoker
39
40 @property
41 def config_files(self):
42 """Return the configuration files required by the plugin.
43
44 Returns:
45 Dictionary of config file identifiers and filenames
46 """
47 return {"config": "airflow.cfg"}
48
49 def process_config(self, flat_config):
50 """Unflatten the config.
51
52 Args:
53 flat_config: the flat config
54
55 Returns:
56 unflattened config
57 """
58 config = {}
59 for key, value in flat_config.items():
60 nest(config, key, str(value))
61 return config
62
63 @staticmethod
64 def update_config_file(invoker: AirflowInvoker) -> None:
65 """Update airflow.cfg with plugin configuration.
66
67 Args:
68 invoker: the active PluginInvoker
69 """
70 airflow_cfg_path = invoker.files["config"]
71 logging.debug(f"Generated default '{str(airflow_cfg_path)}'")
72
73 # open the configuration and update it
74 # now we let's update the config to use our stubs
75 airflow_cfg = configparser.ConfigParser()
76
77 with airflow_cfg_path.open() as airflow_cfg_file_to_read:
78 airflow_cfg.read_file(airflow_cfg_file_to_read)
79 logging.debug(f"Loaded '{str(airflow_cfg_path)}'")
80
81 config = invoker.plugin_config_processed
82 for section, section_config in config.items():
83 airflow_cfg[section].update(section_config)
84 logging.debug(f"\tUpdated section [{section}] with {section_config}")
85
86 with airflow_cfg_path.open("w") as airflow_cfg_file_to_write:
87 airflow_cfg.write(airflow_cfg_file_to_write)
88 logging.debug(f"Saved '{str(airflow_cfg_path)}'")
89
90 @hook("before_install")
91 async def setup_env(self, *args, **kwargs):
92 """Configure the env to make airflow installable without GPL deps.
93
94 Args:
95 args: Arbitrary args
96 kwargs: Arbitrary kwargs
97 """
98 os.environ["SLUGIFY_USES_TEXT_UNIDECODE"] = "yes"
99
100 @hook("before_configure")
101 async def before_configure(self, invoker: AirflowInvoker, session): # noqa: WPS217
102 """Generate config file and keep metadata database up-to-date.
103
104 Args:
105 invoker: the active PluginInvoker
106 session: metadata database session
107
108 Raises:
109 AsyncSubprocessError: if command failed to run
110 """
111 # generate the default `airflow.cfg`
112 handle = await invoker.invoke_async(
113 "--help",
114 require_preparation=False,
115 stdout=subprocess.DEVNULL,
116 stderr=subprocess.PIPE,
117 )
118 exit_code = await handle.wait()
119
120 if exit_code:
121 raise AsyncSubprocessError(
122 "Command `airflow --help` failed", process=handle
123 )
124
125 # Read and update airflow.cfg
126 self.update_config_file(invoker)
127
128 # we've changed the configuration here, so we need to call
129 # prepare again on the invoker so it re-reads the configuration
130 # for the Airflow plugin
131 await invoker.prepare(session)
132
133 # make sure we use correct db init
134 handle = await invoker.invoke_async(
135 "version",
136 stdout=subprocess.PIPE,
137 stderr=subprocess.PIPE,
138 )
139
140 stdout, stderr = await handle.communicate()
141
142 if handle.returncode:
143 raise AsyncSubprocessError(
144 "Command `airflow version` failed", process=handle
145 )
146
147 version = stdout.decode()
148 init_db_cmd = (
149 ["initdb"]
150 if StrictVersion(version) < StrictVersion("2.0.0")
151 else ["db", "init"]
152 )
153
154 handle = await invoker.invoke_async(
155 *init_db_cmd,
156 stdout=subprocess.PIPE,
157 stderr=subprocess.PIPE,
158 )
159 exit_code = await handle.wait()
160
161 if exit_code:
162 raise AsyncSubprocessError(
163 "Airflow metadata database could not be initialized: `airflow initdb` failed",
164 handle,
165 )
166
167 logging.debug("Completed `airflow initdb`")
168
169 @hook("before_cleanup")
170 async def before_cleanup(self, invoker: PluginInvoker):
171 """Delete the config file.
172
173 Args:
174 invoker: the active PluginInvoker
175 """
176 config_file = invoker.files["config"]
177 try:
178 config_file.unlink()
179 logging.debug(f"Deleted configuration at {config_file}")
180 except FileNotFoundError:
181 pass
182
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/meltano/core/plugin/airflow.py b/src/meltano/core/plugin/airflow.py
--- a/src/meltano/core/plugin/airflow.py
+++ b/src/meltano/core/plugin/airflow.py
@@ -3,7 +3,8 @@
import logging
import os
import subprocess
-from distutils.version import StrictVersion
+
+from packaging.version import Version
from meltano.core.behavior.hookable import hook
from meltano.core.error import AsyncSubprocessError
@@ -146,9 +147,7 @@
version = stdout.decode()
init_db_cmd = (
- ["initdb"]
- if StrictVersion(version) < StrictVersion("2.0.0")
- else ["db", "init"]
+ ["initdb"] if Version(version) < Version("2.0.0") else ["db", "init"]
)
handle = await invoker.invoke_async(
|
{"golden_diff": "diff --git a/src/meltano/core/plugin/airflow.py b/src/meltano/core/plugin/airflow.py\n--- a/src/meltano/core/plugin/airflow.py\n+++ b/src/meltano/core/plugin/airflow.py\n@@ -3,7 +3,8 @@\n import logging\n import os\n import subprocess\n-from distutils.version import StrictVersion\n+\n+from packaging.version import Version\n \n from meltano.core.behavior.hookable import hook\n from meltano.core.error import AsyncSubprocessError\n@@ -146,9 +147,7 @@\n \n version = stdout.decode()\n init_db_cmd = (\n- [\"initdb\"]\n- if StrictVersion(version) < StrictVersion(\"2.0.0\")\n- else [\"db\", \"init\"]\n+ [\"initdb\"] if Version(version) < Version(\"2.0.0\") else [\"db\", \"init\"]\n )\n \n handle = await invoker.invoke_async(\n", "issue": "Address warning in Airflow plugin version check\n> not super urgent, but as we move into supporting newer Python versions \r\n> (https://github.com/meltano/meltano/pull/6135) and bumping Meltano's dependencies (https://github.com/meltano/meltano/issues/6264), we might break Airflow support.\r\n> \r\n> It's also probably a very quick (< 1 hour) fix by replacing distutils.StrictVersion with the packaging.version equivalent.\r\n> \r\n\r\n```\r\nsrc/meltano/core/plugin/airflow.py:110: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.\r\n if StrictVersion(version) < StrictVersion(\"2.0.0\")\r\n```\n", "before_files": [{"content": "\"\"\"Plugin glue code for Airflow.\"\"\"\nimport configparser\nimport logging\nimport os\nimport subprocess\nfrom distutils.version import StrictVersion\n\nfrom meltano.core.behavior.hookable import hook\nfrom meltano.core.error import AsyncSubprocessError\nfrom meltano.core.plugin_invoker import PluginInvoker\nfrom meltano.core.utils import nest\n\nfrom . 
import BasePlugin, PluginType\n\n\nclass AirflowInvoker(PluginInvoker):\n \"\"\"Invoker that prepares env for Airflow.\"\"\"\n\n def env(self):\n \"\"\"Environment variables for Airflow.\n\n Returns:\n Dictionary of environment variables.\n \"\"\"\n env = super().env()\n\n env[\"AIRFLOW_HOME\"] = str(self.plugin_config_service.run_dir)\n env[\"AIRFLOW_CONFIG\"] = str(self.files[\"config\"])\n\n return env\n\n\nclass Airflow(BasePlugin):\n \"\"\"Plugin glue code for Airflow.\"\"\"\n\n __plugin_type__ = PluginType.ORCHESTRATORS\n\n invoker_class = AirflowInvoker\n\n @property\n def config_files(self):\n \"\"\"Return the configuration files required by the plugin.\n\n Returns:\n Dictionary of config file identifiers and filenames\n \"\"\"\n return {\"config\": \"airflow.cfg\"}\n\n def process_config(self, flat_config):\n \"\"\"Unflatten the config.\n\n Args:\n flat_config: the flat config\n\n Returns:\n unflattened config\n \"\"\"\n config = {}\n for key, value in flat_config.items():\n nest(config, key, str(value))\n return config\n\n @staticmethod\n def update_config_file(invoker: AirflowInvoker) -> None:\n \"\"\"Update airflow.cfg with plugin configuration.\n\n Args:\n invoker: the active PluginInvoker\n \"\"\"\n airflow_cfg_path = invoker.files[\"config\"]\n logging.debug(f\"Generated default '{str(airflow_cfg_path)}'\")\n\n # open the configuration and update it\n # now we let's update the config to use our stubs\n airflow_cfg = configparser.ConfigParser()\n\n with airflow_cfg_path.open() as airflow_cfg_file_to_read:\n airflow_cfg.read_file(airflow_cfg_file_to_read)\n logging.debug(f\"Loaded '{str(airflow_cfg_path)}'\")\n\n config = invoker.plugin_config_processed\n for section, section_config in config.items():\n airflow_cfg[section].update(section_config)\n logging.debug(f\"\\tUpdated section [{section}] with {section_config}\")\n\n with airflow_cfg_path.open(\"w\") as airflow_cfg_file_to_write:\n airflow_cfg.write(airflow_cfg_file_to_write)\n logging.debug(f\"Saved '{str(airflow_cfg_path)}'\")\n\n @hook(\"before_install\")\n async def setup_env(self, *args, **kwargs):\n \"\"\"Configure the env to make airflow installable without GPL deps.\n\n Args:\n args: Arbitrary args\n kwargs: Arbitrary kwargs\n \"\"\"\n os.environ[\"SLUGIFY_USES_TEXT_UNIDECODE\"] = \"yes\"\n\n @hook(\"before_configure\")\n async def before_configure(self, invoker: AirflowInvoker, session): # noqa: WPS217\n \"\"\"Generate config file and keep metadata database up-to-date.\n\n Args:\n invoker: the active PluginInvoker\n session: metadata database session\n\n Raises:\n AsyncSubprocessError: if command failed to run\n \"\"\"\n # generate the default `airflow.cfg`\n handle = await invoker.invoke_async(\n \"--help\",\n require_preparation=False,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.PIPE,\n )\n exit_code = await handle.wait()\n\n if exit_code:\n raise AsyncSubprocessError(\n \"Command `airflow --help` failed\", process=handle\n )\n\n # Read and update airflow.cfg\n self.update_config_file(invoker)\n\n # we've changed the configuration here, so we need to call\n # prepare again on the invoker so it re-reads the configuration\n # for the Airflow plugin\n await invoker.prepare(session)\n\n # make sure we use correct db init\n handle = await invoker.invoke_async(\n \"version\",\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n\n stdout, stderr = await handle.communicate()\n\n if handle.returncode:\n raise AsyncSubprocessError(\n \"Command `airflow version` failed\", process=handle\n )\n\n version = 
stdout.decode()\n init_db_cmd = (\n [\"initdb\"]\n if StrictVersion(version) < StrictVersion(\"2.0.0\")\n else [\"db\", \"init\"]\n )\n\n handle = await invoker.invoke_async(\n *init_db_cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n exit_code = await handle.wait()\n\n if exit_code:\n raise AsyncSubprocessError(\n \"Airflow metadata database could not be initialized: `airflow initdb` failed\",\n handle,\n )\n\n logging.debug(\"Completed `airflow initdb`\")\n\n @hook(\"before_cleanup\")\n async def before_cleanup(self, invoker: PluginInvoker):\n \"\"\"Delete the config file.\n\n Args:\n invoker: the active PluginInvoker\n \"\"\"\n config_file = invoker.files[\"config\"]\n try:\n config_file.unlink()\n logging.debug(f\"Deleted configuration at {config_file}\")\n except FileNotFoundError:\n pass\n", "path": "src/meltano/core/plugin/airflow.py"}], "after_files": [{"content": "\"\"\"Plugin glue code for Airflow.\"\"\"\nimport configparser\nimport logging\nimport os\nimport subprocess\n\nfrom packaging.version import Version\n\nfrom meltano.core.behavior.hookable import hook\nfrom meltano.core.error import AsyncSubprocessError\nfrom meltano.core.plugin_invoker import PluginInvoker\nfrom meltano.core.utils import nest\n\nfrom . import BasePlugin, PluginType\n\n\nclass AirflowInvoker(PluginInvoker):\n \"\"\"Invoker that prepares env for Airflow.\"\"\"\n\n def env(self):\n \"\"\"Environment variables for Airflow.\n\n Returns:\n Dictionary of environment variables.\n \"\"\"\n env = super().env()\n\n env[\"AIRFLOW_HOME\"] = str(self.plugin_config_service.run_dir)\n env[\"AIRFLOW_CONFIG\"] = str(self.files[\"config\"])\n\n return env\n\n\nclass Airflow(BasePlugin):\n \"\"\"Plugin glue code for Airflow.\"\"\"\n\n __plugin_type__ = PluginType.ORCHESTRATORS\n\n invoker_class = AirflowInvoker\n\n @property\n def config_files(self):\n \"\"\"Return the configuration files required by the plugin.\n\n Returns:\n Dictionary of config file identifiers and filenames\n \"\"\"\n return {\"config\": \"airflow.cfg\"}\n\n def process_config(self, flat_config):\n \"\"\"Unflatten the config.\n\n Args:\n flat_config: the flat config\n\n Returns:\n unflattened config\n \"\"\"\n config = {}\n for key, value in flat_config.items():\n nest(config, key, str(value))\n return config\n\n @staticmethod\n def update_config_file(invoker: AirflowInvoker) -> None:\n \"\"\"Update airflow.cfg with plugin configuration.\n\n Args:\n invoker: the active PluginInvoker\n \"\"\"\n airflow_cfg_path = invoker.files[\"config\"]\n logging.debug(f\"Generated default '{str(airflow_cfg_path)}'\")\n\n # open the configuration and update it\n # now we let's update the config to use our stubs\n airflow_cfg = configparser.ConfigParser()\n\n with airflow_cfg_path.open() as airflow_cfg_file_to_read:\n airflow_cfg.read_file(airflow_cfg_file_to_read)\n logging.debug(f\"Loaded '{str(airflow_cfg_path)}'\")\n\n config = invoker.plugin_config_processed\n for section, section_config in config.items():\n airflow_cfg[section].update(section_config)\n logging.debug(f\"\\tUpdated section [{section}] with {section_config}\")\n\n with airflow_cfg_path.open(\"w\") as airflow_cfg_file_to_write:\n airflow_cfg.write(airflow_cfg_file_to_write)\n logging.debug(f\"Saved '{str(airflow_cfg_path)}'\")\n\n @hook(\"before_install\")\n async def setup_env(self, *args, **kwargs):\n \"\"\"Configure the env to make airflow installable without GPL deps.\n\n Args:\n args: Arbitrary args\n kwargs: Arbitrary kwargs\n \"\"\"\n os.environ[\"SLUGIFY_USES_TEXT_UNIDECODE\"] = 
\"yes\"\n\n @hook(\"before_configure\")\n async def before_configure(self, invoker: AirflowInvoker, session): # noqa: WPS217\n \"\"\"Generate config file and keep metadata database up-to-date.\n\n Args:\n invoker: the active PluginInvoker\n session: metadata database session\n\n Raises:\n AsyncSubprocessError: if command failed to run\n \"\"\"\n # generate the default `airflow.cfg`\n handle = await invoker.invoke_async(\n \"--help\",\n require_preparation=False,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.PIPE,\n )\n exit_code = await handle.wait()\n\n if exit_code:\n raise AsyncSubprocessError(\n \"Command `airflow --help` failed\", process=handle\n )\n\n # Read and update airflow.cfg\n self.update_config_file(invoker)\n\n # we've changed the configuration here, so we need to call\n # prepare again on the invoker so it re-reads the configuration\n # for the Airflow plugin\n await invoker.prepare(session)\n\n # make sure we use correct db init\n handle = await invoker.invoke_async(\n \"version\",\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n\n stdout, stderr = await handle.communicate()\n\n if handle.returncode:\n raise AsyncSubprocessError(\n \"Command `airflow version` failed\", process=handle\n )\n\n version = stdout.decode()\n init_db_cmd = (\n [\"initdb\"] if Version(version) < Version(\"2.0.0\") else [\"db\", \"init\"]\n )\n\n handle = await invoker.invoke_async(\n *init_db_cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n exit_code = await handle.wait()\n\n if exit_code:\n raise AsyncSubprocessError(\n \"Airflow metadata database could not be initialized: `airflow initdb` failed\",\n handle,\n )\n\n logging.debug(\"Completed `airflow initdb`\")\n\n @hook(\"before_cleanup\")\n async def before_cleanup(self, invoker: PluginInvoker):\n \"\"\"Delete the config file.\n\n Args:\n invoker: the active PluginInvoker\n \"\"\"\n config_file = invoker.files[\"config\"]\n try:\n config_file.unlink()\n logging.debug(f\"Deleted configuration at {config_file}\")\n except FileNotFoundError:\n pass\n", "path": "src/meltano/core/plugin/airflow.py"}]}
| 2,013 | 207 |
gh_patches_debug_13904
|
rasdani/github-patches
|
git_diff
|
pyqtgraph__pyqtgraph-2930
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Changed exportDialog
This pull request fixes #2367. The code has been changed as it was suggested in the issue.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyqtgraph/GraphicsScene/exportDialog.py`
Content:
```
1 from .. import exporters as exporters
2 from .. import functions as fn
3 from ..graphicsItems.PlotItem import PlotItem
4 from ..graphicsItems.ViewBox import ViewBox
5 from ..Qt import QtCore, QtWidgets
6
7 from . import exportDialogTemplate_generic as ui_template
8
9 class FormatExportListWidgetItem(QtWidgets.QListWidgetItem):
10 def __init__(self, expClass, *args, **kwargs):
11 QtWidgets.QListWidgetItem.__init__(self, *args, **kwargs)
12 self.expClass = expClass
13
14
15 class ExportDialog(QtWidgets.QWidget):
16 def __init__(self, scene):
17 QtWidgets.QWidget.__init__(self)
18 self.setVisible(False)
19 self.setWindowTitle("Export")
20 self.shown = False
21 self.currentExporter = None
22 self.scene = scene
23
24 self.selectBox = QtWidgets.QGraphicsRectItem()
25 self.selectBox.setPen(fn.mkPen('y', width=3, style=QtCore.Qt.PenStyle.DashLine))
26 self.selectBox.hide()
27 self.scene.addItem(self.selectBox)
28
29 self.ui = ui_template.Ui_Form()
30 self.ui.setupUi(self)
31
32 self.ui.closeBtn.clicked.connect(self.close)
33 self.ui.exportBtn.clicked.connect(self.exportClicked)
34 self.ui.copyBtn.clicked.connect(self.copyClicked)
35 self.ui.itemTree.currentItemChanged.connect(self.exportItemChanged)
36 self.ui.formatList.currentItemChanged.connect(self.exportFormatChanged)
37
38
39 def show(self, item=None):
40 if item is not None:
41 ## Select next exportable parent of the item originally clicked on
42 while not isinstance(item, ViewBox) and not isinstance(item, PlotItem) and item is not None:
43 item = item.parentItem()
44 ## if this is a ViewBox inside a PlotItem, select the parent instead.
45 if isinstance(item, ViewBox) and isinstance(item.parentItem(), PlotItem):
46 item = item.parentItem()
47 self.updateItemList(select=item)
48 self.setVisible(True)
49 self.activateWindow()
50 self.raise_()
51 self.selectBox.setVisible(True)
52 if not self.shown:
53 self.shown = True
54 vcenter = self.scene.getViewWidget().geometry().center()
55 x = max(0, int(vcenter.x() - self.width() / 2))
56 y = max(0, int(vcenter.y() - self.height() / 2))
57 self.move(x, y)
58
59 def updateItemList(self, select=None):
60 self.ui.itemTree.clear()
61 si = QtWidgets.QTreeWidgetItem(["Entire Scene"])
62 si.gitem = self.scene
63 self.ui.itemTree.addTopLevelItem(si)
64 self.ui.itemTree.setCurrentItem(si)
65 si.setExpanded(True)
66 for child in self.scene.items():
67 if child.parentItem() is None:
68 self.updateItemTree(child, si, select=select)
69
70 def updateItemTree(self, item, treeItem, select=None):
71 si = None
72 if isinstance(item, ViewBox):
73 si = QtWidgets.QTreeWidgetItem(['ViewBox'])
74 elif isinstance(item, PlotItem):
75 si = QtWidgets.QTreeWidgetItem(['Plot'])
76
77 if si is not None:
78 si.gitem = item
79 treeItem.addChild(si)
80 treeItem = si
81 if si.gitem is select:
82 self.ui.itemTree.setCurrentItem(si)
83
84 for ch in item.childItems():
85 self.updateItemTree(ch, treeItem, select=select)
86
87
88 def exportItemChanged(self, item, prev):
89 if item is None:
90 return
91 if item.gitem is self.scene:
92 newBounds = self.scene.views()[0].viewRect()
93 else:
94 newBounds = item.gitem.sceneBoundingRect()
95 self.selectBox.setRect(newBounds)
96 self.selectBox.show()
97 self.updateFormatList()
98
99 def updateFormatList(self):
100 current = self.ui.formatList.currentItem()
101
102 self.ui.formatList.clear()
103 gotCurrent = False
104 for exp in exporters.listExporters():
105 item = FormatExportListWidgetItem(exp, QtCore.QCoreApplication.translate('Exporter', exp.Name))
106 self.ui.formatList.addItem(item)
107 if item is current:
108 self.ui.formatList.setCurrentRow(self.ui.formatList.count() - 1)
109 gotCurrent = True
110
111 if not gotCurrent:
112 self.ui.formatList.setCurrentRow(0)
113
114 def exportFormatChanged(self, item, prev):
115 if item is None:
116 self.currentExporter = None
117 self.ui.paramTree.clear()
118 return
119 expClass = item.expClass
120 exp = expClass(item=self.ui.itemTree.currentItem().gitem)
121
122 params = exp.parameters()
123
124 if params is None:
125 self.ui.paramTree.clear()
126 else:
127 self.ui.paramTree.setParameters(params)
128 self.currentExporter = exp
129 self.ui.copyBtn.setEnabled(exp.allowCopy)
130
131 def exportClicked(self):
132 self.selectBox.hide()
133 self.currentExporter.export()
134
135 def copyClicked(self):
136 self.selectBox.hide()
137 self.currentExporter.export(copy=True)
138
139 def close(self):
140 self.selectBox.setVisible(False)
141 self.setVisible(False)
142
143 def closeEvent(self, event):
144 self.close()
145 super().closeEvent(event)
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyqtgraph/GraphicsScene/exportDialog.py b/pyqtgraph/GraphicsScene/exportDialog.py
--- a/pyqtgraph/GraphicsScene/exportDialog.py
+++ b/pyqtgraph/GraphicsScene/exportDialog.py
@@ -51,10 +51,11 @@
self.selectBox.setVisible(True)
if not self.shown:
self.shown = True
- vcenter = self.scene.getViewWidget().geometry().center()
- x = max(0, int(vcenter.x() - self.width() / 2))
- y = max(0, int(vcenter.y() - self.height() / 2))
- self.move(x, y)
+ screen = QtWidgets.QApplication.desktop().screenNumber(QtWidgets.QApplication.desktop().cursor().pos())
+ centre = QtWidgets.QDesktopWidget().availableGeometry(screen).center()
+ frame = self.frameGeometry()
+ frame.moveCenter(centre)
+ self.move(frame.topLeft())
def updateItemList(self, select=None):
self.ui.itemTree.clear()
|
{"golden_diff": "diff --git a/pyqtgraph/GraphicsScene/exportDialog.py b/pyqtgraph/GraphicsScene/exportDialog.py\n--- a/pyqtgraph/GraphicsScene/exportDialog.py\n+++ b/pyqtgraph/GraphicsScene/exportDialog.py\n@@ -51,10 +51,11 @@\n self.selectBox.setVisible(True)\n if not self.shown:\n self.shown = True\n- vcenter = self.scene.getViewWidget().geometry().center()\n- x = max(0, int(vcenter.x() - self.width() / 2))\n- y = max(0, int(vcenter.y() - self.height() / 2))\n- self.move(x, y)\n+ screen = QtWidgets.QApplication.desktop().screenNumber(QtWidgets.QApplication.desktop().cursor().pos())\n+ centre = QtWidgets.QDesktopWidget().availableGeometry(screen).center()\n+ frame = self.frameGeometry()\n+ frame.moveCenter(centre)\n+ self.move(frame.topLeft())\n \n def updateItemList(self, select=None):\n self.ui.itemTree.clear()\n", "issue": "Changed exportDialog\nThis pull request fixes #2367. The code has been changed as it was suggested in the issue.\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from .. import exporters as exporters\nfrom .. import functions as fn\nfrom ..graphicsItems.PlotItem import PlotItem\nfrom ..graphicsItems.ViewBox import ViewBox\nfrom ..Qt import QtCore, QtWidgets\n\nfrom . import exportDialogTemplate_generic as ui_template\n\nclass FormatExportListWidgetItem(QtWidgets.QListWidgetItem):\n def __init__(self, expClass, *args, **kwargs):\n QtWidgets.QListWidgetItem.__init__(self, *args, **kwargs)\n self.expClass = expClass\n\n\nclass ExportDialog(QtWidgets.QWidget):\n def __init__(self, scene):\n QtWidgets.QWidget.__init__(self)\n self.setVisible(False)\n self.setWindowTitle(\"Export\")\n self.shown = False\n self.currentExporter = None\n self.scene = scene\n\n self.selectBox = QtWidgets.QGraphicsRectItem()\n self.selectBox.setPen(fn.mkPen('y', width=3, style=QtCore.Qt.PenStyle.DashLine))\n self.selectBox.hide()\n self.scene.addItem(self.selectBox)\n \n self.ui = ui_template.Ui_Form()\n self.ui.setupUi(self)\n \n self.ui.closeBtn.clicked.connect(self.close)\n self.ui.exportBtn.clicked.connect(self.exportClicked)\n self.ui.copyBtn.clicked.connect(self.copyClicked)\n self.ui.itemTree.currentItemChanged.connect(self.exportItemChanged)\n self.ui.formatList.currentItemChanged.connect(self.exportFormatChanged)\n \n\n def show(self, item=None):\n if item is not None:\n ## Select next exportable parent of the item originally clicked on\n while not isinstance(item, ViewBox) and not isinstance(item, PlotItem) and item is not None:\n item = item.parentItem()\n ## if this is a ViewBox inside a PlotItem, select the parent instead.\n if isinstance(item, ViewBox) and isinstance(item.parentItem(), PlotItem):\n item = item.parentItem()\n self.updateItemList(select=item)\n self.setVisible(True)\n self.activateWindow()\n self.raise_()\n self.selectBox.setVisible(True)\n if not self.shown:\n self.shown = True\n vcenter = self.scene.getViewWidget().geometry().center()\n x = max(0, int(vcenter.x() - self.width() / 2))\n y = max(0, int(vcenter.y() - self.height() / 2))\n self.move(x, y)\n \n def updateItemList(self, select=None):\n self.ui.itemTree.clear()\n si = QtWidgets.QTreeWidgetItem([\"Entire Scene\"])\n si.gitem = self.scene\n self.ui.itemTree.addTopLevelItem(si)\n self.ui.itemTree.setCurrentItem(si)\n si.setExpanded(True)\n for child in self.scene.items():\n if child.parentItem() is None:\n self.updateItemTree(child, si, select=select)\n \n def updateItemTree(self, item, treeItem, select=None):\n si = None\n if isinstance(item, ViewBox):\n si = QtWidgets.QTreeWidgetItem(['ViewBox'])\n 
elif isinstance(item, PlotItem):\n si = QtWidgets.QTreeWidgetItem(['Plot'])\n \n if si is not None:\n si.gitem = item\n treeItem.addChild(si)\n treeItem = si\n if si.gitem is select:\n self.ui.itemTree.setCurrentItem(si)\n \n for ch in item.childItems():\n self.updateItemTree(ch, treeItem, select=select)\n \n \n def exportItemChanged(self, item, prev):\n if item is None:\n return\n if item.gitem is self.scene:\n newBounds = self.scene.views()[0].viewRect()\n else:\n newBounds = item.gitem.sceneBoundingRect()\n self.selectBox.setRect(newBounds)\n self.selectBox.show()\n self.updateFormatList()\n \n def updateFormatList(self):\n current = self.ui.formatList.currentItem()\n\n self.ui.formatList.clear()\n gotCurrent = False\n for exp in exporters.listExporters():\n item = FormatExportListWidgetItem(exp, QtCore.QCoreApplication.translate('Exporter', exp.Name))\n self.ui.formatList.addItem(item)\n if item is current:\n self.ui.formatList.setCurrentRow(self.ui.formatList.count() - 1)\n gotCurrent = True\n \n if not gotCurrent:\n self.ui.formatList.setCurrentRow(0)\n \n def exportFormatChanged(self, item, prev):\n if item is None:\n self.currentExporter = None\n self.ui.paramTree.clear()\n return\n expClass = item.expClass\n exp = expClass(item=self.ui.itemTree.currentItem().gitem)\n\n params = exp.parameters()\n\n if params is None:\n self.ui.paramTree.clear()\n else:\n self.ui.paramTree.setParameters(params)\n self.currentExporter = exp\n self.ui.copyBtn.setEnabled(exp.allowCopy)\n \n def exportClicked(self):\n self.selectBox.hide()\n self.currentExporter.export()\n \n def copyClicked(self):\n self.selectBox.hide()\n self.currentExporter.export(copy=True)\n \n def close(self):\n self.selectBox.setVisible(False)\n self.setVisible(False)\n\n def closeEvent(self, event):\n self.close()\n super().closeEvent(event)\n", "path": "pyqtgraph/GraphicsScene/exportDialog.py"}], "after_files": [{"content": "from .. import exporters as exporters\nfrom .. import functions as fn\nfrom ..graphicsItems.PlotItem import PlotItem\nfrom ..graphicsItems.ViewBox import ViewBox\nfrom ..Qt import QtCore, QtWidgets\n\nfrom . 
import exportDialogTemplate_generic as ui_template\n\nclass FormatExportListWidgetItem(QtWidgets.QListWidgetItem):\n def __init__(self, expClass, *args, **kwargs):\n QtWidgets.QListWidgetItem.__init__(self, *args, **kwargs)\n self.expClass = expClass\n\n\nclass ExportDialog(QtWidgets.QWidget):\n def __init__(self, scene):\n QtWidgets.QWidget.__init__(self)\n self.setVisible(False)\n self.setWindowTitle(\"Export\")\n self.shown = False\n self.currentExporter = None\n self.scene = scene\n\n self.selectBox = QtWidgets.QGraphicsRectItem()\n self.selectBox.setPen(fn.mkPen('y', width=3, style=QtCore.Qt.PenStyle.DashLine))\n self.selectBox.hide()\n self.scene.addItem(self.selectBox)\n \n self.ui = ui_template.Ui_Form()\n self.ui.setupUi(self)\n \n self.ui.closeBtn.clicked.connect(self.close)\n self.ui.exportBtn.clicked.connect(self.exportClicked)\n self.ui.copyBtn.clicked.connect(self.copyClicked)\n self.ui.itemTree.currentItemChanged.connect(self.exportItemChanged)\n self.ui.formatList.currentItemChanged.connect(self.exportFormatChanged)\n \n\n def show(self, item=None):\n if item is not None:\n ## Select next exportable parent of the item originally clicked on\n while not isinstance(item, ViewBox) and not isinstance(item, PlotItem) and item is not None:\n item = item.parentItem()\n ## if this is a ViewBox inside a PlotItem, select the parent instead.\n if isinstance(item, ViewBox) and isinstance(item.parentItem(), PlotItem):\n item = item.parentItem()\n self.updateItemList(select=item)\n self.setVisible(True)\n self.activateWindow()\n self.raise_()\n self.selectBox.setVisible(True)\n if not self.shown:\n self.shown = True\n screen = QtWidgets.QApplication.desktop().screenNumber(QtWidgets.QApplication.desktop().cursor().pos())\n centre = QtWidgets.QDesktopWidget().availableGeometry(screen).center()\n frame = self.frameGeometry()\n frame.moveCenter(centre)\n self.move(frame.topLeft())\n \n def updateItemList(self, select=None):\n self.ui.itemTree.clear()\n si = QtWidgets.QTreeWidgetItem([\"Entire Scene\"])\n si.gitem = self.scene\n self.ui.itemTree.addTopLevelItem(si)\n self.ui.itemTree.setCurrentItem(si)\n si.setExpanded(True)\n for child in self.scene.items():\n if child.parentItem() is None:\n self.updateItemTree(child, si, select=select)\n \n def updateItemTree(self, item, treeItem, select=None):\n si = None\n if isinstance(item, ViewBox):\n si = QtWidgets.QTreeWidgetItem(['ViewBox'])\n elif isinstance(item, PlotItem):\n si = QtWidgets.QTreeWidgetItem(['Plot'])\n \n if si is not None:\n si.gitem = item\n treeItem.addChild(si)\n treeItem = si\n if si.gitem is select:\n self.ui.itemTree.setCurrentItem(si)\n \n for ch in item.childItems():\n self.updateItemTree(ch, treeItem, select=select)\n \n \n def exportItemChanged(self, item, prev):\n if item is None:\n return\n if item.gitem is self.scene:\n newBounds = self.scene.views()[0].viewRect()\n else:\n newBounds = item.gitem.sceneBoundingRect()\n self.selectBox.setRect(newBounds)\n self.selectBox.show()\n self.updateFormatList()\n \n def updateFormatList(self):\n current = self.ui.formatList.currentItem()\n\n self.ui.formatList.clear()\n gotCurrent = False\n for exp in exporters.listExporters():\n item = FormatExportListWidgetItem(exp, QtCore.QCoreApplication.translate('Exporter', exp.Name))\n self.ui.formatList.addItem(item)\n if item is current:\n self.ui.formatList.setCurrentRow(self.ui.formatList.count() - 1)\n gotCurrent = True\n \n if not gotCurrent:\n self.ui.formatList.setCurrentRow(0)\n \n def exportFormatChanged(self, item, prev):\n if 
item is None:\n self.currentExporter = None\n self.ui.paramTree.clear()\n return\n expClass = item.expClass\n exp = expClass(item=self.ui.itemTree.currentItem().gitem)\n\n params = exp.parameters()\n\n if params is None:\n self.ui.paramTree.clear()\n else:\n self.ui.paramTree.setParameters(params)\n self.currentExporter = exp\n self.ui.copyBtn.setEnabled(exp.allowCopy)\n \n def exportClicked(self):\n self.selectBox.hide()\n self.currentExporter.export()\n \n def copyClicked(self):\n self.selectBox.hide()\n self.currentExporter.export(copy=True)\n \n def close(self):\n self.selectBox.setVisible(False)\n self.setVisible(False)\n\n def closeEvent(self, event):\n self.close()\n super().closeEvent(event)\n", "path": "pyqtgraph/GraphicsScene/exportDialog.py"}]}
| 1,781 | 220 |
gh_patches_debug_21441
|
rasdani/github-patches
|
git_diff
|
mozilla__bugbug-197
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
In the DevDocNeeded model, consider bugs for which the dev-doc-needed keyword was removed as negative examples
See also #79.
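One way to fold that into the labeling pass, sketched against Bugzilla-style history entries (the dict layout used here is an assumption):

```python
def devdoc_label(bug_history):
    """Sketch: 1 = needs dev docs, 0 = negative example (keyword later removed or never set)."""
    label = 0
    for entry in bug_history:
        for change in entry["changes"]:
            if change["field_name"] != "keywords":
                continue
            if "dev-doc-needed" in change["removed"] and "dev-doc-complete" not in change["added"]:
                # dev-doc-needed was taken off again -> treat the bug as a negative example.
                label = 0
            elif "dev-doc-needed" in change["added"] or "dev-doc-complete" in change["added"]:
                label = 1
    return label
```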
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bugbug/models/devdocneeded.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import xgboost
7 from imblearn.under_sampling import RandomUnderSampler
8 from sklearn.compose import ColumnTransformer
9 from sklearn.feature_extraction import DictVectorizer
10 from sklearn.pipeline import Pipeline
11
12 from bugbug import bug_features
13 from bugbug import bugzilla
14 from bugbug.model import Model
15
16
17 class DevDocNeededModel(Model):
18 def __init__(self, lemmatization=False):
19 Model.__init__(self, lemmatization)
20
21 self.sampler = RandomUnderSampler(random_state=0)
22
23 feature_extractors = [
24 bug_features.has_str(),
25 bug_features.has_regression_range(),
26 bug_features.severity(),
27 bug_features.keywords({'dev-doc-needed', 'dev-doc-complete'}),
28 bug_features.is_coverity_issue(),
29 bug_features.has_crash_signature(),
30 bug_features.has_url(),
31 bug_features.has_w3c_url(),
32 bug_features.has_github_url(),
33 bug_features.whiteboard(),
34 bug_features.patches(),
35 bug_features.landings(),
36 bug_features.title(),
37 bug_features.product(),
38 bug_features.component(),
39
40 bug_features.commit_added(),
41 bug_features.commit_deleted(),
42 bug_features.commit_types(),
43 ]
44
45 cleanup_functions = [
46 bug_features.cleanup_fileref,
47 bug_features.cleanup_url,
48 bug_features.cleanup_synonyms,
49 ]
50
51 self.extraction_pipeline = Pipeline([
52 ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions, rollback=True, rollback_when=self.rollback, commit_data=True)),
53 ('union', ColumnTransformer([
54 ('data', DictVectorizer(), 'data'),
55
56 ('title', self.text_vectorizer(), 'title'),
57
58 ('comments', self.text_vectorizer(), 'comments'),
59 ])),
60 ])
61
62 self.clf = xgboost.XGBClassifier(n_jobs=16)
63 self.clf.set_params(predictor='cpu_predictor')
64
65 def rollback(self, change):
66 return change['field_name'] == 'keywords' and any(keyword in change['added'] for keyword in ['dev-doc-needed', 'dev-doc-complete'])
67
68 def get_labels(self):
69 classes = {}
70
71 for bug_data in bugzilla.get_bugs():
72 bug_id = int(bug_data['id'])
73
74 for entry in bug_data['history']:
75 for change in entry['changes']:
76 if change['field_name'] == 'keywords' and any(keyword in change['added'] for keyword in ['dev-doc-needed', 'dev-doc-complete']):
77 classes[bug_id] = 1
78
79 if bug_id not in classes:
80 classes[bug_id] = 0
81
82 return classes
83
84 def get_feature_names(self):
85 return self.extraction_pipeline.named_steps['union'].get_feature_names()
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bugbug/models/devdocneeded.py b/bugbug/models/devdocneeded.py
--- a/bugbug/models/devdocneeded.py
+++ b/bugbug/models/devdocneeded.py
@@ -73,7 +73,14 @@
for entry in bug_data['history']:
for change in entry['changes']:
- if change['field_name'] == 'keywords' and any(keyword in change['added'] for keyword in ['dev-doc-needed', 'dev-doc-complete']):
+ # Bugs that get dev-doc-needed removed from them at some point after it's been added (this suggests a false positive among human-analyzed bugs)
+ if change['field_name'] == 'keywords' and 'dev-doc-needed' in change['removed'] and 'dev-doc-complete' not in change['added']:
+ classes[bug_id] = 0
+ # Bugs that go from dev-doc-needed to dev-doc-complete are guaranteed to be good
+ # Bugs that go from not having dev-doc-needed to having dev-doc-complete are bugs
+ # that were missed by previous scans through content but someone realized it
+ # should have been flagged and updated the docs, found the docs already updated.
+ elif change['field_name'] == 'keywords' and any(keyword in change['added'] for keyword in ['dev-doc-needed', 'dev-doc-complete']):
classes[bug_id] = 1
if bug_id not in classes:
|
{"golden_diff": "diff --git a/bugbug/models/devdocneeded.py b/bugbug/models/devdocneeded.py\n--- a/bugbug/models/devdocneeded.py\n+++ b/bugbug/models/devdocneeded.py\n@@ -73,7 +73,14 @@\n \n for entry in bug_data['history']:\n for change in entry['changes']:\n- if change['field_name'] == 'keywords' and any(keyword in change['added'] for keyword in ['dev-doc-needed', 'dev-doc-complete']):\n+ # Bugs that get dev-doc-needed removed from them at some point after it's been added (this suggests a false positive among human-analyzed bugs)\n+ if change['field_name'] == 'keywords' and 'dev-doc-needed' in change['removed'] and 'dev-doc-complete' not in change['added']:\n+ classes[bug_id] = 0\n+ # Bugs that go from dev-doc-needed to dev-doc-complete are guaranteed to be good\n+ # Bugs that go from not having dev-doc-needed to having dev-doc-complete are bugs\n+ # that were missed by previous scans through content but someone realized it\n+ # should have been flagged and updated the docs, found the docs already updated.\n+ elif change['field_name'] == 'keywords' and any(keyword in change['added'] for keyword in ['dev-doc-needed', 'dev-doc-complete']):\n classes[bug_id] = 1\n \n if bug_id not in classes:\n", "issue": "In the DevDocNeeded model, consider bugs for which the dev-doc-needed keyword was removed as negative examples\nSee also #79.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features\nfrom bugbug import bugzilla\nfrom bugbug.model import Model\n\n\nclass DevDocNeededModel(Model):\n def __init__(self, lemmatization=False):\n Model.__init__(self, lemmatization)\n\n self.sampler = RandomUnderSampler(random_state=0)\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.keywords({'dev-doc-needed', 'dev-doc-complete'}),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n bug_features.title(),\n bug_features.product(),\n bug_features.component(),\n\n bug_features.commit_added(),\n bug_features.commit_deleted(),\n bug_features.commit_types(),\n ]\n\n cleanup_functions = [\n bug_features.cleanup_fileref,\n bug_features.cleanup_url,\n bug_features.cleanup_synonyms,\n ]\n\n self.extraction_pipeline = Pipeline([\n ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions, rollback=True, rollback_when=self.rollback, commit_data=True)),\n ('union', ColumnTransformer([\n ('data', DictVectorizer(), 'data'),\n\n ('title', self.text_vectorizer(), 'title'),\n\n ('comments', self.text_vectorizer(), 'comments'),\n ])),\n ])\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n self.clf.set_params(predictor='cpu_predictor')\n\n def rollback(self, change):\n return change['field_name'] == 'keywords' and any(keyword in change['added'] for keyword in ['dev-doc-needed', 'dev-doc-complete'])\n\n def get_labels(self):\n classes = {}\n\n for bug_data in bugzilla.get_bugs():\n bug_id 
= int(bug_data['id'])\n\n for entry in bug_data['history']:\n for change in entry['changes']:\n if change['field_name'] == 'keywords' and any(keyword in change['added'] for keyword in ['dev-doc-needed', 'dev-doc-complete']):\n classes[bug_id] = 1\n\n if bug_id not in classes:\n classes[bug_id] = 0\n\n return classes\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps['union'].get_feature_names()\n", "path": "bugbug/models/devdocneeded.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features\nfrom bugbug import bugzilla\nfrom bugbug.model import Model\n\n\nclass DevDocNeededModel(Model):\n def __init__(self, lemmatization=False):\n Model.__init__(self, lemmatization)\n\n self.sampler = RandomUnderSampler(random_state=0)\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.keywords({'dev-doc-needed', 'dev-doc-complete'}),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n bug_features.title(),\n bug_features.product(),\n bug_features.component(),\n\n bug_features.commit_added(),\n bug_features.commit_deleted(),\n bug_features.commit_types(),\n ]\n\n cleanup_functions = [\n bug_features.cleanup_fileref,\n bug_features.cleanup_url,\n bug_features.cleanup_synonyms,\n ]\n\n self.extraction_pipeline = Pipeline([\n ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions, rollback=True, rollback_when=self.rollback, commit_data=True)),\n ('union', ColumnTransformer([\n ('data', DictVectorizer(), 'data'),\n\n ('title', self.text_vectorizer(), 'title'),\n\n ('comments', self.text_vectorizer(), 'comments'),\n ])),\n ])\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n self.clf.set_params(predictor='cpu_predictor')\n\n def rollback(self, change):\n return change['field_name'] == 'keywords' and any(keyword in change['added'] for keyword in ['dev-doc-needed', 'dev-doc-complete'])\n\n def get_labels(self):\n classes = {}\n\n for bug_data in bugzilla.get_bugs():\n bug_id = int(bug_data['id'])\n\n for entry in bug_data['history']:\n for change in entry['changes']:\n # Bugs that get dev-doc-needed removed from them at some point after it's been added (this suggests a false positive among human-analyzed bugs)\n if change['field_name'] == 'keywords' and 'dev-doc-needed' in change['removed'] and 'dev-doc-complete' not in change['added']:\n classes[bug_id] = 0\n # Bugs that go from dev-doc-needed to dev-doc-complete are guaranteed to be good\n # Bugs that go from not having dev-doc-needed to having dev-doc-complete are bugs\n # that were missed by previous scans through content but someone realized it\n # should have been flagged and updated the docs, found the docs already updated.\n elif change['field_name'] == 'keywords' and any(keyword in change['added'] for keyword in ['dev-doc-needed', 'dev-doc-complete']):\n classes[bug_id] = 1\n\n if 
bug_id not in classes:\n classes[bug_id] = 0\n\n return classes\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps['union'].get_feature_names()\n", "path": "bugbug/models/devdocneeded.py"}]}
| 1,080 | 311 |
gh_patches_debug_34208
|
rasdani/github-patches
|
git_diff
|
e-valuation__EvaP-353
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Grade preview
There must be an option for FSR members to see the grades for a course before publishing it.
This should be available in "in evaluation", "evaluated" and "reviewed".
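A rough sketch of the state filter this implies for the results view (Django-style ORM; the exact state identifiers and the staff check are assumptions):

```python
# States in which staff may preview results before publication; everyone else
# only ever sees published courses.
PREVIEW_STATES = ["inEvaluation", "evaluated", "reviewed", "published"]

def visible_courses(semester, user):
    if user.is_staff:
        return semester.course_set.filter(state__in=PREVIEW_STATES)
    return semester.course_set.filter(state="published")
```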
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/results/views.py`
Content:
```
1 from django.conf import settings
2 from django.http import HttpResponse
3 from django.shortcuts import get_object_or_404, render_to_response
4 from django.template import RequestContext
5 from django.utils.translation import get_language
6
7 from evap.evaluation.auth import login_required, fsr_required
8 from evap.evaluation.models import Semester
9 from evap.evaluation.tools import calculate_results, calculate_average_and_medium_grades, TextResult, can_publish_grades
10
11 from evap.results.exporters import ExcelExporter
12
13
14 @login_required
15 def index(request):
16 semesters = Semester.get_all_with_published_courses()
17
18 return render_to_response(
19 "results_index.html",
20 dict(semesters=semesters),
21 context_instance=RequestContext(request))
22
23
24 @login_required
25 def semester_detail(request, semester_id):
26 semester = get_object_or_404(Semester, id=semester_id)
27 courses = list(semester.course_set.filter(state="published"))
28
29 # annotate each course object with its grades
30 for course in courses:
31 # first, make sure that there are no preexisting grade attributes
32 course.avg_grade, course.med_grade = calculate_average_and_medium_grades(course)
33 course.can_publish_grades = can_publish_grades(course, request.user.is_staff)
34
35 return render_to_response(
36 "results_semester_detail.html",
37 dict(
38 semester=semester,
39 courses=courses
40 ),
41 context_instance=RequestContext(request))
42
43
44 @fsr_required
45 def semester_export(request, semester_id):
46 semester = get_object_or_404(Semester, id=semester_id)
47
48 filename = "Evaluation-%s-%s.xls" % (semester.name, get_language())
49
50 response = HttpResponse(mimetype="application/vnd.ms-excel")
51 response["Content-Disposition"] = "attachment; filename=\"%s\"" % filename
52
53 exporter = ExcelExporter(semester)
54
55 if 'all' in request.GET:
56 exporter.export(response, True)
57 else:
58 exporter.export(response)
59
60 return response
61
62
63 @login_required
64 def course_detail(request, semester_id, course_id):
65 semester = get_object_or_404(Semester, id=semester_id)
66 course = get_object_or_404(semester.course_set.filter(state="published"), id=course_id)
67
68 sections = calculate_results(course, request.user.is_staff)
69
70 if (request.user.is_staff == False): # if user is not a student representative
71 # remove TextResults if user is neither the evaluated person (or a delegate) nor responsible for the course (or a delegate)
72 for section in sections:
73 if not user_can_see_textresults(request.user, course, section):
74 for index, result in list(enumerate(section.results))[::-1]:
75 if isinstance(section.results[index], TextResult):
76 del section.results[index]
77
78 # remove empty sections
79 sections = [section for section in sections if section.results]
80
81 # check whether results are published
82 published = course.num_voters >= settings.MIN_ANSWER_COUNT and float(course.num_voters) / course.num_participants >= settings.MIN_ANSWER_PERCENTAGE
83
84 # show a publishing warning to fsr members when the results are not publicly available
85 warning = (not published) and request.user.is_staff
86
87 return render_to_response(
88 "results_course_detail.html",
89 dict(
90 course=course,
91 sections=sections,
92 warning=warning
93 ),
94 context_instance=RequestContext(request))
95
96
97 def user_can_see_textresults(user, course, section):
98 if section.contributor == user:
99 return True
100 if course.is_user_responsible_or_delegate(user):
101 return True
102
103 represented_userprofiles = user.represented_users.all()
104 represented_users = [profile.user for profile in represented_userprofiles]
105 if section.contributor in represented_users:
106 return True
107
108 return False
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/evap/results/views.py b/evap/results/views.py
--- a/evap/results/views.py
+++ b/evap/results/views.py
@@ -63,7 +63,13 @@
@login_required
def course_detail(request, semester_id, course_id):
semester = get_object_or_404(Semester, id=semester_id)
- course = get_object_or_404(semester.course_set.filter(state="published"), id=course_id)
+
+ # staff can see preview of results even when course is not published
+ if request.user.is_staff:
+ course = get_object_or_404(semester.course_set.filter(state__in=['inEvaluation', 'evaluated', 'reviewed', 'published']), id=course_id)
+ # everyone else can only see published results
+ else:
+ course = get_object_or_404(semester.course_set.filter(state="published"), id=course_id)
sections = calculate_results(course, request.user.is_staff)
@@ -78,18 +84,24 @@
# remove empty sections
sections = [section for section in sections if section.results]
- # check whether results are published
- published = course.num_voters >= settings.MIN_ANSWER_COUNT and float(course.num_voters) / course.num_participants >= settings.MIN_ANSWER_PERCENTAGE
+ # show a warning if course is still in evaluation (for staff preview)
+ evaluation_warning = course.state != 'published'
+
+ # check whether course has a sufficient number of votes for publishing it
+ sufficient_votes = course.num_voters >= settings.MIN_ANSWER_COUNT and float(course.num_voters) / course.num_participants >= settings.MIN_ANSWER_PERCENTAGE
- # show a publishing warning to fsr members when the results are not publicly available
- warning = (not published) and request.user.is_staff
+ # results for a course might not be visible because there are not enough answers
+ # but it can still be "published" e.g. to show the comment results to lecturers
+ # the FSR can still see all results but gets a warning message
+ sufficient_votes_warning = (not sufficient_votes) and request.user.is_staff
return render_to_response(
"results_course_detail.html",
dict(
course=course,
sections=sections,
- warning=warning
+ evaluation_warning=evaluation_warning,
+ sufficient_votes_warning=sufficient_votes_warning
),
context_instance=RequestContext(request))
|
{"golden_diff": "diff --git a/evap/results/views.py b/evap/results/views.py\n--- a/evap/results/views.py\n+++ b/evap/results/views.py\n@@ -63,7 +63,13 @@\n @login_required\n def course_detail(request, semester_id, course_id):\n semester = get_object_or_404(Semester, id=semester_id)\n- course = get_object_or_404(semester.course_set.filter(state=\"published\"), id=course_id)\n+ \n+ # staff can see preview of results even when course is not published\n+ if request.user.is_staff:\n+ course = get_object_or_404(semester.course_set.filter(state__in=['inEvaluation', 'evaluated', 'reviewed', 'published']), id=course_id)\n+ # everyone else can only see published results\n+ else:\n+ course = get_object_or_404(semester.course_set.filter(state=\"published\"), id=course_id)\n \n sections = calculate_results(course, request.user.is_staff)\n \n@@ -78,18 +84,24 @@\n # remove empty sections\n sections = [section for section in sections if section.results]\n \n- # check whether results are published\n- published = course.num_voters >= settings.MIN_ANSWER_COUNT and float(course.num_voters) / course.num_participants >= settings.MIN_ANSWER_PERCENTAGE\n+ # show a warning if course is still in evaluation (for staff preview)\n+ evaluation_warning = course.state != 'published'\n+\n+ # check whether course has a sufficient number of votes for publishing it\n+ sufficient_votes = course.num_voters >= settings.MIN_ANSWER_COUNT and float(course.num_voters) / course.num_participants >= settings.MIN_ANSWER_PERCENTAGE\n \n- # show a publishing warning to fsr members when the results are not publicly available\n- warning = (not published) and request.user.is_staff\n+ # results for a course might not be visible because there are not enough answers\n+ # but it can still be \"published\" e.g. 
to show the comment results to lecturers\n+ # the FSR can still see all results but gets a warning message\n+ sufficient_votes_warning = (not sufficient_votes) and request.user.is_staff\n \n return render_to_response(\n \"results_course_detail.html\",\n dict(\n course=course,\n sections=sections,\n- warning=warning\n+ evaluation_warning=evaluation_warning,\n+ sufficient_votes_warning=sufficient_votes_warning\n ),\n context_instance=RequestContext(request))\n", "issue": "Grade preview\nThere must be an option for FSR members to see the grades for a course before publishing it.\nThis should be available in \"in evaluation\", \"evaluated\" and \"reviewed\".\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, render_to_response\nfrom django.template import RequestContext\nfrom django.utils.translation import get_language\n\nfrom evap.evaluation.auth import login_required, fsr_required\nfrom evap.evaluation.models import Semester\nfrom evap.evaluation.tools import calculate_results, calculate_average_and_medium_grades, TextResult, can_publish_grades\n\nfrom evap.results.exporters import ExcelExporter\n\n\n@login_required\ndef index(request):\n semesters = Semester.get_all_with_published_courses()\n\n return render_to_response(\n \"results_index.html\",\n dict(semesters=semesters),\n context_instance=RequestContext(request))\n\n\n@login_required\ndef semester_detail(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n courses = list(semester.course_set.filter(state=\"published\"))\n\n # annotate each course object with its grades\n for course in courses:\n # first, make sure that there are no preexisting grade attributes\n course.avg_grade, course.med_grade = calculate_average_and_medium_grades(course)\n course.can_publish_grades = can_publish_grades(course, request.user.is_staff)\n\n return render_to_response(\n \"results_semester_detail.html\",\n dict(\n semester=semester,\n courses=courses\n ),\n context_instance=RequestContext(request))\n\n\n@fsr_required\ndef semester_export(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n\n filename = \"Evaluation-%s-%s.xls\" % (semester.name, get_language())\n\n response = HttpResponse(mimetype=\"application/vnd.ms-excel\")\n response[\"Content-Disposition\"] = \"attachment; filename=\\\"%s\\\"\" % filename\n\n exporter = ExcelExporter(semester)\n\n if 'all' in request.GET:\n exporter.export(response, True)\n else:\n exporter.export(response)\n\n return response\n\n\n@login_required\ndef course_detail(request, semester_id, course_id):\n semester = get_object_or_404(Semester, id=semester_id)\n course = get_object_or_404(semester.course_set.filter(state=\"published\"), id=course_id)\n\n sections = calculate_results(course, request.user.is_staff)\n\n if (request.user.is_staff == False): # if user is not a student representative\n # remove TextResults if user is neither the evaluated person (or a delegate) nor responsible for the course (or a delegate)\n for section in sections:\n if not user_can_see_textresults(request.user, course, section):\n for index, result in list(enumerate(section.results))[::-1]:\n if isinstance(section.results[index], TextResult):\n del section.results[index]\n\n # remove empty sections\n sections = [section for section in sections if section.results]\n\n # check whether results are published\n published = course.num_voters >= settings.MIN_ANSWER_COUNT and 
float(course.num_voters) / course.num_participants >= settings.MIN_ANSWER_PERCENTAGE\n\n # show a publishing warning to fsr members when the results are not publicly available\n warning = (not published) and request.user.is_staff\n\n return render_to_response(\n \"results_course_detail.html\",\n dict(\n course=course,\n sections=sections,\n warning=warning\n ),\n context_instance=RequestContext(request))\n\n\ndef user_can_see_textresults(user, course, section):\n if section.contributor == user:\n return True\n if course.is_user_responsible_or_delegate(user):\n return True\n\n represented_userprofiles = user.represented_users.all()\n represented_users = [profile.user for profile in represented_userprofiles]\n if section.contributor in represented_users:\n return True\n\n return False\n", "path": "evap/results/views.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, render_to_response\nfrom django.template import RequestContext\nfrom django.utils.translation import get_language\n\nfrom evap.evaluation.auth import login_required, fsr_required\nfrom evap.evaluation.models import Semester\nfrom evap.evaluation.tools import calculate_results, calculate_average_and_medium_grades, TextResult, can_publish_grades\n\nfrom evap.results.exporters import ExcelExporter\n\n\n@login_required\ndef index(request):\n semesters = Semester.get_all_with_published_courses()\n\n return render_to_response(\n \"results_index.html\",\n dict(semesters=semesters),\n context_instance=RequestContext(request))\n\n\n@login_required\ndef semester_detail(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n courses = list(semester.course_set.filter(state=\"published\"))\n\n # annotate each course object with its grades\n for course in courses:\n # first, make sure that there are no preexisting grade attributes\n course.avg_grade, course.med_grade = calculate_average_and_medium_grades(course)\n course.can_publish_grades = can_publish_grades(course, request.user.is_staff)\n\n return render_to_response(\n \"results_semester_detail.html\",\n dict(\n semester=semester,\n courses=courses\n ),\n context_instance=RequestContext(request))\n\n\n@fsr_required\ndef semester_export(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n\n filename = \"Evaluation-%s-%s.xls\" % (semester.name, get_language())\n\n response = HttpResponse(mimetype=\"application/vnd.ms-excel\")\n response[\"Content-Disposition\"] = \"attachment; filename=\\\"%s\\\"\" % filename\n\n exporter = ExcelExporter(semester)\n\n if 'all' in request.GET:\n exporter.export(response, True)\n else:\n exporter.export(response)\n\n return response\n\n\n@login_required\ndef course_detail(request, semester_id, course_id):\n semester = get_object_or_404(Semester, id=semester_id)\n \n # staff can see preview of results even when course is not published\n if request.user.is_staff:\n course = get_object_or_404(semester.course_set.filter(state__in=['inEvaluation', 'evaluated', 'reviewed', 'published']), id=course_id)\n # everyone else can only see published results\n else:\n course = get_object_or_404(semester.course_set.filter(state=\"published\"), id=course_id)\n\n sections = calculate_results(course, request.user.is_staff)\n\n if (request.user.is_staff == False): # if user is not a student representative\n # remove TextResults if user is neither the evaluated person (or a delegate) nor responsible for the course (or a 
delegate)\n for section in sections:\n if not user_can_see_textresults(request.user, course, section):\n for index, result in list(enumerate(section.results))[::-1]:\n if isinstance(section.results[index], TextResult):\n del section.results[index]\n\n # remove empty sections\n sections = [section for section in sections if section.results]\n\n # show a warning if course is still in evaluation (for staff preview)\n evaluation_warning = course.state != 'published'\n\n # check whether course has a sufficient number of votes for publishing it\n sufficient_votes = course.num_voters >= settings.MIN_ANSWER_COUNT and float(course.num_voters) / course.num_participants >= settings.MIN_ANSWER_PERCENTAGE\n\n # results for a course might not be visible because there are not enough answers\n # but it can still be \"published\" e.g. to show the comment results to lecturers\n # the FSR can still see all results but gets a warning message\n sufficient_votes_warning = (not sufficient_votes) and request.user.is_staff\n\n return render_to_response(\n \"results_course_detail.html\",\n dict(\n course=course,\n sections=sections,\n evaluation_warning=evaluation_warning,\n sufficient_votes_warning=sufficient_votes_warning\n ),\n context_instance=RequestContext(request))\n\n\ndef user_can_see_textresults(user, course, section):\n if section.contributor == user:\n return True\n if course.is_user_responsible_or_delegate(user):\n return True\n\n represented_userprofiles = user.represented_users.all()\n represented_users = [profile.user for profile in represented_userprofiles]\n if section.contributor in represented_users:\n return True\n\n return False\n", "path": "evap/results/views.py"}]}
| 1,343 | 552 |
gh_patches_debug_58558
|
rasdani/github-patches
|
git_diff
|
roboflow__supervision-219
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Development - version issue
### Search before asking
- [X] I have searched the Supervision [issues](https://github.com/roboflow/supervision/issues) and found no similar bug report.
### Bug
I have not installed `supervision`; I am running it directly from source for development purposes.

But I got the following error when I import the supervision code:
```
raise PackageNotFoundError(name)
importlib.metadata.PackageNotFoundError: supervision
```
After a quick investigation, I found that the problem is the line in `__init__.py` of `supervision` where the version information is read. If I comment out this line, the bug is gone.
@onuralpszr Can you take a look?
I think this should be ignorable: if a valid version is not found, then use a `development` version. I do not have a concrete idea of how to tackle it, though.
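One possible shape for such a fallback, as a minimal sketch (using only the standard-library `importlib.metadata`):

```python
import importlib.metadata as importlib_metadata

try:
    __version__ = importlib_metadata.version(__package__ or __name__)
except importlib_metadata.PackageNotFoundError:
    # No installed distribution metadata, e.g. when running from a source checkout.
    __version__ = "development"
```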
### Environment
_No response_
### Minimal Reproducible Example
_No response_
### Additional
_No response_
### Are you willing to submit a PR?
- [X] Yes I'd like to help by submitting a PR!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `supervision/__init__.py`
Content:
```
1 import importlib.metadata as importlib_metadata
2
3 __version__ = importlib_metadata.version(__package__)
4
5
6 from supervision.classification.core import Classifications
7 from supervision.dataset.core import (
8 BaseDataset,
9 ClassificationDataset,
10 DetectionDataset,
11 )
12 from supervision.detection.annotate import BoxAnnotator, MaskAnnotator
13 from supervision.detection.core import Detections
14 from supervision.detection.line_counter import LineZone, LineZoneAnnotator
15 from supervision.detection.tools.polygon_zone import PolygonZone, PolygonZoneAnnotator
16 from supervision.detection.utils import (
17 box_iou_batch,
18 filter_polygons_by_area,
19 mask_to_polygons,
20 mask_to_xyxy,
21 non_max_suppression,
22 polygon_to_mask,
23 polygon_to_xyxy,
24 )
25 from supervision.draw.color import Color, ColorPalette
26 from supervision.draw.utils import draw_filled_rectangle, draw_polygon, draw_text
27 from supervision.geometry.core import Point, Position, Rect
28 from supervision.geometry.utils import get_polygon_center
29 from supervision.metrics.detection import ConfusionMatrix
30 from supervision.utils.file import list_files_with_extensions
31 from supervision.utils.image import ImageSink, crop
32 from supervision.utils.notebook import plot_image, plot_images_grid
33 from supervision.utils.video import (
34 VideoInfo,
35 VideoSink,
36 get_video_frames_generator,
37 process_video,
38 )
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/supervision/__init__.py b/supervision/__init__.py
--- a/supervision/__init__.py
+++ b/supervision/__init__.py
@@ -1,6 +1,10 @@
import importlib.metadata as importlib_metadata
-__version__ = importlib_metadata.version(__package__)
+try:
+ # This will read version from pyproject.toml
+ __version__ = importlib_metadata.version(__package__ or __name__)
+except importlib_metadata.PackageNotFoundError:
+ __version__ = "development"
from supervision.classification.core import Classifications
|
{"golden_diff": "diff --git a/supervision/__init__.py b/supervision/__init__.py\n--- a/supervision/__init__.py\n+++ b/supervision/__init__.py\n@@ -1,6 +1,10 @@\n import importlib.metadata as importlib_metadata\n \n-__version__ = importlib_metadata.version(__package__)\n+try:\n+ # This will read version from pyproject.toml\n+ __version__ = importlib_metadata.version(__package__ or __name__)\n+except importlib_metadata.PackageNotFoundError:\n+ __version__ = \"development\"\n \n \n from supervision.classification.core import Classifications\n", "issue": "Developement - version issue\n### Search before asking\n\n- [X] I have searched the Supervision [issues](https://github.com/roboflow/supervision/issues) and found no similar bug report.\n\n\n### Bug\n\nI have not installed `supervision` but running directly for developement purpose.\r\n\r\nBut I got an following error when I import supervision code:\r\n\r\n```\r\n raise PackageNotFoundError(name)\r\nimportlib.metadata.PackageNotFoundError: supervision\r\n```\r\n\r\nAfter a quick investigation, it is found that `__init__.py` of `supervision` where version information used. It is creating an issue. If I comment this line and the bug is gone.\r\n\r\n@onuralpszr Can you take a look?\r\n\r\n\r\nI think it should be ignorable, if valid version is not found then use `development` version. Though, I do not have concret idea, how to tackle it.\r\n\r\n\r\n\r\n\n\n### Environment\n\n_No response_\n\n### Minimal Reproducible Example\n\n_No response_\n\n### Additional\n\n_No response_\n\n### Are you willing to submit a PR?\n\n- [X] Yes I'd like to help by submitting a PR!\n", "before_files": [{"content": "import importlib.metadata as importlib_metadata\n\n__version__ = importlib_metadata.version(__package__)\n\n\nfrom supervision.classification.core import Classifications\nfrom supervision.dataset.core import (\n BaseDataset,\n ClassificationDataset,\n DetectionDataset,\n)\nfrom supervision.detection.annotate import BoxAnnotator, MaskAnnotator\nfrom supervision.detection.core import Detections\nfrom supervision.detection.line_counter import LineZone, LineZoneAnnotator\nfrom supervision.detection.tools.polygon_zone import PolygonZone, PolygonZoneAnnotator\nfrom supervision.detection.utils import (\n box_iou_batch,\n filter_polygons_by_area,\n mask_to_polygons,\n mask_to_xyxy,\n non_max_suppression,\n polygon_to_mask,\n polygon_to_xyxy,\n)\nfrom supervision.draw.color import Color, ColorPalette\nfrom supervision.draw.utils import draw_filled_rectangle, draw_polygon, draw_text\nfrom supervision.geometry.core import Point, Position, Rect\nfrom supervision.geometry.utils import get_polygon_center\nfrom supervision.metrics.detection import ConfusionMatrix\nfrom supervision.utils.file import list_files_with_extensions\nfrom supervision.utils.image import ImageSink, crop\nfrom supervision.utils.notebook import plot_image, plot_images_grid\nfrom supervision.utils.video import (\n VideoInfo,\n VideoSink,\n get_video_frames_generator,\n process_video,\n)\n", "path": "supervision/__init__.py"}], "after_files": [{"content": "import importlib.metadata as importlib_metadata\n\ntry:\n # This will read version from pyproject.toml\n __version__ = importlib_metadata.version(__package__ or __name__)\nexcept importlib_metadata.PackageNotFoundError:\n __version__ = \"development\"\n\n\nfrom supervision.classification.core import Classifications\nfrom supervision.dataset.core import (\n BaseDataset,\n ClassificationDataset,\n DetectionDataset,\n)\nfrom 
supervision.detection.annotate import BoxAnnotator, MaskAnnotator\nfrom supervision.detection.core import Detections\nfrom supervision.detection.line_counter import LineZone, LineZoneAnnotator\nfrom supervision.detection.tools.polygon_zone import PolygonZone, PolygonZoneAnnotator\nfrom supervision.detection.utils import (\n box_iou_batch,\n filter_polygons_by_area,\n mask_to_polygons,\n mask_to_xyxy,\n non_max_suppression,\n polygon_to_mask,\n polygon_to_xyxy,\n)\nfrom supervision.draw.color import Color, ColorPalette\nfrom supervision.draw.utils import draw_filled_rectangle, draw_polygon, draw_text\nfrom supervision.geometry.core import Point, Position, Rect\nfrom supervision.geometry.utils import get_polygon_center\nfrom supervision.metrics.detection import ConfusionMatrix\nfrom supervision.utils.file import list_files_with_extensions\nfrom supervision.utils.image import ImageSink, crop\nfrom supervision.utils.notebook import plot_image, plot_images_grid\nfrom supervision.utils.video import (\n VideoInfo,\n VideoSink,\n get_video_frames_generator,\n process_video,\n)\n", "path": "supervision/__init__.py"}]}
| 839 | 136 |
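Editor's note (not part of the dataset record above): the golden diff in that record wraps the package-version lookup in a `try/except` so an uninstalled source checkout falls back to a `"development"` version instead of raising `PackageNotFoundError`. The snippet below is a minimal, hedged sketch of that fallback pattern in isolation; the distribution name is a placeholder chosen only to force the not-found branch, and this is not the package's actual code.

```python
import importlib.metadata as importlib_metadata


def resolve_version(dist_name: str) -> str:
    """Return the installed distribution's version, or a dev placeholder."""
    try:
        # Succeeds only when distribution metadata is installed
        # (e.g. via `pip install .` or an installed wheel).
        return importlib_metadata.version(dist_name)
    except importlib_metadata.PackageNotFoundError:
        # Running straight from a source tree: no metadata exists, so fall back.
        return "development"


if __name__ == "__main__":
    print(resolve_version("definitely-not-an-installed-distribution"))  # -> development
```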
gh_patches_debug_5710
|
rasdani/github-patches
|
git_diff
|
getredash__redash-2062
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
More flexible widgets grid (allow finer control over height/width)
## User should be able to control widget's placement, width and height
### Current implementation
Currently editing a dashboard works as follows:
1. When adding a widget you select its width: regular (half width) or double (full width). Height is set based on the content.
2. When opening the "Edit Dashboard" dialog, you can rearrange the order of the widgets.

Issues with the current approach:
* You can't change widget size after placing it.
* You can't change widget's height.
* You can't control widget's width beyond the current two options.
### Solution
To solve this we want to have a flexible (_but still responsive_) grid that the user can freely place and move widgets on.
* Each visualization will define minimum width and height sizes (derived from the visualization type). These will be the default sizes for the widget when placed on the grid, but the user will be able to resize it to be larger (but not smaller) afterwards.
* Once entering dashboard "edit mode", the user will be able to move around and resize the widgets in place instead of through a dedicated UI.
* We should probably change other behaviors in the UI (move "add widget" to be part of the edit mode, change dashboard title editing to be in place, etc.), but we will leave that to a second iteration after the grid behavior is implemented.
### Technical Notes
* We currently use `angular-gridster` to allow moving around the widgets in the Edit Dashboard dialog. Maybe we can use it for the grid or a similar library.
* Currently the widget knows nothing about its placement, only its size (width). The dashboard has a layout property, which is an array of arrays of widget ids (each item in the array is a row/array of widgets). This is error-prone and makes layout changes harder. If possible, it would be better if each widget retained its own placement information.
* The API to update a widget currently supports only updating a textbox widget. We will need to extend it to support all widget types.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/handlers/widgets.py`
Content:
```
1 import json
2
3 from flask import request
4 from redash import models
5 from redash.handlers.base import BaseResource
6 from redash.permissions import (require_access,
7 require_object_modify_permission,
8 require_permission, view_only)
9
10
11 class WidgetListResource(BaseResource):
12 @require_permission('edit_dashboard')
13 def post(self):
14 """
15 Add a widget to a dashboard.
16
17 :<json number dashboard_id: The ID for the dashboard being added to
18 :<json visualization_id: The ID of the visualization to put in this widget
19 :<json object options: Widget options
20 :<json string text: Text box contents
21 :<json number width: Width for widget display
22
23 :>json object widget: The created widget
24 :>json array layout: The new layout of the dashboard this widget was added to
25 :>json boolean new_row: Whether this widget was added on a new row or not
26 :>json number version: The revision number of the dashboard
27 """
28 widget_properties = request.get_json(force=True)
29 dashboard = models.Dashboard.get_by_id_and_org(widget_properties.pop('dashboard_id'), self.current_org)
30 require_object_modify_permission(dashboard, self.current_user)
31
32 widget_properties['options'] = json.dumps(widget_properties['options'])
33 widget_properties.pop('id', None)
34 widget_properties['dashboard'] = dashboard
35
36 visualization_id = widget_properties.pop('visualization_id')
37 if visualization_id:
38 visualization = models.Visualization.get_by_id_and_org(visualization_id, self.current_org)
39 require_access(visualization.query_rel.groups, self.current_user, view_only)
40 else:
41 visualization = None
42
43 widget_properties['visualization'] = visualization
44
45 widget = models.Widget(**widget_properties)
46 models.db.session.add(widget)
47 models.db.session.commit()
48
49 layout = json.loads(widget.dashboard.layout)
50 new_row = True
51
52 if len(layout) == 0 or widget.width == 2:
53 layout.append([widget.id])
54 elif len(layout[-1]) == 1:
55 neighbour_widget = models.Widget.query.get(layout[-1][0])
56 if neighbour_widget.width == 1:
57 layout[-1].append(widget.id)
58 new_row = False
59 else:
60 layout.append([widget.id])
61 else:
62 layout.append([widget.id])
63
64 widget.dashboard.layout = json.dumps(layout)
65 models.db.session.add(widget.dashboard)
66 models.db.session.commit()
67 return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row, 'version': dashboard.version}
68
69
70 class WidgetResource(BaseResource):
71 @require_permission('edit_dashboard')
72 def post(self, widget_id):
73 """
74 Updates a widget in a dashboard.
75 This method currently handles Text Box widgets only.
76
77 :param number widget_id: The ID of the widget to modify
78
79 :<json string text: The new contents of the text box
80 """
81 widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)
82 require_object_modify_permission(widget.dashboard, self.current_user)
83 widget_properties = request.get_json(force=True)
84 widget.text = widget_properties['text']
85 models.db.session.commit()
86 return widget.to_dict()
87
88 @require_permission('edit_dashboard')
89 def delete(self, widget_id):
90 """
91 Remove a widget from a dashboard.
92
93 :param number widget_id: ID of widget to remove
94
95 :>json array layout: New layout of dashboard this widget was removed from
96 :>json number version: Revision number of dashboard
97 """
98 widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)
99 require_object_modify_permission(widget.dashboard, self.current_user)
100 widget.delete()
101 models.db.session.commit()
102 return {'layout': widget.dashboard.layout, 'version': widget.dashboard.version}
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py
--- a/redash/handlers/widgets.py
+++ b/redash/handlers/widgets.py
@@ -82,6 +82,7 @@
require_object_modify_permission(widget.dashboard, self.current_user)
widget_properties = request.get_json(force=True)
widget.text = widget_properties['text']
+ widget.options = json.dumps(widget_properties['options'])
models.db.session.commit()
return widget.to_dict()
|
{"golden_diff": "diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py\n--- a/redash/handlers/widgets.py\n+++ b/redash/handlers/widgets.py\n@@ -82,6 +82,7 @@\n require_object_modify_permission(widget.dashboard, self.current_user)\n widget_properties = request.get_json(force=True)\n widget.text = widget_properties['text']\n+ widget.options = json.dumps(widget_properties['options'])\n models.db.session.commit()\n return widget.to_dict()\n", "issue": "More flexible widgets grid (allow finer control over height/width)\n## User should be able to control widget's placement, width and height\r\n\r\n### Current implementation\r\n\r\nCurrently editing a dashboard works as follow:\r\n\r\n1. When adding a widget you select its width: regular (half width) or double (full width). Height is set based on the content.\r\n2. When opening the \"Edit Dashboard\" dialog, you can rearrange the order of the widgets.\r\n\r\n\r\n\r\nIssues with the current approach:\r\n\r\n* You can't change widget size after placing it.\r\n* You can't change widget's height.\r\n* You can't control widget's width beyond the current two options.\r\n\r\n### Solution\r\n\r\nTo solve this we want to have a flexible (_but still responsive_) grid that the user can freely place and move widgets on. \r\n\r\n* Each visualization will define minimum width and height sizes (derived from the visualization type). This will be the default sizes for the widget when placed on the grid, but the user will be able to resize it to be larger (but not smaller) afterwards.\r\n* Once entering dashboard \"edit mode\", the user will be able to move around and resize the widgets in place instead of a dedicated UI.\r\n* We should probably change other behaviors in the UI (move add widget to be part of the edit mode, change dashboard title editing to be in place, etc), but we will leave it to second iteration after the grid behavior is implemented.\r\n\r\n### Technical Notes\r\n\r\n* We currently use `angular-gridster` to allow moving around the widgets in the Edit Dashboard dialog. Maybe we can use it for the grid or a similar library.\r\n* Currently the widget knows nothing about its placement, but only its size (width). The dashboard has a layout property, which is an array of arrays of widgets ids (each item in the array is a row/array of widgets). This is error prone and makes layout changes harder. If possible, it will be better if each widget retains its own placement information.\r\n* The API to update a widget currently supports only updating a textbox widget. 
We will need to extend it to support all widget types.\n", "before_files": [{"content": "import json\n\nfrom flask import request\nfrom redash import models\nfrom redash.handlers.base import BaseResource\nfrom redash.permissions import (require_access,\n require_object_modify_permission,\n require_permission, view_only)\n\n\nclass WidgetListResource(BaseResource):\n @require_permission('edit_dashboard')\n def post(self):\n \"\"\"\n Add a widget to a dashboard.\n\n :<json number dashboard_id: The ID for the dashboard being added to\n :<json visualization_id: The ID of the visualization to put in this widget\n :<json object options: Widget options\n :<json string text: Text box contents\n :<json number width: Width for widget display\n\n :>json object widget: The created widget\n :>json array layout: The new layout of the dashboard this widget was added to\n :>json boolean new_row: Whether this widget was added on a new row or not\n :>json number version: The revision number of the dashboard\n \"\"\"\n widget_properties = request.get_json(force=True)\n dashboard = models.Dashboard.get_by_id_and_org(widget_properties.pop('dashboard_id'), self.current_org)\n require_object_modify_permission(dashboard, self.current_user)\n\n widget_properties['options'] = json.dumps(widget_properties['options'])\n widget_properties.pop('id', None)\n widget_properties['dashboard'] = dashboard\n\n visualization_id = widget_properties.pop('visualization_id')\n if visualization_id:\n visualization = models.Visualization.get_by_id_and_org(visualization_id, self.current_org)\n require_access(visualization.query_rel.groups, self.current_user, view_only)\n else:\n visualization = None\n\n widget_properties['visualization'] = visualization\n\n widget = models.Widget(**widget_properties)\n models.db.session.add(widget)\n models.db.session.commit()\n\n layout = json.loads(widget.dashboard.layout)\n new_row = True\n\n if len(layout) == 0 or widget.width == 2:\n layout.append([widget.id])\n elif len(layout[-1]) == 1:\n neighbour_widget = models.Widget.query.get(layout[-1][0])\n if neighbour_widget.width == 1:\n layout[-1].append(widget.id)\n new_row = False\n else:\n layout.append([widget.id])\n else:\n layout.append([widget.id])\n\n widget.dashboard.layout = json.dumps(layout)\n models.db.session.add(widget.dashboard)\n models.db.session.commit()\n return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row, 'version': dashboard.version}\n\n\nclass WidgetResource(BaseResource):\n @require_permission('edit_dashboard')\n def post(self, widget_id):\n \"\"\"\n Updates a widget in a dashboard.\n This method currently handles Text Box widgets only.\n\n :param number widget_id: The ID of the widget to modify\n\n :<json string text: The new contents of the text box\n \"\"\"\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n require_object_modify_permission(widget.dashboard, self.current_user)\n widget_properties = request.get_json(force=True)\n widget.text = widget_properties['text']\n models.db.session.commit()\n return widget.to_dict()\n\n @require_permission('edit_dashboard')\n def delete(self, widget_id):\n \"\"\"\n Remove a widget from a dashboard.\n\n :param number widget_id: ID of widget to remove\n\n :>json array layout: New layout of dashboard this widget was removed from\n :>json number version: Revision number of dashboard\n \"\"\"\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n require_object_modify_permission(widget.dashboard, self.current_user)\n 
widget.delete()\n models.db.session.commit()\n return {'layout': widget.dashboard.layout, 'version': widget.dashboard.version}\n", "path": "redash/handlers/widgets.py"}], "after_files": [{"content": "import json\n\nfrom flask import request\nfrom redash import models\nfrom redash.handlers.base import BaseResource\nfrom redash.permissions import (require_access,\n require_object_modify_permission,\n require_permission, view_only)\n\n\nclass WidgetListResource(BaseResource):\n @require_permission('edit_dashboard')\n def post(self):\n \"\"\"\n Add a widget to a dashboard.\n\n :<json number dashboard_id: The ID for the dashboard being added to\n :<json visualization_id: The ID of the visualization to put in this widget\n :<json object options: Widget options\n :<json string text: Text box contents\n :<json number width: Width for widget display\n\n :>json object widget: The created widget\n :>json array layout: The new layout of the dashboard this widget was added to\n :>json boolean new_row: Whether this widget was added on a new row or not\n :>json number version: The revision number of the dashboard\n \"\"\"\n widget_properties = request.get_json(force=True)\n dashboard = models.Dashboard.get_by_id_and_org(widget_properties.pop('dashboard_id'), self.current_org)\n require_object_modify_permission(dashboard, self.current_user)\n\n widget_properties['options'] = json.dumps(widget_properties['options'])\n widget_properties.pop('id', None)\n widget_properties['dashboard'] = dashboard\n\n visualization_id = widget_properties.pop('visualization_id')\n if visualization_id:\n visualization = models.Visualization.get_by_id_and_org(visualization_id, self.current_org)\n require_access(visualization.query_rel.groups, self.current_user, view_only)\n else:\n visualization = None\n\n widget_properties['visualization'] = visualization\n\n widget = models.Widget(**widget_properties)\n models.db.session.add(widget)\n models.db.session.commit()\n\n layout = json.loads(widget.dashboard.layout)\n new_row = True\n\n if len(layout) == 0 or widget.width == 2:\n layout.append([widget.id])\n elif len(layout[-1]) == 1:\n neighbour_widget = models.Widget.query.get(layout[-1][0])\n if neighbour_widget.width == 1:\n layout[-1].append(widget.id)\n new_row = False\n else:\n layout.append([widget.id])\n else:\n layout.append([widget.id])\n\n widget.dashboard.layout = json.dumps(layout)\n models.db.session.add(widget.dashboard)\n models.db.session.commit()\n return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row, 'version': dashboard.version}\n\n\nclass WidgetResource(BaseResource):\n @require_permission('edit_dashboard')\n def post(self, widget_id):\n \"\"\"\n Updates a widget in a dashboard.\n This method currently handles Text Box widgets only.\n\n :param number widget_id: The ID of the widget to modify\n\n :<json string text: The new contents of the text box\n \"\"\"\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n require_object_modify_permission(widget.dashboard, self.current_user)\n widget_properties = request.get_json(force=True)\n widget.text = widget_properties['text']\n widget.options = json.dumps(widget_properties['options'])\n models.db.session.commit()\n return widget.to_dict()\n\n @require_permission('edit_dashboard')\n def delete(self, widget_id):\n \"\"\"\n Remove a widget from a dashboard.\n\n :param number widget_id: ID of widget to remove\n\n :>json array layout: New layout of dashboard this widget was removed from\n :>json number version: Revision number of 
dashboard\n \"\"\"\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n require_object_modify_permission(widget.dashboard, self.current_user)\n widget.delete()\n models.db.session.commit()\n return {'layout': widget.dashboard.layout, 'version': widget.dashboard.version}\n", "path": "redash/handlers/widgets.py"}]}
| 1,758 | 107 |
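Editor's note (not part of the dataset record above): the one-line fix in that record persists the widget's `options` blob (which carries grid placement and size) alongside its text when a widget is updated. The sketch below models that update path outside Flask/SQLAlchemy; the `FakeWidget` class and the payload keys are illustrative assumptions, not Redash's actual schema.

```python
import json


class FakeWidget:
    """Stand-in for models.Widget: only the two persisted fields."""
    text = ""
    options = "{}"


def update_widget(widget, payload):
    widget.text = payload["text"]
    # Without this line (the pre-patch behaviour) any resize/move the user
    # made in the dashboard grid was dropped on save.
    widget.options = json.dumps(payload["options"])
    return widget


w = update_widget(
    FakeWidget(),
    {"text": "Revenue", "options": {"position": {"col": 0, "row": 2, "sizeX": 3, "sizeY": 2}}},
)
print(w.options)  # JSON string carrying the hypothetical grid position
```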
gh_patches_debug_20122
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-771
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[cliquet] Split tests from kinto package
original : https://github.com/mozilla-services/cliquet/issues/267
Should we have tests within the kinto package or outside? That was discussed, but I don't think a decision was made.
Another related question is not whether the tests live in kinto/ or outside, but rather: do we ship tests when we release kinto? Some people strip them when they build the release.
[cliquet] Split tests from kinto package
original : https://github.com/mozilla-services/cliquet/issues/267
Should we have tests within the kinto package or outside? That was discussed, but I don't think a decision was made.
Another related question is not whether the tests live in kinto/ or outside, but rather: do we ship tests when we release kinto? Some people strip them when they build the release.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import platform
2 import codecs
3 import os
4 from setuptools import setup, find_packages
5
6 here = os.path.abspath(os.path.dirname(__file__))
7
8
9 def read_file(filename):
10 """Open a related file and return its content."""
11 with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:
12 content = f.read()
13 return content
14
15 README = read_file('README.rst')
16 CHANGELOG = read_file('CHANGELOG.rst')
17 CONTRIBUTORS = read_file('CONTRIBUTORS.rst')
18
19 installed_with_pypy = platform.python_implementation() == 'PyPy'
20
21 REQUIREMENTS = [
22 'colander',
23 'colorama',
24 'cornice >= 1.1', # Fix cache CORS
25 'jsonschema',
26 'python-dateutil',
27 'pyramid_multiauth >= 0.8', # User on policy selected event.
28 'pyramid_tm',
29 'requests',
30 'six',
31 'structlog >= 16.1.0',
32 'enum34',
33 'waitress',
34 ]
35
36 if installed_with_pypy:
37 # We install psycopg2cffi instead of psycopg2 when dealing with pypy
38 # Note: JSONB support landed after psycopg2cffi 2.7.0
39 POSTGRESQL_REQUIRES = [
40 'SQLAlchemy',
41 'psycopg2cffi>2.7.0',
42 'zope.sqlalchemy',
43 ]
44 else:
45 # ujson is not pypy compliant, as it uses the CPython C API
46 REQUIREMENTS.append('ujson >= 1.35')
47 POSTGRESQL_REQUIRES = [
48 'SQLAlchemy',
49 'psycopg2>2.5',
50 'zope.sqlalchemy',
51 ]
52
53 REDIS_REQUIRES = [
54 'kinto_redis'
55 ]
56
57 DEPENDENCY_LINKS = [
58 ]
59
60 MONITORING_REQUIRES = [
61 'raven',
62 'statsd',
63 'newrelic',
64 'werkzeug',
65 ]
66
67 ENTRY_POINTS = {
68 'paste.app_factory': [
69 'main = kinto:main',
70 ],
71 'console_scripts': [
72 'kinto = kinto.__main__:main'
73 ],
74 }
75
76
77 setup(name='kinto',
78 version='4.1.0.dev0',
79 description='Kinto Web Service - Store, Sync, Share, and Self-Host.',
80 long_description=README + "\n\n" + CHANGELOG + "\n\n" + CONTRIBUTORS,
81 license='Apache License (2.0)',
82 classifiers=[
83 "Programming Language :: Python",
84 "Programming Language :: Python :: 2",
85 "Programming Language :: Python :: 2.7",
86 "Programming Language :: Python :: 3",
87 "Programming Language :: Python :: 3.4",
88 "Programming Language :: Python :: 3.5",
89 "Programming Language :: Python :: Implementation :: CPython",
90 "Programming Language :: Python :: Implementation :: PyPy",
91 "Topic :: Internet :: WWW/HTTP",
92 "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
93 "License :: OSI Approved :: Apache Software License"
94 ],
95 keywords="web sync json storage services",
96 author='Mozilla Services',
97 author_email='[email protected]',
98 url='https://github.com/Kinto/kinto',
99 packages=find_packages(),
100 package_data={'': ['*.rst', '*.py']},
101 include_package_data=True,
102 zip_safe=False,
103 install_requires=REQUIREMENTS,
104 extras_require={
105 'redis': REDIS_REQUIRES,
106 'postgresql': POSTGRESQL_REQUIRES,
107 'monitoring': MONITORING_REQUIRES,
108 ":python_version=='2.7'": ["functools32", "futures"],
109 },
110 test_suite="kinto.tests",
111 dependency_links=DEPENDENCY_LINKS,
112 entry_points=ENTRY_POINTS)
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -54,6 +54,15 @@
'kinto_redis'
]
+SETUP_REQUIRES = [
+ 'pytest-runner'
+]
+
+TEST_REQUIREMENTS = [
+ 'pytest',
+ 'WebTest'
+]
+
DEPENDENCY_LINKS = [
]
@@ -100,6 +109,8 @@
package_data={'': ['*.rst', '*.py']},
include_package_data=True,
zip_safe=False,
+ setup_requires=SETUP_REQUIRES,
+ tests_require=TEST_REQUIREMENTS,
install_requires=REQUIREMENTS,
extras_require={
'redis': REDIS_REQUIRES,
@@ -107,6 +118,6 @@
'monitoring': MONITORING_REQUIRES,
":python_version=='2.7'": ["functools32", "futures"],
},
- test_suite="kinto.tests",
+ test_suite="tests",
dependency_links=DEPENDENCY_LINKS,
entry_points=ENTRY_POINTS)
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -54,6 +54,15 @@\n 'kinto_redis'\n ]\n \n+SETUP_REQUIRES = [\n+ 'pytest-runner'\n+]\n+\n+TEST_REQUIREMENTS = [\n+ 'pytest',\n+ 'WebTest'\n+]\n+\n DEPENDENCY_LINKS = [\n ]\n \n@@ -100,6 +109,8 @@\n package_data={'': ['*.rst', '*.py']},\n include_package_data=True,\n zip_safe=False,\n+ setup_requires=SETUP_REQUIRES,\n+ tests_require=TEST_REQUIREMENTS,\n install_requires=REQUIREMENTS,\n extras_require={\n 'redis': REDIS_REQUIRES,\n@@ -107,6 +118,6 @@\n 'monitoring': MONITORING_REQUIRES,\n \":python_version=='2.7'\": [\"functools32\", \"futures\"],\n },\n- test_suite=\"kinto.tests\",\n+ test_suite=\"tests\",\n dependency_links=DEPENDENCY_LINKS,\n entry_points=ENTRY_POINTS)\n", "issue": "[cliquet] Split tests from kinto package\noriginal : https://github.com/mozilla-services/cliquet/issues/267\n\nShould we have tests within the kinto package or outside ? that was discussed but I don't think a decision was made.\n\nAnother related question is not wether tests is in kinto/ or outside, but rather : do we ship tests when we release kinto ? some people strip them when they build the release.\n\n[cliquet] Split tests from kinto package\noriginal : https://github.com/mozilla-services/cliquet/issues/267\n\nShould we have tests within the kinto package or outside ? that was discussed but I don't think a decision was made.\n\nAnother related question is not wether tests is in kinto/ or outside, but rather : do we ship tests when we release kinto ? some people strip them when they build the release.\n\n", "before_files": [{"content": "import platform\nimport codecs\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content\n\nREADME = read_file('README.rst')\nCHANGELOG = read_file('CHANGELOG.rst')\nCONTRIBUTORS = read_file('CONTRIBUTORS.rst')\n\ninstalled_with_pypy = platform.python_implementation() == 'PyPy'\n\nREQUIREMENTS = [\n 'colander',\n 'colorama',\n 'cornice >= 1.1', # Fix cache CORS\n 'jsonschema',\n 'python-dateutil',\n 'pyramid_multiauth >= 0.8', # User on policy selected event.\n 'pyramid_tm',\n 'requests',\n 'six',\n 'structlog >= 16.1.0',\n 'enum34',\n 'waitress',\n]\n\nif installed_with_pypy:\n # We install psycopg2cffi instead of psycopg2 when dealing with pypy\n # Note: JSONB support landed after psycopg2cffi 2.7.0\n POSTGRESQL_REQUIRES = [\n 'SQLAlchemy',\n 'psycopg2cffi>2.7.0',\n 'zope.sqlalchemy',\n ]\nelse:\n # ujson is not pypy compliant, as it uses the CPython C API\n REQUIREMENTS.append('ujson >= 1.35')\n POSTGRESQL_REQUIRES = [\n 'SQLAlchemy',\n 'psycopg2>2.5',\n 'zope.sqlalchemy',\n ]\n\nREDIS_REQUIRES = [\n 'kinto_redis'\n]\n\nDEPENDENCY_LINKS = [\n]\n\nMONITORING_REQUIRES = [\n 'raven',\n 'statsd',\n 'newrelic',\n 'werkzeug',\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ],\n 'console_scripts': [\n 'kinto = kinto.__main__:main'\n ],\n}\n\n\nsetup(name='kinto',\n version='4.1.0.dev0',\n description='Kinto Web Service - Store, Sync, Share, and Self-Host.',\n long_description=README + \"\\n\\n\" + CHANGELOG + \"\\n\\n\" + CONTRIBUTORS,\n license='Apache License (2.0)',\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming 
Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"License :: OSI Approved :: Apache Software License\"\n ],\n keywords=\"web sync json storage services\",\n author='Mozilla Services',\n author_email='[email protected]',\n url='https://github.com/Kinto/kinto',\n packages=find_packages(),\n package_data={'': ['*.rst', '*.py']},\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIREMENTS,\n extras_require={\n 'redis': REDIS_REQUIRES,\n 'postgresql': POSTGRESQL_REQUIRES,\n 'monitoring': MONITORING_REQUIRES,\n \":python_version=='2.7'\": [\"functools32\", \"futures\"],\n },\n test_suite=\"kinto.tests\",\n dependency_links=DEPENDENCY_LINKS,\n entry_points=ENTRY_POINTS)\n", "path": "setup.py"}], "after_files": [{"content": "import platform\nimport codecs\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content\n\nREADME = read_file('README.rst')\nCHANGELOG = read_file('CHANGELOG.rst')\nCONTRIBUTORS = read_file('CONTRIBUTORS.rst')\n\ninstalled_with_pypy = platform.python_implementation() == 'PyPy'\n\nREQUIREMENTS = [\n 'colander',\n 'colorama',\n 'cornice >= 1.1', # Fix cache CORS\n 'jsonschema',\n 'python-dateutil',\n 'pyramid_multiauth >= 0.8', # User on policy selected event.\n 'pyramid_tm',\n 'requests',\n 'six',\n 'structlog >= 16.1.0',\n 'enum34',\n 'waitress',\n]\n\nif installed_with_pypy:\n # We install psycopg2cffi instead of psycopg2 when dealing with pypy\n # Note: JSONB support landed after psycopg2cffi 2.7.0\n POSTGRESQL_REQUIRES = [\n 'SQLAlchemy',\n 'psycopg2cffi>2.7.0',\n 'zope.sqlalchemy',\n ]\nelse:\n # ujson is not pypy compliant, as it uses the CPython C API\n REQUIREMENTS.append('ujson >= 1.35')\n POSTGRESQL_REQUIRES = [\n 'SQLAlchemy',\n 'psycopg2>2.5',\n 'zope.sqlalchemy',\n ]\n\nREDIS_REQUIRES = [\n 'kinto_redis'\n]\n\nSETUP_REQUIRES = [\n 'pytest-runner'\n]\n\nTEST_REQUIREMENTS = [\n 'pytest',\n 'WebTest'\n]\n\nDEPENDENCY_LINKS = [\n]\n\nMONITORING_REQUIRES = [\n 'raven',\n 'statsd',\n 'newrelic',\n 'werkzeug',\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ],\n 'console_scripts': [\n 'kinto = kinto.__main__:main'\n ],\n}\n\n\nsetup(name='kinto',\n version='4.1.0.dev0',\n description='Kinto Web Service - Store, Sync, Share, and Self-Host.',\n long_description=README + \"\\n\\n\" + CHANGELOG + \"\\n\\n\" + CONTRIBUTORS,\n license='Apache License (2.0)',\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"License :: OSI Approved :: Apache Software License\"\n ],\n keywords=\"web sync json storage services\",\n author='Mozilla Services',\n 
author_email='[email protected]',\n url='https://github.com/Kinto/kinto',\n packages=find_packages(),\n package_data={'': ['*.rst', '*.py']},\n include_package_data=True,\n zip_safe=False,\n setup_requires=SETUP_REQUIRES,\n tests_require=TEST_REQUIREMENTS,\n install_requires=REQUIREMENTS,\n extras_require={\n 'redis': REDIS_REQUIRES,\n 'postgresql': POSTGRESQL_REQUIRES,\n 'monitoring': MONITORING_REQUIRES,\n \":python_version=='2.7'\": [\"functools32\", \"futures\"],\n },\n test_suite=\"tests\",\n dependency_links=DEPENDENCY_LINKS,\n entry_points=ENTRY_POINTS)\n", "path": "setup.py"}]}
| 1,501 | 243 |
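Editor's note (not part of the dataset record above): the patch in that record rewires packaging so the test suite lives in a top-level tests/ directory rather than inside the `kinto` package. Below is a pared-down `setup.py` sketch showing only the knobs the diff touches; the project name and version are placeholders, and this is not the full Kinto configuration.

```python
from setuptools import setup, find_packages

setup(
    name="example-project",
    version="0.1.0",
    packages=find_packages(),
    # pytest-runner lets `python setup.py test` delegate to pytest.
    setup_requires=["pytest-runner"],
    tests_require=["pytest", "WebTest"],
    # Tests are discovered from a top-level tests/ directory instead of
    # being shipped inside the package (previously "kinto.tests").
    test_suite="tests",
)
```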
gh_patches_debug_8204
|
rasdani/github-patches
|
git_diff
|
vas3k__vas3k.club-381
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pagination bug in the "Community" section
STR:
1. Open https://vas3k.club/people/
2. Apply the first 4 filters
3. Go to page 2 of the results
ER: the filters are preserved
AR: the filters are partially lost
Video: https://youtu.be/08cz2TOq60A
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `posts/templatetags/query_params.py`
Content:
```
1 from urllib.parse import urlencode
2
3 from django import template
4
5 register = template.Library()
6
7
8 @register.simple_tag(takes_context=True)
9 def append_query_param(context, **kwargs):
10 query_params = dict(context.request.GET.items())
11 query_params.update(kwargs)
12 return "?" + urlencode(query_params)
13
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/posts/templatetags/query_params.py b/posts/templatetags/query_params.py
--- a/posts/templatetags/query_params.py
+++ b/posts/templatetags/query_params.py
@@ -1,4 +1,4 @@
-from urllib.parse import urlencode
+from copy import deepcopy
from django import template
@@ -7,6 +7,6 @@
@register.simple_tag(takes_context=True)
def append_query_param(context, **kwargs):
- query_params = dict(context.request.GET.items())
+ query_params = deepcopy(context.request.GET)
query_params.update(kwargs)
- return "?" + urlencode(query_params)
+ return "?" + query_params.urlencode()
|
{"golden_diff": "diff --git a/posts/templatetags/query_params.py b/posts/templatetags/query_params.py\n--- a/posts/templatetags/query_params.py\n+++ b/posts/templatetags/query_params.py\n@@ -1,4 +1,4 @@\n-from urllib.parse import urlencode\n+from copy import deepcopy\n \n from django import template\n \n@@ -7,6 +7,6 @@\n \n @register.simple_tag(takes_context=True)\n def append_query_param(context, **kwargs):\n- query_params = dict(context.request.GET.items())\n+ query_params = deepcopy(context.request.GET)\n query_params.update(kwargs)\n- return \"?\" + urlencode(query_params)\n+ return \"?\" + query_params.urlencode()\n", "issue": "\u0411\u0430\u0433 \u0441 \u043f\u0430\u0436\u0438\u043d\u0430\u0446\u0438\u0435\u0439 \u0432 \u0440\u0430\u0437\u0434\u0435\u043b\u0435 \u00ab\u041a\u043e\u043c\u043c\u044c\u044e\u043d\u0438\u0442\u0438\u00bb\nSTR:\r\n1. \u041e\u0442\u043a\u0440\u044b\u0442\u044c https://vas3k.club/people/\r\n2. \u041e\u0442\u0444\u0438\u043b\u044c\u0442\u0440\u043e\u0432\u0430\u0442\u044c\u00a0\u043f\u043e \u043f\u0435\u0440\u0432\u044b\u043c 4 \u0444\u0438\u043b\u044c\u0442\u0440\u0430\u043c\r\n3. \u041f\u0435\u0440\u0435\u0439\u0442\u0438 \u043d\u0430 2 \u0441\u0442\u0440\u0430\u043d\u0438\u0446\u0443 \u0432\u044b\u0434\u0430\u0447\u0438\r\n\r\nER: \u0424\u0438\u043b\u044c\u0442\u0440\u044b \u0441\u043e\u0445\u0440\u0430\u043d\u0435\u043d\u044b\r\nAR: \u0424\u0438\u043b\u044c\u0442\u0440\u044b \u0447\u0430\u0441\u0442\u0438\u0447\u043d\u043e \u043f\u043e\u0442\u0435\u0440\u044f\u043d\u044b\r\n\r\n\u0412\u0438\u0434\u0435\u043e\u00a0https://youtu.be/08cz2TOq60A\n", "before_files": [{"content": "from urllib.parse import urlencode\n\nfrom django import template\n\nregister = template.Library()\n\n\[email protected]_tag(takes_context=True)\ndef append_query_param(context, **kwargs):\n query_params = dict(context.request.GET.items())\n query_params.update(kwargs)\n return \"?\" + urlencode(query_params)\n", "path": "posts/templatetags/query_params.py"}], "after_files": [{"content": "from copy import deepcopy\n\nfrom django import template\n\nregister = template.Library()\n\n\[email protected]_tag(takes_context=True)\ndef append_query_param(context, **kwargs):\n query_params = deepcopy(context.request.GET)\n query_params.update(kwargs)\n return \"?\" + query_params.urlencode()\n", "path": "posts/templatetags/query_params.py"}]}
| 446 | 150 |
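Editor's note (not part of the dataset record above): the bug in that record comes from collapsing the query string into a plain `dict`, which can hold only one value per key, so repeated filter parameters are dropped when the pagination link is rebuilt. The stdlib-only sketch below reproduces that loss and the preserved-pairs behaviour of the fix without Django; the parameter names are made up for illustration.

```python
from urllib.parse import parse_qsl, urlencode

query_string = "country=US&skill=python&skill=golang&page=1"

# Pre-patch behaviour: dict() keeps only the last "skill", silently dropping a filter.
collapsed = dict(parse_qsl(query_string))
print(urlencode(collapsed))  # country=US&skill=golang&page=1

# Post-patch behaviour: keep every (key, value) pair and only swap the page number.
pairs = [(k, v) for k, v in parse_qsl(query_string) if k != "page"] + [("page", "2")]
print(urlencode(pairs))      # country=US&skill=python&skill=golang&page=2
```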
gh_patches_debug_10785
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-916
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Show output of nvcc after stacktrace
In the current implementation, `nvcc` prints error messages to stdout/stderr before the stack trace is printed, which makes the output hard to read.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/cuda/compiler.py`
Content:
```
1 import hashlib
2 import os
3 import re
4 import subprocess
5 import sys
6 import tempfile
7
8 import filelock
9 import six
10
11 from cupy.cuda import device
12 from cupy.cuda import function
13
14
15 def _get_arch():
16 cc = device.Device().compute_capability
17 return 'sm_%s' % cc
18
19
20 class TemporaryDirectory(object):
21
22 def __enter__(self):
23 self.path = tempfile.mkdtemp()
24 return self.path
25
26 def __exit__(self, exc_type, exc_value, traceback):
27 if exc_value is not None:
28 return
29
30 for name in os.listdir(self.path):
31 os.unlink(os.path.join(self.path, name))
32 os.rmdir(self.path)
33
34
35 def _run_nvcc(cmd, cwd):
36 try:
37 return subprocess.check_output(cmd, cwd=cwd)
38 except OSError as e:
39 msg = 'Failed to run `nvcc` command. ' \
40 'Check PATH environment variable: ' \
41 + str(e)
42 raise OSError(msg)
43
44
45 def nvcc(source, options=(), arch=None):
46 if not arch:
47 arch = _get_arch()
48 cmd = ['nvcc', '--cubin', '-arch', arch] + list(options)
49
50 with TemporaryDirectory() as root_dir:
51 path = os.path.join(root_dir, 'kern')
52 cu_path = '%s.cu' % path
53 cubin_path = '%s.cubin' % path
54
55 with open(cu_path, 'w') as cu_file:
56 cu_file.write(source)
57
58 cmd.append(cu_path)
59 _run_nvcc(cmd, root_dir)
60
61 with open(cubin_path, 'rb') as bin_file:
62 return bin_file.read()
63
64
65 def preprocess(source, options=()):
66 cmd = ['nvcc', '--preprocess'] + list(options)
67 with TemporaryDirectory() as root_dir:
68 path = os.path.join(root_dir, 'kern')
69 cu_path = '%s.cu' % path
70
71 with open(cu_path, 'w') as cu_file:
72 cu_file.write(source)
73
74 cmd.append(cu_path)
75 pp_src = _run_nvcc(cmd, root_dir)
76
77 if isinstance(pp_src, six.binary_type):
78 pp_src = pp_src.decode('utf-8')
79 return re.sub('(?m)^#.*$', '', pp_src)
80
81
82 _default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')
83
84
85 def get_cache_dir():
86 return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)
87
88
89 _empty_file_preprocess_cache = {}
90
91
92 def compile_with_cache(source, options=(), arch=None, cache_dir=None):
93 global _empty_file_preprocess_cache
94 if cache_dir is None:
95 cache_dir = get_cache_dir()
96 if arch is None:
97 arch = _get_arch()
98
99 if 'win32' == sys.platform:
100 options += ('-Xcompiler', '/wd 4819')
101 if sys.maxsize == 9223372036854775807:
102 options += '-m64',
103 elif sys.maxsize == 2147483647:
104 options += '-m32',
105
106 env = (arch, options)
107 if '#include' in source:
108 pp_src = '%s %s' % (env, preprocess(source, options))
109 else:
110 base = _empty_file_preprocess_cache.get(env, None)
111 if base is None:
112 base = _empty_file_preprocess_cache[env] = preprocess('', options)
113 pp_src = '%s %s %s' % (env, base, source)
114
115 if isinstance(pp_src, six.text_type):
116 pp_src = pp_src.encode('utf-8')
117 name = '%s.cubin' % hashlib.md5(pp_src).hexdigest()
118
119 mod = function.Module()
120
121 if not os.path.exists(cache_dir):
122 os.makedirs(cache_dir)
123
124 lock_path = os.path.join(cache_dir, 'lock_file.lock')
125
126 path = os.path.join(cache_dir, name)
127 with filelock.FileLock(lock_path) as lock:
128 if os.path.exists(path):
129 with open(path, 'rb') as file:
130 cubin = file.read()
131 mod.load(cubin)
132 else:
133 lock.release()
134 cubin = nvcc(source, options, arch)
135 mod.load(cubin)
136 lock.acquire()
137 with open(path, 'wb') as cubin_file:
138 cubin_file.write(cubin)
139
140 return mod
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py
--- a/cupy/cuda/compiler.py
+++ b/cupy/cuda/compiler.py
@@ -34,7 +34,14 @@
def _run_nvcc(cmd, cwd):
try:
- return subprocess.check_output(cmd, cwd=cwd)
+ return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ msg = ('`nvcc` command returns non-zero exit status. \n'
+ 'command: {0}\n'
+ 'return-code: {1}\n'
+ 'stdout/stderr: \n'
+ '{2}'.format(e.cmd, e.returncode, e.output))
+ raise RuntimeError(msg)
except OSError as e:
msg = 'Failed to run `nvcc` command. ' \
'Check PATH environment variable: ' \
|
{"golden_diff": "diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py\n--- a/cupy/cuda/compiler.py\n+++ b/cupy/cuda/compiler.py\n@@ -34,7 +34,14 @@\n \n def _run_nvcc(cmd, cwd):\n try:\n- return subprocess.check_output(cmd, cwd=cwd)\n+ return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)\n+ except subprocess.CalledProcessError as e:\n+ msg = ('`nvcc` command returns non-zero exit status. \\n'\n+ 'command: {0}\\n'\n+ 'return-code: {1}\\n'\n+ 'stdout/stderr: \\n'\n+ '{2}'.format(e.cmd, e.returncode, e.output))\n+ raise RuntimeError(msg)\n except OSError as e:\n msg = 'Failed to run `nvcc` command. ' \\\n 'Check PATH environment variable: ' \\\n", "issue": "Show output of nvcc after stacktrace\nIn the current implementation, `nvcc` prints error messages in stdout/stderr before stack trace is printed. It is hard to read.\n\n", "before_files": [{"content": "import hashlib\nimport os\nimport re\nimport subprocess\nimport sys\nimport tempfile\n\nimport filelock\nimport six\n\nfrom cupy.cuda import device\nfrom cupy.cuda import function\n\n\ndef _get_arch():\n cc = device.Device().compute_capability\n return 'sm_%s' % cc\n\n\nclass TemporaryDirectory(object):\n\n def __enter__(self):\n self.path = tempfile.mkdtemp()\n return self.path\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_value is not None:\n return\n\n for name in os.listdir(self.path):\n os.unlink(os.path.join(self.path, name))\n os.rmdir(self.path)\n\n\ndef _run_nvcc(cmd, cwd):\n try:\n return subprocess.check_output(cmd, cwd=cwd)\n except OSError as e:\n msg = 'Failed to run `nvcc` command. ' \\\n 'Check PATH environment variable: ' \\\n + str(e)\n raise OSError(msg)\n\n\ndef nvcc(source, options=(), arch=None):\n if not arch:\n arch = _get_arch()\n cmd = ['nvcc', '--cubin', '-arch', arch] + list(options)\n\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n cubin_path = '%s.cubin' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n _run_nvcc(cmd, root_dir)\n\n with open(cubin_path, 'rb') as bin_file:\n return bin_file.read()\n\n\ndef preprocess(source, options=()):\n cmd = ['nvcc', '--preprocess'] + list(options)\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n pp_src = _run_nvcc(cmd, root_dir)\n\n if isinstance(pp_src, six.binary_type):\n pp_src = pp_src.decode('utf-8')\n return re.sub('(?m)^#.*$', '', pp_src)\n\n\n_default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')\n\n\ndef get_cache_dir():\n return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)\n\n\n_empty_file_preprocess_cache = {}\n\n\ndef compile_with_cache(source, options=(), arch=None, cache_dir=None):\n global _empty_file_preprocess_cache\n if cache_dir is None:\n cache_dir = get_cache_dir()\n if arch is None:\n arch = _get_arch()\n\n if 'win32' == sys.platform:\n options += ('-Xcompiler', '/wd 4819')\n if sys.maxsize == 9223372036854775807:\n options += '-m64',\n elif sys.maxsize == 2147483647:\n options += '-m32',\n\n env = (arch, options)\n if '#include' in source:\n pp_src = '%s %s' % (env, preprocess(source, options))\n else:\n base = _empty_file_preprocess_cache.get(env, None)\n if base is None:\n base = _empty_file_preprocess_cache[env] = preprocess('', options)\n pp_src = '%s %s %s' % (env, base, source)\n\n if isinstance(pp_src, six.text_type):\n pp_src = 
pp_src.encode('utf-8')\n name = '%s.cubin' % hashlib.md5(pp_src).hexdigest()\n\n mod = function.Module()\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n lock_path = os.path.join(cache_dir, 'lock_file.lock')\n\n path = os.path.join(cache_dir, name)\n with filelock.FileLock(lock_path) as lock:\n if os.path.exists(path):\n with open(path, 'rb') as file:\n cubin = file.read()\n mod.load(cubin)\n else:\n lock.release()\n cubin = nvcc(source, options, arch)\n mod.load(cubin)\n lock.acquire()\n with open(path, 'wb') as cubin_file:\n cubin_file.write(cubin)\n\n return mod\n", "path": "cupy/cuda/compiler.py"}], "after_files": [{"content": "import hashlib\nimport os\nimport re\nimport subprocess\nimport sys\nimport tempfile\n\nimport filelock\nimport six\n\nfrom cupy.cuda import device\nfrom cupy.cuda import function\n\n\ndef _get_arch():\n cc = device.Device().compute_capability\n return 'sm_%s' % cc\n\n\nclass TemporaryDirectory(object):\n\n def __enter__(self):\n self.path = tempfile.mkdtemp()\n return self.path\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_value is not None:\n return\n\n for name in os.listdir(self.path):\n os.unlink(os.path.join(self.path, name))\n os.rmdir(self.path)\n\n\ndef _run_nvcc(cmd, cwd):\n try:\n return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n msg = ('`nvcc` command returns non-zero exit status. \\n'\n 'command: {0}\\n'\n 'return-code: {1}\\n'\n 'stdout/stderr: \\n'\n '{2}'.format(e.cmd, e.returncode, e.output))\n raise RuntimeError(msg)\n except OSError as e:\n msg = 'Failed to run `nvcc` command. ' \\\n 'Check PATH environment variable: ' \\\n + str(e)\n raise OSError(msg)\n\n\ndef nvcc(source, options=(), arch=None):\n if not arch:\n arch = _get_arch()\n cmd = ['nvcc', '--cubin', '-arch', arch] + list(options)\n\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n cubin_path = '%s.cubin' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n _run_nvcc(cmd, root_dir)\n\n with open(cubin_path, 'rb') as bin_file:\n return bin_file.read()\n\n\ndef preprocess(source, options=()):\n cmd = ['nvcc', '--preprocess'] + list(options)\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n pp_src = _run_nvcc(cmd, root_dir)\n\n if isinstance(pp_src, six.binary_type):\n pp_src = pp_src.decode('utf-8')\n return re.sub('(?m)^#.*$', '', pp_src)\n\n\n_default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')\n\n\ndef get_cache_dir():\n return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)\n\n\n_empty_file_preprocess_cache = {}\n\n\ndef compile_with_cache(source, options=(), arch=None, cache_dir=None):\n global _empty_file_preprocess_cache\n if cache_dir is None:\n cache_dir = get_cache_dir()\n if arch is None:\n arch = _get_arch()\n\n if 'win32' == sys.platform:\n options += ('-Xcompiler', '/wd 4819')\n if sys.maxsize == 9223372036854775807:\n options += '-m64',\n elif sys.maxsize == 2147483647:\n options += '-m32',\n\n env = (arch, options)\n if '#include' in source:\n pp_src = '%s %s' % (env, preprocess(source, options))\n else:\n base = _empty_file_preprocess_cache.get(env, None)\n if base is None:\n base = _empty_file_preprocess_cache[env] = preprocess('', options)\n pp_src = '%s %s %s' % (env, base, source)\n\n if 
isinstance(pp_src, six.text_type):\n pp_src = pp_src.encode('utf-8')\n name = '%s.cubin' % hashlib.md5(pp_src).hexdigest()\n\n mod = function.Module()\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n lock_path = os.path.join(cache_dir, 'lock_file.lock')\n\n path = os.path.join(cache_dir, name)\n with filelock.FileLock(lock_path) as lock:\n if os.path.exists(path):\n with open(path, 'rb') as file:\n cubin = file.read()\n mod.load(cubin)\n else:\n lock.release()\n cubin = nvcc(source, options, arch)\n mod.load(cubin)\n lock.acquire()\n with open(path, 'wb') as cubin_file:\n cubin_file.write(cubin)\n\n return mod\n", "path": "cupy/cuda/compiler.py"}]}
| 1,605 | 205 |
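Editor's note (not part of the dataset record above): the fix in that record merges nvcc's stderr into the captured stdout and re-raises it inside the exception message, so compiler diagnostics appear together with the Python traceback instead of scattered before it. The helper below is a generic, hedged restatement of that pattern; it is not CuPy's actual code, and the commented-out call is only a usage hint.

```python
import subprocess


def run_and_capture(cmd, cwd=None):
    """Run an external compiler-style command, attaching its output to any failure."""
    try:
        # stderr=STDOUT folds diagnostics into the captured output instead of
        # letting them print before the traceback.
        return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        msg = ("command returned non-zero exit status.\n"
               "command: {0}\n"
               "return-code: {1}\n"
               "stdout/stderr:\n{2}").format(e.cmd, e.returncode,
                                             e.output.decode(errors="replace"))
        raise RuntimeError(msg)
    except OSError as e:
        raise OSError("Failed to run command. Check PATH environment variable: " + str(e))


# Example (uncomment where nvcc is available):
# run_and_capture(["nvcc", "--version"])
```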
gh_patches_debug_23660
|
rasdani/github-patches
|
git_diff
|
pypi__warehouse-3894
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tune Elasticsearch client for reindex separately from main search client
#3892 was necessary to alleviate the hard outage experienced due to the ES cluster being down.
We were waiting 30s per request for Elasticsearch and then retrying. This behavior was added in #1471 to handle reindex issues.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/search/tasks.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import binascii
14 import os
15
16 from elasticsearch.helpers import parallel_bulk
17 from sqlalchemy import and_, func
18 from sqlalchemy.orm import aliased
19
20 from warehouse.packaging.models import (
21 Classifier, Project, Release, release_classifiers)
22 from warehouse.packaging.search import Project as ProjectDocType
23 from warehouse.search.utils import get_index
24 from warehouse import tasks
25 from warehouse.utils.db import windowed_query
26
27
28 def _project_docs(db):
29
30 releases_list = (
31 db.query(Release.name, Release.version)
32 .order_by(
33 Release.name,
34 Release.is_prerelease.nullslast(),
35 Release._pypi_ordering.desc(),
36 )
37 .distinct(Release.name)
38 .subquery("release_list")
39 )
40
41 r = aliased(Release, name="r")
42
43 all_versions = (
44 db.query(func.array_agg(r.version))
45 .filter(r.name == Release.name)
46 .correlate(Release)
47 .as_scalar()
48 .label("all_versions")
49 )
50
51 classifiers = (
52 db.query(func.array_agg(Classifier.classifier))
53 .select_from(release_classifiers)
54 .join(Classifier, Classifier.id == release_classifiers.c.trove_id)
55 .filter(Release.name == release_classifiers.c.name)
56 .filter(Release.version == release_classifiers.c.version)
57 .correlate(Release)
58 .as_scalar()
59 .label("classifiers")
60 )
61
62 release_data = (
63 db.query(
64 Release.description,
65 Release.name,
66 Release.version.label("latest_version"),
67 all_versions,
68 Release.author,
69 Release.author_email,
70 Release.maintainer,
71 Release.maintainer_email,
72 Release.home_page,
73 Release.summary,
74 Release.keywords,
75 Release.platform,
76 Release.download_url,
77 Release.created,
78 classifiers,
79 Project.normalized_name,
80 Project.name,
81 )
82 .select_from(releases_list)
83 .join(Release, and_(
84 Release.name == releases_list.c.name,
85 Release.version == releases_list.c.version))
86 .outerjoin(Release.project)
87 .order_by(Release.name)
88 )
89
90 for release in windowed_query(release_data, Release.name, 50000):
91 p = ProjectDocType.from_db(release)
92 p.full_clean()
93 yield p.to_dict(include_meta=True)
94
95
96 @tasks.task(ignore_result=True, acks_late=True)
97 def reindex(request):
98 """
99 Recreate the Search Index.
100 """
101 client = request.registry["elasticsearch.client"]
102 number_of_replicas = request.registry.get("elasticsearch.replicas", 0)
103 refresh_interval = request.registry.get("elasticsearch.interval", "1s")
104
105 # We use a randomly named index so that we can do a zero downtime reindex.
106 # Essentially we'll use a randomly named index which we will use until all
107 # of the data has been reindexed, at which point we'll point an alias at
108 # our randomly named index, and then delete the old randomly named index.
109
110 # Create the new index and associate all of our doc types with it.
111 index_base = request.registry["elasticsearch.index"]
112 random_token = binascii.hexlify(os.urandom(5)).decode("ascii")
113 new_index_name = "{}-{}".format(index_base, random_token)
114 doc_types = request.registry.get("search.doc_types", set())
115 shards = request.registry.get("elasticsearch.shards", 1)
116
117 # Create the new index with zero replicas and index refreshes disabled
118 # while we are bulk indexing.
119 new_index = get_index(
120 new_index_name,
121 doc_types,
122 using=client,
123 shards=shards,
124 replicas=0,
125 interval="-1",
126 )
127 new_index.create(wait_for_active_shards=shards)
128
129 # From this point on, if any error occurs, we want to be able to delete our
130 # in progress index.
131 try:
132 request.db.execute("SET statement_timeout = '600s'")
133
134 for _ in parallel_bulk(client, _project_docs(request.db)):
135 pass
136 except: # noqa
137 new_index.delete()
138 raise
139 finally:
140 request.db.rollback()
141 request.db.close()
142
143 # Now that we've finished indexing all of our data we can optimize it and
144 # update the replicas and refresh intervals.
145 client.indices.forcemerge(index=new_index_name)
146 client.indices.put_settings(
147 index=new_index_name,
148 body={
149 "index": {
150 "number_of_replicas": number_of_replicas,
151 "refresh_interval": refresh_interval,
152 }
153 }
154 )
155
156 # Point the alias at our new randomly named index and delete the old index.
157 if client.indices.exists_alias(name=index_base):
158 to_delete = set()
159 actions = []
160 for name in client.indices.get_alias(name=index_base):
161 to_delete.add(name)
162 actions.append({"remove": {"index": name, "alias": index_base}})
163 actions.append({"add": {"index": new_index_name, "alias": index_base}})
164 client.indices.update_aliases({"actions": actions})
165 client.indices.delete(",".join(to_delete))
166 else:
167 client.indices.put_alias(name=index_base, index=new_index_name)
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/warehouse/search/tasks.py b/warehouse/search/tasks.py
--- a/warehouse/search/tasks.py
+++ b/warehouse/search/tasks.py
@@ -11,11 +11,15 @@
# limitations under the License.
import binascii
+import urllib
import os
from elasticsearch.helpers import parallel_bulk
+from elasticsearch_dsl import serializer
from sqlalchemy import and_, func
from sqlalchemy.orm import aliased
+import certifi
+import elasticsearch
from warehouse.packaging.models import (
Classifier, Project, Release, release_classifiers)
@@ -98,7 +102,15 @@
"""
Recreate the Search Index.
"""
- client = request.registry["elasticsearch.client"]
+ p = urllib.parse.urlparse(request.registry.settings["elasticsearch.url"])
+ client = elasticsearch.Elasticsearch(
+ [urllib.parse.urlunparse(p[:2] + ("",) * 4)],
+ verify_certs=True,
+ ca_certs=certifi.where(),
+ timeout=30,
+ retry_on_timeout=True,
+ serializer=serializer.serializer,
+ )
number_of_replicas = request.registry.get("elasticsearch.replicas", 0)
refresh_interval = request.registry.get("elasticsearch.interval", "1s")
|
{"golden_diff": "diff --git a/warehouse/search/tasks.py b/warehouse/search/tasks.py\n--- a/warehouse/search/tasks.py\n+++ b/warehouse/search/tasks.py\n@@ -11,11 +11,15 @@\n # limitations under the License.\n \n import binascii\n+import urllib\n import os\n \n from elasticsearch.helpers import parallel_bulk\n+from elasticsearch_dsl import serializer\n from sqlalchemy import and_, func\n from sqlalchemy.orm import aliased\n+import certifi\n+import elasticsearch\n \n from warehouse.packaging.models import (\n Classifier, Project, Release, release_classifiers)\n@@ -98,7 +102,15 @@\n \"\"\"\n Recreate the Search Index.\n \"\"\"\n- client = request.registry[\"elasticsearch.client\"]\n+ p = urllib.parse.urlparse(request.registry.settings[\"elasticsearch.url\"])\n+ client = elasticsearch.Elasticsearch(\n+ [urllib.parse.urlunparse(p[:2] + (\"\",) * 4)],\n+ verify_certs=True,\n+ ca_certs=certifi.where(),\n+ timeout=30,\n+ retry_on_timeout=True,\n+ serializer=serializer.serializer,\n+ )\n number_of_replicas = request.registry.get(\"elasticsearch.replicas\", 0)\n refresh_interval = request.registry.get(\"elasticsearch.interval\", \"1s\")\n", "issue": "Tune Elasticsearch client for reindex separately from main search client\n#3892 was necessary to alleviate the hard outage experienced due to ES cluster being down.\r\n\r\nWe were waiting 30s per request for Elasticsearch then retrying. This was added in #1471 to handle reindex issues.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport binascii\nimport os\n\nfrom elasticsearch.helpers import parallel_bulk\nfrom sqlalchemy import and_, func\nfrom sqlalchemy.orm import aliased\n\nfrom warehouse.packaging.models import (\n Classifier, Project, Release, release_classifiers)\nfrom warehouse.packaging.search import Project as ProjectDocType\nfrom warehouse.search.utils import get_index\nfrom warehouse import tasks\nfrom warehouse.utils.db import windowed_query\n\n\ndef _project_docs(db):\n\n releases_list = (\n db.query(Release.name, Release.version)\n .order_by(\n Release.name,\n Release.is_prerelease.nullslast(),\n Release._pypi_ordering.desc(),\n )\n .distinct(Release.name)\n .subquery(\"release_list\")\n )\n\n r = aliased(Release, name=\"r\")\n\n all_versions = (\n db.query(func.array_agg(r.version))\n .filter(r.name == Release.name)\n .correlate(Release)\n .as_scalar()\n .label(\"all_versions\")\n )\n\n classifiers = (\n db.query(func.array_agg(Classifier.classifier))\n .select_from(release_classifiers)\n .join(Classifier, Classifier.id == release_classifiers.c.trove_id)\n .filter(Release.name == release_classifiers.c.name)\n .filter(Release.version == release_classifiers.c.version)\n .correlate(Release)\n .as_scalar()\n .label(\"classifiers\")\n )\n\n release_data = (\n db.query(\n Release.description,\n Release.name,\n Release.version.label(\"latest_version\"),\n all_versions,\n Release.author,\n Release.author_email,\n Release.maintainer,\n Release.maintainer_email,\n Release.home_page,\n Release.summary,\n 
Release.keywords,\n Release.platform,\n Release.download_url,\n Release.created,\n classifiers,\n Project.normalized_name,\n Project.name,\n )\n .select_from(releases_list)\n .join(Release, and_(\n Release.name == releases_list.c.name,\n Release.version == releases_list.c.version))\n .outerjoin(Release.project)\n .order_by(Release.name)\n )\n\n for release in windowed_query(release_data, Release.name, 50000):\n p = ProjectDocType.from_db(release)\n p.full_clean()\n yield p.to_dict(include_meta=True)\n\n\[email protected](ignore_result=True, acks_late=True)\ndef reindex(request):\n \"\"\"\n Recreate the Search Index.\n \"\"\"\n client = request.registry[\"elasticsearch.client\"]\n number_of_replicas = request.registry.get(\"elasticsearch.replicas\", 0)\n refresh_interval = request.registry.get(\"elasticsearch.interval\", \"1s\")\n\n # We use a randomly named index so that we can do a zero downtime reindex.\n # Essentially we'll use a randomly named index which we will use until all\n # of the data has been reindexed, at which point we'll point an alias at\n # our randomly named index, and then delete the old randomly named index.\n\n # Create the new index and associate all of our doc types with it.\n index_base = request.registry[\"elasticsearch.index\"]\n random_token = binascii.hexlify(os.urandom(5)).decode(\"ascii\")\n new_index_name = \"{}-{}\".format(index_base, random_token)\n doc_types = request.registry.get(\"search.doc_types\", set())\n shards = request.registry.get(\"elasticsearch.shards\", 1)\n\n # Create the new index with zero replicas and index refreshes disabled\n # while we are bulk indexing.\n new_index = get_index(\n new_index_name,\n doc_types,\n using=client,\n shards=shards,\n replicas=0,\n interval=\"-1\",\n )\n new_index.create(wait_for_active_shards=shards)\n\n # From this point on, if any error occurs, we want to be able to delete our\n # in progress index.\n try:\n request.db.execute(\"SET statement_timeout = '600s'\")\n\n for _ in parallel_bulk(client, _project_docs(request.db)):\n pass\n except: # noqa\n new_index.delete()\n raise\n finally:\n request.db.rollback()\n request.db.close()\n\n # Now that we've finished indexing all of our data we can optimize it and\n # update the replicas and refresh intervals.\n client.indices.forcemerge(index=new_index_name)\n client.indices.put_settings(\n index=new_index_name,\n body={\n \"index\": {\n \"number_of_replicas\": number_of_replicas,\n \"refresh_interval\": refresh_interval,\n }\n }\n )\n\n # Point the alias at our new randomly named index and delete the old index.\n if client.indices.exists_alias(name=index_base):\n to_delete = set()\n actions = []\n for name in client.indices.get_alias(name=index_base):\n to_delete.add(name)\n actions.append({\"remove\": {\"index\": name, \"alias\": index_base}})\n actions.append({\"add\": {\"index\": new_index_name, \"alias\": index_base}})\n client.indices.update_aliases({\"actions\": actions})\n client.indices.delete(\",\".join(to_delete))\n else:\n client.indices.put_alias(name=index_base, index=new_index_name)\n", "path": "warehouse/search/tasks.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport binascii\nimport urllib\nimport os\n\nfrom elasticsearch.helpers import parallel_bulk\nfrom elasticsearch_dsl import serializer\nfrom sqlalchemy import and_, func\nfrom sqlalchemy.orm import aliased\nimport certifi\nimport elasticsearch\n\nfrom warehouse.packaging.models import (\n Classifier, Project, Release, release_classifiers)\nfrom warehouse.packaging.search import Project as ProjectDocType\nfrom warehouse.search.utils import get_index\nfrom warehouse import tasks\nfrom warehouse.utils.db import windowed_query\n\n\ndef _project_docs(db):\n\n releases_list = (\n db.query(Release.name, Release.version)\n .order_by(\n Release.name,\n Release.is_prerelease.nullslast(),\n Release._pypi_ordering.desc(),\n )\n .distinct(Release.name)\n .subquery(\"release_list\")\n )\n\n r = aliased(Release, name=\"r\")\n\n all_versions = (\n db.query(func.array_agg(r.version))\n .filter(r.name == Release.name)\n .correlate(Release)\n .as_scalar()\n .label(\"all_versions\")\n )\n\n classifiers = (\n db.query(func.array_agg(Classifier.classifier))\n .select_from(release_classifiers)\n .join(Classifier, Classifier.id == release_classifiers.c.trove_id)\n .filter(Release.name == release_classifiers.c.name)\n .filter(Release.version == release_classifiers.c.version)\n .correlate(Release)\n .as_scalar()\n .label(\"classifiers\")\n )\n\n release_data = (\n db.query(\n Release.description,\n Release.name,\n Release.version.label(\"latest_version\"),\n all_versions,\n Release.author,\n Release.author_email,\n Release.maintainer,\n Release.maintainer_email,\n Release.home_page,\n Release.summary,\n Release.keywords,\n Release.platform,\n Release.download_url,\n Release.created,\n classifiers,\n Project.normalized_name,\n Project.name,\n )\n .select_from(releases_list)\n .join(Release, and_(\n Release.name == releases_list.c.name,\n Release.version == releases_list.c.version))\n .outerjoin(Release.project)\n .order_by(Release.name)\n )\n\n for release in windowed_query(release_data, Release.name, 50000):\n p = ProjectDocType.from_db(release)\n p.full_clean()\n yield p.to_dict(include_meta=True)\n\n\[email protected](ignore_result=True, acks_late=True)\ndef reindex(request):\n \"\"\"\n Recreate the Search Index.\n \"\"\"\n p = urllib.parse.urlparse(request.registry.settings[\"elasticsearch.url\"])\n client = elasticsearch.Elasticsearch(\n [urllib.parse.urlunparse(p[:2] + (\"\",) * 4)],\n verify_certs=True,\n ca_certs=certifi.where(),\n timeout=30,\n retry_on_timeout=True,\n serializer=serializer.serializer,\n )\n number_of_replicas = request.registry.get(\"elasticsearch.replicas\", 0)\n refresh_interval = request.registry.get(\"elasticsearch.interval\", \"1s\")\n\n # We use a randomly named index so that we can do a zero downtime reindex.\n # Essentially we'll use a randomly named index which we will use until all\n # of the data has been reindexed, at which point we'll point an alias at\n # our randomly named index, and then delete the old randomly named index.\n\n # Create the new index and associate all of our doc types with it.\n index_base = request.registry[\"elasticsearch.index\"]\n random_token = binascii.hexlify(os.urandom(5)).decode(\"ascii\")\n new_index_name = \"{}-{}\".format(index_base, random_token)\n doc_types = request.registry.get(\"search.doc_types\", set())\n shards = request.registry.get(\"elasticsearch.shards\", 1)\n\n # Create the 
new index with zero replicas and index refreshes disabled\n # while we are bulk indexing.\n new_index = get_index(\n new_index_name,\n doc_types,\n using=client,\n shards=shards,\n replicas=0,\n interval=\"-1\",\n )\n new_index.create(wait_for_active_shards=shards)\n\n # From this point on, if any error occurs, we want to be able to delete our\n # in progress index.\n try:\n request.db.execute(\"SET statement_timeout = '600s'\")\n\n for _ in parallel_bulk(client, _project_docs(request.db)):\n pass\n except: # noqa\n new_index.delete()\n raise\n finally:\n request.db.rollback()\n request.db.close()\n\n # Now that we've finished indexing all of our data we can optimize it and\n # update the replicas and refresh intervals.\n client.indices.forcemerge(index=new_index_name)\n client.indices.put_settings(\n index=new_index_name,\n body={\n \"index\": {\n \"number_of_replicas\": number_of_replicas,\n \"refresh_interval\": refresh_interval,\n }\n }\n )\n\n # Point the alias at our new randomly named index and delete the old index.\n if client.indices.exists_alias(name=index_base):\n to_delete = set()\n actions = []\n for name in client.indices.get_alias(name=index_base):\n to_delete.add(name)\n actions.append({\"remove\": {\"index\": name, \"alias\": index_base}})\n actions.append({\"add\": {\"index\": new_index_name, \"alias\": index_base}})\n client.indices.update_aliases({\"actions\": actions})\n client.indices.delete(\",\".join(to_delete))\n else:\n client.indices.put_alias(name=index_base, index=new_index_name)\n", "path": "warehouse/search/tasks.py"}]}
| 1,951 | 281 |
gh_patches_debug_26736
|
rasdani/github-patches
|
git_diff
|
PlasmaPy__PlasmaPy-446
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
add axis labels to ITER parameters plot
The ITER parameters plot [here](http://docs.plasmapy.org/en/stable/auto_examples/plot_physics.html#sphx-glr-auto-examples-plot-physics-py) could use some labels and units in said labels.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plasmapy/__init__.py`
Content:
```
1 # Licensed under a 3-clause BSD style license - see LICENSE.rst
2
3 # Packages may add whatever they like to this file, but
4 # should keep this content at the top.
5 # ----------------------------------------------------------------------------
6 from ._base_init import *
7 # ----------------------------------------------------------------------------
8
9 # Enforce Python version check during package import.
10 # This is the same check as the one at the top of setup.py
11 import sys
12
13 __name__ = "plasmapy"
14
15 __doc__ = ("A community-developed and community-driven open source "
16 "core Python package for plasma physics.")
17
18
19 class UnsupportedPythonError(Exception):
20 pass
21
22
23 if sys.version_info < tuple((int(val) for val in "3.6".split('.'))):
24 raise UnsupportedPythonError("plasmapy does not support Python < {}".format(3.6))
25
26 if not _ASTROPY_SETUP_:
27 # For egg_info test builds to pass, put package imports here.
28 from . import atomic
29 from . import classes
30 from . import constants
31 from . import diagnostics
32 from . import mathematics
33 from . import physics
34 from . import utils
35
36 __citation__ = """@misc{plasmapy_community_2018_1238132,
37 author = {PlasmaPy Community and
38 Murphy, Nicholas A. and
39 Leonard, Andrew J. and
40 Sta\'nczak, Dominik and
41 Kozlowski, Pawel M. and
42 Langendorf, Samuel J. and
43 Haggerty, Colby C. and
44 Beckers, Jasper P. and
45 Mumford, Stuart J. and
46 Parashar, Tulasi N. and
47 Huang, Yi-Min},
48 title = {{PlasmaPy: an open source community-developed
49 Python package for plasma physics}},
50 month = apr,
51 year = 2018,
52 doi = {10.5281/zenodo.1238132},
53 url = {https://doi.org/10.5281/zenodo.1238132}
54 }"""
55
```
Path: `plasmapy/examples/plot_physics.py`
Content:
```
1 """
2 Analysing ITER parameters
3 =========================
4
5 Let's try to look at ITER plasma conditions using the `physics` subpackage.
6 """
7
8 from astropy import units as u
9 from plasmapy import physics
10 import matplotlib.pyplot as plt
11 import numpy as np
12 from mpl_toolkits.mplot3d import Axes3D
13
14 ######################################################
15 # The radius of electric field shielding clouds, also known as the Debye length,
16 # would be
17
18 electron_temperature = 8.8 * u.keV
19 electron_concentration = 10.1e19 / u.m**3
20 print(physics.Debye_length(electron_temperature, electron_concentration))
21
22 ############################################################
23 # Note that we can also neglect the unit for the concentration, as
24 # 1/m^3 is the a standard unit for this kind of Quantity:
25
26 print(physics.Debye_length(electron_temperature, 10.1e19))
27
28 ############################################################
29 # Assuming the magnetic field as 5.3 Teslas (which is the value at the major
30 # radius):
31
32 B = 5.3 * u.T
33
34 print(physics.gyrofrequency(B, particle='e'))
35
36 print(physics.gyroradius(B, T_i=electron_temperature, particle='e'))
37
38 ######################################################################
39 # The electron inertial length would be
40 print(physics.inertial_length(electron_concentration, particle='e'))
41
42 ######################################################################
43 # In these conditions, they should reach thermal velocities of about
44 print(physics.thermal_speed(T=electron_temperature, particle='e'))
45
46 ######################################################################
47 # And the Langmuir wave plasma frequency should be on the order of
48 print(physics.plasma_frequency(electron_concentration))
49
50 ############################################################
51 # Let's try to recreate some plots and get a feel for some of these quantities.
52
53 n_e = np.logspace(4, 30, 100) / u.m**3
54 plt.plot(n_e, physics.plasma_frequency(n_e))
55 plt.scatter(
56 electron_concentration,
57 physics.plasma_frequency(electron_concentration))
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py
--- a/plasmapy/__init__.py
+++ b/plasmapy/__init__.py
@@ -33,6 +33,29 @@
from . import physics
from . import utils
+def online_help(query):
+ """
+ Search the online PlasmaPy documentation for the given query from plasmapy.org
+ Opens the results in the default web browser.
+ Requires an active Internet connection.
+ Redirects to Astropy.units in case of query 'unit' or 'units'
+
+ Parameters
+ ----------
+ query : str
+ The search query.
+ """
+ from urllib.parse import urlencode
+ import webbrowser
+
+ url = 'http://docs.plasmapy.org/en/stable/search.html?\
+ {0}&check_keywords=yes&area=default'.format(urlencode({'q': query}))
+
+ if(query.lower() in ('unit', 'units')):
+ url = 'http://docs.astropy.org/en/stable/units/'
+
+ webbrowser.open(url)
+
__citation__ = """@misc{plasmapy_community_2018_1238132,
author = {PlasmaPy Community and
Murphy, Nicholas A. and
@@ -51,4 +74,4 @@
year = 2018,
doi = {10.5281/zenodo.1238132},
url = {https://doi.org/10.5281/zenodo.1238132}
-}"""
+}"""
\ No newline at end of file
diff --git a/plasmapy/examples/plot_physics.py b/plasmapy/examples/plot_physics.py
--- a/plasmapy/examples/plot_physics.py
+++ b/plasmapy/examples/plot_physics.py
@@ -55,3 +55,6 @@
plt.scatter(
electron_concentration,
physics.plasma_frequency(electron_concentration))
+plt.xlabel("Electron Concentration (m^-3)")
+plt.ylabel("Langmuir Wave Plasma Frequency (rad/s)")
+plt.show()
|
{"golden_diff": "diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py\n--- a/plasmapy/__init__.py\n+++ b/plasmapy/__init__.py\n@@ -33,6 +33,29 @@\n from . import physics\n from . import utils\n \n+def online_help(query):\n+ \"\"\"\n+ Search the online PlasmaPy documentation for the given query from plasmapy.org\n+ Opens the results in the default web browser.\n+ Requires an active Internet connection.\n+ Redirects to Astropy.units in case of query 'unit' or 'units'\n+\n+ Parameters\n+ ----------\n+ query : str\n+ The search query.\n+ \"\"\"\n+ from urllib.parse import urlencode\n+ import webbrowser\n+\n+ url = 'http://docs.plasmapy.org/en/stable/search.html?\\\n+ {0}&check_keywords=yes&area=default'.format(urlencode({'q': query}))\n+\n+ if(query.lower() in ('unit', 'units')):\n+ url = 'http://docs.astropy.org/en/stable/units/'\n+\n+ webbrowser.open(url)\n+\n __citation__ = \"\"\"@misc{plasmapy_community_2018_1238132,\n author = {PlasmaPy Community and\n Murphy, Nicholas A. and\n@@ -51,4 +74,4 @@\n year = 2018,\n doi = {10.5281/zenodo.1238132},\n url = {https://doi.org/10.5281/zenodo.1238132}\n-}\"\"\"\n+}\"\"\"\n\\ No newline at end of file\ndiff --git a/plasmapy/examples/plot_physics.py b/plasmapy/examples/plot_physics.py\n--- a/plasmapy/examples/plot_physics.py\n+++ b/plasmapy/examples/plot_physics.py\n@@ -55,3 +55,6 @@\n plt.scatter(\n electron_concentration,\n physics.plasma_frequency(electron_concentration))\n+plt.xlabel(\"Electron Concentration (m^-3)\")\n+plt.ylabel(\"Langmuir Wave Plasma Frequency (rad/s)\")\n+plt.show()\n", "issue": "add axis labels to ITER parameters plot\nThe ITER parameters plot [here](http://docs.plasmapy.org/en/stable/auto_examples/plot_physics.html#sphx-glr-auto-examples-plot-physics-py) could use some labels and units in said labels.\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n# Packages may add whatever they like to this file, but\n# should keep this content at the top.\n# ----------------------------------------------------------------------------\nfrom ._base_init import *\n# ----------------------------------------------------------------------------\n\n# Enforce Python version check during package import.\n# This is the same check as the one at the top of setup.py\nimport sys\n\n__name__ = \"plasmapy\"\n\n__doc__ = (\"A community-developed and community-driven open source \"\n \"core Python package for plasma physics.\")\n\n\nclass UnsupportedPythonError(Exception):\n pass\n\n\nif sys.version_info < tuple((int(val) for val in \"3.6\".split('.'))):\n raise UnsupportedPythonError(\"plasmapy does not support Python < {}\".format(3.6))\n\nif not _ASTROPY_SETUP_:\n # For egg_info test builds to pass, put package imports here.\n from . import atomic\n from . import classes\n from . import constants\n from . import diagnostics\n from . import mathematics\n from . import physics\n from . import utils\n\n__citation__ = \"\"\"@misc{plasmapy_community_2018_1238132,\n author = {PlasmaPy Community and\n Murphy, Nicholas A. and\n Leonard, Andrew J. and\n Sta\\'nczak, Dominik and\n Kozlowski, Pawel M. and\n Langendorf, Samuel J. and\n Haggerty, Colby C. and\n Beckers, Jasper P. and\n Mumford, Stuart J. and\n Parashar, Tulasi N. 
and\n Huang, Yi-Min},\n title = {{PlasmaPy: an open source community-developed \n Python package for plasma physics}},\n month = apr,\n year = 2018,\n doi = {10.5281/zenodo.1238132},\n url = {https://doi.org/10.5281/zenodo.1238132}\n}\"\"\"\n", "path": "plasmapy/__init__.py"}, {"content": "\"\"\"\nAnalysing ITER parameters\n=========================\n\nLet's try to look at ITER plasma conditions using the `physics` subpackage.\n\"\"\"\n\nfrom astropy import units as u\nfrom plasmapy import physics\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\n\n######################################################\n# The radius of electric field shielding clouds, also known as the Debye length,\n# would be\n\nelectron_temperature = 8.8 * u.keV\nelectron_concentration = 10.1e19 / u.m**3\nprint(physics.Debye_length(electron_temperature, electron_concentration))\n\n############################################################\n# Note that we can also neglect the unit for the concentration, as\n# 1/m^3 is the a standard unit for this kind of Quantity:\n\nprint(physics.Debye_length(electron_temperature, 10.1e19))\n\n############################################################\n# Assuming the magnetic field as 5.3 Teslas (which is the value at the major\n# radius):\n\nB = 5.3 * u.T\n\nprint(physics.gyrofrequency(B, particle='e'))\n\nprint(physics.gyroradius(B, T_i=electron_temperature, particle='e'))\n\n######################################################################\n# The electron inertial length would be\nprint(physics.inertial_length(electron_concentration, particle='e'))\n\n######################################################################\n# In these conditions, they should reach thermal velocities of about\nprint(physics.thermal_speed(T=electron_temperature, particle='e'))\n\n######################################################################\n# And the Langmuir wave plasma frequency should be on the order of\nprint(physics.plasma_frequency(electron_concentration))\n\n############################################################\n# Let's try to recreate some plots and get a feel for some of these quantities.\n\nn_e = np.logspace(4, 30, 100) / u.m**3\nplt.plot(n_e, physics.plasma_frequency(n_e))\nplt.scatter(\n electron_concentration,\n physics.plasma_frequency(electron_concentration))\n", "path": "plasmapy/examples/plot_physics.py"}], "after_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n# Packages may add whatever they like to this file, but\n# should keep this content at the top.\n# ----------------------------------------------------------------------------\nfrom ._base_init import *\n# ----------------------------------------------------------------------------\n\n# Enforce Python version check during package import.\n# This is the same check as the one at the top of setup.py\nimport sys\n\n__name__ = \"plasmapy\"\n\n__doc__ = (\"A community-developed and community-driven open source \"\n \"core Python package for plasma physics.\")\n\n\nclass UnsupportedPythonError(Exception):\n pass\n\n\nif sys.version_info < tuple((int(val) for val in \"3.6\".split('.'))):\n raise UnsupportedPythonError(\"plasmapy does not support Python < {}\".format(3.6))\n\nif not _ASTROPY_SETUP_:\n # For egg_info test builds to pass, put package imports here.\n from . import atomic\n from . import classes\n from . import constants\n from . import diagnostics\n from . import mathematics\n from . import physics\n from . 
import utils\n\ndef online_help(query):\n \"\"\"\n Search the online PlasmaPy documentation for the given query from plasmapy.org\n Opens the results in the default web browser.\n Requires an active Internet connection.\n Redirects to Astropy.units in case of query 'unit' or 'units'\n\n Parameters\n ----------\n query : str\n The search query.\n \"\"\"\n from urllib.parse import urlencode\n import webbrowser\n\n url = 'http://docs.plasmapy.org/en/stable/search.html?\\\n {0}&check_keywords=yes&area=default'.format(urlencode({'q': query}))\n\n if(query.lower() in ('unit', 'units')):\n url = 'http://docs.astropy.org/en/stable/units/'\n\n webbrowser.open(url)\n\n__citation__ = \"\"\"@misc{plasmapy_community_2018_1238132,\n author = {PlasmaPy Community and\n Murphy, Nicholas A. and\n Leonard, Andrew J. and\n Sta\\'nczak, Dominik and\n Kozlowski, Pawel M. and\n Langendorf, Samuel J. and\n Haggerty, Colby C. and\n Beckers, Jasper P. and\n Mumford, Stuart J. and\n Parashar, Tulasi N. and\n Huang, Yi-Min},\n title = {{PlasmaPy: an open source community-developed \n Python package for plasma physics}},\n month = apr,\n year = 2018,\n doi = {10.5281/zenodo.1238132},\n url = {https://doi.org/10.5281/zenodo.1238132}\n}\"\"\"", "path": "plasmapy/__init__.py"}, {"content": "\"\"\"\nAnalysing ITER parameters\n=========================\n\nLet's try to look at ITER plasma conditions using the `physics` subpackage.\n\"\"\"\n\nfrom astropy import units as u\nfrom plasmapy import physics\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\n\n######################################################\n# The radius of electric field shielding clouds, also known as the Debye length,\n# would be\n\nelectron_temperature = 8.8 * u.keV\nelectron_concentration = 10.1e19 / u.m**3\nprint(physics.Debye_length(electron_temperature, electron_concentration))\n\n############################################################\n# Note that we can also neglect the unit for the concentration, as\n# 1/m^3 is the a standard unit for this kind of Quantity:\n\nprint(physics.Debye_length(electron_temperature, 10.1e19))\n\n############################################################\n# Assuming the magnetic field as 5.3 Teslas (which is the value at the major\n# radius):\n\nB = 5.3 * u.T\n\nprint(physics.gyrofrequency(B, particle='e'))\n\nprint(physics.gyroradius(B, T_i=electron_temperature, particle='e'))\n\n######################################################################\n# The electron inertial length would be\nprint(physics.inertial_length(electron_concentration, particle='e'))\n\n######################################################################\n# In these conditions, they should reach thermal velocities of about\nprint(physics.thermal_speed(T=electron_temperature, particle='e'))\n\n######################################################################\n# And the Langmuir wave plasma frequency should be on the order of\nprint(physics.plasma_frequency(electron_concentration))\n\n############################################################\n# Let's try to recreate some plots and get a feel for some of these quantities.\n\nn_e = np.logspace(4, 30, 100) / u.m**3\nplt.plot(n_e, physics.plasma_frequency(n_e))\nplt.scatter(\n electron_concentration,\n physics.plasma_frequency(electron_concentration))\nplt.xlabel(\"Electron Concentration (m^-3)\")\nplt.ylabel(\"Langmuir Wave Plasma Frequency (rad/s)\")\nplt.show()\n", "path": "plasmapy/examples/plot_physics.py"}]}
| 1,450 | 501 |
gh_patches_debug_1817
|
rasdani/github-patches
|
git_diff
|
pyca__cryptography-8318
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect docstrings in x25519 and x448 `.public_key()` methods
See:
https://github.com/pyca/cryptography/blob/127a2860740c77f45362e68e0ed7d2d108a39033/src/cryptography/hazmat/primitives/asymmetric/x25519.py#L60-L64
https://github.com/pyca/cryptography/blob/127a2860740c77f45362e68e0ed7d2d108a39033/src/cryptography/hazmat/primitives/asymmetric/x448.py#L60-L64
In both instances, the method does not return serialised bytes, but a public key object. The full [generated documentation](https://cryptography.io/en/latest/hazmat/primitives/asymmetric/x25519/#cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.public_key) is correct, as are the Ed* docstrings.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/hazmat/primitives/asymmetric/x25519.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5
6 import abc
7
8 from cryptography.exceptions import UnsupportedAlgorithm, _Reasons
9 from cryptography.hazmat.primitives import _serialization
10
11
12 class X25519PublicKey(metaclass=abc.ABCMeta):
13 @classmethod
14 def from_public_bytes(cls, data: bytes) -> "X25519PublicKey":
15 from cryptography.hazmat.backends.openssl.backend import backend
16
17 if not backend.x25519_supported():
18 raise UnsupportedAlgorithm(
19 "X25519 is not supported by this version of OpenSSL.",
20 _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,
21 )
22
23 return backend.x25519_load_public_bytes(data)
24
25 @abc.abstractmethod
26 def public_bytes(
27 self,
28 encoding: _serialization.Encoding,
29 format: _serialization.PublicFormat,
30 ) -> bytes:
31 """
32 The serialized bytes of the public key.
33 """
34
35
36 class X25519PrivateKey(metaclass=abc.ABCMeta):
37 @classmethod
38 def generate(cls) -> "X25519PrivateKey":
39 from cryptography.hazmat.backends.openssl.backend import backend
40
41 if not backend.x25519_supported():
42 raise UnsupportedAlgorithm(
43 "X25519 is not supported by this version of OpenSSL.",
44 _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,
45 )
46 return backend.x25519_generate_key()
47
48 @classmethod
49 def from_private_bytes(cls, data: bytes) -> "X25519PrivateKey":
50 from cryptography.hazmat.backends.openssl.backend import backend
51
52 if not backend.x25519_supported():
53 raise UnsupportedAlgorithm(
54 "X25519 is not supported by this version of OpenSSL.",
55 _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,
56 )
57
58 return backend.x25519_load_private_bytes(data)
59
60 @abc.abstractmethod
61 def public_key(self) -> X25519PublicKey:
62 """
63 The serialized bytes of the public key.
64 """
65
66 @abc.abstractmethod
67 def private_bytes(
68 self,
69 encoding: _serialization.Encoding,
70 format: _serialization.PrivateFormat,
71 encryption_algorithm: _serialization.KeySerializationEncryption,
72 ) -> bytes:
73 """
74 The serialized bytes of the private key.
75 """
76
77 @abc.abstractmethod
78 def exchange(self, peer_public_key: X25519PublicKey) -> bytes:
79 """
80 Performs a key exchange operation using the provided peer's public key.
81 """
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cryptography/hazmat/primitives/asymmetric/x25519.py b/src/cryptography/hazmat/primitives/asymmetric/x25519.py
--- a/src/cryptography/hazmat/primitives/asymmetric/x25519.py
+++ b/src/cryptography/hazmat/primitives/asymmetric/x25519.py
@@ -60,7 +60,7 @@
@abc.abstractmethod
def public_key(self) -> X25519PublicKey:
"""
- The serialized bytes of the public key.
+ Returns the public key assosciated with this private key
"""
@abc.abstractmethod
|
{"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/asymmetric/x25519.py b/src/cryptography/hazmat/primitives/asymmetric/x25519.py\n--- a/src/cryptography/hazmat/primitives/asymmetric/x25519.py\n+++ b/src/cryptography/hazmat/primitives/asymmetric/x25519.py\n@@ -60,7 +60,7 @@\n @abc.abstractmethod\n def public_key(self) -> X25519PublicKey:\n \"\"\"\n- The serialized bytes of the public key.\n+ Returns the public key assosciated with this private key\n \"\"\"\n \n @abc.abstractmethod\n", "issue": "Incorrect docstrings in x25519 and x448 `.public_key()` methods\nSee:\r\n\r\nhttps://github.com/pyca/cryptography/blob/127a2860740c77f45362e68e0ed7d2d108a39033/src/cryptography/hazmat/primitives/asymmetric/x25519.py#L60-L64\r\n\r\nhttps://github.com/pyca/cryptography/blob/127a2860740c77f45362e68e0ed7d2d108a39033/src/cryptography/hazmat/primitives/asymmetric/x448.py#L60-L64\r\n\r\nIn both instances, the method does not return serialised bytes, but a public key object. The full [generated documentation](https://cryptography.io/en/latest/hazmat/primitives/asymmetric/x25519/#cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.public_key) is correct, as are the Ed* docstrings.\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport abc\n\nfrom cryptography.exceptions import UnsupportedAlgorithm, _Reasons\nfrom cryptography.hazmat.primitives import _serialization\n\n\nclass X25519PublicKey(metaclass=abc.ABCMeta):\n @classmethod\n def from_public_bytes(cls, data: bytes) -> \"X25519PublicKey\":\n from cryptography.hazmat.backends.openssl.backend import backend\n\n if not backend.x25519_supported():\n raise UnsupportedAlgorithm(\n \"X25519 is not supported by this version of OpenSSL.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,\n )\n\n return backend.x25519_load_public_bytes(data)\n\n @abc.abstractmethod\n def public_bytes(\n self,\n encoding: _serialization.Encoding,\n format: _serialization.PublicFormat,\n ) -> bytes:\n \"\"\"\n The serialized bytes of the public key.\n \"\"\"\n\n\nclass X25519PrivateKey(metaclass=abc.ABCMeta):\n @classmethod\n def generate(cls) -> \"X25519PrivateKey\":\n from cryptography.hazmat.backends.openssl.backend import backend\n\n if not backend.x25519_supported():\n raise UnsupportedAlgorithm(\n \"X25519 is not supported by this version of OpenSSL.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,\n )\n return backend.x25519_generate_key()\n\n @classmethod\n def from_private_bytes(cls, data: bytes) -> \"X25519PrivateKey\":\n from cryptography.hazmat.backends.openssl.backend import backend\n\n if not backend.x25519_supported():\n raise UnsupportedAlgorithm(\n \"X25519 is not supported by this version of OpenSSL.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,\n )\n\n return backend.x25519_load_private_bytes(data)\n\n @abc.abstractmethod\n def public_key(self) -> X25519PublicKey:\n \"\"\"\n The serialized bytes of the public key.\n \"\"\"\n\n @abc.abstractmethod\n def private_bytes(\n self,\n encoding: _serialization.Encoding,\n format: _serialization.PrivateFormat,\n encryption_algorithm: _serialization.KeySerializationEncryption,\n ) -> bytes:\n \"\"\"\n The serialized bytes of the private key.\n \"\"\"\n\n @abc.abstractmethod\n def exchange(self, peer_public_key: X25519PublicKey) -> bytes:\n \"\"\"\n Performs a key exchange operation using the provided peer's public key.\n \"\"\"\n", 
"path": "src/cryptography/hazmat/primitives/asymmetric/x25519.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport abc\n\nfrom cryptography.exceptions import UnsupportedAlgorithm, _Reasons\nfrom cryptography.hazmat.primitives import _serialization\n\n\nclass X25519PublicKey(metaclass=abc.ABCMeta):\n @classmethod\n def from_public_bytes(cls, data: bytes) -> \"X25519PublicKey\":\n from cryptography.hazmat.backends.openssl.backend import backend\n\n if not backend.x25519_supported():\n raise UnsupportedAlgorithm(\n \"X25519 is not supported by this version of OpenSSL.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,\n )\n\n return backend.x25519_load_public_bytes(data)\n\n @abc.abstractmethod\n def public_bytes(\n self,\n encoding: _serialization.Encoding,\n format: _serialization.PublicFormat,\n ) -> bytes:\n \"\"\"\n The serialized bytes of the public key.\n \"\"\"\n\n\nclass X25519PrivateKey(metaclass=abc.ABCMeta):\n @classmethod\n def generate(cls) -> \"X25519PrivateKey\":\n from cryptography.hazmat.backends.openssl.backend import backend\n\n if not backend.x25519_supported():\n raise UnsupportedAlgorithm(\n \"X25519 is not supported by this version of OpenSSL.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,\n )\n return backend.x25519_generate_key()\n\n @classmethod\n def from_private_bytes(cls, data: bytes) -> \"X25519PrivateKey\":\n from cryptography.hazmat.backends.openssl.backend import backend\n\n if not backend.x25519_supported():\n raise UnsupportedAlgorithm(\n \"X25519 is not supported by this version of OpenSSL.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM,\n )\n\n return backend.x25519_load_private_bytes(data)\n\n @abc.abstractmethod\n def public_key(self) -> X25519PublicKey:\n \"\"\"\n Returns the public key assosciated with this private key\n \"\"\"\n\n @abc.abstractmethod\n def private_bytes(\n self,\n encoding: _serialization.Encoding,\n format: _serialization.PrivateFormat,\n encryption_algorithm: _serialization.KeySerializationEncryption,\n ) -> bytes:\n \"\"\"\n The serialized bytes of the private key.\n \"\"\"\n\n @abc.abstractmethod\n def exchange(self, peer_public_key: X25519PublicKey) -> bytes:\n \"\"\"\n Performs a key exchange operation using the provided peer's public key.\n \"\"\"\n", "path": "src/cryptography/hazmat/primitives/asymmetric/x25519.py"}]}
| 1,270 | 148 |
gh_patches_debug_18999
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-1270
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update regex pattern for the envvar construtor
You can use envvars on your configuration, but they need to follow this pattern `$ENVVARNAME` the regex pattern should be updated to allow users to use either `$ENVVARNAME` or `${ENVVARNAME}`.
While we are at it we should allow users to use other characters as well like `_` or `-`.
This change needs to be done on [opsdroid.loader.load_config_file](https://github.com/opsdroid/opsdroid/blob/674013037eab826640174407a73f8fed1a29b290/opsdroid/loader.py#L347)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/const.py`
Content:
```
1 """Constants used by OpsDroid."""
2 import os
3 from appdirs import user_log_dir, user_config_dir, user_data_dir
4 import opsdroid
5 from opsdroid import __version__ # noqa # pylint: disable=unused-import
6
7 NAME = "opsdroid"
8 MODULE_ROOT = os.path.dirname(os.path.abspath(opsdroid.__file__))
9 DEFAULT_GIT_URL = "https://github.com/opsdroid/"
10 MODULES_DIRECTORY = "opsdroid-modules"
11 DEFAULT_ROOT_PATH = user_data_dir(NAME)
12 DEFAULT_LOG_FILENAME = os.path.join(user_log_dir(NAME, appauthor=False), "output.log")
13 DEFAULT_MODULES_PATH = user_data_dir(NAME, MODULES_DIRECTORY)
14 DEFAULT_MODULE_DEPS_PATH = os.path.join(
15 user_data_dir(NAME, MODULES_DIRECTORY), "site-packages"
16 )
17 DEFAULT_CONFIG_PATH = os.path.join(
18 user_config_dir(NAME, appauthor=False), "configuration.yaml"
19 )
20 DEFAULT_MODULE_BRANCH = "master"
21 DEFAULT_LANGUAGE = "en"
22 LOCALE_DIR = os.path.join(MODULE_ROOT, "locale")
23 EXAMPLE_CONFIG_FILE = os.path.join(
24 os.path.dirname(os.path.abspath(__file__)),
25 "configuration/example_configuration.yaml",
26 )
27 REGEX_PARSE_SCORE_FACTOR = 0.6
28
29 RASANLU_DEFAULT_URL = "http://localhost:5000"
30 RASANLU_DEFAULT_PROJECT = "opsdroid"
31
32 LUISAI_DEFAULT_URL = "https://westus.api.cognitive.microsoft.com/luis/v2.0/apps/"
33
34 DIALOGFLOW_API_ENDPOINT = "https://api.dialogflow.com/v1/query"
35 DIALOGFLOW_API_VERSION = "20150910"
36
37 WITAI_DEFAULT_VERSION = "20170307"
38 WITAI_API_ENDPOINT = "https://api.wit.ai/message?"
39
40 SAPCAI_API_ENDPOINT = "https://api.cai.tools.sap/v2/request"
41
42 WATSON_API_ENDPOINT = "https://{gateway}.watsonplatform.net/assistant/api"
43 WATSON_API_VERSION = "2019-02-28"
44
```
Path: `opsdroid/configuration/__init__.py`
Content:
```
1 """Load configuration from yaml file."""
2
3 import os
4 import shutil
5 import sys
6 import re
7 import logging
8 import yaml
9
10 from opsdroid.const import DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE
11 from opsdroid.configuration.validation import validate_configuration, BASE_SCHEMA
12 from opsdroid.helper import update_pre_0_17_config_format
13
14
15 _LOGGER = logging.getLogger(__name__)
16
17
18 def create_default_config(config_path):
19 """Create a default config file based on the example config file.
20
21 If we can't find any configuration.yaml, we will pull the whole
22 example_configuration.yaml and use this file as the configuration.
23
24 Args:
25 config_path: String containing the path to configuration.yaml
26 default install location
27
28 Returns:
29 str: path to configuration.yaml default install location
30
31 """
32 _LOGGER.info("Creating %s.", config_path)
33 config_dir, _ = os.path.split(config_path)
34 if not os.path.isdir(config_dir):
35 os.makedirs(config_dir)
36 shutil.copyfile(EXAMPLE_CONFIG_FILE, config_path)
37 return config_path
38
39
40 def get_config_path(config_paths):
41 """Get the path to configuration.yaml.
42
43 Opsdroid configuration.yaml can be located in different paths.
44 With this function, we will go through all of the possible paths and
45 return the correct path.
46
47 If we don't have any configuration.yaml we will just create one using
48 the example configuration file.
49
50 Args:
51 config_paths: List containing all the possible config paths.
52
53 Returns:
54 str: Path to the configuration file.
55
56 """
57 config_path = ""
58 for possible_path in config_paths:
59 if not os.path.isfile(possible_path):
60 _LOGGER.debug(_("Config file %s not found."), possible_path)
61 else:
62 config_path = possible_path
63 break
64
65 if not config_path:
66 _LOGGER.info(
67 _("No configuration files found. Creating %s"), DEFAULT_CONFIG_PATH
68 )
69 config_path = create_default_config(DEFAULT_CONFIG_PATH)
70
71 return config_path
72
73
74 env_var_pattern = re.compile(r"^\$([A-Z_]*)$")
75
76
77 def envvar_constructor(loader, node):
78 """Yaml parser for env vars."""
79 value = loader.construct_scalar(node)
80 [env_var] = env_var_pattern.match(value).groups()
81 return os.environ[env_var]
82
83
84 def load_config_file(config_paths):
85 """Load a yaml config file from path.
86
87 We get a path for the configuration file and then use the yaml
88 library to load this file - the configuration will be shown as a
89 dict. Here we also add constructors to our yaml loader and handle
90 different exceptions that could be raised when trying to load or
91 validate the file.
92
93 Args:
94 config_paths: List of paths to configuration.yaml files
95
96 Returns:
97 dict: Dict containing config fields
98
99 """
100
101 config_path = get_config_path(config_paths)
102
103 yaml.SafeLoader.add_implicit_resolver("!envvar", env_var_pattern, first="$")
104 yaml.SafeLoader.add_constructor("!envvar", envvar_constructor)
105
106 try:
107 with open(config_path, "r") as stream:
108 _LOGGER.info(_("Loaded config from %s."), config_path)
109
110 data = yaml.load(stream, Loader=yaml.SafeLoader)
111 configuration = update_pre_0_17_config_format(data)
112 validate_configuration(configuration, BASE_SCHEMA)
113
114 return configuration
115
116 except yaml.YAMLError as error:
117 _LOGGER.critical(error)
118 sys.exit(1)
119
120 except FileNotFoundError as error:
121 _LOGGER.critical(error)
122 sys.exit(1)
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opsdroid/configuration/__init__.py b/opsdroid/configuration/__init__.py
--- a/opsdroid/configuration/__init__.py
+++ b/opsdroid/configuration/__init__.py
@@ -7,7 +7,7 @@
import logging
import yaml
-from opsdroid.const import DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE
+from opsdroid.const import DEFAULT_CONFIG_PATH, ENV_VAR_REGEX, EXAMPLE_CONFIG_FILE
from opsdroid.configuration.validation import validate_configuration, BASE_SCHEMA
from opsdroid.helper import update_pre_0_17_config_format
@@ -71,7 +71,7 @@
return config_path
-env_var_pattern = re.compile(r"^\$([A-Z_]*)$")
+env_var_pattern = re.compile(ENV_VAR_REGEX)
def envvar_constructor(loader, node):
diff --git a/opsdroid/const.py b/opsdroid/const.py
--- a/opsdroid/const.py
+++ b/opsdroid/const.py
@@ -41,3 +41,4 @@
WATSON_API_ENDPOINT = "https://{gateway}.watsonplatform.net/assistant/api"
WATSON_API_VERSION = "2019-02-28"
+ENV_VAR_REGEX = r"^\"?\${?(?=\_?[A-Z])([A-Z-_]+)}?\"?$"
|
{"golden_diff": "diff --git a/opsdroid/configuration/__init__.py b/opsdroid/configuration/__init__.py\n--- a/opsdroid/configuration/__init__.py\n+++ b/opsdroid/configuration/__init__.py\n@@ -7,7 +7,7 @@\n import logging\n import yaml\n \n-from opsdroid.const import DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE\n+from opsdroid.const import DEFAULT_CONFIG_PATH, ENV_VAR_REGEX, EXAMPLE_CONFIG_FILE\n from opsdroid.configuration.validation import validate_configuration, BASE_SCHEMA\n from opsdroid.helper import update_pre_0_17_config_format\n \n@@ -71,7 +71,7 @@\n return config_path\n \n \n-env_var_pattern = re.compile(r\"^\\$([A-Z_]*)$\")\n+env_var_pattern = re.compile(ENV_VAR_REGEX)\n \n \n def envvar_constructor(loader, node):\ndiff --git a/opsdroid/const.py b/opsdroid/const.py\n--- a/opsdroid/const.py\n+++ b/opsdroid/const.py\n@@ -41,3 +41,4 @@\n \n WATSON_API_ENDPOINT = \"https://{gateway}.watsonplatform.net/assistant/api\"\n WATSON_API_VERSION = \"2019-02-28\"\n+ENV_VAR_REGEX = r\"^\\\"?\\${?(?=\\_?[A-Z])([A-Z-_]+)}?\\\"?$\"\n", "issue": "Update regex pattern for the envvar construtor\nYou can use envvars on your configuration, but they need to follow this pattern `$ENVVARNAME` the regex pattern should be updated to allow users to use either `$ENVVARNAME` or `${ENVVARNAME}`. \r\n\r\nWhile we are at it we should allow users to use other characters as well like `_` or `-`. \r\n\r\nThis change needs to be done on [opsdroid.loader.load_config_file](https://github.com/opsdroid/opsdroid/blob/674013037eab826640174407a73f8fed1a29b290/opsdroid/loader.py#L347)\n", "before_files": [{"content": "\"\"\"Constants used by OpsDroid.\"\"\"\nimport os\nfrom appdirs import user_log_dir, user_config_dir, user_data_dir\nimport opsdroid\nfrom opsdroid import __version__ # noqa # pylint: disable=unused-import\n\nNAME = \"opsdroid\"\nMODULE_ROOT = os.path.dirname(os.path.abspath(opsdroid.__file__))\nDEFAULT_GIT_URL = \"https://github.com/opsdroid/\"\nMODULES_DIRECTORY = \"opsdroid-modules\"\nDEFAULT_ROOT_PATH = user_data_dir(NAME)\nDEFAULT_LOG_FILENAME = os.path.join(user_log_dir(NAME, appauthor=False), \"output.log\")\nDEFAULT_MODULES_PATH = user_data_dir(NAME, MODULES_DIRECTORY)\nDEFAULT_MODULE_DEPS_PATH = os.path.join(\n user_data_dir(NAME, MODULES_DIRECTORY), \"site-packages\"\n)\nDEFAULT_CONFIG_PATH = os.path.join(\n user_config_dir(NAME, appauthor=False), \"configuration.yaml\"\n)\nDEFAULT_MODULE_BRANCH = \"master\"\nDEFAULT_LANGUAGE = \"en\"\nLOCALE_DIR = os.path.join(MODULE_ROOT, \"locale\")\nEXAMPLE_CONFIG_FILE = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"configuration/example_configuration.yaml\",\n)\nREGEX_PARSE_SCORE_FACTOR = 0.6\n\nRASANLU_DEFAULT_URL = \"http://localhost:5000\"\nRASANLU_DEFAULT_PROJECT = \"opsdroid\"\n\nLUISAI_DEFAULT_URL = \"https://westus.api.cognitive.microsoft.com/luis/v2.0/apps/\"\n\nDIALOGFLOW_API_ENDPOINT = \"https://api.dialogflow.com/v1/query\"\nDIALOGFLOW_API_VERSION = \"20150910\"\n\nWITAI_DEFAULT_VERSION = \"20170307\"\nWITAI_API_ENDPOINT = \"https://api.wit.ai/message?\"\n\nSAPCAI_API_ENDPOINT = \"https://api.cai.tools.sap/v2/request\"\n\nWATSON_API_ENDPOINT = \"https://{gateway}.watsonplatform.net/assistant/api\"\nWATSON_API_VERSION = \"2019-02-28\"\n", "path": "opsdroid/const.py"}, {"content": "\"\"\"Load configuration from yaml file.\"\"\"\n\nimport os\nimport shutil\nimport sys\nimport re\nimport logging\nimport yaml\n\nfrom opsdroid.const import DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE\nfrom opsdroid.configuration.validation import 
validate_configuration, BASE_SCHEMA\nfrom opsdroid.helper import update_pre_0_17_config_format\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef create_default_config(config_path):\n \"\"\"Create a default config file based on the example config file.\n\n If we can't find any configuration.yaml, we will pull the whole\n example_configuration.yaml and use this file as the configuration.\n\n Args:\n config_path: String containing the path to configuration.yaml\n default install location\n\n Returns:\n str: path to configuration.yaml default install location\n\n \"\"\"\n _LOGGER.info(\"Creating %s.\", config_path)\n config_dir, _ = os.path.split(config_path)\n if not os.path.isdir(config_dir):\n os.makedirs(config_dir)\n shutil.copyfile(EXAMPLE_CONFIG_FILE, config_path)\n return config_path\n\n\ndef get_config_path(config_paths):\n \"\"\"Get the path to configuration.yaml.\n\n Opsdroid configuration.yaml can be located in different paths.\n With this function, we will go through all of the possible paths and\n return the correct path.\n\n If we don't have any configuration.yaml we will just create one using\n the example configuration file.\n\n Args:\n config_paths: List containing all the possible config paths.\n\n Returns:\n str: Path to the configuration file.\n\n \"\"\"\n config_path = \"\"\n for possible_path in config_paths:\n if not os.path.isfile(possible_path):\n _LOGGER.debug(_(\"Config file %s not found.\"), possible_path)\n else:\n config_path = possible_path\n break\n\n if not config_path:\n _LOGGER.info(\n _(\"No configuration files found. Creating %s\"), DEFAULT_CONFIG_PATH\n )\n config_path = create_default_config(DEFAULT_CONFIG_PATH)\n\n return config_path\n\n\nenv_var_pattern = re.compile(r\"^\\$([A-Z_]*)$\")\n\n\ndef envvar_constructor(loader, node):\n \"\"\"Yaml parser for env vars.\"\"\"\n value = loader.construct_scalar(node)\n [env_var] = env_var_pattern.match(value).groups()\n return os.environ[env_var]\n\n\ndef load_config_file(config_paths):\n \"\"\"Load a yaml config file from path.\n\n We get a path for the configuration file and then use the yaml\n library to load this file - the configuration will be shown as a\n dict. 
Here we also add constructors to our yaml loader and handle\n different exceptions that could be raised when trying to load or\n validate the file.\n\n Args:\n config_paths: List of paths to configuration.yaml files\n\n Returns:\n dict: Dict containing config fields\n\n \"\"\"\n\n config_path = get_config_path(config_paths)\n\n yaml.SafeLoader.add_implicit_resolver(\"!envvar\", env_var_pattern, first=\"$\")\n yaml.SafeLoader.add_constructor(\"!envvar\", envvar_constructor)\n\n try:\n with open(config_path, \"r\") as stream:\n _LOGGER.info(_(\"Loaded config from %s.\"), config_path)\n\n data = yaml.load(stream, Loader=yaml.SafeLoader)\n configuration = update_pre_0_17_config_format(data)\n validate_configuration(configuration, BASE_SCHEMA)\n\n return configuration\n\n except yaml.YAMLError as error:\n _LOGGER.critical(error)\n sys.exit(1)\n\n except FileNotFoundError as error:\n _LOGGER.critical(error)\n sys.exit(1)\n", "path": "opsdroid/configuration/__init__.py"}], "after_files": [{"content": "\"\"\"Constants used by OpsDroid.\"\"\"\nimport os\nfrom appdirs import user_log_dir, user_config_dir, user_data_dir\nimport opsdroid\nfrom opsdroid import __version__ # noqa # pylint: disable=unused-import\n\nNAME = \"opsdroid\"\nMODULE_ROOT = os.path.dirname(os.path.abspath(opsdroid.__file__))\nDEFAULT_GIT_URL = \"https://github.com/opsdroid/\"\nMODULES_DIRECTORY = \"opsdroid-modules\"\nDEFAULT_ROOT_PATH = user_data_dir(NAME)\nDEFAULT_LOG_FILENAME = os.path.join(user_log_dir(NAME, appauthor=False), \"output.log\")\nDEFAULT_MODULES_PATH = user_data_dir(NAME, MODULES_DIRECTORY)\nDEFAULT_MODULE_DEPS_PATH = os.path.join(\n user_data_dir(NAME, MODULES_DIRECTORY), \"site-packages\"\n)\nDEFAULT_CONFIG_PATH = os.path.join(\n user_config_dir(NAME, appauthor=False), \"configuration.yaml\"\n)\nDEFAULT_MODULE_BRANCH = \"master\"\nDEFAULT_LANGUAGE = \"en\"\nLOCALE_DIR = os.path.join(MODULE_ROOT, \"locale\")\nEXAMPLE_CONFIG_FILE = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"configuration/example_configuration.yaml\",\n)\nREGEX_PARSE_SCORE_FACTOR = 0.6\n\nRASANLU_DEFAULT_URL = \"http://localhost:5000\"\nRASANLU_DEFAULT_PROJECT = \"opsdroid\"\n\nLUISAI_DEFAULT_URL = \"https://westus.api.cognitive.microsoft.com/luis/v2.0/apps/\"\n\nDIALOGFLOW_API_ENDPOINT = \"https://api.dialogflow.com/v1/query\"\nDIALOGFLOW_API_VERSION = \"20150910\"\n\nWITAI_DEFAULT_VERSION = \"20170307\"\nWITAI_API_ENDPOINT = \"https://api.wit.ai/message?\"\n\nSAPCAI_API_ENDPOINT = \"https://api.cai.tools.sap/v2/request\"\n\nWATSON_API_ENDPOINT = \"https://{gateway}.watsonplatform.net/assistant/api\"\nWATSON_API_VERSION = \"2019-02-28\"\nENV_VAR_REGEX = r\"^\\\"?\\${?(?=\\_?[A-Z])([A-Z-_]+)}?\\\"?$\"\n", "path": "opsdroid/const.py"}, {"content": "\"\"\"Load configuration from yaml file.\"\"\"\n\nimport os\nimport shutil\nimport sys\nimport re\nimport logging\nimport yaml\n\nfrom opsdroid.const import DEFAULT_CONFIG_PATH, ENV_VAR_REGEX, EXAMPLE_CONFIG_FILE\nfrom opsdroid.configuration.validation import validate_configuration, BASE_SCHEMA\nfrom opsdroid.helper import update_pre_0_17_config_format\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef create_default_config(config_path):\n \"\"\"Create a default config file based on the example config file.\n\n If we can't find any configuration.yaml, we will pull the whole\n example_configuration.yaml and use this file as the configuration.\n\n Args:\n config_path: String containing the path to configuration.yaml\n default install location\n\n Returns:\n str: path to 
configuration.yaml default install location\n\n \"\"\"\n _LOGGER.info(\"Creating %s.\", config_path)\n config_dir, _ = os.path.split(config_path)\n if not os.path.isdir(config_dir):\n os.makedirs(config_dir)\n shutil.copyfile(EXAMPLE_CONFIG_FILE, config_path)\n return config_path\n\n\ndef get_config_path(config_paths):\n \"\"\"Get the path to configuration.yaml.\n\n Opsdroid configuration.yaml can be located in different paths.\n With this function, we will go through all of the possible paths and\n return the correct path.\n\n If we don't have any configuration.yaml we will just create one using\n the example configuration file.\n\n Args:\n config_paths: List containing all the possible config paths.\n\n Returns:\n str: Path to the configuration file.\n\n \"\"\"\n config_path = \"\"\n for possible_path in config_paths:\n if not os.path.isfile(possible_path):\n _LOGGER.debug(_(\"Config file %s not found.\"), possible_path)\n else:\n config_path = possible_path\n break\n\n if not config_path:\n _LOGGER.info(\n _(\"No configuration files found. Creating %s\"), DEFAULT_CONFIG_PATH\n )\n config_path = create_default_config(DEFAULT_CONFIG_PATH)\n\n return config_path\n\n\nenv_var_pattern = re.compile(ENV_VAR_REGEX)\n\n\ndef envvar_constructor(loader, node):\n \"\"\"Yaml parser for env vars.\"\"\"\n value = loader.construct_scalar(node)\n [env_var] = env_var_pattern.match(value).groups()\n return os.environ[env_var]\n\n\ndef load_config_file(config_paths):\n \"\"\"Load a yaml config file from path.\n\n We get a path for the configuration file and then use the yaml\n library to load this file - the configuration will be shown as a\n dict. Here we also add constructors to our yaml loader and handle\n different exceptions that could be raised when trying to load or\n validate the file.\n\n Args:\n config_paths: List of paths to configuration.yaml files\n\n Returns:\n dict: Dict containing config fields\n\n \"\"\"\n\n config_path = get_config_path(config_paths)\n\n yaml.SafeLoader.add_implicit_resolver(\"!envvar\", env_var_pattern, first=\"$\")\n yaml.SafeLoader.add_constructor(\"!envvar\", envvar_constructor)\n\n try:\n with open(config_path, \"r\") as stream:\n _LOGGER.info(_(\"Loaded config from %s.\"), config_path)\n\n data = yaml.load(stream, Loader=yaml.SafeLoader)\n configuration = update_pre_0_17_config_format(data)\n validate_configuration(configuration, BASE_SCHEMA)\n\n return configuration\n\n except yaml.YAMLError as error:\n _LOGGER.critical(error)\n sys.exit(1)\n\n except FileNotFoundError as error:\n _LOGGER.critical(error)\n sys.exit(1)\n", "path": "opsdroid/configuration/__init__.py"}]}
| 1,983 | 299 |
gh_patches_debug_20188
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-python-2080
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Threading integration breaks OpenCensus' threading integration
### How do you use Sentry?
Sentry Saas (sentry.io)
### Version
1.21.1
### Steps to Reproduce
1. Enable threading Sentry integration (it seems to be enabled by default)
2. Enable [threading OpenCensus integration](https://pypi.org/project/opencensus-ext-threading/)
3. Start a thread
```python
import threading
import sentry_sdk.integrations.threading
import opencensus.trace.config_integration
sentry_sdk.init(integrations=[
sentry_sdk.integrations.threading.ThreadingIntegration(),
])
opencensus.trace.config_integration.trace_integrations(
integrations=["threading"],
)
thread = threading.Thread(target=print, args=("foo",))
thread.start()
thread.join()
```
### Expected Result
No error, output: `foo`
### Actual Result
Exceptions raised on every thread's startup
```python
Exception in thread Thread-1 (print):
Traceback (most recent call last):
File "/usr/lib/python3.10/threading.py", line 1016, in _bootstrap_inner
self.run()
File ".venv/lib/python3.10/site-packages/sentry_sdk/integrations/threading.py", line 69, in run
reraise(*_capture_exception())
File ".venv/lib/python3.10/site-packages/sentry_sdk/_compat.py", line 60, in reraise
raise value
File ".venv/lib/python3.10/site-packages/sentry_sdk/integrations/threading.py", line 67, in run
return old_run_func(self, *a, **kw)
File ".venv/lib/python3.10/site-packages/opencensus/ext/threading/trace.py", line 80, in call
*self._opencensus_context
AttributeError: 'Thread' object has no attribute '_opencensus_context'
```
### Additional details
* Python: 3.10
* OpenCensus threading extension: 0.1.2
My suggestion is to use [`functools.update_wrapper`](https://docs.python.org/3/library/functools.html#functools.update_wrapper) (or the convenience decorator [`functools.wraps`](https://docs.python.org/3/library/functools.html#functools.wraps)) to set the name of [`sentry_start`](https://github.com/getsentry/sentry-python/blob/f763061ed9d9e99d85b3e95adc3ed63b623fc4a0/sentry_sdk/integrations/threading.py#L35-54) to `"start"` (and maybe the same for the result of `_wrap_run`, to `"run"`), so it "holds up to basic introspection".
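As a rough sketch of the idea (simplified and hypothetical, not the actual integration code):

```python
from functools import wraps
from threading import Thread

old_start = Thread.start

@wraps(old_start)  # copies __name__, __doc__, __module__, etc. from the original start()
def sentry_start(self, *a, **kw):
    # ... the integration's hub propagation / run wrapping would go here ...
    return old_start(self, *a, **kw)

Thread.start = sentry_start

# basic introspection is preserved: the wrapper reports the original name
assert Thread.start.__name__ == "start"
```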
PS: please include 'Additional details' block in issue template
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/integrations/threading.py`
Content:
```
1 from __future__ import absolute_import
2
3 import sys
4 from threading import Thread, current_thread
5
6 from sentry_sdk import Hub
7 from sentry_sdk._compat import reraise
8 from sentry_sdk._types import TYPE_CHECKING
9 from sentry_sdk.integrations import Integration
10 from sentry_sdk.utils import event_from_exception, capture_internal_exceptions
11
12 if TYPE_CHECKING:
13 from typing import Any
14 from typing import TypeVar
15 from typing import Callable
16 from typing import Optional
17
18 from sentry_sdk._types import ExcInfo
19
20 F = TypeVar("F", bound=Callable[..., Any])
21
22
23 class ThreadingIntegration(Integration):
24 identifier = "threading"
25
26 def __init__(self, propagate_hub=False):
27 # type: (bool) -> None
28 self.propagate_hub = propagate_hub
29
30 @staticmethod
31 def setup_once():
32 # type: () -> None
33 old_start = Thread.start
34
35 def sentry_start(self, *a, **kw):
36 # type: (Thread, *Any, **Any) -> Any
37 hub = Hub.current
38 integration = hub.get_integration(ThreadingIntegration)
39 if integration is not None:
40 if not integration.propagate_hub:
41 hub_ = None
42 else:
43 hub_ = Hub(hub)
44 # Patching instance methods in `start()` creates a reference cycle if
45 # done in a naive way. See
46 # https://github.com/getsentry/sentry-python/pull/434
47 #
48 # In threading module, using current_thread API will access current thread instance
49 # without holding it to avoid a reference cycle in an easier way.
50 with capture_internal_exceptions():
51 new_run = _wrap_run(hub_, getattr(self.run, "__func__", self.run))
52 self.run = new_run # type: ignore
53
54 return old_start(self, *a, **kw)
55
56 Thread.start = sentry_start # type: ignore
57
58
59 def _wrap_run(parent_hub, old_run_func):
60 # type: (Optional[Hub], F) -> F
61 def run(*a, **kw):
62 # type: (*Any, **Any) -> Any
63 hub = parent_hub or Hub.current
64 with hub:
65 try:
66 self = current_thread()
67 return old_run_func(self, *a, **kw)
68 except Exception:
69 reraise(*_capture_exception())
70
71 return run # type: ignore
72
73
74 def _capture_exception():
75 # type: () -> ExcInfo
76 hub = Hub.current
77 exc_info = sys.exc_info()
78
79 if hub.get_integration(ThreadingIntegration) is not None:
80 # If an integration is there, a client has to be there.
81 client = hub.client # type: Any
82
83 event, hint = event_from_exception(
84 exc_info,
85 client_options=client.options,
86 mechanism={"type": "threading", "handled": False},
87 )
88 hub.capture_event(event, hint=hint)
89
90 return exc_info
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sentry_sdk/integrations/threading.py b/sentry_sdk/integrations/threading.py
--- a/sentry_sdk/integrations/threading.py
+++ b/sentry_sdk/integrations/threading.py
@@ -1,6 +1,7 @@
from __future__ import absolute_import
import sys
+from functools import wraps
from threading import Thread, current_thread
from sentry_sdk import Hub
@@ -32,6 +33,7 @@
# type: () -> None
old_start = Thread.start
+ @wraps(old_start)
def sentry_start(self, *a, **kw):
# type: (Thread, *Any, **Any) -> Any
hub = Hub.current
@@ -58,6 +60,7 @@
def _wrap_run(parent_hub, old_run_func):
# type: (Optional[Hub], F) -> F
+ @wraps(old_run_func)
def run(*a, **kw):
# type: (*Any, **Any) -> Any
hub = parent_hub or Hub.current
|
{"golden_diff": "diff --git a/sentry_sdk/integrations/threading.py b/sentry_sdk/integrations/threading.py\n--- a/sentry_sdk/integrations/threading.py\n+++ b/sentry_sdk/integrations/threading.py\n@@ -1,6 +1,7 @@\n from __future__ import absolute_import\n \n import sys\n+from functools import wraps\n from threading import Thread, current_thread\n \n from sentry_sdk import Hub\n@@ -32,6 +33,7 @@\n # type: () -> None\n old_start = Thread.start\n \n+ @wraps(old_start)\n def sentry_start(self, *a, **kw):\n # type: (Thread, *Any, **Any) -> Any\n hub = Hub.current\n@@ -58,6 +60,7 @@\n \n def _wrap_run(parent_hub, old_run_func):\n # type: (Optional[Hub], F) -> F\n+ @wraps(old_run_func)\n def run(*a, **kw):\n # type: (*Any, **Any) -> Any\n hub = parent_hub or Hub.current\n", "issue": "Threading integration breaks OpenCensus' threading integration\n### How do you use Sentry?\r\n\r\nSentry Saas (sentry.io)\r\n\r\n### Version\r\n\r\n1.21.1\r\n\r\n### Steps to Reproduce\r\n\r\n1. Enable threading Sentry integration (it seems to be enabled by default)\r\n2. Enable [threading OpenCensus integration](https://pypi.org/project/opencensus-ext-threading/)\r\n3. Start a thread\r\n\r\n```python\r\nimport threading\r\n\r\nimport sentry_sdk.integrations.threading\r\nimport opencensus.trace.config_integration\r\n\r\nsentry_sdk.init(integrations=[\r\n sentry_sdk.integrations.threading.ThreadingIntegration(),\r\n])\r\n\r\nopencensus.trace.config_integration.trace_integrations(\r\n integrations=[\"threading\"],\r\n)\r\n\r\nthread = threading.Thread(target=print, args=(\"foo\",))\r\nthread.start()\r\nthread.join()\r\n```\r\n\r\n### Expected Result\r\n\r\nNo error, output: `foo`\r\n\r\n### Actual Result\r\n\r\nExceptions raised on every thread's startup\r\n\r\n```python\r\nException in thread Thread-1 (print):\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner\r\n self.run()\r\n File \".venv/lib/python3.10/site-packages/sentry_sdk/integrations/threading.py\", line 69, in run\r\n reraise(*_capture_exception())\r\n File \".venv/lib/python3.10/site-packages/sentry_sdk/_compat.py\", line 60, in reraise\r\n raise value\r\n File \".venv/lib/python3.10/site-packages/sentry_sdk/integrations/threading.py\", line 67, in run\r\n return old_run_func(self, *a, **kw)\r\n File \".venv/lib/python3.10/site-packages/opencensus/ext/threading/trace.py\", line 80, in call\r\n *self._opencensus_context\r\nAttributeError: 'Thread' object has no attribute '_opencensus_context'\r\n```\r\n\r\n### Additional details\r\n\r\n* Python: 3.10\r\n* OpenCensus threading extension: 0.1.2\r\n\r\nMy suggestion is to use [`functools.update_wrapper`](https://docs.python.org/3/library/functools.html#functools.update_wrapper) (or the convenience decorator [`functools.wraps`](https://docs.python.org/3/library/functools.html#functools.wraps)) to set the name of [`sentry_start`](https://github.com/getsentry/sentry-python/blob/f763061ed9d9e99d85b3e95adc3ed63b623fc4a0/sentry_sdk/integrations/threading.py#L35-54) to `\"start\"` (and maybe the same for the result of `_wrap_run`, to `\"run\"`), so it \"holds up to basic introspection\".\r\n\r\nPS: please include 'Additional details' block in issue template\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport sys\nfrom threading import Thread, current_thread\n\nfrom sentry_sdk import Hub\nfrom sentry_sdk._compat import reraise\nfrom sentry_sdk._types import TYPE_CHECKING\nfrom sentry_sdk.integrations import Integration\nfrom 
sentry_sdk.utils import event_from_exception, capture_internal_exceptions\n\nif TYPE_CHECKING:\n from typing import Any\n from typing import TypeVar\n from typing import Callable\n from typing import Optional\n\n from sentry_sdk._types import ExcInfo\n\n F = TypeVar(\"F\", bound=Callable[..., Any])\n\n\nclass ThreadingIntegration(Integration):\n identifier = \"threading\"\n\n def __init__(self, propagate_hub=False):\n # type: (bool) -> None\n self.propagate_hub = propagate_hub\n\n @staticmethod\n def setup_once():\n # type: () -> None\n old_start = Thread.start\n\n def sentry_start(self, *a, **kw):\n # type: (Thread, *Any, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(ThreadingIntegration)\n if integration is not None:\n if not integration.propagate_hub:\n hub_ = None\n else:\n hub_ = Hub(hub)\n # Patching instance methods in `start()` creates a reference cycle if\n # done in a naive way. See\n # https://github.com/getsentry/sentry-python/pull/434\n #\n # In threading module, using current_thread API will access current thread instance\n # without holding it to avoid a reference cycle in an easier way.\n with capture_internal_exceptions():\n new_run = _wrap_run(hub_, getattr(self.run, \"__func__\", self.run))\n self.run = new_run # type: ignore\n\n return old_start(self, *a, **kw)\n\n Thread.start = sentry_start # type: ignore\n\n\ndef _wrap_run(parent_hub, old_run_func):\n # type: (Optional[Hub], F) -> F\n def run(*a, **kw):\n # type: (*Any, **Any) -> Any\n hub = parent_hub or Hub.current\n with hub:\n try:\n self = current_thread()\n return old_run_func(self, *a, **kw)\n except Exception:\n reraise(*_capture_exception())\n\n return run # type: ignore\n\n\ndef _capture_exception():\n # type: () -> ExcInfo\n hub = Hub.current\n exc_info = sys.exc_info()\n\n if hub.get_integration(ThreadingIntegration) is not None:\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n\n event, hint = event_from_exception(\n exc_info,\n client_options=client.options,\n mechanism={\"type\": \"threading\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n return exc_info\n", "path": "sentry_sdk/integrations/threading.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport sys\nfrom functools import wraps\nfrom threading import Thread, current_thread\n\nfrom sentry_sdk import Hub\nfrom sentry_sdk._compat import reraise\nfrom sentry_sdk._types import TYPE_CHECKING\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.utils import event_from_exception, capture_internal_exceptions\n\nif TYPE_CHECKING:\n from typing import Any\n from typing import TypeVar\n from typing import Callable\n from typing import Optional\n\n from sentry_sdk._types import ExcInfo\n\n F = TypeVar(\"F\", bound=Callable[..., Any])\n\n\nclass ThreadingIntegration(Integration):\n identifier = \"threading\"\n\n def __init__(self, propagate_hub=False):\n # type: (bool) -> None\n self.propagate_hub = propagate_hub\n\n @staticmethod\n def setup_once():\n # type: () -> None\n old_start = Thread.start\n\n @wraps(old_start)\n def sentry_start(self, *a, **kw):\n # type: (Thread, *Any, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(ThreadingIntegration)\n if integration is not None:\n if not integration.propagate_hub:\n hub_ = None\n else:\n hub_ = Hub(hub)\n # Patching instance methods in `start()` creates a reference cycle if\n # done in a naive way. 
See\n # https://github.com/getsentry/sentry-python/pull/434\n #\n # In threading module, using current_thread API will access current thread instance\n # without holding it to avoid a reference cycle in an easier way.\n with capture_internal_exceptions():\n new_run = _wrap_run(hub_, getattr(self.run, \"__func__\", self.run))\n self.run = new_run # type: ignore\n\n return old_start(self, *a, **kw)\n\n Thread.start = sentry_start # type: ignore\n\n\ndef _wrap_run(parent_hub, old_run_func):\n # type: (Optional[Hub], F) -> F\n @wraps(old_run_func)\n def run(*a, **kw):\n # type: (*Any, **Any) -> Any\n hub = parent_hub or Hub.current\n with hub:\n try:\n self = current_thread()\n return old_run_func(self, *a, **kw)\n except Exception:\n reraise(*_capture_exception())\n\n return run # type: ignore\n\n\ndef _capture_exception():\n # type: () -> ExcInfo\n hub = Hub.current\n exc_info = sys.exc_info()\n\n if hub.get_integration(ThreadingIntegration) is not None:\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n\n event, hint = event_from_exception(\n exc_info,\n client_options=client.options,\n mechanism={\"type\": \"threading\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n return exc_info\n", "path": "sentry_sdk/integrations/threading.py"}]}
| 1,729 | 238 |
gh_patches_debug_29915
|
rasdani/github-patches
|
git_diff
|
deepchecks__deepchecks-398
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: confusion matrix chart displays wrong output
To reproduce: https://www.kaggle.com/itay94/notebook05f499eb19

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deepchecks/checks/performance/confusion_matrix_report.py`
Content:
```
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """The confusion_matrix_report check module."""
12 import numpy as np
13 import sklearn
14 from sklearn.base import BaseEstimator
15
16 import plotly.figure_factory as ff
17 from deepchecks import CheckResult, Dataset
18 from deepchecks.base.check import SingleDatasetBaseCheck
19 from deepchecks.utils.metrics import ModelType, task_type_validation
20
21
22 __all__ = ['ConfusionMatrixReport']
23
24
25 class ConfusionMatrixReport(SingleDatasetBaseCheck):
26 """Calculate the confusion matrix of the model on the given dataset."""
27
28 def run(self, dataset: Dataset, model: BaseEstimator) -> CheckResult:
29 """Run check.
30
31 Args:
32 model (BaseEstimator): A scikit-learn-compatible fitted estimator instance
33 dataset: a Dataset object
34
35 Returns:
36 CheckResult: value is numpy array of the confusion matrix, displays the confusion matrix
37
38 Raises:
39 DeepchecksValueError: If the object is not a Dataset instance with a label
40 """
41 return self._confusion_matrix_report(dataset, model)
42
43 def _confusion_matrix_report(self, dataset: Dataset, model):
44 Dataset.validate_dataset(dataset)
45 dataset.validate_label()
46 task_type_validation(model, dataset, [ModelType.MULTICLASS, ModelType.BINARY])
47
48 label = dataset.label_name
49 ds_x = dataset.data[dataset.features]
50 ds_y = dataset.data[label]
51 y_pred = model.predict(ds_x)
52
53 confusion_matrix = sklearn.metrics.confusion_matrix(ds_y, y_pred)
54
55 labels = [str(val) for val in np.unique(ds_y)]
56 fig = ff.create_annotated_heatmap(confusion_matrix, x=labels, y=labels, colorscale='Viridis')
57 fig.update_layout(width=600, height=600)
58 fig.update_xaxes(title='Predicted Value')
59 fig.update_yaxes(title='True value', autorange='reversed')
60 fig['data'][0]['showscale'] = True
61 fig['layout']['xaxis']['side'] = 'bottom'
62
63 return CheckResult(confusion_matrix, display=fig)
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/deepchecks/checks/performance/confusion_matrix_report.py b/deepchecks/checks/performance/confusion_matrix_report.py
--- a/deepchecks/checks/performance/confusion_matrix_report.py
+++ b/deepchecks/checks/performance/confusion_matrix_report.py
@@ -9,11 +9,10 @@
# ----------------------------------------------------------------------------
#
"""The confusion_matrix_report check module."""
-import numpy as np
import sklearn
from sklearn.base import BaseEstimator
-import plotly.figure_factory as ff
+import plotly.express as px
from deepchecks import CheckResult, Dataset
from deepchecks.base.check import SingleDatasetBaseCheck
from deepchecks.utils.metrics import ModelType, task_type_validation
@@ -49,15 +48,12 @@
ds_x = dataset.data[dataset.features]
ds_y = dataset.data[label]
y_pred = model.predict(ds_x)
-
confusion_matrix = sklearn.metrics.confusion_matrix(ds_y, y_pred)
- labels = [str(val) for val in np.unique(ds_y)]
- fig = ff.create_annotated_heatmap(confusion_matrix, x=labels, y=labels, colorscale='Viridis')
+ # Figure
+ fig = px.imshow(confusion_matrix, x=dataset.classes, y=dataset.classes, text_auto=True)
fig.update_layout(width=600, height=600)
- fig.update_xaxes(title='Predicted Value')
- fig.update_yaxes(title='True value', autorange='reversed')
- fig['data'][0]['showscale'] = True
- fig['layout']['xaxis']['side'] = 'bottom'
+ fig.update_xaxes(title='Predicted Value', type='category')
+ fig.update_yaxes(title='True value', type='category')
return CheckResult(confusion_matrix, display=fig)
|
{"golden_diff": "diff --git a/deepchecks/checks/performance/confusion_matrix_report.py b/deepchecks/checks/performance/confusion_matrix_report.py\n--- a/deepchecks/checks/performance/confusion_matrix_report.py\n+++ b/deepchecks/checks/performance/confusion_matrix_report.py\n@@ -9,11 +9,10 @@\n # ----------------------------------------------------------------------------\n #\n \"\"\"The confusion_matrix_report check module.\"\"\"\n-import numpy as np\n import sklearn\n from sklearn.base import BaseEstimator\n \n-import plotly.figure_factory as ff\n+import plotly.express as px\n from deepchecks import CheckResult, Dataset\n from deepchecks.base.check import SingleDatasetBaseCheck\n from deepchecks.utils.metrics import ModelType, task_type_validation\n@@ -49,15 +48,12 @@\n ds_x = dataset.data[dataset.features]\n ds_y = dataset.data[label]\n y_pred = model.predict(ds_x)\n-\n confusion_matrix = sklearn.metrics.confusion_matrix(ds_y, y_pred)\n \n- labels = [str(val) for val in np.unique(ds_y)]\n- fig = ff.create_annotated_heatmap(confusion_matrix, x=labels, y=labels, colorscale='Viridis')\n+ # Figure\n+ fig = px.imshow(confusion_matrix, x=dataset.classes, y=dataset.classes, text_auto=True)\n fig.update_layout(width=600, height=600)\n- fig.update_xaxes(title='Predicted Value')\n- fig.update_yaxes(title='True value', autorange='reversed')\n- fig['data'][0]['showscale'] = True\n- fig['layout']['xaxis']['side'] = 'bottom'\n+ fig.update_xaxes(title='Predicted Value', type='category')\n+ fig.update_yaxes(title='True value', type='category')\n \n return CheckResult(confusion_matrix, display=fig)\n", "issue": "BUG: confusion matrix chart displays wrong output\nTo reproduce: https://www.kaggle.com/itay94/notebook05f499eb19\r\n\r\n\r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"The confusion_matrix_report check module.\"\"\"\nimport numpy as np\nimport sklearn\nfrom sklearn.base import BaseEstimator\n\nimport plotly.figure_factory as ff\nfrom deepchecks import CheckResult, Dataset\nfrom deepchecks.base.check import SingleDatasetBaseCheck\nfrom deepchecks.utils.metrics import ModelType, task_type_validation\n\n\n__all__ = ['ConfusionMatrixReport']\n\n\nclass ConfusionMatrixReport(SingleDatasetBaseCheck):\n \"\"\"Calculate the confusion matrix of the model on the given dataset.\"\"\"\n\n def run(self, dataset: Dataset, model: BaseEstimator) -> CheckResult:\n \"\"\"Run check.\n\n Args:\n model (BaseEstimator): A scikit-learn-compatible fitted estimator instance\n dataset: a Dataset object\n\n Returns:\n CheckResult: value is numpy array of the confusion matrix, displays the confusion matrix\n\n Raises:\n DeepchecksValueError: If the object is not a Dataset instance with a label\n \"\"\"\n return self._confusion_matrix_report(dataset, model)\n\n def _confusion_matrix_report(self, dataset: Dataset, model):\n Dataset.validate_dataset(dataset)\n dataset.validate_label()\n task_type_validation(model, dataset, [ModelType.MULTICLASS, ModelType.BINARY])\n\n label = dataset.label_name\n ds_x = dataset.data[dataset.features]\n ds_y = dataset.data[label]\n y_pred = model.predict(ds_x)\n\n confusion_matrix = sklearn.metrics.confusion_matrix(ds_y, y_pred)\n\n labels = [str(val) for val in np.unique(ds_y)]\n fig = ff.create_annotated_heatmap(confusion_matrix, x=labels, y=labels, colorscale='Viridis')\n fig.update_layout(width=600, height=600)\n fig.update_xaxes(title='Predicted Value')\n fig.update_yaxes(title='True value', autorange='reversed')\n fig['data'][0]['showscale'] = True\n fig['layout']['xaxis']['side'] = 'bottom'\n\n return CheckResult(confusion_matrix, display=fig)\n", "path": "deepchecks/checks/performance/confusion_matrix_report.py"}], "after_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"The confusion_matrix_report check module.\"\"\"\nimport sklearn\nfrom sklearn.base import BaseEstimator\n\nimport plotly.express as px\nfrom deepchecks import CheckResult, Dataset\nfrom deepchecks.base.check import SingleDatasetBaseCheck\nfrom deepchecks.utils.metrics import ModelType, task_type_validation\n\n\n__all__ = ['ConfusionMatrixReport']\n\n\nclass ConfusionMatrixReport(SingleDatasetBaseCheck):\n \"\"\"Calculate the confusion matrix of the model on the given dataset.\"\"\"\n\n def run(self, dataset: Dataset, model: BaseEstimator) -> CheckResult:\n \"\"\"Run check.\n\n Args:\n model (BaseEstimator): A scikit-learn-compatible fitted estimator instance\n dataset: a Dataset object\n\n Returns:\n CheckResult: value is numpy array of the confusion matrix, displays the confusion matrix\n\n Raises:\n DeepchecksValueError: If the object is not a Dataset instance with a label\n \"\"\"\n return self._confusion_matrix_report(dataset, model)\n\n def _confusion_matrix_report(self, dataset: Dataset, model):\n Dataset.validate_dataset(dataset)\n dataset.validate_label()\n task_type_validation(model, dataset, [ModelType.MULTICLASS, ModelType.BINARY])\n\n label = dataset.label_name\n ds_x = dataset.data[dataset.features]\n ds_y = dataset.data[label]\n y_pred = model.predict(ds_x)\n confusion_matrix = sklearn.metrics.confusion_matrix(ds_y, y_pred)\n\n # Figure\n fig = px.imshow(confusion_matrix, x=dataset.classes, y=dataset.classes, text_auto=True)\n fig.update_layout(width=600, height=600)\n fig.update_xaxes(title='Predicted Value', type='category')\n fig.update_yaxes(title='True value', type='category')\n\n return CheckResult(confusion_matrix, display=fig)\n", "path": "deepchecks/checks/performance/confusion_matrix_report.py"}]}
| 1,020 | 399 |
gh_patches_debug_42763
|
rasdani/github-patches
|
git_diff
|
getredash__redash-1002
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add events to track embeds usage
Add events to track embeds usage
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/handlers/embed.py`
Content:
```
1 import json
2
3 from funcy import project
4 from flask import render_template, request
5 from flask_login import login_required, current_user
6 from flask_restful import abort
7
8 from redash import models, settings
9 from redash import serializers
10 from redash.utils import json_dumps
11 from redash.handlers import routes
12 from redash.handlers.base import org_scoped_rule
13 from redash.permissions import require_access, view_only
14 from authentication import current_org
15
16
17 @routes.route(org_scoped_rule('/embed/query/<query_id>/visualization/<visualization_id>'), methods=['GET'])
18 @login_required
19 def embed(query_id, visualization_id, org_slug=None):
20 # TODO: add event for embed access
21 query = models.Query.get_by_id_and_org(query_id, current_org)
22 require_access(query.groups, current_user, view_only)
23 vis = query.visualizations.where(models.Visualization.id == visualization_id).first()
24 qr = {}
25
26 if vis is not None:
27 vis = vis.to_dict()
28 qr = query.latest_query_data
29 if qr is None:
30 abort(400, message="No Results for this query")
31 else:
32 qr = qr.to_dict()
33 else:
34 abort(404, message="Visualization not found.")
35
36 client_config = {}
37 client_config.update(settings.COMMON_CLIENT_CONFIG)
38
39 qr = project(qr, ('data', 'id', 'retrieved_at'))
40 vis = project(vis, ('description', 'name', 'id', 'options', 'query', 'type', 'updated_at'))
41 vis['query'] = project(vis['query'], ('created_at', 'description', 'name', 'id', 'latest_query_data_id', 'name', 'updated_at'))
42
43 return render_template("embed.html",
44 client_config=json_dumps(client_config),
45 visualization=json_dumps(vis),
46 query_result=json_dumps(qr))
47
48
49 @routes.route(org_scoped_rule('/public/dashboards/<token>'), methods=['GET'])
50 @login_required
51 def public_dashboard(token, org_slug=None):
52 # TODO: verify object is a dashboard?
53 if not isinstance(current_user, models.ApiUser):
54 api_key = models.ApiKey.get_by_api_key(token)
55 dashboard = api_key.object
56 else:
57 dashboard = current_user.object
58
59 user = {
60 'permissions': [],
61 'apiKey': current_user.id
62 }
63
64 headers = {
65 'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate'
66 }
67
68 response = render_template("public.html",
69 headless='embed' in request.args,
70 user=json.dumps(user),
71 seed_data=json_dumps({
72 'dashboard': serializers.public_dashboard(dashboard)
73 }),
74 client_config=json.dumps(settings.COMMON_CLIENT_CONFIG))
75
76 return response, 200, headers
77
```
Path: `redash/handlers/base.py`
Content:
```
1 import time
2 from flask import request, Blueprint
3 from flask_restful import Resource, abort
4 from flask_login import current_user, login_required
5 from peewee import DoesNotExist
6
7 from redash import settings
8 from redash.tasks import record_event
9 from redash.models import ApiUser
10 from redash.authentication import current_org
11
12 routes = Blueprint('redash', __name__, template_folder=settings.fix_assets_path('templates'))
13
14
15 class BaseResource(Resource):
16 decorators = [login_required]
17
18 def __init__(self, *args, **kwargs):
19 super(BaseResource, self).__init__(*args, **kwargs)
20 self._user = None
21
22 def dispatch_request(self, *args, **kwargs):
23 kwargs.pop('org_slug', None)
24
25 return super(BaseResource, self).dispatch_request(*args, **kwargs)
26
27 @property
28 def current_user(self):
29 return current_user._get_current_object()
30
31 @property
32 def current_org(self):
33 return current_org._get_current_object()
34
35 def record_event(self, options):
36 if isinstance(self.current_user, ApiUser):
37 options.update({
38 'api_key': self.current_user.name,
39 'org_id': self.current_org.id
40 })
41 else:
42 options.update({
43 'user_id': self.current_user.id,
44 'org_id': self.current_org.id
45 })
46
47 options.update({
48 'user_agent': request.user_agent.string,
49 'ip': request.remote_addr
50 })
51
52 if 'timestamp' not in options:
53 options['timestamp'] = int(time.time())
54
55 record_event.delay(options)
56
57
58 def require_fields(req, fields):
59 for f in fields:
60 if f not in req:
61 abort(400)
62
63
64 def get_object_or_404(fn, *args, **kwargs):
65 try:
66 return fn(*args, **kwargs)
67 except DoesNotExist:
68 abort(404)
69
70
71 def org_scoped_rule(rule):
72 if settings.MULTI_ORG:
73 return "/<org_slug:org_slug>{}".format(rule)
74
75 return rule
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/redash/handlers/base.py b/redash/handlers/base.py
--- a/redash/handlers/base.py
+++ b/redash/handlers/base.py
@@ -5,7 +5,7 @@
from peewee import DoesNotExist
from redash import settings
-from redash.tasks import record_event
+from redash.tasks import record_event as record_event_task
from redash.models import ApiUser
from redash.authentication import current_org
@@ -33,26 +33,30 @@
return current_org._get_current_object()
def record_event(self, options):
- if isinstance(self.current_user, ApiUser):
- options.update({
- 'api_key': self.current_user.name,
- 'org_id': self.current_org.id
- })
- else:
- options.update({
- 'user_id': self.current_user.id,
- 'org_id': self.current_org.id
- })
+ record_event(self.current_org, self.current_user, options)
+
+def record_event(org, user, options):
+ if isinstance(user, ApiUser):
+ options.update({
+ 'api_key': user.name,
+ 'org_id': org.id
+ })
+ else:
options.update({
- 'user_agent': request.user_agent.string,
- 'ip': request.remote_addr
+ 'user_id': user.id,
+ 'org_id': org.id
})
- if 'timestamp' not in options:
- options['timestamp'] = int(time.time())
+ options.update({
+ 'user_agent': request.user_agent.string,
+ 'ip': request.remote_addr
+ })
+
+ if 'timestamp' not in options:
+ options['timestamp'] = int(time.time())
- record_event.delay(options)
+ record_event_task.delay(options)
def require_fields(req, fields):
diff --git a/redash/handlers/embed.py b/redash/handlers/embed.py
--- a/redash/handlers/embed.py
+++ b/redash/handlers/embed.py
@@ -9,7 +9,7 @@
from redash import serializers
from redash.utils import json_dumps
from redash.handlers import routes
-from redash.handlers.base import org_scoped_rule
+from redash.handlers.base import org_scoped_rule, record_event
from redash.permissions import require_access, view_only
from authentication import current_org
@@ -17,7 +17,6 @@
@routes.route(org_scoped_rule('/embed/query/<query_id>/visualization/<visualization_id>'), methods=['GET'])
@login_required
def embed(query_id, visualization_id, org_slug=None):
- # TODO: add event for embed access
query = models.Query.get_by_id_and_org(query_id, current_org)
require_access(query.groups, current_user, view_only)
vis = query.visualizations.where(models.Visualization.id == visualization_id).first()
@@ -33,6 +32,15 @@
else:
abort(404, message="Visualization not found.")
+ record_event(current_org, current_user, {
+ 'action': 'view',
+ 'object_id': visualization_id,
+ 'object_type': 'visualization',
+ 'query_id': query_id,
+ 'embed': True,
+ 'referer': request.headers.get('Referer')
+ })
+
client_config = {}
client_config.update(settings.COMMON_CLIENT_CONFIG)
@@ -65,6 +73,15 @@
'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate'
}
+ record_event(current_org, current_user, {
+ 'action': 'view',
+ 'object_id': dashboard.id,
+ 'object_type': 'dashboard',
+ 'public': True,
+ 'headless': 'embed' in request.args,
+ 'referer': request.headers.get('Referer')
+ })
+
response = render_template("public.html",
headless='embed' in request.args,
user=json.dumps(user),
|
{"golden_diff": "diff --git a/redash/handlers/base.py b/redash/handlers/base.py\n--- a/redash/handlers/base.py\n+++ b/redash/handlers/base.py\n@@ -5,7 +5,7 @@\n from peewee import DoesNotExist\n \n from redash import settings\n-from redash.tasks import record_event\n+from redash.tasks import record_event as record_event_task\n from redash.models import ApiUser\n from redash.authentication import current_org\n \n@@ -33,26 +33,30 @@\n return current_org._get_current_object()\n \n def record_event(self, options):\n- if isinstance(self.current_user, ApiUser):\n- options.update({\n- 'api_key': self.current_user.name,\n- 'org_id': self.current_org.id\n- })\n- else:\n- options.update({\n- 'user_id': self.current_user.id,\n- 'org_id': self.current_org.id\n- })\n+ record_event(self.current_org, self.current_user, options)\n \n+\n+def record_event(org, user, options):\n+ if isinstance(user, ApiUser):\n+ options.update({\n+ 'api_key': user.name,\n+ 'org_id': org.id\n+ })\n+ else:\n options.update({\n- 'user_agent': request.user_agent.string,\n- 'ip': request.remote_addr\n+ 'user_id': user.id,\n+ 'org_id': org.id\n })\n \n- if 'timestamp' not in options:\n- options['timestamp'] = int(time.time())\n+ options.update({\n+ 'user_agent': request.user_agent.string,\n+ 'ip': request.remote_addr\n+ })\n+\n+ if 'timestamp' not in options:\n+ options['timestamp'] = int(time.time())\n \n- record_event.delay(options)\n+ record_event_task.delay(options)\n \n \n def require_fields(req, fields):\ndiff --git a/redash/handlers/embed.py b/redash/handlers/embed.py\n--- a/redash/handlers/embed.py\n+++ b/redash/handlers/embed.py\n@@ -9,7 +9,7 @@\n from redash import serializers\n from redash.utils import json_dumps\n from redash.handlers import routes\n-from redash.handlers.base import org_scoped_rule\n+from redash.handlers.base import org_scoped_rule, record_event\n from redash.permissions import require_access, view_only\n from authentication import current_org\n \n@@ -17,7 +17,6 @@\n @routes.route(org_scoped_rule('/embed/query/<query_id>/visualization/<visualization_id>'), methods=['GET'])\n @login_required\n def embed(query_id, visualization_id, org_slug=None):\n- # TODO: add event for embed access\n query = models.Query.get_by_id_and_org(query_id, current_org)\n require_access(query.groups, current_user, view_only)\n vis = query.visualizations.where(models.Visualization.id == visualization_id).first()\n@@ -33,6 +32,15 @@\n else:\n abort(404, message=\"Visualization not found.\")\n \n+ record_event(current_org, current_user, {\n+ 'action': 'view',\n+ 'object_id': visualization_id,\n+ 'object_type': 'visualization',\n+ 'query_id': query_id,\n+ 'embed': True,\n+ 'referer': request.headers.get('Referer')\n+ })\n+\n client_config = {}\n client_config.update(settings.COMMON_CLIENT_CONFIG)\n \n@@ -65,6 +73,15 @@\n 'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate'\n }\n \n+ record_event(current_org, current_user, {\n+ 'action': 'view',\n+ 'object_id': dashboard.id,\n+ 'object_type': 'dashboard',\n+ 'public': True,\n+ 'headless': 'embed' in request.args,\n+ 'referer': request.headers.get('Referer')\n+ })\n+\n response = render_template(\"public.html\",\n headless='embed' in request.args,\n user=json.dumps(user),\n", "issue": "Add events to track embeds usage\n\nAdd events to track embeds usage\n\n", "before_files": [{"content": "import json\n\nfrom funcy import project\nfrom flask import render_template, request\nfrom flask_login import login_required, current_user\nfrom flask_restful import abort\n\nfrom 
redash import models, settings\nfrom redash import serializers\nfrom redash.utils import json_dumps\nfrom redash.handlers import routes\nfrom redash.handlers.base import org_scoped_rule\nfrom redash.permissions import require_access, view_only\nfrom authentication import current_org\n\n\[email protected](org_scoped_rule('/embed/query/<query_id>/visualization/<visualization_id>'), methods=['GET'])\n@login_required\ndef embed(query_id, visualization_id, org_slug=None):\n # TODO: add event for embed access\n query = models.Query.get_by_id_and_org(query_id, current_org)\n require_access(query.groups, current_user, view_only)\n vis = query.visualizations.where(models.Visualization.id == visualization_id).first()\n qr = {}\n\n if vis is not None:\n vis = vis.to_dict()\n qr = query.latest_query_data\n if qr is None:\n abort(400, message=\"No Results for this query\")\n else:\n qr = qr.to_dict()\n else:\n abort(404, message=\"Visualization not found.\")\n\n client_config = {}\n client_config.update(settings.COMMON_CLIENT_CONFIG)\n\n qr = project(qr, ('data', 'id', 'retrieved_at'))\n vis = project(vis, ('description', 'name', 'id', 'options', 'query', 'type', 'updated_at'))\n vis['query'] = project(vis['query'], ('created_at', 'description', 'name', 'id', 'latest_query_data_id', 'name', 'updated_at'))\n\n return render_template(\"embed.html\",\n client_config=json_dumps(client_config),\n visualization=json_dumps(vis),\n query_result=json_dumps(qr))\n\n\[email protected](org_scoped_rule('/public/dashboards/<token>'), methods=['GET'])\n@login_required\ndef public_dashboard(token, org_slug=None):\n # TODO: verify object is a dashboard?\n if not isinstance(current_user, models.ApiUser):\n api_key = models.ApiKey.get_by_api_key(token)\n dashboard = api_key.object\n else:\n dashboard = current_user.object\n\n user = {\n 'permissions': [],\n 'apiKey': current_user.id\n }\n\n headers = {\n 'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate'\n }\n\n response = render_template(\"public.html\",\n headless='embed' in request.args,\n user=json.dumps(user),\n seed_data=json_dumps({\n 'dashboard': serializers.public_dashboard(dashboard)\n }),\n client_config=json.dumps(settings.COMMON_CLIENT_CONFIG))\n\n return response, 200, headers\n", "path": "redash/handlers/embed.py"}, {"content": "import time\nfrom flask import request, Blueprint\nfrom flask_restful import Resource, abort\nfrom flask_login import current_user, login_required\nfrom peewee import DoesNotExist\n\nfrom redash import settings\nfrom redash.tasks import record_event\nfrom redash.models import ApiUser\nfrom redash.authentication import current_org\n\nroutes = Blueprint('redash', __name__, template_folder=settings.fix_assets_path('templates'))\n\n\nclass BaseResource(Resource):\n decorators = [login_required]\n\n def __init__(self, *args, **kwargs):\n super(BaseResource, self).__init__(*args, **kwargs)\n self._user = None\n\n def dispatch_request(self, *args, **kwargs):\n kwargs.pop('org_slug', None)\n\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n\n @property\n def current_user(self):\n return current_user._get_current_object()\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n\n def record_event(self, options):\n if isinstance(self.current_user, ApiUser):\n options.update({\n 'api_key': self.current_user.name,\n 'org_id': self.current_org.id\n })\n else:\n options.update({\n 'user_id': self.current_user.id,\n 'org_id': self.current_org.id\n })\n\n options.update({\n 
'user_agent': request.user_agent.string,\n 'ip': request.remote_addr\n })\n\n if 'timestamp' not in options:\n options['timestamp'] = int(time.time())\n\n record_event.delay(options)\n\n\ndef require_fields(req, fields):\n for f in fields:\n if f not in req:\n abort(400)\n\n\ndef get_object_or_404(fn, *args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except DoesNotExist:\n abort(404)\n\n\ndef org_scoped_rule(rule):\n if settings.MULTI_ORG:\n return \"/<org_slug:org_slug>{}\".format(rule)\n\n return rule\n", "path": "redash/handlers/base.py"}], "after_files": [{"content": "import json\n\nfrom funcy import project\nfrom flask import render_template, request\nfrom flask_login import login_required, current_user\nfrom flask_restful import abort\n\nfrom redash import models, settings\nfrom redash import serializers\nfrom redash.utils import json_dumps\nfrom redash.handlers import routes\nfrom redash.handlers.base import org_scoped_rule, record_event\nfrom redash.permissions import require_access, view_only\nfrom authentication import current_org\n\n\[email protected](org_scoped_rule('/embed/query/<query_id>/visualization/<visualization_id>'), methods=['GET'])\n@login_required\ndef embed(query_id, visualization_id, org_slug=None):\n query = models.Query.get_by_id_and_org(query_id, current_org)\n require_access(query.groups, current_user, view_only)\n vis = query.visualizations.where(models.Visualization.id == visualization_id).first()\n qr = {}\n\n if vis is not None:\n vis = vis.to_dict()\n qr = query.latest_query_data\n if qr is None:\n abort(400, message=\"No Results for this query\")\n else:\n qr = qr.to_dict()\n else:\n abort(404, message=\"Visualization not found.\")\n\n record_event(current_org, current_user, {\n 'action': 'view',\n 'object_id': visualization_id,\n 'object_type': 'visualization',\n 'query_id': query_id,\n 'embed': True,\n 'referer': request.headers.get('Referer')\n })\n\n client_config = {}\n client_config.update(settings.COMMON_CLIENT_CONFIG)\n\n qr = project(qr, ('data', 'id', 'retrieved_at'))\n vis = project(vis, ('description', 'name', 'id', 'options', 'query', 'type', 'updated_at'))\n vis['query'] = project(vis['query'], ('created_at', 'description', 'name', 'id', 'latest_query_data_id', 'name', 'updated_at'))\n\n return render_template(\"embed.html\",\n client_config=json_dumps(client_config),\n visualization=json_dumps(vis),\n query_result=json_dumps(qr))\n\n\[email protected](org_scoped_rule('/public/dashboards/<token>'), methods=['GET'])\n@login_required\ndef public_dashboard(token, org_slug=None):\n # TODO: verify object is a dashboard?\n if not isinstance(current_user, models.ApiUser):\n api_key = models.ApiKey.get_by_api_key(token)\n dashboard = api_key.object\n else:\n dashboard = current_user.object\n\n user = {\n 'permissions': [],\n 'apiKey': current_user.id\n }\n\n headers = {\n 'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate'\n }\n\n record_event(current_org, current_user, {\n 'action': 'view',\n 'object_id': dashboard.id,\n 'object_type': 'dashboard',\n 'public': True,\n 'headless': 'embed' in request.args,\n 'referer': request.headers.get('Referer')\n })\n\n response = render_template(\"public.html\",\n headless='embed' in request.args,\n user=json.dumps(user),\n seed_data=json_dumps({\n 'dashboard': serializers.public_dashboard(dashboard)\n }),\n client_config=json.dumps(settings.COMMON_CLIENT_CONFIG))\n\n return response, 200, headers\n", "path": "redash/handlers/embed.py"}, {"content": "import time\nfrom flask import request, 
Blueprint\nfrom flask_restful import Resource, abort\nfrom flask_login import current_user, login_required\nfrom peewee import DoesNotExist\n\nfrom redash import settings\nfrom redash.tasks import record_event as record_event_task\nfrom redash.models import ApiUser\nfrom redash.authentication import current_org\n\nroutes = Blueprint('redash', __name__, template_folder=settings.fix_assets_path('templates'))\n\n\nclass BaseResource(Resource):\n decorators = [login_required]\n\n def __init__(self, *args, **kwargs):\n super(BaseResource, self).__init__(*args, **kwargs)\n self._user = None\n\n def dispatch_request(self, *args, **kwargs):\n kwargs.pop('org_slug', None)\n\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n\n @property\n def current_user(self):\n return current_user._get_current_object()\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n\n def record_event(self, options):\n record_event(self.current_org, self.current_user, options)\n\n\ndef record_event(org, user, options):\n if isinstance(user, ApiUser):\n options.update({\n 'api_key': user.name,\n 'org_id': org.id\n })\n else:\n options.update({\n 'user_id': user.id,\n 'org_id': org.id\n })\n\n options.update({\n 'user_agent': request.user_agent.string,\n 'ip': request.remote_addr\n })\n\n if 'timestamp' not in options:\n options['timestamp'] = int(time.time())\n\n record_event_task.delay(options)\n\n\ndef require_fields(req, fields):\n for f in fields:\n if f not in req:\n abort(400)\n\n\ndef get_object_or_404(fn, *args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except DoesNotExist:\n abort(404)\n\n\ndef org_scoped_rule(rule):\n if settings.MULTI_ORG:\n return \"/<org_slug:org_slug>{}\".format(rule)\n\n return rule\n", "path": "redash/handlers/base.py"}]}
| 1,634 | 886 |
gh_patches_debug_23847
|
rasdani/github-patches
|
git_diff
|
AppDaemon__appdaemon-1661
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve packaging system
The `requirements.txt` file is used in a somewhat confusing way. It is usual practice for this file to contain all the dependencies present in the project virtualenv, created using `pip freeze > requirements.txt`, so that the virtualenv can be easily re-created by any developer locally using `pip install -r requirements.txt`.
But this file is also used as the `install_requires` inside `setup.py`, creating a bit of a conflict.
If one installs appdaemon locally with `pip install .` and then runs `pip freeze > requirements.txt`, the `requirements.txt` file is modified, since all the transitive dependencies are also included, as is expected. So the question is: shouldn't the declaration of the project dependencies be moved to a more appropriate place, separate from `requirements.txt`?
## Proposed solution
I propose to declare the project dependencies more clearly, by upgrading the way we use the Python packaging system.
At the moment the package is created from the `setup.py`, read by `setuptools` when building the package.
The way to package applications has changed a lot over the years. Nowadays if one were to follow the official [Python packaging guide](https://packaging.python.org/en/latest/tutorials/packaging-projects/), the recommended setup is to use a `pyproject.toml` to describe all the metadata of the project, including the build tool and the project dependencies.
I suggest we move from the current `setup.py` to a more modern and "standard" solution, by migrating to a `pyproject.toml` file to contain all the project dependencies.
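As a rough illustration of what that could look like with setuptools as the build backend (the dependency list below is only a placeholder, not AppDaemon's real one):

```toml
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "appdaemon"
description = "Apps for the Home Assistant home automation package."
readme = "README.md"
requires-python = ">=3.7"
license = { text = "Apache License 2.0" }
# project dependencies are declared here instead of requirements.txt
dependencies = [
    "aiohttp",
    "pyyaml",
]
dynamic = ["version"]

[project.scripts]
appdaemon = "appdaemon.__main__:main"

[tool.setuptools.dynamic]
version = { attr = "appdaemon.version.__version__" }
```

`requirements.txt` (or a `pip freeze` lockfile) would then only describe the development virtualenv, not the package metadata.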
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 from setuptools import setup, find_packages
5
6 from appdaemon.version import __version__
7
8 # sudo apt-get install python3-aiohttp-dbg
9
10 with open("requirements.txt") as f:
11 install_requires = [x for x in f.read().split("\n") if x]
12
13 with open("README.md") as f:
14 long_description = f.read()
15
16 setup(
17 name="appdaemon",
18 version=__version__,
19 description="Apps for the Home Assistant home automation package.",
20 long_description=long_description,
21 long_description_content_type="text/markdown",
22 author="Andrew I Cockburn",
23 author_email="[email protected]",
24 url="https://github.com/home-assistant/appdaemon.git",
25 packages=find_packages(exclude=["contrib", "docs", "tests*"]),
26 include_package_data=True,
27 install_requires=install_requires,
28 license="Apache License 2.0",
29 python_requires=">=3.7",
30 zip_safe=False,
31 keywords=["appdaemon", "home", "automation"],
32 entry_points={"console_scripts": ["appdaemon = appdaemon.__main__:main"]},
33 classifiers=[
34 "Development Status :: 5 - Production/Stable",
35 "Intended Audience :: Developers",
36 "License :: OSI Approved :: Apache Software License",
37 "Natural Language :: English",
38 "Programming Language :: Python :: 3.7",
39 "Programming Language :: Python :: 3.8",
40 "Programming Language :: Python :: 3.9",
41 "Programming Language :: Python :: 3.10",
42 "Topic :: Home Automation",
43 ],
44 )
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
deleted file mode 100755
--- a/setup.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from setuptools import setup, find_packages
-
-from appdaemon.version import __version__
-
-# sudo apt-get install python3-aiohttp-dbg
-
-with open("requirements.txt") as f:
- install_requires = [x for x in f.read().split("\n") if x]
-
-with open("README.md") as f:
- long_description = f.read()
-
-setup(
- name="appdaemon",
- version=__version__,
- description="Apps for the Home Assistant home automation package.",
- long_description=long_description,
- long_description_content_type="text/markdown",
- author="Andrew I Cockburn",
- author_email="[email protected]",
- url="https://github.com/home-assistant/appdaemon.git",
- packages=find_packages(exclude=["contrib", "docs", "tests*"]),
- include_package_data=True,
- install_requires=install_requires,
- license="Apache License 2.0",
- python_requires=">=3.7",
- zip_safe=False,
- keywords=["appdaemon", "home", "automation"],
- entry_points={"console_scripts": ["appdaemon = appdaemon.__main__:main"]},
- classifiers=[
- "Development Status :: 5 - Production/Stable",
- "Intended Audience :: Developers",
- "License :: OSI Approved :: Apache Software License",
- "Natural Language :: English",
- "Programming Language :: Python :: 3.7",
- "Programming Language :: Python :: 3.8",
- "Programming Language :: Python :: 3.9",
- "Programming Language :: Python :: 3.10",
- "Topic :: Home Automation",
- ],
-)
|
{"golden_diff": "diff --git a/setup.py b/setup.py\ndeleted file mode 100755\n--- a/setup.py\n+++ /dev/null\n@@ -1,44 +0,0 @@\n-#!/usr/bin/env python\n-# -*- coding: utf-8 -*-\n-\n-from setuptools import setup, find_packages\n-\n-from appdaemon.version import __version__\n-\n-# sudo apt-get install python3-aiohttp-dbg\n-\n-with open(\"requirements.txt\") as f:\n- install_requires = [x for x in f.read().split(\"\\n\") if x]\n-\n-with open(\"README.md\") as f:\n- long_description = f.read()\n-\n-setup(\n- name=\"appdaemon\",\n- version=__version__,\n- description=\"Apps for the Home Assistant home automation package.\",\n- long_description=long_description,\n- long_description_content_type=\"text/markdown\",\n- author=\"Andrew I Cockburn\",\n- author_email=\"[email protected]\",\n- url=\"https://github.com/home-assistant/appdaemon.git\",\n- packages=find_packages(exclude=[\"contrib\", \"docs\", \"tests*\"]),\n- include_package_data=True,\n- install_requires=install_requires,\n- license=\"Apache License 2.0\",\n- python_requires=\">=3.7\",\n- zip_safe=False,\n- keywords=[\"appdaemon\", \"home\", \"automation\"],\n- entry_points={\"console_scripts\": [\"appdaemon = appdaemon.__main__:main\"]},\n- classifiers=[\n- \"Development Status :: 5 - Production/Stable\",\n- \"Intended Audience :: Developers\",\n- \"License :: OSI Approved :: Apache Software License\",\n- \"Natural Language :: English\",\n- \"Programming Language :: Python :: 3.7\",\n- \"Programming Language :: Python :: 3.8\",\n- \"Programming Language :: Python :: 3.9\",\n- \"Programming Language :: Python :: 3.10\",\n- \"Topic :: Home Automation\",\n- ],\n-)\n", "issue": "Improve packaging system\nThe `requirements.txt` file is used in a bit confusing way. It it usual practice for this file to contain all the dependencies present in the project virtualenv, created using `pip freeze > requirements.txt`, so that the virtualenv can be easily re-created by any developer locally using `pip install -r requirements.txt.`\r\nBut this file is also used as the `install_requires` inside `setup.py`, creating a bit of a conflict.\r\nIf one installs the appdaemon locally with `pip install .`, then run `pip freeze > requirements.txt`, the `requirements.txt` file is modified, since all the transitive dependencies are also loaded, as is expected. So the question is: shouldn't the declaration of the project dependencies be moved in a more appropriate place, separate from `requirements.txt`?\r\n\r\n## Proposed solution\r\nI propose to more crealry declare the project dependencies, by upgrading the way we use the Python packaging system.\r\nAt the moment the package is created from the `setup.py`, read by `setuptools` when building the package.\r\nThe way to package applications has changed a lot over the years. 
Nowadays if one were to follow the official [Python packaging guide](https://packaging.python.org/en/latest/tutorials/packaging-projects/), the recommended setup is to use a `pyproject.toml` to describe all the metadata of the project, including the build tool and the project dependencies.\r\nI suggest we move from the current `setup.py` to a more modern and \"standard\" solution, by migrating to a `pyproject.toml` file to contain all the project dependencies.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\nfrom appdaemon.version import __version__\n\n# sudo apt-get install python3-aiohttp-dbg\n\nwith open(\"requirements.txt\") as f:\n install_requires = [x for x in f.read().split(\"\\n\") if x]\n\nwith open(\"README.md\") as f:\n long_description = f.read()\n\nsetup(\n name=\"appdaemon\",\n version=__version__,\n description=\"Apps for the Home Assistant home automation package.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Andrew I Cockburn\",\n author_email=\"[email protected]\",\n url=\"https://github.com/home-assistant/appdaemon.git\",\n packages=find_packages(exclude=[\"contrib\", \"docs\", \"tests*\"]),\n include_package_data=True,\n install_requires=install_requires,\n license=\"Apache License 2.0\",\n python_requires=\">=3.7\",\n zip_safe=False,\n keywords=[\"appdaemon\", \"home\", \"automation\"],\n entry_points={\"console_scripts\": [\"appdaemon = appdaemon.__main__:main\"]},\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Home Automation\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": null, "path": "setup.py"}]}
| 1,025 | 427 |
gh_patches_debug_6209
|
rasdani/github-patches
|
git_diff
|
gratipay__gratipay.com-3021
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
A suspicious user trying to log in results in a 500
https://app.getsentry.com/gratipay/gratipay-com/group/32039756/
<bountysource-plugin>
---
Want to back this issue? **[Place a bounty on it!](https://www.bountysource.com/issues/4727299-a-suspicious-user-trying-to-log-in-results-in-a-500?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github).
</bountysource-plugin>
A suspicious user trying to log in results in a 500
https://app.getsentry.com/gratipay/gratipay-com/group/32039756/
<bountysource-plugin>
---
Want to back this issue? **[Place a bounty on it!](https://www.bountysource.com/issues/4727299-a-suspicious-user-trying-to-log-in-results-in-a-500?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github).
</bountysource-plugin>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gratipay/security/user.py`
Content:
```
1
2 from datetime import timedelta
3 import uuid
4
5 from aspen.utils import utcnow
6 from gratipay.models.participant import Participant
7 from gratipay.utils import set_cookie
8
9
10 SESSION = b'session'
11 SESSION_REFRESH = timedelta(hours=1)
12 SESSION_TIMEOUT = timedelta(hours=6)
13
14
15 class User(object):
16 """Represent a user of our website.
17 """
18
19 participant = None
20
21
22 # Constructors
23 # ============
24
25 @classmethod
26 def from_session_token(cls, token):
27 """Find a participant based on token and return a User.
28 """
29 self = cls()
30 self.participant = Participant.from_session_token(token)
31 return self
32
33 @classmethod
34 def from_api_key(cls, api_key):
35 """Find a participant based on token and return a User.
36 """
37 self = cls()
38 self.participant = Participant.from_api_key(api_key)
39 return self
40
41 @classmethod
42 def from_username(cls, username):
43 """Find a participant based on username and return a User.
44 """
45 self = cls()
46 self.participant = Participant.from_username(username)
47 return self
48
49 def __str__(self):
50 if self.participant is None:
51 out = '<Anonymous>'
52 else:
53 out = '<User: %s>' % self.participant.username
54 return out
55 __repr__ = __str__
56
57
58 # Authentication Helpers
59 # ======================
60
61 def sign_in(self, cookies):
62 """Start a new session for the user.
63 """
64 token = uuid.uuid4().hex
65 expires = utcnow() + SESSION_TIMEOUT
66 self.participant.update_session(token, expires)
67 set_cookie(cookies, SESSION, token, expires)
68
69 def keep_signed_in(self, cookies):
70 """Extend the user's current session.
71 """
72 new_expires = utcnow() + SESSION_TIMEOUT
73 if new_expires - self.participant.session_expires > SESSION_REFRESH:
74 self.participant.set_session_expires(new_expires)
75 token = self.participant.session_token
76 set_cookie(cookies, SESSION, token, expires=new_expires)
77
78 def sign_out(self, cookies):
79 """End the user's current session.
80 """
81 self.participant.update_session(None, None)
82 self.participant = None
83 set_cookie(cookies, SESSION, '')
84
85
86 # Roles
87 # =====
88
89 @property
90 def ADMIN(self):
91 return not self.ANON and self.participant.is_admin
92
93 @property
94 def ANON(self):
95 return self.participant is None or self.participant.is_suspicious is True
96 # Append "is True" here because otherwise Python will return the result
97 # of evaluating the right side of the or expression, which can be None.
98
99 def get_highest_role(self, owner):
100 """Return a string representing the highest role this user has.
101
102 :param string owner: the username of the owner of the resource we're
103 concerned with, or None
104
105 """
106 def is_owner():
107 if self.participant is not None:
108 if owner is not None:
109 if self.participant.username == owner:
110 return True
111 return False
112
113 if self.ADMIN:
114 return 'admin'
115 elif is_owner():
116 return 'owner'
117 elif not self.ANON:
118 return 'authenticated'
119 else:
120 return 'anonymous'
121
```
--- END FILES ---
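The comment at listing lines 96-97 of `gratipay/security/user.py` rests on a Python subtlety worth spelling out: `or` returns the last operand it evaluates rather than a strict boolean, so with a nullable tri-state `is_suspicious` column the property could otherwise yield `None`. A minimal sketch of that behaviour (variable names are illustrative, not taken from the codebase):

```python
# `or` short-circuits and returns an operand, not a canonical bool.
participant_is_none = False
is_suspicious = None              # tri-state column: True / False / None

print(participant_is_none or is_suspicious)             # prints: None
print((participant_is_none or is_suspicious) is True)   # prints: False
```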
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gratipay/security/user.py b/gratipay/security/user.py
--- a/gratipay/security/user.py
+++ b/gratipay/security/user.py
@@ -92,9 +92,7 @@
@property
def ANON(self):
- return self.participant is None or self.participant.is_suspicious is True
- # Append "is True" here because otherwise Python will return the result
- # of evaluating the right side of the or expression, which can be None.
+ return self.participant is None
def get_highest_role(self, owner):
"""Return a string representing the highest role this user has.
|
{"golden_diff": "diff --git a/gratipay/security/user.py b/gratipay/security/user.py\n--- a/gratipay/security/user.py\n+++ b/gratipay/security/user.py\n@@ -92,9 +92,7 @@\n \n @property\n def ANON(self):\n- return self.participant is None or self.participant.is_suspicious is True\n- # Append \"is True\" here because otherwise Python will return the result\n- # of evaluating the right side of the or expression, which can be None.\n+ return self.participant is None\n \n def get_highest_role(self, owner):\n \"\"\"Return a string representing the highest role this user has.\n", "issue": "A suspicious user trying to log in results in a 500\nhttps://app.getsentry.com/gratipay/gratipay-com/group/32039756/\n\n<bountysource-plugin>\n\n---\n\nWant to back this issue? **[Place a bounty on it!](https://www.bountysource.com/issues/4727299-a-suspicious-user-trying-to-log-in-results-in-a-500?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github).\n</bountysource-plugin>\n\nA suspicious user trying to log in results in a 500\nhttps://app.getsentry.com/gratipay/gratipay-com/group/32039756/\n\n<bountysource-plugin>\n\n---\n\nWant to back this issue? **[Place a bounty on it!](https://www.bountysource.com/issues/4727299-a-suspicious-user-trying-to-log-in-results-in-a-500?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github).\n</bountysource-plugin>\n\n", "before_files": [{"content": "\nfrom datetime import timedelta\nimport uuid\n\nfrom aspen.utils import utcnow\nfrom gratipay.models.participant import Participant\nfrom gratipay.utils import set_cookie\n\n\nSESSION = b'session'\nSESSION_REFRESH = timedelta(hours=1)\nSESSION_TIMEOUT = timedelta(hours=6)\n\n\nclass User(object):\n \"\"\"Represent a user of our website.\n \"\"\"\n\n participant = None\n\n\n # Constructors\n # ============\n\n @classmethod\n def from_session_token(cls, token):\n \"\"\"Find a participant based on token and return a User.\n \"\"\"\n self = cls()\n self.participant = Participant.from_session_token(token)\n return self\n\n @classmethod\n def from_api_key(cls, api_key):\n \"\"\"Find a participant based on token and return a User.\n \"\"\"\n self = cls()\n self.participant = Participant.from_api_key(api_key)\n return self\n\n @classmethod\n def from_username(cls, username):\n \"\"\"Find a participant based on username and return a User.\n \"\"\"\n self = cls()\n self.participant = Participant.from_username(username)\n return self\n\n def __str__(self):\n if self.participant is None:\n out = '<Anonymous>'\n else:\n out = '<User: %s>' % self.participant.username\n return out\n __repr__ = __str__\n\n\n # Authentication Helpers\n # ======================\n\n def sign_in(self, cookies):\n \"\"\"Start a new session for the user.\n \"\"\"\n token = uuid.uuid4().hex\n expires = utcnow() + SESSION_TIMEOUT\n self.participant.update_session(token, expires)\n set_cookie(cookies, SESSION, token, expires)\n\n def keep_signed_in(self, cookies):\n \"\"\"Extend the user's current session.\n \"\"\"\n new_expires = utcnow() + SESSION_TIMEOUT\n if new_expires - self.participant.session_expires > SESSION_REFRESH:\n self.participant.set_session_expires(new_expires)\n token = 
self.participant.session_token\n set_cookie(cookies, SESSION, token, expires=new_expires)\n\n def sign_out(self, cookies):\n \"\"\"End the user's current session.\n \"\"\"\n self.participant.update_session(None, None)\n self.participant = None\n set_cookie(cookies, SESSION, '')\n\n\n # Roles\n # =====\n\n @property\n def ADMIN(self):\n return not self.ANON and self.participant.is_admin\n\n @property\n def ANON(self):\n return self.participant is None or self.participant.is_suspicious is True\n # Append \"is True\" here because otherwise Python will return the result\n # of evaluating the right side of the or expression, which can be None.\n\n def get_highest_role(self, owner):\n \"\"\"Return a string representing the highest role this user has.\n\n :param string owner: the username of the owner of the resource we're\n concerned with, or None\n\n \"\"\"\n def is_owner():\n if self.participant is not None:\n if owner is not None:\n if self.participant.username == owner:\n return True\n return False\n\n if self.ADMIN:\n return 'admin'\n elif is_owner():\n return 'owner'\n elif not self.ANON:\n return 'authenticated'\n else:\n return 'anonymous'\n", "path": "gratipay/security/user.py"}], "after_files": [{"content": "\nfrom datetime import timedelta\nimport uuid\n\nfrom aspen.utils import utcnow\nfrom gratipay.models.participant import Participant\nfrom gratipay.utils import set_cookie\n\n\nSESSION = b'session'\nSESSION_REFRESH = timedelta(hours=1)\nSESSION_TIMEOUT = timedelta(hours=6)\n\n\nclass User(object):\n \"\"\"Represent a user of our website.\n \"\"\"\n\n participant = None\n\n\n # Constructors\n # ============\n\n @classmethod\n def from_session_token(cls, token):\n \"\"\"Find a participant based on token and return a User.\n \"\"\"\n self = cls()\n self.participant = Participant.from_session_token(token)\n return self\n\n @classmethod\n def from_api_key(cls, api_key):\n \"\"\"Find a participant based on token and return a User.\n \"\"\"\n self = cls()\n self.participant = Participant.from_api_key(api_key)\n return self\n\n @classmethod\n def from_username(cls, username):\n \"\"\"Find a participant based on username and return a User.\n \"\"\"\n self = cls()\n self.participant = Participant.from_username(username)\n return self\n\n def __str__(self):\n if self.participant is None:\n out = '<Anonymous>'\n else:\n out = '<User: %s>' % self.participant.username\n return out\n __repr__ = __str__\n\n\n # Authentication Helpers\n # ======================\n\n def sign_in(self, cookies):\n \"\"\"Start a new session for the user.\n \"\"\"\n token = uuid.uuid4().hex\n expires = utcnow() + SESSION_TIMEOUT\n self.participant.update_session(token, expires)\n set_cookie(cookies, SESSION, token, expires)\n\n def keep_signed_in(self, cookies):\n \"\"\"Extend the user's current session.\n \"\"\"\n new_expires = utcnow() + SESSION_TIMEOUT\n if new_expires - self.participant.session_expires > SESSION_REFRESH:\n self.participant.set_session_expires(new_expires)\n token = self.participant.session_token\n set_cookie(cookies, SESSION, token, expires=new_expires)\n\n def sign_out(self, cookies):\n \"\"\"End the user's current session.\n \"\"\"\n self.participant.update_session(None, None)\n self.participant = None\n set_cookie(cookies, SESSION, '')\n\n\n # Roles\n # =====\n\n @property\n def ADMIN(self):\n return not self.ANON and self.participant.is_admin\n\n @property\n def ANON(self):\n return self.participant is None\n\n def get_highest_role(self, owner):\n \"\"\"Return a string representing the highest 
role this user has.\n\n :param string owner: the username of the owner of the resource we're\n concerned with, or None\n\n \"\"\"\n def is_owner():\n if self.participant is not None:\n if owner is not None:\n if self.participant.username == owner:\n return True\n return False\n\n if self.ADMIN:\n return 'admin'\n elif is_owner():\n return 'owner'\n elif not self.ANON:\n return 'authenticated'\n else:\n return 'anonymous'\n", "path": "gratipay/security/user.py"}]}
| 1,586 | 145 |
gh_patches_debug_2497
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-agent-1047
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Windows agent connection error
agent version: 4.4
OS: Windows 2008 Standard, SP2.
case: https://datadog.desk.com/agent/case/11902
- log snippet:
> 2014-06-24 13:45:04 Eastern Daylight Time | ERROR | forwarder(ddagent.pyc:240) | Response: HTTPResponse(_body=None,buffer=None,code=599,effective_url='https://app.datadoghq.com/intake?api_key=#################',error=gaierror(11001, 'getaddrinfo failed'),headers={},reason='Unknown',request=<tornado.httpclient.HTTPRequest object at 0x01212170>,request_time=0.018000125885009766,time_info={})
>
> 2014-06-24 13:45:04 Eastern Daylight Time | WARNING | transaction(transaction.pyc:213) | Transaction 4 in error (5 errors), it will be replayed after 2014-06-24 13:46:34
>
> 2014-06-24 13:45:05 Eastern Daylight Time | INFO | win32.agent(agent.pyc:91) | pup has died. Restarting...
>
> 2014-06-24 13:45:05 Eastern Daylight Time | ERROR | forwarder(ddagent.pyc:240) | Response: HTTPResponse(_body=None,buffer=None,code=599,effective_url='https://app.datadoghq.com/api/v1/series/?api_key=################',error=gaierror(11001, 'getaddrinfo failed`
- able to make TCP connections (ports 80 and 443) to app.datadoghq.com
EDIT:
> telnet app.datadoghq.com 80
> telnet app.datadoghq.com 443
- from agent python shell:
> print socket.getaddrinfo('app.datadoghq.com',443)
> Traceback (most recent call last):
> File "shell.py", line 13, in shell
> File "<string>", line 1, in <module>
> gaierror: [Errno 11001] getaddrinfo failed
--- END ISSUE ---
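A plausible reading of the failure, not stated in the report itself, is that py2exe bundles Windows networking DLLs such as IPHLPAPI.DLL from the build machine into the frozen agent, and those copies do not match the system libraries on Windows 2008 SP2, so name resolution breaks inside the bundled Python even though the OS resolves the host fine. The usual remedy is to tell py2exe to leave those system DLLs out; a sketch of the relevant options (exclusion list taken from the eventual fix, the rest of the dict abridged):

```python
# py2exe build options with Windows system DLLs excluded from the bundle.
extra_args = {
    'options': {
        'py2exe': {
            'optimize': 0,
            'compressed': True,
            'bundle_files': 3,
            'dll_excludes': ["IPHLPAPI.DLL", "NSI.dll", "WINNSI.DLL", "WTSAPI32.dll"],
        },
    },
}
```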
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import platform
2 import sys
3 from config import get_version
4 from jmxfetch import JMX_FETCH_JAR_NAME
5
6 try:
7 from setuptools import setup, find_packages
8
9 # required to build the cython extensions
10 from distutils.extension import Extension #pylint: disable=no-name-in-module
11
12 except ImportError:
13 from ez_setup import use_setuptools
14 use_setuptools()
15 from setuptools import setup, find_packages
16
17 # Extra arguments to pass to the setup function
18 extra_args = {}
19
20 # Prereqs of the build. Won't get installed when deploying the egg.
21 setup_requires = [
22 ]
23
24 # Prereqs of the install. Will install when deploying the egg.
25 install_requires=[
26 ]
27
28 if sys.platform == 'win32':
29 from glob import glob
30 import py2exe
31 install_requires.extend([
32 'tornado==3.0.1',
33 'pywin32==217',
34 'wmi==1.4.9',
35 'simplejson==2.6.1',
36 'mysql-python==1.2.3',
37 'pymongo==2.3',
38 'pg8000',
39 'python-memcached==1.48',
40 'adodbapi'
41 'elementtree',
42 'pycurl',
43 'pymysql',
44 'psutil',
45 'redis',
46 'requests',
47 'httplib2==0.9',
48 ])
49
50 # Modules to force-include in the exe
51 include_modules = [
52 # 3p
53 'win32service',
54 'win32serviceutil',
55 'win32event',
56 'simplejson',
57 'adodbapi',
58 'elementtree.ElementTree',
59 'pycurl',
60 'tornado.curl_httpclient',
61 'pymongo',
62 'pymysql',
63 'psutil',
64 'pg8000',
65 'redis',
66 'requests',
67
68 # agent
69 'checks.services_checks',
70 'httplib2',
71
72 # pup
73 'pup',
74 'pup.pup',
75 'tornado.websocket',
76 'tornado.web',
77 'tornado.ioloop',
78 ]
79
80 class Target(object):
81 def __init__(self, **kw):
82 self.__dict__.update(kw)
83 self.version = get_version()
84 self.company_name = 'Datadog, Inc.'
85 self.copyright = 'Copyright 2013 Datadog, Inc.'
86 self.cmdline_style = 'pywin32'
87
88 agent_svc = Target(name='Datadog Agent', modules='win32.agent', dest_base='ddagent')
89
90 extra_args = {
91 'options': {
92 'py2exe': {
93 'includes': ','.join(include_modules),
94 'optimize': 0,
95 'compressed': True,
96 'bundle_files': 3,
97 },
98 },
99 'console': ['win32\shell.py'],
100 'service': [agent_svc],
101 'windows': [{'script': 'win32\gui.py',
102 'dest_base': "agent-manager",
103 'uac_info': "requireAdministrator", # The manager needs to be administrator to stop/start the service
104 'icon_resources': [(1, r"packaging\datadog-agent\win32\install_files\dd_agent_win_256.ico")],
105 }],
106 'data_files': [
107 ("Microsoft.VC90.CRT", glob(r'C:\Python27\redist\*.*')),
108 ('pup', glob('pup/pup.html')),
109 ('pup', glob('pup/status.html')),
110 ('pup/static', glob('pup/static/*.*')),
111 ('jmxfetch', glob('checks/libs/%s' % JMX_FETCH_JAR_NAME)),
112 ],
113 }
114
115 setup(
116 name='datadog-agent',
117 version=get_version(),
118 description="DevOps' best friend",
119 author='DataDog',
120 author_email='[email protected]',
121 url='http://www.datadoghq.com',
122 install_requires=install_requires,
123 setup_requires=setup_requires,
124 packages=find_packages(exclude=['ez_setup']),
125 include_package_data=True,
126 test_suite='nose.collector',
127 zip_safe=False,
128 **extra_args
129 )
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -94,6 +94,7 @@
'optimize': 0,
'compressed': True,
'bundle_files': 3,
+ 'dll_excludes': [ "IPHLPAPI.DLL", "NSI.dll", "WINNSI.DLL", "WTSAPI32.dll"],
},
},
'console': ['win32\shell.py'],
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -94,6 +94,7 @@\n 'optimize': 0,\n 'compressed': True,\n 'bundle_files': 3,\n+ 'dll_excludes': [ \"IPHLPAPI.DLL\", \"NSI.dll\", \"WINNSI.DLL\", \"WTSAPI32.dll\"],\n },\n },\n 'console': ['win32\\shell.py'],\n", "issue": "Windows agent connection error\nagent version: 4.4\nOS: Windows 2008 Standard, SP2.\ncase: https://datadog.desk.com/agent/case/11902\n- log snippet:\n\n> 2014-06-24 13:45:04 Eastern Daylight Time | ERROR | forwarder(ddagent.pyc:240) | Response: HTTPResponse(_body=None,buffer=None,code=599,effective_url='https://app.datadoghq.com/intake?api_key=#################',error=gaierror(11001, 'getaddrinfo failed'),headers={},reason='Unknown',request=<tornado.httpclient.HTTPRequest object at 0x01212170>,request_time=0.018000125885009766,time_info={})\n> \n> 2014-06-24 13:45:04 Eastern Daylight Time | WARNING | transaction(transaction.pyc:213) | Transaction 4 in error (5 errors), it will be replayed after 2014-06-24 13:46:34\n> \n> 2014-06-24 13:45:05 Eastern Daylight Time | INFO | win32.agent(agent.pyc:91) | pup has died. Restarting...\n> \n> 2014-06-24 13:45:05 Eastern Daylight Time | ERROR | forwarder(ddagent.pyc:240) | Response: HTTPResponse(_body=None,buffer=None,code=599,effective_url='https://app.datadoghq.com/api/v1/series/?api_key=################',error=gaierror(11001, 'getaddrinfo failed`\n- able to TCP connection (80 and 443) to app.datadoghq.com\n EDIT:\n \n > telnet app.datadoghq.com 80\n > telnet app.datadoghq.com 443\n- from agent python shell:\n \n > print socket.getaddrinfo('app.datadoghq.com',443)\n > Traceback (most recent call last):\n > File \"shell.py\", line 13, in shell\n > File \"<string>\", line 1, in <module>\n > gaierror: [Errno 11001] getaddrinfo failed\n\n", "before_files": [{"content": "import platform\nimport sys\nfrom config import get_version\nfrom jmxfetch import JMX_FETCH_JAR_NAME\n\ntry:\n from setuptools import setup, find_packages\n\n # required to build the cython extensions\n from distutils.extension import Extension #pylint: disable=no-name-in-module\n\nexcept ImportError:\n from ez_setup import use_setuptools\n use_setuptools()\n from setuptools import setup, find_packages\n\n# Extra arguments to pass to the setup function\nextra_args = {}\n\n# Prereqs of the build. Won't get installed when deploying the egg.\nsetup_requires = [\n]\n\n# Prereqs of the install. 
Will install when deploying the egg.\ninstall_requires=[\n]\n\nif sys.platform == 'win32':\n from glob import glob\n import py2exe\n install_requires.extend([\n 'tornado==3.0.1',\n 'pywin32==217',\n 'wmi==1.4.9',\n 'simplejson==2.6.1',\n 'mysql-python==1.2.3',\n 'pymongo==2.3',\n 'pg8000',\n 'python-memcached==1.48',\n 'adodbapi'\n 'elementtree',\n 'pycurl',\n 'pymysql',\n 'psutil',\n 'redis',\n 'requests',\n 'httplib2==0.9',\n ])\n\n # Modules to force-include in the exe\n include_modules = [\n # 3p\n 'win32service',\n 'win32serviceutil',\n 'win32event',\n 'simplejson',\n 'adodbapi',\n 'elementtree.ElementTree',\n 'pycurl',\n 'tornado.curl_httpclient',\n 'pymongo',\n 'pymysql',\n 'psutil',\n 'pg8000',\n 'redis',\n 'requests',\n\n # agent\n 'checks.services_checks',\n 'httplib2',\n\n # pup\n 'pup',\n 'pup.pup',\n 'tornado.websocket',\n 'tornado.web',\n 'tornado.ioloop',\n ]\n\n class Target(object):\n def __init__(self, **kw):\n self.__dict__.update(kw) \n self.version = get_version()\n self.company_name = 'Datadog, Inc.'\n self.copyright = 'Copyright 2013 Datadog, Inc.'\n self.cmdline_style = 'pywin32'\n\n agent_svc = Target(name='Datadog Agent', modules='win32.agent', dest_base='ddagent')\n\n extra_args = {\n 'options': {\n 'py2exe': {\n 'includes': ','.join(include_modules),\n 'optimize': 0,\n 'compressed': True,\n 'bundle_files': 3,\n },\n },\n 'console': ['win32\\shell.py'],\n 'service': [agent_svc],\n 'windows': [{'script': 'win32\\gui.py',\n 'dest_base': \"agent-manager\",\n 'uac_info': \"requireAdministrator\", # The manager needs to be administrator to stop/start the service\n 'icon_resources': [(1, r\"packaging\\datadog-agent\\win32\\install_files\\dd_agent_win_256.ico\")],\n }],\n 'data_files': [\n (\"Microsoft.VC90.CRT\", glob(r'C:\\Python27\\redist\\*.*')),\n ('pup', glob('pup/pup.html')),\n ('pup', glob('pup/status.html')),\n ('pup/static', glob('pup/static/*.*')),\n ('jmxfetch', glob('checks/libs/%s' % JMX_FETCH_JAR_NAME)),\n ],\n }\n\nsetup(\n name='datadog-agent',\n version=get_version(),\n description=\"DevOps' best friend\",\n author='DataDog',\n author_email='[email protected]',\n url='http://www.datadoghq.com',\n install_requires=install_requires,\n setup_requires=setup_requires,\n packages=find_packages(exclude=['ez_setup']),\n include_package_data=True,\n test_suite='nose.collector',\n zip_safe=False,\n **extra_args\n)\n", "path": "setup.py"}], "after_files": [{"content": "import platform\nimport sys\nfrom config import *\nfrom jmxfetch import JMX_FETCH_JAR_NAME\n\ntry:\n from setuptools import setup, find_packages\n\n # required to build the cython extensions\n from distutils.extension import Extension #pylint: disable=no-name-in-module\n\nexcept ImportError:\n from ez_setup import use_setuptools\n use_setuptools()\n from setuptools import setup, find_packages\n\n# Extra arguments to pass to the setup function\nextra_args = {}\n\n# Prereqs of the build. Won't get installed when deploying the egg.\nsetup_requires = [\n]\n\n# Prereqs of the install. 
Will install when deploying the egg.\ninstall_requires=[\n]\n\nif sys.platform == 'win32':\n from glob import glob\n import py2exe\n install_requires.extend([\n 'tornado==3.0.1',\n 'pywin32==217',\n 'wmi==1.4.9',\n 'simplejson==2.6.1',\n 'mysql-python==1.2.3',\n 'pymongo==2.3',\n 'pg8000',\n 'python-memcached==1.48',\n 'adodbapi'\n 'elementtree',\n 'pycurl',\n 'pymysql',\n 'psutil',\n 'redis',\n 'requests',\n 'httplib2==0.9',\n ])\n\n # Modules to force-include in the exe\n include_modules = [\n # 3p\n 'win32service',\n 'win32serviceutil',\n 'win32event',\n 'simplejson',\n 'adodbapi',\n 'elementtree.ElementTree',\n 'pycurl',\n 'tornado.curl_httpclient',\n 'pymongo',\n 'pymysql',\n 'psutil',\n 'pg8000',\n 'redis',\n 'requests',\n\n # agent\n 'checks.services_checks',\n 'httplib2',\n\n # pup\n 'pup',\n 'pup.pup',\n 'tornado.websocket',\n 'tornado.web',\n 'tornado.ioloop',\n ]\n\n class Target(object):\n def __init__(self, **kw):\n self.__dict__.update(kw) \n self.version = get_version()\n self.company_name = 'Datadog, Inc.'\n self.copyright = 'Copyright 2013 Datadog, Inc.'\n self.cmdline_style = 'pywin32'\n\n agent_svc = Target(name='Datadog Agent', modules='win32.agent', dest_base='ddagent')\n\n extra_args = {\n 'options': {\n 'py2exe': {\n 'includes': ','.join(include_modules),\n 'optimize': 0,\n 'compressed': True,\n 'bundle_files': 3,\n 'dll_excludes': [ \"IPHLPAPI.DLL\", \"NSI.dll\", \"WINNSI.DLL\", \"WTSAPI32.dll\"],\n },\n },\n 'console': ['win32\\shell.py'],\n 'service': [agent_svc],\n 'windows': [{'script': 'win32\\gui.py',\n 'dest_base': \"agent-manager\",\n 'uac_info': \"requireAdministrator\", # The manager needs to be administrator to stop/start the service\n 'icon_resources': [(1, r\"packaging\\datadog-agent\\win32\\install_files\\dd_agent_win_256.ico\")],\n }],\n 'data_files': [\n (\"Microsoft.VC90.CRT\", glob(r'C:\\Python27\\redist\\*.*')),\n ('pup', glob('pup/pup.html')),\n ('pup', glob('pup/status.html')),\n ('pup/static', glob('pup/static/*.*')),\n ('jmxfetch', glob('checks/libs/%s' % JMX_FETCH_JAR_NAME)),\n ],\n }\n\nsetup(\n name='datadog-agent',\n version=get_version(),\n description=\"DevOps' best friend\",\n author='DataDog',\n author_email='[email protected]',\n url='http://www.datadoghq.com',\n install_requires=install_requires,\n setup_requires=setup_requires,\n packages=find_packages(exclude=['ez_setup']),\n include_package_data=True,\n test_suite='nose.collector',\n zip_safe=False,\n **extra_args\n)\n", "path": "setup.py"}]}
| 2,032 | 106 |
gh_patches_debug_14055
|
rasdani/github-patches
|
git_diff
|
spack__spack-12932
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Installation issue: py-adios
### Steps to reproduce the issue
```console
$ spack install -j 64 py-adios ^[email protected]:
```
The result is lots of errors like:
```adios_mpi.cpp:47080:21: error: 'PyThreadState' {aka 'struct _ts'} has no member named 'exc_type'; did you mean 'curexc_type'?```
And similar.
### Platform and user environment
```commandline
$ uname -a
Linux quokka1.ccs.ornl.gov 4.14.0-115.el7a.aarch64 #1 SMP Tue Sep 25 12:32:51 EDT 2018 aarch64 aarch64 aarch64 GNU/Linux
```
### Additional information
py-adios includes a file (adios_mpi.cpp) which was generated using Cython 0.28.2. This version of Cython generates code that won't compile against the Python 3.7 C API. (This was fixed in the 0.29 release.) The Adios developers have been notified (https://github.com/ornladios/ADIOS/issues/202).
I think the workaround is to add a ```depends_on()``` line for python <=3.6.x. If/when the issue is fixed in Adios, we can add a ```when``` clause to the dependency. I'll issue a pull request shortly.
--- END ISSUE ---
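A sketch of the workaround described above, in Spack's package DSL. The unconditional form caps Python at 3.6; the commented variant shows how a `when` clause could later scope the cap to the affected Adios releases once upstream regenerates the wrapper (the `@:1.13.0` bound is only a guess, not a confirmed version range):

```python
from spack import *


class PyAdios(PythonPackage):
    """Excerpt showing only the added constraint."""

    # adios_mpi.cpp ships pre-generated by Cython 0.28.2 and does not
    # compile against the Python 3.7 C API, so cap Python at 3.6.
    depends_on('python@:3.6')

    # If upstream regenerates the wrapper with Cython >= 0.29, the cap
    # could be limited to the broken releases, e.g.:
    # depends_on('python@:3.6', when='@:1.13.0')
```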
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `var/spack/repos/builtin/packages/py-adios/package.py`
Content:
```
1 # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 from spack import *
7
8
9 class PyAdios(PythonPackage):
10 """NumPy bindings of ADIOS1"""
11
12 homepage = "https://www.olcf.ornl.gov/center-projects/adios/"
13 url = "https://github.com/ornladios/ADIOS/archive/v1.12.0.tar.gz"
14 git = "https://github.com/ornladios/ADIOS.git"
15
16 maintainers = ['ax3l']
17
18 version('develop', branch='master')
19 version('1.13.0', '68af36b821debbdf4748b20320a990ce')
20 version('1.12.0', '84a1c71b6698009224f6f748c5257fc9')
21 version('1.11.1', '5639bfc235e50bf17ba9dafb14ea4185')
22 version('1.11.0', '5eead5b2ccf962f5e6d5f254d29d5238')
23 version('1.10.0', 'eff450a4c0130479417cfd63186957f3')
24 version('1.9.0', '310ff02388bbaa2b1c1710ee970b5678')
25
26 variant('mpi', default=True,
27 description='Enable MPI support')
28
29 for v in ['1.9.0', '1.10.0', '1.11.0', '1.11.1', '1.12.0', '1.13.0',
30 'develop']:
31 depends_on('adios@{0} ~mpi'.format(v),
32 when='@{0} ~mpi'.format(v),
33 type=['build', 'link', 'run'])
34 depends_on('adios@{0} +mpi'.format(v),
35 when='@{0} +mpi'.format(v),
36 type=['build', 'link', 'run'])
37
38 depends_on('py-numpy', type=['build', 'run'])
39 depends_on('mpi', when='+mpi')
40 depends_on('py-mpi4py', type=['run'], when='+mpi')
41 depends_on('py-cython', type=['build'])
42
43 phases = ['build_clib', 'install']
44 build_directory = 'wrappers/numpy'
45
46 def setup_file(self):
47 """Returns the name of the setup file to use."""
48 if '+mpi' in self.spec:
49 return 'setup_mpi.py'
50 else:
51 return 'setup.py'
52
53 def build_clib(self, spec, prefix):
54 # calls: make [MPI=y] python
55 args = ''
56 if '+mpi' in self.spec:
57 args = 'MPI=y '
58 args += 'python'
59 with working_dir(self.build_directory):
60 make(args)
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/var/spack/repos/builtin/packages/py-adios/package.py b/var/spack/repos/builtin/packages/py-adios/package.py
--- a/var/spack/repos/builtin/packages/py-adios/package.py
+++ b/var/spack/repos/builtin/packages/py-adios/package.py
@@ -35,6 +35,14 @@
when='@{0} +mpi'.format(v),
type=['build', 'link', 'run'])
+ # NOTE: this dependency is a work-around for a bug in Adios itself.
+ # Specifically, Adios uses code that was generated by Cython 0.28.2.
+ # This code won't compile against the Python 3.7 C API.
+ # See https://github.com/ornladios/ADIOS/issues/202 and
+ # the first entry under "Bug Fixes" at
+ # https://github.com/cython/cython/blob/0.29.x/CHANGES.rst
+ depends_on('python@:3.6')
+
depends_on('py-numpy', type=['build', 'run'])
depends_on('mpi', when='+mpi')
depends_on('py-mpi4py', type=['run'], when='+mpi')
|
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/py-adios/package.py b/var/spack/repos/builtin/packages/py-adios/package.py\n--- a/var/spack/repos/builtin/packages/py-adios/package.py\n+++ b/var/spack/repos/builtin/packages/py-adios/package.py\n@@ -35,6 +35,14 @@\n when='@{0} +mpi'.format(v),\n type=['build', 'link', 'run'])\n \n+ # NOTE: this dependency is a work-around for a bug in Adios itself.\n+ # Specifically, Adios uses code that was generated by Cython 0.28.2.\n+ # This code won't compile against the Python 3.7 C API.\n+ # See https://github.com/ornladios/ADIOS/issues/202 and\n+ # the first entry under \"Bug Fixes\" at\n+ # https://github.com/cython/cython/blob/0.29.x/CHANGES.rst\n+ depends_on('python@:3.6')\n+\n depends_on('py-numpy', type=['build', 'run'])\n depends_on('mpi', when='+mpi')\n depends_on('py-mpi4py', type=['run'], when='+mpi')\n", "issue": "Installation issue: py-adios\n### Steps to reproduce the issue\r\n```console\r\n$ spack install -j 64 py-adios ^[email protected]:\r\n```\r\n\r\nThe result is lots of errors like:\r\n```adios_mpi.cpp:47080:21: error: 'PyThreadState' {aka 'struct _ts'} has no member named 'exc_type'; did you mean 'curexc_type'?```\r\n\r\nAnd similar.\r\n\r\n### Platform and user environment\r\n```commandline\r\n$ uname -a\r\nLinux quokka1.ccs.ornl.gov 4.14.0-115.el7a.aarch64 #1 SMP Tue Sep 25 12:32:51 EDT 2018 aarch64 aarch64 aarch64 GNU/Linux\r\n``` \r\n\r\n### Additional information\r\npy-adios includes a file (adios_mpi.cpp) which was generated using Cython 0.28.2. This version of Cython generates code that won't compile against the Python 3.7 C API. (This was fixed in the 0.29 release.) The Adios developers have been notified (https://github.com/ornladios/ADIOS/issues/202).\r\n\r\nI think the workaround is to add a ```depends_on()``` line for python <=3.6.x. If/when the issue is fixed in Adios, we can add a ```when``` clause to the dependency. I'll issue a pull request shortly.\r\n\n", "before_files": [{"content": "# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass PyAdios(PythonPackage):\n \"\"\"NumPy bindings of ADIOS1\"\"\"\n\n homepage = \"https://www.olcf.ornl.gov/center-projects/adios/\"\n url = \"https://github.com/ornladios/ADIOS/archive/v1.12.0.tar.gz\"\n git = \"https://github.com/ornladios/ADIOS.git\"\n\n maintainers = ['ax3l']\n\n version('develop', branch='master')\n version('1.13.0', '68af36b821debbdf4748b20320a990ce')\n version('1.12.0', '84a1c71b6698009224f6f748c5257fc9')\n version('1.11.1', '5639bfc235e50bf17ba9dafb14ea4185')\n version('1.11.0', '5eead5b2ccf962f5e6d5f254d29d5238')\n version('1.10.0', 'eff450a4c0130479417cfd63186957f3')\n version('1.9.0', '310ff02388bbaa2b1c1710ee970b5678')\n\n variant('mpi', default=True,\n description='Enable MPI support')\n\n for v in ['1.9.0', '1.10.0', '1.11.0', '1.11.1', '1.12.0', '1.13.0',\n 'develop']:\n depends_on('adios@{0} ~mpi'.format(v),\n when='@{0} ~mpi'.format(v),\n type=['build', 'link', 'run'])\n depends_on('adios@{0} +mpi'.format(v),\n when='@{0} +mpi'.format(v),\n type=['build', 'link', 'run'])\n\n depends_on('py-numpy', type=['build', 'run'])\n depends_on('mpi', when='+mpi')\n depends_on('py-mpi4py', type=['run'], when='+mpi')\n depends_on('py-cython', type=['build'])\n\n phases = ['build_clib', 'install']\n build_directory = 'wrappers/numpy'\n\n def setup_file(self):\n \"\"\"Returns the name of the setup file to use.\"\"\"\n if '+mpi' in self.spec:\n return 'setup_mpi.py'\n else:\n return 'setup.py'\n\n def build_clib(self, spec, prefix):\n # calls: make [MPI=y] python\n args = ''\n if '+mpi' in self.spec:\n args = 'MPI=y '\n args += 'python'\n with working_dir(self.build_directory):\n make(args)\n", "path": "var/spack/repos/builtin/packages/py-adios/package.py"}], "after_files": [{"content": "# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass PyAdios(PythonPackage):\n \"\"\"NumPy bindings of ADIOS1\"\"\"\n\n homepage = \"https://www.olcf.ornl.gov/center-projects/adios/\"\n url = \"https://github.com/ornladios/ADIOS/archive/v1.12.0.tar.gz\"\n git = \"https://github.com/ornladios/ADIOS.git\"\n\n maintainers = ['ax3l']\n\n version('develop', branch='master')\n version('1.13.0', '68af36b821debbdf4748b20320a990ce')\n version('1.12.0', '84a1c71b6698009224f6f748c5257fc9')\n version('1.11.1', '5639bfc235e50bf17ba9dafb14ea4185')\n version('1.11.0', '5eead5b2ccf962f5e6d5f254d29d5238')\n version('1.10.0', 'eff450a4c0130479417cfd63186957f3')\n version('1.9.0', '310ff02388bbaa2b1c1710ee970b5678')\n\n variant('mpi', default=True,\n description='Enable MPI support')\n\n for v in ['1.9.0', '1.10.0', '1.11.0', '1.11.1', '1.12.0', '1.13.0',\n 'develop']:\n depends_on('adios@{0} ~mpi'.format(v),\n when='@{0} ~mpi'.format(v),\n type=['build', 'link', 'run'])\n depends_on('adios@{0} +mpi'.format(v),\n when='@{0} +mpi'.format(v),\n type=['build', 'link', 'run'])\n\n # NOTE: this dependency is a work-around for a bug in Adios itself.\n # Specifically, Adios uses code that was generated by Cython 0.28.2.\n # This code won't compile against the Python 3.7 C API.\n # See https://github.com/ornladios/ADIOS/issues/202 and\n # the first entry under \"Bug Fixes\" at\n # https://github.com/cython/cython/blob/0.29.x/CHANGES.rst\n depends_on('python@:3.6')\n\n depends_on('py-numpy', type=['build', 'run'])\n depends_on('mpi', when='+mpi')\n depends_on('py-mpi4py', type=['run'], when='+mpi')\n depends_on('py-cython', type=['build'])\n\n phases = ['build_clib', 'install']\n build_directory = 'wrappers/numpy'\n\n def setup_file(self):\n \"\"\"Returns the name of the setup file to use.\"\"\"\n if '+mpi' in self.spec:\n return 'setup_mpi.py'\n else:\n return 'setup.py'\n\n def build_clib(self, spec, prefix):\n # calls: make [MPI=y] python\n args = ''\n if '+mpi' in self.spec:\n args = 'MPI=y '\n args += 'python'\n with working_dir(self.build_directory):\n make(args)\n", "path": "var/spack/repos/builtin/packages/py-adios/package.py"}]}
| 1,459 | 278 |
gh_patches_debug_11980
|
rasdani/github-patches
|
git_diff
|
pypi__warehouse-1659
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update elasticsearch-dsl to 5.1.0
There's a new version of [elasticsearch-dsl](https://pypi.python.org/pypi/elasticsearch-dsl) available.
You are currently using **5.0.0**. I have updated it to **5.1.0**
These links might come in handy: <a href="http://pypi.python.org/pypi/elasticsearch-dsl">PyPI</a> | <a href="https://pyup.io/changelogs/elasticsearch-dsl/">Changelog</a> | <a href="https://github.com/elasticsearch/elasticsearch-dsl-py">Repo</a>
### Changelog
>
>### 5.1.0
>------------------
> * Renamed ``Result`` and ``ResultMeta`` to ``Hit`` and ``HitMeta`` respectively
> * ``Response`` now stores ``Search`` which it gets as first arg to ``__init__``
> * aggregation results are now wrapped in classes and properly deserialized
> * ``Date`` fields now allow for numerical timestamps in the java format (in millis)
> * Added API documentation
> * replaced generated classes with manually created
*Got merge conflicts? Close this PR and delete the branch. I'll create a new PR for you.*
Happy merging! 🤖
--- END ISSUE ---
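For context on the renames listed in the changelog: in elasticsearch-dsl 5.1 search results are wrapped in `Hit` objects rather than `Result`, and typical consuming code keeps working because hits are still reached by iterating the response. A small sketch, assuming a local cluster; the endpoint, index, and field names are placeholders, not Warehouse's actual schema:

```python
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search

client = Elasticsearch(["http://localhost:9200"])   # placeholder endpoint

search = Search(using=client, index="example-index").query("match", name="requests")
response = search.execute()

for hit in response:             # each item is a Hit in 5.1 (formerly Result)
    print(hit.meta.score, hit.name)
```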
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/search.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import urllib.parse
14
15 import certifi
16 import elasticsearch
17 import venusian
18
19 from elasticsearch_dsl import Index
20
21
22 def doc_type(cls):
23 def callback(scanner, _name, item):
24 types_ = scanner.config.registry.setdefault("search.doc_types", set())
25 types_.add(item)
26
27 venusian.attach(cls, callback)
28
29 return cls
30
31
32 def get_index(name, doc_types, *, using, shards=1, replicas=0, interval="1s"):
33 index = Index(name, using=using)
34 for doc_type in doc_types:
35 index.doc_type(doc_type)
36 index.settings(
37 number_of_shards=shards,
38 number_of_replicas=replicas,
39 refresh_interval=interval,
40 )
41 return index
42
43
44 def es(request):
45 client = request.registry["elasticsearch.client"]
46 doc_types = request.registry.get("search.doc_types", set())
47 index_name = request.registry["elasticsearch.index"]
48 index = get_index(
49 index_name,
50 doc_types,
51 using=client,
52 shards=request.registry.get("elasticsearch.shards", 1),
53 replicas=request.registry.get("elasticsearch.replicas", 0),
54 )
55 return index.search()
56
57
58 def includeme(config):
59 p = urllib.parse.urlparse(config.registry.settings["elasticsearch.url"])
60 qs = urllib.parse.parse_qs(p.query)
61 config.registry["elasticsearch.client"] = elasticsearch.Elasticsearch(
62 [urllib.parse.urlunparse(p[:2] + ("",) * 4)],
63 verify_certs=True,
64 ca_certs=certifi.where(),
65 timeout=30,
66 retry_on_timeout=True,
67 )
68 config.registry["elasticsearch.index"] = p.path.strip("/")
69 config.registry["elasticsearch.shards"] = int(qs.get("shards", ["1"])[0])
70 config.registry["elasticsearch.replicas"] = \
71 int(qs.get("replicas", ["0"])[0])
72 config.add_request_method(es, name="es", reify=True)
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/warehouse/search.py b/warehouse/search.py
--- a/warehouse/search.py
+++ b/warehouse/search.py
@@ -16,7 +16,7 @@
import elasticsearch
import venusian
-from elasticsearch_dsl import Index
+from elasticsearch_dsl import Index, serializer
def doc_type(cls):
@@ -64,6 +64,7 @@
ca_certs=certifi.where(),
timeout=30,
retry_on_timeout=True,
+ serializer=serializer.serializer,
)
config.registry["elasticsearch.index"] = p.path.strip("/")
config.registry["elasticsearch.shards"] = int(qs.get("shards", ["1"])[0])
|
{"golden_diff": "diff --git a/warehouse/search.py b/warehouse/search.py\n--- a/warehouse/search.py\n+++ b/warehouse/search.py\n@@ -16,7 +16,7 @@\n import elasticsearch\n import venusian\n \n-from elasticsearch_dsl import Index\n+from elasticsearch_dsl import Index, serializer\n \n \n def doc_type(cls):\n@@ -64,6 +64,7 @@\n ca_certs=certifi.where(),\n timeout=30,\n retry_on_timeout=True,\n+ serializer=serializer.serializer,\n )\n config.registry[\"elasticsearch.index\"] = p.path.strip(\"/\")\n config.registry[\"elasticsearch.shards\"] = int(qs.get(\"shards\", [\"1\"])[0])\n", "issue": "Update elasticsearch-dsl to 5.1.0\n\nThere's a new version of [elasticsearch-dsl](https://pypi.python.org/pypi/elasticsearch-dsl) available.\nYou are currently using **5.0.0**. I have updated it to **5.1.0**\n\n\n\nThese links might come in handy: <a href=\"http://pypi.python.org/pypi/elasticsearch-dsl\">PyPI</a> | <a href=\"https://pyup.io/changelogs/elasticsearch-dsl/\">Changelog</a> | <a href=\"https://github.com/elasticsearch/elasticsearch-dsl-py\">Repo</a> \n\n\n\n### Changelog\n> \n>### 5.1.0\n\n>------------------\n> * Renamed ``Result`` and ``ResultMeta`` to ``Hit`` and ``HitMeta`` respectively\n> * ``Response`` now stores ``Search`` which it gets as first arg to ``__init__``\n> * aggregation results are now wrapped in classes and properly deserialized\n> * ``Date`` fields now allow for numerical timestamps in the java format (in millis)\n> * Added API documentation\n> * replaced generated classes with manually created\n\n\n\n\n\n\n\n*Got merge conflicts? Close this PR and delete the branch. I'll create a new PR for you.*\n\nHappy merging! \ud83e\udd16\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport urllib.parse\n\nimport certifi\nimport elasticsearch\nimport venusian\n\nfrom elasticsearch_dsl import Index\n\n\ndef doc_type(cls):\n def callback(scanner, _name, item):\n types_ = scanner.config.registry.setdefault(\"search.doc_types\", set())\n types_.add(item)\n\n venusian.attach(cls, callback)\n\n return cls\n\n\ndef get_index(name, doc_types, *, using, shards=1, replicas=0, interval=\"1s\"):\n index = Index(name, using=using)\n for doc_type in doc_types:\n index.doc_type(doc_type)\n index.settings(\n number_of_shards=shards,\n number_of_replicas=replicas,\n refresh_interval=interval,\n )\n return index\n\n\ndef es(request):\n client = request.registry[\"elasticsearch.client\"]\n doc_types = request.registry.get(\"search.doc_types\", set())\n index_name = request.registry[\"elasticsearch.index\"]\n index = get_index(\n index_name,\n doc_types,\n using=client,\n shards=request.registry.get(\"elasticsearch.shards\", 1),\n replicas=request.registry.get(\"elasticsearch.replicas\", 0),\n )\n return index.search()\n\n\ndef includeme(config):\n p = urllib.parse.urlparse(config.registry.settings[\"elasticsearch.url\"])\n qs = urllib.parse.parse_qs(p.query)\n config.registry[\"elasticsearch.client\"] = elasticsearch.Elasticsearch(\n [urllib.parse.urlunparse(p[:2] + 
(\"\",) * 4)],\n verify_certs=True,\n ca_certs=certifi.where(),\n timeout=30,\n retry_on_timeout=True,\n )\n config.registry[\"elasticsearch.index\"] = p.path.strip(\"/\")\n config.registry[\"elasticsearch.shards\"] = int(qs.get(\"shards\", [\"1\"])[0])\n config.registry[\"elasticsearch.replicas\"] = \\\n int(qs.get(\"replicas\", [\"0\"])[0])\n config.add_request_method(es, name=\"es\", reify=True)\n", "path": "warehouse/search.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport urllib.parse\n\nimport certifi\nimport elasticsearch\nimport venusian\n\nfrom elasticsearch_dsl import Index, serializer\n\n\ndef doc_type(cls):\n def callback(scanner, _name, item):\n types_ = scanner.config.registry.setdefault(\"search.doc_types\", set())\n types_.add(item)\n\n venusian.attach(cls, callback)\n\n return cls\n\n\ndef get_index(name, doc_types, *, using, shards=1, replicas=0, interval=\"1s\"):\n index = Index(name, using=using)\n for doc_type in doc_types:\n index.doc_type(doc_type)\n index.settings(\n number_of_shards=shards,\n number_of_replicas=replicas,\n refresh_interval=interval,\n )\n return index\n\n\ndef es(request):\n client = request.registry[\"elasticsearch.client\"]\n doc_types = request.registry.get(\"search.doc_types\", set())\n index_name = request.registry[\"elasticsearch.index\"]\n index = get_index(\n index_name,\n doc_types,\n using=client,\n shards=request.registry.get(\"elasticsearch.shards\", 1),\n replicas=request.registry.get(\"elasticsearch.replicas\", 0),\n )\n return index.search()\n\n\ndef includeme(config):\n p = urllib.parse.urlparse(config.registry.settings[\"elasticsearch.url\"])\n qs = urllib.parse.parse_qs(p.query)\n config.registry[\"elasticsearch.client\"] = elasticsearch.Elasticsearch(\n [urllib.parse.urlunparse(p[:2] + (\"\",) * 4)],\n verify_certs=True,\n ca_certs=certifi.where(),\n timeout=30,\n retry_on_timeout=True,\n serializer=serializer.serializer,\n )\n config.registry[\"elasticsearch.index\"] = p.path.strip(\"/\")\n config.registry[\"elasticsearch.shards\"] = int(qs.get(\"shards\", [\"1\"])[0])\n config.registry[\"elasticsearch.replicas\"] = \\\n int(qs.get(\"replicas\", [\"0\"])[0])\n config.add_request_method(es, name=\"es\", reify=True)\n", "path": "warehouse/search.py"}]}
| 1,228 | 153 |
gh_patches_debug_638
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-2278
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.150
On the docket:
+ [x] Add support for Pip 23.3.1. #2276
+ [x] Support .egg-info dist metadata. #2264
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.149"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.149"
+__version__ = "2.1.150"
|
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.149\"\n+__version__ = \"2.1.150\"\n", "issue": "Release 2.1.150\nOn the docket:\r\n+ [x] Add support for Pip 23.3.1. #2276\r\n+ [x] Support .egg-info dist metadata. #2264\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.149\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.150\"\n", "path": "pex/version.py"}]}
| 362 | 98 |
gh_patches_debug_2529
|
rasdani/github-patches
|
git_diff
|
systemd__mkosi-1847
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tput smam breaks build
Using latest on Debian Sid.
```
‣ Running finalize script…
‣ Creating tar archive /home/ander/Desktop/mkosi/tools/mkosi.workspace/.mkosi-tmp9zitpbja/staging/image.tar…
‣ /home/ander/Desktop/mkosi/tools/mkosi.output/image size is 1016.1M, consumes 1016.1M.
‣ "tput smam" returned non-zero exit code 1.
make: *** [Makefile:13: build] Error 1
```
`tput` in Debian does not understand `smam`?
```
$ tput smam; echo $?
1
```
Introduced in e651b88.
Ugly workaround:
```
$ ln -s /usr/bin/true ~/.local/bin/tput
```
--- END ISSUE ---
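A minimal sketch of the tolerant cleanup path, in line with the fix below: some terminfo entries (as on the reporter's Debian Sid) lack the `smam` capability, so a non-zero exit status from `tput` is simply not treated as fatal. The helper name is illustrative, not from the mkosi codebase:

```python
import shutil
import subprocess


def restore_terminal() -> None:
    """Best-effort terminal reset; missing capabilities are not errors."""
    if not shutil.which("tput"):
        return
    for capability in ("cnorm", "smam"):
        # check=False: a terminfo entry without this capability makes tput
        # return non-zero, which should not abort the build.
        subprocess.run(["tput", capability], check=False)
```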
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkosi/__main__.py`
Content:
```
1 # SPDX-License-Identifier: LGPL-2.1+
2 # PYTHON_ARGCOMPLETE_OK
3
4 import contextlib
5 import logging
6 import shutil
7 import subprocess
8 import sys
9 from collections.abc import Iterator
10
11 from mkosi import run_verb
12 from mkosi.config import MkosiConfigParser
13 from mkosi.log import ARG_DEBUG, log_setup
14 from mkosi.run import ensure_exc_info, run
15
16
17 @contextlib.contextmanager
18 def propagate_failed_return() -> Iterator[None]:
19 try:
20 yield
21 except SystemExit as e:
22 if ARG_DEBUG.get():
23 sys.excepthook(*ensure_exc_info())
24
25 sys.exit(e.code)
26 except KeyboardInterrupt:
27 if ARG_DEBUG.get():
28 sys.excepthook(*ensure_exc_info())
29 else:
30 logging.error("Interrupted")
31
32 sys.exit(1)
33 except subprocess.CalledProcessError as e:
34 # Failures from qemu, ssh and systemd-nspawn are expected and we won't log stacktraces for those.
35 if ARG_DEBUG.get() and e.cmd and e.cmd[0] not in ("qemu", "ssh", "systemd-nspawn"):
36 sys.excepthook(*ensure_exc_info())
37
38 # We always log when subprocess.CalledProcessError is raised, so we don't log again here.
39 sys.exit(e.returncode)
40
41
42 @propagate_failed_return()
43 def main() -> None:
44 log_setup()
45 args, presets = MkosiConfigParser().parse()
46
47 if ARG_DEBUG.get():
48 logging.getLogger().setLevel(logging.DEBUG)
49
50 try:
51 run_verb(args, presets)
52 finally:
53 if sys.stderr.isatty() and shutil.which("tput"):
54 run(["tput", "cnorm"])
55 run(["tput", "smam"])
56
57
58 if __name__ == "__main__":
59 main()
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mkosi/__main__.py b/mkosi/__main__.py
--- a/mkosi/__main__.py
+++ b/mkosi/__main__.py
@@ -51,8 +51,8 @@
run_verb(args, presets)
finally:
if sys.stderr.isatty() and shutil.which("tput"):
- run(["tput", "cnorm"])
- run(["tput", "smam"])
+ run(["tput", "cnorm"], check=False)
+ run(["tput", "smam"], check=False)
if __name__ == "__main__":
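
For context, the patch works because mkosi's `run()` helper raises on a non-zero exit status unless `check=False` is passed, and Debian's terminfo does not know `smam`, so the cleanup call itself was what failed the build. A minimal stand-in using `subprocess.run` directly (the real `mkosi.run` wrapper is not shown in this row) illustrates the intended best-effort cleanup:

```python
import shutil
import subprocess
import sys

def reset_terminal() -> None:
    # Best-effort terminal cleanup: some terminfo entries (e.g. Debian's)
    # do not define "smam", so ignore a non-zero exit instead of raising.
    if sys.stderr.isatty() and shutil.which("tput"):
        subprocess.run(["tput", "cnorm"], check=False)  # make the cursor visible again
        subprocess.run(["tput", "smam"], check=False)   # re-enable automatic margins
```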
|
{"golden_diff": "diff --git a/mkosi/__main__.py b/mkosi/__main__.py\n--- a/mkosi/__main__.py\n+++ b/mkosi/__main__.py\n@@ -51,8 +51,8 @@\n run_verb(args, presets)\n finally:\n if sys.stderr.isatty() and shutil.which(\"tput\"):\n- run([\"tput\", \"cnorm\"])\n- run([\"tput\", \"smam\"])\n+ run([\"tput\", \"cnorm\"], check=False)\n+ run([\"tput\", \"smam\"], check=False)\n \n \n if __name__ == \"__main__\":\n", "issue": "tput smam breaks build\nUsing latest on Debian Sid.\r\n\r\n```\r\n\u2023 Running finalize script\u2026\r\n\u2023 Creating tar archive /home/ander/Desktop/mkosi/tools/mkosi.workspace/.mkosi-tmp9zitpbja/staging/image.tar\u2026\r\n\u2023 /home/ander/Desktop/mkosi/tools/mkosi.output/image size is 1016.1M, consumes 1016.1M.\r\n\u2023 \"tput smam\" returned non-zero exit code 1.\r\nmake: *** [Makefile:13: build] Error 1\r\n```\r\n\r\n`tput` in Debian do not understand `smam`?\r\n\r\n```\r\n$ tput smam; echo $?\r\n1\r\n```\r\n\r\nIntroduced in e651b88.\r\n\r\nUgly workaround:\r\n```\r\n$ ln -s /usr/bin/true ~/.local/bin/tput\r\n```\n", "before_files": [{"content": "# SPDX-License-Identifier: LGPL-2.1+\n# PYTHON_ARGCOMPLETE_OK\n\nimport contextlib\nimport logging\nimport shutil\nimport subprocess\nimport sys\nfrom collections.abc import Iterator\n\nfrom mkosi import run_verb\nfrom mkosi.config import MkosiConfigParser\nfrom mkosi.log import ARG_DEBUG, log_setup\nfrom mkosi.run import ensure_exc_info, run\n\n\[email protected]\ndef propagate_failed_return() -> Iterator[None]:\n try:\n yield\n except SystemExit as e:\n if ARG_DEBUG.get():\n sys.excepthook(*ensure_exc_info())\n\n sys.exit(e.code)\n except KeyboardInterrupt:\n if ARG_DEBUG.get():\n sys.excepthook(*ensure_exc_info())\n else:\n logging.error(\"Interrupted\")\n\n sys.exit(1)\n except subprocess.CalledProcessError as e:\n # Failures from qemu, ssh and systemd-nspawn are expected and we won't log stacktraces for those.\n if ARG_DEBUG.get() and e.cmd and e.cmd[0] not in (\"qemu\", \"ssh\", \"systemd-nspawn\"):\n sys.excepthook(*ensure_exc_info())\n\n # We always log when subprocess.CalledProcessError is raised, so we don't log again here.\n sys.exit(e.returncode)\n\n\n@propagate_failed_return()\ndef main() -> None:\n log_setup()\n args, presets = MkosiConfigParser().parse()\n\n if ARG_DEBUG.get():\n logging.getLogger().setLevel(logging.DEBUG)\n\n try:\n run_verb(args, presets)\n finally:\n if sys.stderr.isatty() and shutil.which(\"tput\"):\n run([\"tput\", \"cnorm\"])\n run([\"tput\", \"smam\"])\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "mkosi/__main__.py"}], "after_files": [{"content": "# SPDX-License-Identifier: LGPL-2.1+\n# PYTHON_ARGCOMPLETE_OK\n\nimport contextlib\nimport logging\nimport shutil\nimport subprocess\nimport sys\nfrom collections.abc import Iterator\n\nfrom mkosi import run_verb\nfrom mkosi.config import MkosiConfigParser\nfrom mkosi.log import ARG_DEBUG, log_setup\nfrom mkosi.run import ensure_exc_info, run\n\n\[email protected]\ndef propagate_failed_return() -> Iterator[None]:\n try:\n yield\n except SystemExit as e:\n if ARG_DEBUG.get():\n sys.excepthook(*ensure_exc_info())\n\n sys.exit(e.code)\n except KeyboardInterrupt:\n if ARG_DEBUG.get():\n sys.excepthook(*ensure_exc_info())\n else:\n logging.error(\"Interrupted\")\n\n sys.exit(1)\n except subprocess.CalledProcessError as e:\n # Failures from qemu, ssh and systemd-nspawn are expected and we won't log stacktraces for those.\n if ARG_DEBUG.get() and e.cmd and e.cmd[0] not in (\"qemu\", \"ssh\", \"systemd-nspawn\"):\n 
sys.excepthook(*ensure_exc_info())\n\n # We always log when subprocess.CalledProcessError is raised, so we don't log again here.\n sys.exit(e.returncode)\n\n\n@propagate_failed_return()\ndef main() -> None:\n log_setup()\n args, presets = MkosiConfigParser().parse()\n\n if ARG_DEBUG.get():\n logging.getLogger().setLevel(logging.DEBUG)\n\n try:\n run_verb(args, presets)\n finally:\n if sys.stderr.isatty() and shutil.which(\"tput\"):\n run([\"tput\", \"cnorm\"], check=False)\n run([\"tput\", \"smam\"], check=False)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "mkosi/__main__.py"}]}
| 948 | 136 |
gh_patches_debug_28259
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-302
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add option to pass additional dependencies to hooks
I am currently working on implementing this framework and one of the things I am trying to run is eslint. As part of that I have a number of plugins that are in my configuration file. I think that, rather than forcing anyone who is using plugins to create a new hook definition with a corresponding package.json it might be useful to add a global option to pass a list of dependencies in the configuration file.
For instance, something lilke this:
``` yaml
- repo: https://github.com/pre-commit/mirrors-eslint
sha: 135f285caf8e6e886b28c8e98fdff402b69c4490
hooks:
- id: eslint
language_version: '0.12.7'
dependencies: [eslint-plugin-react, eslint-plugin-html]
```
and have those dependencies installed into the generated environment for that language.
I am going to work on implementing this in my forked repo but would like feedback on whether this is a desired feature or any implementation advice on how best to facilitate this.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/output.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import os
4 import subprocess
5 import sys
6
7 from pre_commit import color
8 from pre_commit import five
9
10
11 # TODO: smell: import side-effects
12 try:
13 if not os.environ.get('TERM'): # pragma: no cover (dumb terminal)
14 raise OSError('Cannot determine width without TERM')
15 COLS = int(
16 subprocess.Popen(
17 ('tput', 'cols'), stdout=subprocess.PIPE,
18 ).communicate()[0] or
19 # Default in the case of no terminal
20 80
21 )
22 except OSError: # pragma: no cover (windows)
23 COLS = 80
24
25
26 def get_hook_message(
27 start,
28 postfix='',
29 end_msg=None,
30 end_len=0,
31 end_color=None,
32 use_color=None,
33 cols=COLS,
34 ):
35 """Prints a message for running a hook.
36
37 This currently supports three approaches:
38
39 # Print `start` followed by dots, leaving 6 characters at the end
40 >>> print_hook_message('start', end_len=6)
41 start...............................................................
42
43 # Print `start` followed by dots with the end message colored if coloring
44 # is specified and a newline afterwards
45 >>> print_hook_message(
46 'start',
47 end_msg='end',
48 end_color=color.RED,
49 use_color=True,
50 )
51 start...................................................................end
52
53 # Print `start` followed by dots, followed by the `postfix` message
54 # uncolored, followed by the `end_msg` colored if specified and a newline
55 # afterwards
56 >>> print_hook_message(
57 'start',
58 postfix='postfix ',
59 end_msg='end',
60 end_color=color.RED,
61 use_color=True,
62 )
63 start...........................................................postfix end
64 """
65 if bool(end_msg) == bool(end_len):
66 raise ValueError('Expected one of (`end_msg`, `end_len`)')
67 if end_msg is not None and (end_color is None or use_color is None):
68 raise ValueError(
69 '`end_color` and `use_color` are required with `end_msg`'
70 )
71
72 if end_len:
73 return start + '.' * (cols - len(start) - end_len - 1)
74 else:
75 return '{0}{1}{2}{3}\n'.format(
76 start,
77 '.' * (cols - len(start) - len(postfix) - len(end_msg) - 1),
78 postfix,
79 color.format_color(end_msg, end_color, use_color),
80 )
81
82
83 stdout_byte_stream = getattr(sys.stdout, 'buffer', sys.stdout)
84
85
86 def sys_stdout_write_wrapper(s, stream=stdout_byte_stream):
87 stream.write(five.to_bytes(s))
88
```
Path: `pre_commit/languages/python.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import contextlib
4 import distutils.spawn
5 import os
6 import sys
7
8 import virtualenv
9
10 from pre_commit.languages import helpers
11 from pre_commit.util import clean_path_on_failure
12 from pre_commit.util import shell_escape
13
14
15 ENVIRONMENT_DIR = 'py_env'
16
17
18 class PythonEnv(helpers.Environment):
19 @property
20 def env_prefix(self):
21 return ". '{{prefix}}{0}activate' &&".format(
22 virtualenv.path_locations(
23 helpers.environment_dir(ENVIRONMENT_DIR, self.language_version)
24 )[-1].rstrip(os.sep) + os.sep,
25 )
26
27
28 @contextlib.contextmanager
29 def in_env(repo_cmd_runner, language_version):
30 yield PythonEnv(repo_cmd_runner, language_version)
31
32
33 def norm_version(version):
34 version = os.path.expanduser(version)
35 if os.name == 'nt': # pragma: no cover (windows)
36 if not distutils.spawn.find_executable(version):
37 # expanduser introduces a leading slash
38 version = version.strip('\\')
39 # The default place for python on windows is:
40 # C:\PythonXX\python.exe
41 version = r'C:\{0}\python.exe'.format(version.replace('.', ''))
42 return version
43
44
45 def install_environment(
46 repo_cmd_runner,
47 version='default',
48 additional_dependencies=None,
49 ):
50 assert repo_cmd_runner.exists('setup.py')
51 directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
52
53 # Install a virtualenv
54 with clean_path_on_failure(repo_cmd_runner.path(directory)):
55 venv_cmd = [
56 sys.executable, '-m', 'virtualenv',
57 '{{prefix}}{0}'.format(directory)
58 ]
59 if version != 'default':
60 venv_cmd.extend(['-p', norm_version(version)])
61 repo_cmd_runner.run(venv_cmd)
62 with in_env(repo_cmd_runner, version) as env:
63 env.run("cd '{prefix}' && pip install .")
64 if additional_dependencies:
65 env.run(
66 "cd '{prefix}' && pip install " +
67 ' '.join(
68 shell_escape(dep) for dep in additional_dependencies
69 )
70 )
71
72
73 def run_hook(repo_cmd_runner, hook, file_args):
74 with in_env(repo_cmd_runner, hook['language_version']) as env:
75 return helpers.run_hook(env, hook, file_args)
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py
--- a/pre_commit/languages/python.py
+++ b/pre_commit/languages/python.py
@@ -31,15 +31,18 @@
def norm_version(version):
- version = os.path.expanduser(version)
if os.name == 'nt': # pragma: no cover (windows)
- if not distutils.spawn.find_executable(version):
- # expanduser introduces a leading slash
- version = version.strip('\\')
- # The default place for python on windows is:
- # C:\PythonXX\python.exe
- version = r'C:\{0}\python.exe'.format(version.replace('.', ''))
- return version
+ # Try looking up by name
+ if distutils.spawn.find_executable(version):
+ return version
+
+ # If it is in the form pythonx.x search in the default
+ # place on windows
+ if version.startswith('python'):
+ return r'C:\{0}\python.exe'.format(version.replace('.', ''))
+
+ # Otherwise assume it is a path
+ return os.path.expanduser(version)
def install_environment(
diff --git a/pre_commit/output.py b/pre_commit/output.py
--- a/pre_commit/output.py
+++ b/pre_commit/output.py
@@ -12,13 +12,14 @@
try:
if not os.environ.get('TERM'): # pragma: no cover (dumb terminal)
raise OSError('Cannot determine width without TERM')
- COLS = int(
- subprocess.Popen(
- ('tput', 'cols'), stdout=subprocess.PIPE,
- ).communicate()[0] or
- # Default in the case of no terminal
- 80
- )
+ else: # pragma no cover (windows)
+ COLS = int(
+ subprocess.Popen(
+ ('tput', 'cols'), stdout=subprocess.PIPE,
+ ).communicate()[0] or
+ # Default in the case of no terminal
+ 80
+ )
except OSError: # pragma: no cover (windows)
COLS = 80
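
The part of this diff that changes behavior most is the reworked Windows branch of `norm_version()`. Its lookup order can be restated as a standalone sketch (the function name and comments here are illustrative, not part of the repository):

```python
import distutils.spawn
import os

def norm_version_windows(version):
    # 1. A name resolvable on PATH (e.g. "python") is returned unchanged.
    if distutils.spawn.find_executable(version):
        return version
    # 2. A "pythonX.Y" style name maps to the default install location.
    if version.startswith('python'):
        return r'C:\{0}\python.exe'.format(version.replace('.', ''))
    # 3. Anything else is treated as a filesystem path, with "~" expanded.
    return os.path.expanduser(version)
```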
|
{"golden_diff": "diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py\n--- a/pre_commit/languages/python.py\n+++ b/pre_commit/languages/python.py\n@@ -31,15 +31,18 @@\n \n \n def norm_version(version):\n- version = os.path.expanduser(version)\n if os.name == 'nt': # pragma: no cover (windows)\n- if not distutils.spawn.find_executable(version):\n- # expanduser introduces a leading slash\n- version = version.strip('\\\\')\n- # The default place for python on windows is:\n- # C:\\PythonXX\\python.exe\n- version = r'C:\\{0}\\python.exe'.format(version.replace('.', ''))\n- return version\n+ # Try looking up by name\n+ if distutils.spawn.find_executable(version):\n+ return version\n+\n+ # If it is in the form pythonx.x search in the default\n+ # place on windows\n+ if version.startswith('python'):\n+ return r'C:\\{0}\\python.exe'.format(version.replace('.', ''))\n+\n+ # Otherwise assume it is a path\n+ return os.path.expanduser(version)\n \n \n def install_environment(\ndiff --git a/pre_commit/output.py b/pre_commit/output.py\n--- a/pre_commit/output.py\n+++ b/pre_commit/output.py\n@@ -12,13 +12,14 @@\n try:\n if not os.environ.get('TERM'): # pragma: no cover (dumb terminal)\n raise OSError('Cannot determine width without TERM')\n- COLS = int(\n- subprocess.Popen(\n- ('tput', 'cols'), stdout=subprocess.PIPE,\n- ).communicate()[0] or\n- # Default in the case of no terminal\n- 80\n- )\n+ else: # pragma no cover (windows)\n+ COLS = int(\n+ subprocess.Popen(\n+ ('tput', 'cols'), stdout=subprocess.PIPE,\n+ ).communicate()[0] or\n+ # Default in the case of no terminal\n+ 80\n+ )\n except OSError: # pragma: no cover (windows)\n COLS = 80\n", "issue": "Add option to pass additional dependencies to hooks\nI am currently working on implementing this framework and one of the things I am trying to run is eslint. As part of that I have a number of plugins that are in my configuration file. 
I think that, rather than forcing anyone who is using plugins to create a new hook definition with a corresponding package.json it might be useful to add a global option to pass a list of dependencies in the configuration file.\n\nFor instance, something lilke this:\n\n``` yaml\n- repo: https://github.com/pre-commit/mirrors-eslint\n sha: 135f285caf8e6e886b28c8e98fdff402b69c4490\n hooks:\n - id: eslint\n language_version: '0.12.7'\n dependencies: [eslint-plugin-react, eslint-plugin-html]\n```\n\nand have those dependencies installed into the generated environment for that language.\n\nI am going to work on implementing this in my forked repo but would like feedback on whether this is a desired feature or any implementation advice on how best to facilitate this.\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport os\nimport subprocess\nimport sys\n\nfrom pre_commit import color\nfrom pre_commit import five\n\n\n# TODO: smell: import side-effects\ntry:\n if not os.environ.get('TERM'): # pragma: no cover (dumb terminal)\n raise OSError('Cannot determine width without TERM')\n COLS = int(\n subprocess.Popen(\n ('tput', 'cols'), stdout=subprocess.PIPE,\n ).communicate()[0] or\n # Default in the case of no terminal\n 80\n )\nexcept OSError: # pragma: no cover (windows)\n COLS = 80\n\n\ndef get_hook_message(\n start,\n postfix='',\n end_msg=None,\n end_len=0,\n end_color=None,\n use_color=None,\n cols=COLS,\n):\n \"\"\"Prints a message for running a hook.\n\n This currently supports three approaches:\n\n # Print `start` followed by dots, leaving 6 characters at the end\n >>> print_hook_message('start', end_len=6)\n start...............................................................\n\n # Print `start` followed by dots with the end message colored if coloring\n # is specified and a newline afterwards\n >>> print_hook_message(\n 'start',\n end_msg='end',\n end_color=color.RED,\n use_color=True,\n )\n start...................................................................end\n\n # Print `start` followed by dots, followed by the `postfix` message\n # uncolored, followed by the `end_msg` colored if specified and a newline\n # afterwards\n >>> print_hook_message(\n 'start',\n postfix='postfix ',\n end_msg='end',\n end_color=color.RED,\n use_color=True,\n )\n start...........................................................postfix end\n \"\"\"\n if bool(end_msg) == bool(end_len):\n raise ValueError('Expected one of (`end_msg`, `end_len`)')\n if end_msg is not None and (end_color is None or use_color is None):\n raise ValueError(\n '`end_color` and `use_color` are required with `end_msg`'\n )\n\n if end_len:\n return start + '.' * (cols - len(start) - end_len - 1)\n else:\n return '{0}{1}{2}{3}\\n'.format(\n start,\n '.' * (cols - len(start) - len(postfix) - len(end_msg) - 1),\n postfix,\n color.format_color(end_msg, end_color, use_color),\n )\n\n\nstdout_byte_stream = getattr(sys.stdout, 'buffer', sys.stdout)\n\n\ndef sys_stdout_write_wrapper(s, stream=stdout_byte_stream):\n stream.write(five.to_bytes(s))\n", "path": "pre_commit/output.py"}, {"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport distutils.spawn\nimport os\nimport sys\n\nimport virtualenv\n\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import shell_escape\n\n\nENVIRONMENT_DIR = 'py_env'\n\n\nclass PythonEnv(helpers.Environment):\n @property\n def env_prefix(self):\n return \". 
'{{prefix}}{0}activate' &&\".format(\n virtualenv.path_locations(\n helpers.environment_dir(ENVIRONMENT_DIR, self.language_version)\n )[-1].rstrip(os.sep) + os.sep,\n )\n\n\[email protected]\ndef in_env(repo_cmd_runner, language_version):\n yield PythonEnv(repo_cmd_runner, language_version)\n\n\ndef norm_version(version):\n version = os.path.expanduser(version)\n if os.name == 'nt': # pragma: no cover (windows)\n if not distutils.spawn.find_executable(version):\n # expanduser introduces a leading slash\n version = version.strip('\\\\')\n # The default place for python on windows is:\n # C:\\PythonXX\\python.exe\n version = r'C:\\{0}\\python.exe'.format(version.replace('.', ''))\n return version\n\n\ndef install_environment(\n repo_cmd_runner,\n version='default',\n additional_dependencies=None,\n):\n assert repo_cmd_runner.exists('setup.py')\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n\n # Install a virtualenv\n with clean_path_on_failure(repo_cmd_runner.path(directory)):\n venv_cmd = [\n sys.executable, '-m', 'virtualenv',\n '{{prefix}}{0}'.format(directory)\n ]\n if version != 'default':\n venv_cmd.extend(['-p', norm_version(version)])\n repo_cmd_runner.run(venv_cmd)\n with in_env(repo_cmd_runner, version) as env:\n env.run(\"cd '{prefix}' && pip install .\")\n if additional_dependencies:\n env.run(\n \"cd '{prefix}' && pip install \" +\n ' '.join(\n shell_escape(dep) for dep in additional_dependencies\n )\n )\n\n\ndef run_hook(repo_cmd_runner, hook, file_args):\n with in_env(repo_cmd_runner, hook['language_version']) as env:\n return helpers.run_hook(env, hook, file_args)\n", "path": "pre_commit/languages/python.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport os\nimport subprocess\nimport sys\n\nfrom pre_commit import color\nfrom pre_commit import five\n\n\n# TODO: smell: import side-effects\ntry:\n if not os.environ.get('TERM'): # pragma: no cover (dumb terminal)\n raise OSError('Cannot determine width without TERM')\n else: # pragma no cover (windows)\n COLS = int(\n subprocess.Popen(\n ('tput', 'cols'), stdout=subprocess.PIPE,\n ).communicate()[0] or\n # Default in the case of no terminal\n 80\n )\nexcept OSError: # pragma: no cover (windows)\n COLS = 80\n\n\ndef get_hook_message(\n start,\n postfix='',\n end_msg=None,\n end_len=0,\n end_color=None,\n use_color=None,\n cols=COLS,\n):\n \"\"\"Prints a message for running a hook.\n\n This currently supports three approaches:\n\n # Print `start` followed by dots, leaving 6 characters at the end\n >>> print_hook_message('start', end_len=6)\n start...............................................................\n\n # Print `start` followed by dots with the end message colored if coloring\n # is specified and a newline afterwards\n >>> print_hook_message(\n 'start',\n end_msg='end',\n end_color=color.RED,\n use_color=True,\n )\n start...................................................................end\n\n # Print `start` followed by dots, followed by the `postfix` message\n # uncolored, followed by the `end_msg` colored if specified and a newline\n # afterwards\n >>> print_hook_message(\n 'start',\n postfix='postfix ',\n end_msg='end',\n end_color=color.RED,\n use_color=True,\n )\n start...........................................................postfix end\n \"\"\"\n if bool(end_msg) == bool(end_len):\n raise ValueError('Expected one of (`end_msg`, `end_len`)')\n if end_msg is not None and (end_color is None or use_color is None):\n raise ValueError(\n '`end_color` and `use_color` 
are required with `end_msg`'\n )\n\n if end_len:\n return start + '.' * (cols - len(start) - end_len - 1)\n else:\n return '{0}{1}{2}{3}\\n'.format(\n start,\n '.' * (cols - len(start) - len(postfix) - len(end_msg) - 1),\n postfix,\n color.format_color(end_msg, end_color, use_color),\n )\n\n\nstdout_byte_stream = getattr(sys.stdout, 'buffer', sys.stdout)\n\n\ndef sys_stdout_write_wrapper(s, stream=stdout_byte_stream):\n stream.write(five.to_bytes(s))\n", "path": "pre_commit/output.py"}, {"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport distutils.spawn\nimport os\nimport sys\n\nimport virtualenv\n\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import shell_escape\n\n\nENVIRONMENT_DIR = 'py_env'\n\n\nclass PythonEnv(helpers.Environment):\n @property\n def env_prefix(self):\n return \". '{{prefix}}{0}activate' &&\".format(\n virtualenv.path_locations(\n helpers.environment_dir(ENVIRONMENT_DIR, self.language_version)\n )[-1].rstrip(os.sep) + os.sep,\n )\n\n\[email protected]\ndef in_env(repo_cmd_runner, language_version):\n yield PythonEnv(repo_cmd_runner, language_version)\n\n\ndef norm_version(version):\n if os.name == 'nt': # pragma: no cover (windows)\n # Try looking up by name\n if distutils.spawn.find_executable(version):\n return version\n\n # If it is in the form pythonx.x search in the default\n # place on windows\n if version.startswith('python'):\n return r'C:\\{0}\\python.exe'.format(version.replace('.', ''))\n\n # Otherwise assume it is a path\n return os.path.expanduser(version)\n\n\ndef install_environment(\n repo_cmd_runner,\n version='default',\n additional_dependencies=None,\n):\n assert repo_cmd_runner.exists('setup.py')\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n\n # Install a virtualenv\n with clean_path_on_failure(repo_cmd_runner.path(directory)):\n venv_cmd = [\n sys.executable, '-m', 'virtualenv',\n '{{prefix}}{0}'.format(directory)\n ]\n if version != 'default':\n venv_cmd.extend(['-p', norm_version(version)])\n repo_cmd_runner.run(venv_cmd)\n with in_env(repo_cmd_runner, version) as env:\n env.run(\"cd '{prefix}' && pip install .\")\n if additional_dependencies:\n env.run(\n \"cd '{prefix}' && pip install \" +\n ' '.join(\n shell_escape(dep) for dep in additional_dependencies\n )\n )\n\n\ndef run_hook(repo_cmd_runner, hook, file_args):\n with in_env(repo_cmd_runner, hook['language_version']) as env:\n return helpers.run_hook(env, hook, file_args)\n", "path": "pre_commit/languages/python.py"}]}
| 1,913 | 477 |
gh_patches_debug_18579
|
rasdani/github-patches
|
git_diff
|
falconry__falcon-62
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove responder exception handling
Can hide problems, encourage bad coding practices.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `falcon/api.py`
Content:
```
1 """Defines the API class.
2
3 Copyright 2013 by Rackspace Hosting, Inc.
4
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
8
9 http://www.apache.org/licenses/LICENSE-2.0
10
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
16
17 """
18
19 import traceback
20
21 from .request import Request
22 from .response import Response
23 from . import responders
24 from .status_codes import *
25 from .api_helpers import *
26
27 from .http_error import HTTPError
28
29
30 class API(object):
31 """Provides routing and such for building a web service application
32
33 This class is the main entry point into a Falcon-based app. It provides a
34 callable WSGI interface and a simple routing engine based on URI templates.
35
36 """
37
38 __slots__ = ('_routes')
39
40 def __init__(self):
41 """Initialize default values"""
42 self._routes = []
43
44 def __call__(self, env, start_response):
45 """WSGI "app" method
46
47 Makes instances of API callable by any WSGI server. See also PEP 333.
48
49 Args:
50 env: A WSGI environment dictionary
51 start_response: A WSGI helper method for setting status and headers
52 on a response.
53
54 """
55
56 req = Request(env)
57 resp = Response()
58
59 responder, params = self._get_responder(req.path, req.method)
60
61 try:
62 responder(req, resp, **params)
63
64 except HTTPError as ex:
65 resp.status = ex.status
66 if ex.headers is not None:
67 resp.set_headers(ex.headers)
68
69 if req.client_accepts_json():
70 resp.body = ex.json()
71
72 except Exception as ex:
73 # Reset to a known state and respond with a generic error
74 req = Request(env)
75 resp = Response()
76
77 message = ['Responder raised ', ex.__class__.__name__]
78
79 details = str(ex)
80 if details:
81 message.append(': ')
82 message.append(details)
83
84 stack = traceback.format_exc()
85 message.append('\n')
86 message.append(stack)
87
88 req.log_error(''.join(message))
89 responders.server_error(req, resp)
90
91 #
92 # Set status and headers
93 #
94 use_body = not should_ignore_body(resp.status, req.method)
95 if use_body:
96 set_content_length(resp)
97
98 start_response(resp.status, resp._wsgi_headers())
99
100 # Return an iterable for the body, per the WSGI spec
101 if use_body:
102 return prepare_wsgi_content(resp)
103
104 # Default: return an empty body
105 return []
106
107 def add_route(self, uri_template, resource):
108 """Associate a URI path with a resource
109
110 Args:
111 uri_template: Relative URI template. Currently only Level 1
112 templates are supported. See also RFC 6570.
113 resource: Object which represents an HTTP/REST "resource". Falcon
114 will pass "GET" requests to on_get, "PUT" requests to on_put,
115 etc. If any HTTP methods are not supported by your resource,
116 simply don't define the corresponding request handlers, and
117 Falcon will do the right thing.
118
119 """
120
121 if not uri_template:
122 uri_template = '/'
123
124 path_template = compile_uri_template(uri_template)
125 method_map = create_http_method_map(resource)
126
127 # Insert at the head of the list in case we get duplicate
128 # adds (will cause the last one to win).
129 self._routes.insert(0, (path_template, method_map))
130
131 def _get_responder(self, path, method):
132 """Searches routes for a matching responder
133
134 Args:
135 path: URI path to search (without query stirng)
136 method: HTTP method (uppercase) requested
137 Returns:
138 A 2-member tuple, containing a responder callable and a dict
139 containing parsed path fields, if any were specified in
140 the matching route's URI template
141
142 """
143
144 for path_template, method_map in self._routes:
145 m = path_template.match(path)
146 if m:
147 params = m.groupdict()
148
149 try:
150 responder = method_map[method]
151 except KeyError:
152 responder = responders.bad_request
153
154 break
155 else:
156 responder = responders.path_not_found
157 params = {}
158
159 return (responder, params)
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/falcon/api.py b/falcon/api.py
--- a/falcon/api.py
+++ b/falcon/api.py
@@ -16,8 +16,6 @@
"""
-import traceback
-
from .request import Request
from .response import Response
from . import responders
@@ -69,25 +67,6 @@
if req.client_accepts_json():
resp.body = ex.json()
- except Exception as ex:
- # Reset to a known state and respond with a generic error
- req = Request(env)
- resp = Response()
-
- message = ['Responder raised ', ex.__class__.__name__]
-
- details = str(ex)
- if details:
- message.append(': ')
- message.append(details)
-
- stack = traceback.format_exc()
- message.append('\n')
- message.append(stack)
-
- req.log_error(''.join(message))
- responders.server_error(req, resp)
-
#
# Set status and headers
#
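
With the catch-all handler removed, `HTTPError` is the only exception the framework still converts into a response; anything else propagates to the WSGI server, so responder bugs surface directly instead of being logged and masked as a generic error. The try/except that remains in `API.__call__` after the patch reduces to this excerpt:

```python
try:
    responder(req, resp, **params)
except HTTPError as ex:
    # Framework-level errors still become proper HTTP responses.
    resp.status = ex.status
    if ex.headers is not None:
        resp.set_headers(ex.headers)
    if req.client_accepts_json():
        resp.body = ex.json()
# Any other exception is no longer swallowed here; it reaches the
# WSGI server or test client unchanged.
```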
|
{"golden_diff": "diff --git a/falcon/api.py b/falcon/api.py\n--- a/falcon/api.py\n+++ b/falcon/api.py\n@@ -16,8 +16,6 @@\n \n \"\"\"\n \n-import traceback\n-\n from .request import Request\n from .response import Response\n from . import responders\n@@ -69,25 +67,6 @@\n if req.client_accepts_json():\n resp.body = ex.json()\n \n- except Exception as ex:\n- # Reset to a known state and respond with a generic error\n- req = Request(env)\n- resp = Response()\n-\n- message = ['Responder raised ', ex.__class__.__name__]\n-\n- details = str(ex)\n- if details:\n- message.append(': ')\n- message.append(details)\n-\n- stack = traceback.format_exc()\n- message.append('\\n')\n- message.append(stack)\n-\n- req.log_error(''.join(message))\n- responders.server_error(req, resp)\n-\n #\n # Set status and headers\n #\n", "issue": "Remove responder exception handling\nCan hide problems, encourage bad coding practices.\n\n", "before_files": [{"content": "\"\"\"Defines the API class.\n\nCopyright 2013 by Rackspace Hosting, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\"\"\"\n\nimport traceback\n\nfrom .request import Request\nfrom .response import Response\nfrom . import responders\nfrom .status_codes import *\nfrom .api_helpers import *\n\nfrom .http_error import HTTPError\n\n\nclass API(object):\n \"\"\"Provides routing and such for building a web service application\n\n This class is the main entry point into a Falcon-based app. It provides a\n callable WSGI interface and a simple routing engine based on URI templates.\n\n \"\"\"\n\n __slots__ = ('_routes')\n\n def __init__(self):\n \"\"\"Initialize default values\"\"\"\n self._routes = []\n\n def __call__(self, env, start_response):\n \"\"\"WSGI \"app\" method\n\n Makes instances of API callable by any WSGI server. 
See also PEP 333.\n\n Args:\n env: A WSGI environment dictionary\n start_response: A WSGI helper method for setting status and headers\n on a response.\n\n \"\"\"\n\n req = Request(env)\n resp = Response()\n\n responder, params = self._get_responder(req.path, req.method)\n\n try:\n responder(req, resp, **params)\n\n except HTTPError as ex:\n resp.status = ex.status\n if ex.headers is not None:\n resp.set_headers(ex.headers)\n\n if req.client_accepts_json():\n resp.body = ex.json()\n\n except Exception as ex:\n # Reset to a known state and respond with a generic error\n req = Request(env)\n resp = Response()\n\n message = ['Responder raised ', ex.__class__.__name__]\n\n details = str(ex)\n if details:\n message.append(': ')\n message.append(details)\n\n stack = traceback.format_exc()\n message.append('\\n')\n message.append(stack)\n\n req.log_error(''.join(message))\n responders.server_error(req, resp)\n\n #\n # Set status and headers\n #\n use_body = not should_ignore_body(resp.status, req.method)\n if use_body:\n set_content_length(resp)\n\n start_response(resp.status, resp._wsgi_headers())\n\n # Return an iterable for the body, per the WSGI spec\n if use_body:\n return prepare_wsgi_content(resp)\n\n # Default: return an empty body\n return []\n\n def add_route(self, uri_template, resource):\n \"\"\"Associate a URI path with a resource\n\n Args:\n uri_template: Relative URI template. Currently only Level 1\n templates are supported. See also RFC 6570.\n resource: Object which represents an HTTP/REST \"resource\". Falcon\n will pass \"GET\" requests to on_get, \"PUT\" requests to on_put,\n etc. If any HTTP methods are not supported by your resource,\n simply don't define the corresponding request handlers, and\n Falcon will do the right thing.\n\n \"\"\"\n\n if not uri_template:\n uri_template = '/'\n\n path_template = compile_uri_template(uri_template)\n method_map = create_http_method_map(resource)\n\n # Insert at the head of the list in case we get duplicate\n # adds (will cause the last one to win).\n self._routes.insert(0, (path_template, method_map))\n\n def _get_responder(self, path, method):\n \"\"\"Searches routes for a matching responder\n\n Args:\n path: URI path to search (without query stirng)\n method: HTTP method (uppercase) requested\n Returns:\n A 2-member tuple, containing a responder callable and a dict\n containing parsed path fields, if any were specified in\n the matching route's URI template\n\n \"\"\"\n\n for path_template, method_map in self._routes:\n m = path_template.match(path)\n if m:\n params = m.groupdict()\n\n try:\n responder = method_map[method]\n except KeyError:\n responder = responders.bad_request\n\n break\n else:\n responder = responders.path_not_found\n params = {}\n\n return (responder, params)\n", "path": "falcon/api.py"}], "after_files": [{"content": "\"\"\"Defines the API class.\n\nCopyright 2013 by Rackspace Hosting, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\"\"\"\n\nfrom .request import Request\nfrom .response import Response\nfrom . 
import responders\nfrom .status_codes import *\nfrom .api_helpers import *\n\nfrom .http_error import HTTPError\n\n\nclass API(object):\n \"\"\"Provides routing and such for building a web service application\n\n This class is the main entry point into a Falcon-based app. It provides a\n callable WSGI interface and a simple routing engine based on URI templates.\n\n \"\"\"\n\n __slots__ = ('_routes')\n\n def __init__(self):\n \"\"\"Initialize default values\"\"\"\n self._routes = []\n\n def __call__(self, env, start_response):\n \"\"\"WSGI \"app\" method\n\n Makes instances of API callable by any WSGI server. See also PEP 333.\n\n Args:\n env: A WSGI environment dictionary\n start_response: A WSGI helper method for setting status and headers\n on a response.\n\n \"\"\"\n\n req = Request(env)\n resp = Response()\n\n responder, params = self._get_responder(req.path, req.method)\n\n try:\n responder(req, resp, **params)\n\n except HTTPError as ex:\n resp.status = ex.status\n if ex.headers is not None:\n resp.set_headers(ex.headers)\n\n if req.client_accepts_json():\n resp.body = ex.json()\n\n #\n # Set status and headers\n #\n use_body = not should_ignore_body(resp.status, req.method)\n if use_body:\n set_content_length(resp)\n\n start_response(resp.status, resp._wsgi_headers())\n\n # Return an iterable for the body, per the WSGI spec\n if use_body:\n return prepare_wsgi_content(resp)\n\n # Default: return an empty body\n return []\n\n def add_route(self, uri_template, resource):\n \"\"\"Associate a URI path with a resource\n\n Args:\n uri_template: Relative URI template. Currently only Level 1\n templates are supported. See also RFC 6570.\n resource: Object which represents an HTTP/REST \"resource\". Falcon\n will pass \"GET\" requests to on_get, \"PUT\" requests to on_put,\n etc. If any HTTP methods are not supported by your resource,\n simply don't define the corresponding request handlers, and\n Falcon will do the right thing.\n\n \"\"\"\n\n if not uri_template:\n uri_template = '/'\n\n path_template = compile_uri_template(uri_template)\n method_map = create_http_method_map(resource)\n\n # Insert at the head of the list in case we get duplicate\n # adds (will cause the last one to win).\n self._routes.insert(0, (path_template, method_map))\n\n def _get_responder(self, path, method):\n \"\"\"Searches routes for a matching responder\n\n Args:\n path: URI path to search (without query stirng)\n method: HTTP method (uppercase) requested\n Returns:\n A 2-member tuple, containing a responder callable and a dict\n containing parsed path fields, if any were specified in\n the matching route's URI template\n\n \"\"\"\n\n for path_template, method_map in self._routes:\n m = path_template.match(path)\n if m:\n params = m.groupdict()\n\n try:\n responder = method_map[method]\n except KeyError:\n responder = responders.bad_request\n\n break\n else:\n responder = responders.path_not_found\n params = {}\n\n return (responder, params)\n", "path": "falcon/api.py"}]}
| 1,651 | 223 |
gh_patches_debug_8483
|
rasdani/github-patches
|
git_diff
|
PaddlePaddle__PaddleSpeech-2171
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error when switching to English speech synthesis: get_input_ids() got an unexpected keyword argument 'get_tone_ids'
To switch to English speech synthesis, I changed the acoustic model and vocoder under tts_python in the config file /paddlespeech/server/conf/application.yaml (acoustic model: fastspeech2_ljspeech, vocoder: pwgan_ljspeech) and set lang to en, but the request fails with: get_input_ids() got an unexpected keyword argument 'get_tone_ids'
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `paddlespeech/server/engine/engine_warmup.py`
Content:
```
1 # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import time
15
16 from paddlespeech.cli.log import logger
17 from paddlespeech.server.engine.engine_pool import get_engine_pool
18
19
20 def warm_up(engine_and_type: str, warm_up_time: int=3) -> bool:
21 engine_pool = get_engine_pool()
22
23 if "tts" in engine_and_type:
24 tts_engine = engine_pool['tts']
25 flag_online = False
26 if tts_engine.lang == 'zh':
27 sentence = "您好,欢迎使用语音合成服务。"
28 elif tts_engine.lang == 'en':
29 sentence = "Hello and welcome to the speech synthesis service."
30 else:
31 logger.error("tts engine only support lang: zh or en.")
32 sys.exit(-1)
33
34 if engine_and_type == "tts_python":
35 from paddlespeech.server.engine.tts.python.tts_engine import PaddleTTSConnectionHandler
36 elif engine_and_type == "tts_inference":
37 from paddlespeech.server.engine.tts.paddleinference.tts_engine import PaddleTTSConnectionHandler
38 elif engine_and_type == "tts_online":
39 from paddlespeech.server.engine.tts.online.python.tts_engine import PaddleTTSConnectionHandler
40 flag_online = True
41 elif engine_and_type == "tts_online-onnx":
42 from paddlespeech.server.engine.tts.online.onnx.tts_engine import PaddleTTSConnectionHandler
43 flag_online = True
44 else:
45 logger.error("Please check tte engine type.")
46
47 try:
48 logger.debug("Start to warm up tts engine.")
49 for i in range(warm_up_time):
50 connection_handler = PaddleTTSConnectionHandler(tts_engine)
51 if flag_online:
52 for wav in connection_handler.infer(
53 text=sentence,
54 lang=tts_engine.lang,
55 am=tts_engine.config.am):
56 logger.debug(
57 f"The first response time of the {i} warm up: {connection_handler.first_response_time} s"
58 )
59 break
60
61 else:
62 st = time.time()
63 connection_handler.infer(text=sentence)
64 et = time.time()
65 logger.debug(
66 f"The response time of the {i} warm up: {et - st} s")
67 except Exception as e:
68 logger.error("Failed to warm up on tts engine.")
69 logger.error(e)
70 return False
71
72 else:
73 pass
74
75 return True
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/paddlespeech/server/engine/engine_warmup.py b/paddlespeech/server/engine/engine_warmup.py
--- a/paddlespeech/server/engine/engine_warmup.py
+++ b/paddlespeech/server/engine/engine_warmup.py
@@ -60,7 +60,10 @@
else:
st = time.time()
- connection_handler.infer(text=sentence)
+ connection_handler.infer(
+ text=sentence,
+ lang=tts_engine.lang,
+ am=tts_engine.config.am)
et = time.time()
logger.debug(
f"The response time of the {i} warm up: {et - st} s")
|
{"golden_diff": "diff --git a/paddlespeech/server/engine/engine_warmup.py b/paddlespeech/server/engine/engine_warmup.py\n--- a/paddlespeech/server/engine/engine_warmup.py\n+++ b/paddlespeech/server/engine/engine_warmup.py\n@@ -60,7 +60,10 @@\n \n else:\n st = time.time()\n- connection_handler.infer(text=sentence)\n+ connection_handler.infer(\n+ text=sentence,\n+ lang=tts_engine.lang,\n+ am=tts_engine.config.am)\n et = time.time()\n logger.debug(\n f\"The response time of the {i} warm up: {et - st} s\")\n", "issue": "\u5207\u6362\u82f1\u6587\u8bed\u97f3\u5408\u6210\u62a5\u9519 get_input_ids() got an unexpected keyword argument 'get_tone_ids'\n\u8981\u5207\u6362\u6210\u82f1\u6587\u8bed\u97f3\u5408\u6210\u65f6\uff0c\u66f4\u6539\u4e86/paddlespeech/server/conf/application.yaml\u8fd9\u4e2a\u914d\u7f6e\u6587\u4ef6\u4e2d\u7684tts_python\u91cc\u9762\u7684\u58f0\u5b66\u6a21\u578b\u548c\u58f0\u7801\u5668\uff0c\u58f0\u5b66\u6a21\u578b\u7528\u7684\u662ffastspeech2_ljspeech\uff0c\u58f0\u7801\u5668\u7528\u7684pwgan_ljspeech\uff0c\u5e76\u4e14lang\u6539\u4e3aen\uff0c\u4f46\u662f\u62a5\u9519 get_input_ids() got an unexpected keyword argument 'get_tone_ids'\n", "before_files": [{"content": "# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport time\n\nfrom paddlespeech.cli.log import logger\nfrom paddlespeech.server.engine.engine_pool import get_engine_pool\n\n\ndef warm_up(engine_and_type: str, warm_up_time: int=3) -> bool:\n engine_pool = get_engine_pool()\n\n if \"tts\" in engine_and_type:\n tts_engine = engine_pool['tts']\n flag_online = False\n if tts_engine.lang == 'zh':\n sentence = \"\u60a8\u597d\uff0c\u6b22\u8fce\u4f7f\u7528\u8bed\u97f3\u5408\u6210\u670d\u52a1\u3002\"\n elif tts_engine.lang == 'en':\n sentence = \"Hello and welcome to the speech synthesis service.\"\n else:\n logger.error(\"tts engine only support lang: zh or en.\")\n sys.exit(-1)\n\n if engine_and_type == \"tts_python\":\n from paddlespeech.server.engine.tts.python.tts_engine import PaddleTTSConnectionHandler\n elif engine_and_type == \"tts_inference\":\n from paddlespeech.server.engine.tts.paddleinference.tts_engine import PaddleTTSConnectionHandler\n elif engine_and_type == \"tts_online\":\n from paddlespeech.server.engine.tts.online.python.tts_engine import PaddleTTSConnectionHandler\n flag_online = True\n elif engine_and_type == \"tts_online-onnx\":\n from paddlespeech.server.engine.tts.online.onnx.tts_engine import PaddleTTSConnectionHandler\n flag_online = True\n else:\n logger.error(\"Please check tte engine type.\")\n\n try:\n logger.debug(\"Start to warm up tts engine.\")\n for i in range(warm_up_time):\n connection_handler = PaddleTTSConnectionHandler(tts_engine)\n if flag_online:\n for wav in connection_handler.infer(\n text=sentence,\n lang=tts_engine.lang,\n am=tts_engine.config.am):\n logger.debug(\n f\"The first response time of the {i} warm up: {connection_handler.first_response_time} s\"\n )\n break\n\n else:\n st = time.time()\n 
connection_handler.infer(text=sentence)\n et = time.time()\n logger.debug(\n f\"The response time of the {i} warm up: {et - st} s\")\n except Exception as e:\n logger.error(\"Failed to warm up on tts engine.\")\n logger.error(e)\n return False\n\n else:\n pass\n\n return True\n", "path": "paddlespeech/server/engine/engine_warmup.py"}], "after_files": [{"content": "# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport time\n\nfrom paddlespeech.cli.log import logger\nfrom paddlespeech.server.engine.engine_pool import get_engine_pool\n\n\ndef warm_up(engine_and_type: str, warm_up_time: int=3) -> bool:\n engine_pool = get_engine_pool()\n\n if \"tts\" in engine_and_type:\n tts_engine = engine_pool['tts']\n flag_online = False\n if tts_engine.lang == 'zh':\n sentence = \"\u60a8\u597d\uff0c\u6b22\u8fce\u4f7f\u7528\u8bed\u97f3\u5408\u6210\u670d\u52a1\u3002\"\n elif tts_engine.lang == 'en':\n sentence = \"Hello and welcome to the speech synthesis service.\"\n else:\n logger.error(\"tts engine only support lang: zh or en.\")\n sys.exit(-1)\n\n if engine_and_type == \"tts_python\":\n from paddlespeech.server.engine.tts.python.tts_engine import PaddleTTSConnectionHandler\n elif engine_and_type == \"tts_inference\":\n from paddlespeech.server.engine.tts.paddleinference.tts_engine import PaddleTTSConnectionHandler\n elif engine_and_type == \"tts_online\":\n from paddlespeech.server.engine.tts.online.python.tts_engine import PaddleTTSConnectionHandler\n flag_online = True\n elif engine_and_type == \"tts_online-onnx\":\n from paddlespeech.server.engine.tts.online.onnx.tts_engine import PaddleTTSConnectionHandler\n flag_online = True\n else:\n logger.error(\"Please check tte engine type.\")\n\n try:\n logger.debug(\"Start to warm up tts engine.\")\n for i in range(warm_up_time):\n connection_handler = PaddleTTSConnectionHandler(tts_engine)\n if flag_online:\n for wav in connection_handler.infer(\n text=sentence,\n lang=tts_engine.lang,\n am=tts_engine.config.am):\n logger.debug(\n f\"The first response time of the {i} warm up: {connection_handler.first_response_time} s\"\n )\n break\n\n else:\n st = time.time()\n connection_handler.infer(\n text=sentence,\n lang=tts_engine.lang,\n am=tts_engine.config.am)\n et = time.time()\n logger.debug(\n f\"The response time of the {i} warm up: {et - st} s\")\n except Exception as e:\n logger.error(\"Failed to warm up on tts engine.\")\n logger.error(e)\n return False\n\n else:\n pass\n\n return True\n", "path": "paddlespeech/server/engine/engine_warmup.py"}]}
| 1,158 | 149 |
gh_patches_debug_35491
|
rasdani/github-patches
|
git_diff
|
aws__aws-cli-4874
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Proposal: aws ecr get-login-password
This is a proposal for a new AWS CLI command for ECR
```
$ aws ecr get-login-password
cGFzc3dvcmQ=
```
This command can be used in the following ways:
```
$ aws ecr get-login-password | docker login --username AWS --password-stdin 111111111111.dkr.ecr.us-west-2.amazonaws.com
Login Succeeded
$ docker login --username AWS --password "$(aws ecr get-login-password)" 111111111111.dkr.ecr.us-west-2.amazonaws.com
Login Succeeded
```
This idea has been previously proposed by @theY4Kman https://github.com/aws/aws-cli/issues/2875#issuecomment-433565983 and @kojiromike https://github.com/aws/aws-cli/issues/3687#issue-374397564
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/customizations/ecr.py`
Content:
```
1 # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 from awscli.customizations.commands import BasicCommand
14 from awscli.customizations.utils import create_client_from_parsed_globals
15
16 from base64 import b64decode
17 import sys
18
19
20 def register_ecr_commands(cli):
21 cli.register('building-command-table.ecr', _inject_get_login)
22
23
24 def _inject_get_login(command_table, session, **kwargs):
25 command_table['get-login'] = ECRLogin(session)
26
27
28 class ECRLogin(BasicCommand):
29 """Log in with docker login"""
30 NAME = 'get-login'
31
32 DESCRIPTION = BasicCommand.FROM_FILE('ecr/get-login_description.rst')
33
34 ARG_TABLE = [
35 {
36 'name': 'registry-ids',
37 'help_text': 'A list of AWS account IDs that correspond to the '
38 'Amazon ECR registries that you want to log in to.',
39 'required': False,
40 'nargs': '+'
41 },
42 {
43 'name': 'include-email',
44 'action': 'store_true',
45 'group_name': 'include-email',
46 'dest': 'include_email',
47 'default': True,
48 'required': False,
49 'help_text': (
50 "Specify if the '-e' flag should be included in the "
51 "'docker login' command. The '-e' option has been deprecated "
52 "and is removed in docker version 17.06 and later. You must "
53 "specify --no-include-email if you're using docker version "
54 "17.06 or later. The default behavior is to include the "
55 "'-e' flag in the 'docker login' output."),
56 },
57 {
58 'name': 'no-include-email',
59 'help_text': 'Include email arg',
60 'action': 'store_false',
61 'default': True,
62 'group_name': 'include-email',
63 'dest': 'include_email',
64 'required': False,
65 },
66 ]
67
68 def _run_main(self, parsed_args, parsed_globals):
69 ecr_client = create_client_from_parsed_globals(
70 self._session, 'ecr', parsed_globals)
71 if not parsed_args.registry_ids:
72 result = ecr_client.get_authorization_token()
73 else:
74 result = ecr_client.get_authorization_token(
75 registryIds=parsed_args.registry_ids)
76 for auth in result['authorizationData']:
77 auth_token = b64decode(auth['authorizationToken']).decode()
78 username, password = auth_token.split(':')
79 command = ['docker', 'login', '-u', username, '-p', password]
80 if parsed_args.include_email:
81 command.extend(['-e', 'none'])
82 command.append(auth['proxyEndpoint'])
83 sys.stdout.write(' '.join(command))
84 sys.stdout.write('\n')
85 return 0
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/awscli/customizations/ecr.py b/awscli/customizations/ecr.py
--- a/awscli/customizations/ecr.py
+++ b/awscli/customizations/ecr.py
@@ -18,15 +18,16 @@
def register_ecr_commands(cli):
- cli.register('building-command-table.ecr', _inject_get_login)
+ cli.register('building-command-table.ecr', _inject_commands)
-def _inject_get_login(command_table, session, **kwargs):
+def _inject_commands(command_table, session, **kwargs):
command_table['get-login'] = ECRLogin(session)
+ command_table['get-login-password'] = ECRGetLoginPassword(session)
class ECRLogin(BasicCommand):
- """Log in with docker login"""
+ """Log in with 'docker login'"""
NAME = 'get-login'
DESCRIPTION = BasicCommand.FROM_FILE('ecr/get-login_description.rst')
@@ -49,8 +50,8 @@
'help_text': (
"Specify if the '-e' flag should be included in the "
"'docker login' command. The '-e' option has been deprecated "
- "and is removed in docker version 17.06 and later. You must "
- "specify --no-include-email if you're using docker version "
+ "and is removed in Docker version 17.06 and later. You must "
+ "specify --no-include-email if you're using Docker version "
"17.06 or later. The default behavior is to include the "
"'-e' flag in the 'docker login' output."),
},
@@ -83,3 +84,24 @@
sys.stdout.write(' '.join(command))
sys.stdout.write('\n')
return 0
+
+
+class ECRGetLoginPassword(BasicCommand):
+ """Get a password to be used with container clients such as Docker"""
+ NAME = 'get-login-password'
+
+ DESCRIPTION = BasicCommand.FROM_FILE(
+ 'ecr/get-login-password_description.rst')
+
+ def _run_main(self, parsed_args, parsed_globals):
+ ecr_client = create_client_from_parsed_globals(
+ self._session,
+ 'ecr',
+ parsed_globals)
+ result = ecr_client.get_authorization_token()
+ auth = result['authorizationData'][0]
+ auth_token = b64decode(auth['authorizationToken']).decode()
+ _, password = auth_token.split(':')
+ sys.stdout.write(password)
+ sys.stdout.write('\n')
+ return 0
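
As a rough sketch of what the proposed `get-login-password` subcommand does internally, the snippet below reproduces the token-decode step by calling boto3 directly. Everything here is an illustration rather than the patched awscli code: the client construction assumes default credentials and region are configured, and the registry URI in the trailing comment is a placeholder.

```python
# Minimal sketch, assuming default AWS credentials/region are configured.
# Mirrors the b64decode/split logic in the patch; not the awscli implementation itself.
from base64 import b64decode

import boto3

ecr = boto3.client("ecr")
auth = ecr.get_authorization_token()["authorizationData"][0]
_, password = b64decode(auth["authorizationToken"]).decode().split(":")
print(password)  # e.g. pipe into: docker login --username AWS --password-stdin <registry-uri>
```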
|
{"golden_diff": "diff --git a/awscli/customizations/ecr.py b/awscli/customizations/ecr.py\n--- a/awscli/customizations/ecr.py\n+++ b/awscli/customizations/ecr.py\n@@ -18,15 +18,16 @@\n \n \n def register_ecr_commands(cli):\n- cli.register('building-command-table.ecr', _inject_get_login)\n+ cli.register('building-command-table.ecr', _inject_commands)\n \n \n-def _inject_get_login(command_table, session, **kwargs):\n+def _inject_commands(command_table, session, **kwargs):\n command_table['get-login'] = ECRLogin(session)\n+ command_table['get-login-password'] = ECRGetLoginPassword(session)\n \n \n class ECRLogin(BasicCommand):\n- \"\"\"Log in with docker login\"\"\"\n+ \"\"\"Log in with 'docker login'\"\"\"\n NAME = 'get-login'\n \n DESCRIPTION = BasicCommand.FROM_FILE('ecr/get-login_description.rst')\n@@ -49,8 +50,8 @@\n 'help_text': (\n \"Specify if the '-e' flag should be included in the \"\n \"'docker login' command. The '-e' option has been deprecated \"\n- \"and is removed in docker version 17.06 and later. You must \"\n- \"specify --no-include-email if you're using docker version \"\n+ \"and is removed in Docker version 17.06 and later. You must \"\n+ \"specify --no-include-email if you're using Docker version \"\n \"17.06 or later. The default behavior is to include the \"\n \"'-e' flag in the 'docker login' output.\"),\n },\n@@ -83,3 +84,24 @@\n sys.stdout.write(' '.join(command))\n sys.stdout.write('\\n')\n return 0\n+\n+\n+class ECRGetLoginPassword(BasicCommand):\n+ \"\"\"Get a password to be used with container clients such as Docker\"\"\"\n+ NAME = 'get-login-password'\n+\n+ DESCRIPTION = BasicCommand.FROM_FILE(\n+ 'ecr/get-login-password_description.rst')\n+\n+ def _run_main(self, parsed_args, parsed_globals):\n+ ecr_client = create_client_from_parsed_globals(\n+ self._session,\n+ 'ecr',\n+ parsed_globals)\n+ result = ecr_client.get_authorization_token()\n+ auth = result['authorizationData'][0]\n+ auth_token = b64decode(auth['authorizationToken']).decode()\n+ _, password = auth_token.split(':')\n+ sys.stdout.write(password)\n+ sys.stdout.write('\\n')\n+ return 0\n", "issue": "Proposal: aws ecr get-login-password\nThis is a proposal for a new AWS CLI command for ECR\r\n\r\n```\r\n$ aws ecr get-login-password\r\ncGFzc3dvcmQ=\r\n```\r\n\r\nThis command can be used in the following ways:\r\n\r\n```\r\n$ aws ecr get-login-password | docker login --username AWS --password-stdin 111111111111.dkr.ecr.us-west-2.amazonaws.com\r\nLogin Succeeded\r\n\r\n$ docker login --username AWS --password \"$(aws ecr get-login-password)\" 111111111111.dkr.ecr.us-west-2.amazonaws.com\r\nLogin Succeeded\r\n```\r\n\r\nThis idea has been previously proposed by @theY4Kman https://github.com/aws/aws-cli/issues/2875#issuecomment-433565983 and @kojiromike https://github.com/aws/aws-cli/issues/3687#issue-374397564\n", "before_files": [{"content": "# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.utils import create_client_from_parsed_globals\n\nfrom base64 import b64decode\nimport sys\n\n\ndef register_ecr_commands(cli):\n cli.register('building-command-table.ecr', _inject_get_login)\n\n\ndef _inject_get_login(command_table, session, **kwargs):\n command_table['get-login'] = ECRLogin(session)\n\n\nclass ECRLogin(BasicCommand):\n \"\"\"Log in with docker login\"\"\"\n NAME = 'get-login'\n\n DESCRIPTION = BasicCommand.FROM_FILE('ecr/get-login_description.rst')\n\n ARG_TABLE = [\n {\n 'name': 'registry-ids',\n 'help_text': 'A list of AWS account IDs that correspond to the '\n 'Amazon ECR registries that you want to log in to.',\n 'required': False,\n 'nargs': '+'\n },\n {\n 'name': 'include-email',\n 'action': 'store_true',\n 'group_name': 'include-email',\n 'dest': 'include_email',\n 'default': True,\n 'required': False,\n 'help_text': (\n \"Specify if the '-e' flag should be included in the \"\n \"'docker login' command. The '-e' option has been deprecated \"\n \"and is removed in docker version 17.06 and later. You must \"\n \"specify --no-include-email if you're using docker version \"\n \"17.06 or later. The default behavior is to include the \"\n \"'-e' flag in the 'docker login' output.\"),\n },\n {\n 'name': 'no-include-email',\n 'help_text': 'Include email arg',\n 'action': 'store_false',\n 'default': True,\n 'group_name': 'include-email',\n 'dest': 'include_email',\n 'required': False,\n },\n ]\n\n def _run_main(self, parsed_args, parsed_globals):\n ecr_client = create_client_from_parsed_globals(\n self._session, 'ecr', parsed_globals)\n if not parsed_args.registry_ids:\n result = ecr_client.get_authorization_token()\n else:\n result = ecr_client.get_authorization_token(\n registryIds=parsed_args.registry_ids)\n for auth in result['authorizationData']:\n auth_token = b64decode(auth['authorizationToken']).decode()\n username, password = auth_token.split(':')\n command = ['docker', 'login', '-u', username, '-p', password]\n if parsed_args.include_email:\n command.extend(['-e', 'none'])\n command.append(auth['proxyEndpoint'])\n sys.stdout.write(' '.join(command))\n sys.stdout.write('\\n')\n return 0\n", "path": "awscli/customizations/ecr.py"}], "after_files": [{"content": "# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.utils import create_client_from_parsed_globals\n\nfrom base64 import b64decode\nimport sys\n\n\ndef register_ecr_commands(cli):\n cli.register('building-command-table.ecr', _inject_commands)\n\n\ndef _inject_commands(command_table, session, **kwargs):\n command_table['get-login'] = ECRLogin(session)\n command_table['get-login-password'] = ECRGetLoginPassword(session)\n\n\nclass ECRLogin(BasicCommand):\n \"\"\"Log in with 'docker login'\"\"\"\n NAME = 'get-login'\n\n DESCRIPTION = BasicCommand.FROM_FILE('ecr/get-login_description.rst')\n\n ARG_TABLE = [\n {\n 'name': 'registry-ids',\n 'help_text': 'A list of AWS account IDs that correspond to the '\n 'Amazon ECR registries that you want to log in to.',\n 'required': False,\n 'nargs': '+'\n },\n {\n 'name': 'include-email',\n 'action': 'store_true',\n 'group_name': 'include-email',\n 'dest': 'include_email',\n 'default': True,\n 'required': False,\n 'help_text': (\n \"Specify if the '-e' flag should be included in the \"\n \"'docker login' command. The '-e' option has been deprecated \"\n \"and is removed in Docker version 17.06 and later. You must \"\n \"specify --no-include-email if you're using Docker version \"\n \"17.06 or later. The default behavior is to include the \"\n \"'-e' flag in the 'docker login' output.\"),\n },\n {\n 'name': 'no-include-email',\n 'help_text': 'Include email arg',\n 'action': 'store_false',\n 'default': True,\n 'group_name': 'include-email',\n 'dest': 'include_email',\n 'required': False,\n },\n ]\n\n def _run_main(self, parsed_args, parsed_globals):\n ecr_client = create_client_from_parsed_globals(\n self._session, 'ecr', parsed_globals)\n if not parsed_args.registry_ids:\n result = ecr_client.get_authorization_token()\n else:\n result = ecr_client.get_authorization_token(\n registryIds=parsed_args.registry_ids)\n for auth in result['authorizationData']:\n auth_token = b64decode(auth['authorizationToken']).decode()\n username, password = auth_token.split(':')\n command = ['docker', 'login', '-u', username, '-p', password]\n if parsed_args.include_email:\n command.extend(['-e', 'none'])\n command.append(auth['proxyEndpoint'])\n sys.stdout.write(' '.join(command))\n sys.stdout.write('\\n')\n return 0\n\n\nclass ECRGetLoginPassword(BasicCommand):\n \"\"\"Get a password to be used with container clients such as Docker\"\"\"\n NAME = 'get-login-password'\n\n DESCRIPTION = BasicCommand.FROM_FILE(\n 'ecr/get-login-password_description.rst')\n\n def _run_main(self, parsed_args, parsed_globals):\n ecr_client = create_client_from_parsed_globals(\n self._session,\n 'ecr',\n parsed_globals)\n result = ecr_client.get_authorization_token()\n auth = result['authorizationData'][0]\n auth_token = b64decode(auth['authorizationToken']).decode()\n _, password = auth_token.split(':')\n sys.stdout.write(password)\n sys.stdout.write('\\n')\n return 0\n", "path": "awscli/customizations/ecr.py"}]}
| 1,372 | 576 |
gh_patches_debug_13272
|
rasdani/github-patches
|
git_diff
|
arviz-devs__arviz-1133
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add `Matplotlib` framework classifier to `setup.py`
`Matplotlib` now has a [trove classifier on pypi](https://twitter.com/matplotlib/status/1235216347925286913). We can add:
```python
classifiers = [
'Framework :: Matplotlib',
]
```
to `arviz`'s `setup.py` to acknowledge that it is part of `Matplotlib` ecosystem.
I believe that `arviz` currently doesn't have any classifiers ([there are many!](https://pypi.org/classifiers/)). We could add something like the following to `setup.py`:
```python
classifiers = [
'Framework :: Matplotlib',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License'
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Visualization',
]
```
I'm not sure if you would say if `arviz` is:
```
'Development Status :: 5 - Production/Stable',
```
or
```
'Development Status :: 4 - Beta',
```
There may be thoughts on other classifiers to add, but I can quickly put together a PR for this
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import codecs
2 import os
3 import re
4
5 import setuptools
6 from setuptools import setup, find_packages
7 from setuptools.command.install import install
8 from setuptools.command.develop import develop
9
10
11 PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
12 REQUIREMENTS_FILE = os.path.join(PROJECT_ROOT, "requirements.txt")
13 REQUIREMENTS_OPTIONAL_FILE = os.path.join(PROJECT_ROOT, "requirements-optional.txt")
14 REQUIREMENTS_DEV_FILE = os.path.join(PROJECT_ROOT, "requirements-dev.txt")
15 README_FILE = os.path.join(PROJECT_ROOT, "README.md")
16 VERSION_FILE = os.path.join(PROJECT_ROOT, "arviz", "__init__.py")
17
18
19 def get_requirements():
20 with codecs.open(REQUIREMENTS_FILE) as buff:
21 return buff.read().splitlines()
22
23
24 def get_requirements_dev():
25 with codecs.open(REQUIREMENTS_DEV_FILE) as buff:
26 return buff.read().splitlines()
27
28
29 def get_requirements_optional():
30 with codecs.open(REQUIREMENTS_OPTIONAL_FILE) as buff:
31 return buff.read().splitlines()
32
33
34 def get_long_description():
35 with codecs.open(README_FILE, "rt") as buff:
36 return buff.read()
37
38
39 def get_version():
40 lines = open(VERSION_FILE, "rt").readlines()
41 version_regex = r"^__version__ = ['\"]([^'\"]*)['\"]"
42 for line in lines:
43 mo = re.search(version_regex, line, re.M)
44 if mo:
45 return mo.group(1)
46 raise RuntimeError("Unable to find version in %s." % (VERSION_FILE,))
47
48
49 setup(
50 name="arviz",
51 license="Apache-2.0",
52 version=get_version(),
53 description="Exploratory analysis of Bayesian models",
54 author="ArviZ Developers",
55 url="http://github.com/arviz-devs/arviz",
56 packages=find_packages(),
57 install_requires=get_requirements(),
58 extras_require=dict(all=get_requirements_optional()), # test=get_requirements_dev(),
59 long_description=get_long_description(),
60 long_description_content_type="text/markdown",
61 include_package_data=True,
62 )
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -59,4 +59,19 @@
long_description=get_long_description(),
long_description_content_type="text/markdown",
include_package_data=True,
+ classifiers=[
+ "Development Status :: 4 - Beta",
+ "Framework :: Matplotlib",
+ "Intended Audience :: Science/Research",
+ "Intended Audience :: Education",
+ "License :: OSI Approved :: Apache Software License",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Topic :: Scientific/Engineering",
+ "Topic :: Scientific/Engineering :: Visualization",
+ "Topic :: Scientific/Engineering :: Mathematics",
+ ],
)
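
To confirm that the classifiers actually end up in the published metadata, one can read them back from an installed distribution. The snippet below is a sanity-check sketch that assumes `arviz` is installed in the current environment; it uses only the standard-library `importlib.metadata` API (Python 3.8+).

```python
# Sketch: print the Trove classifiers recorded for an installed arviz.
from importlib.metadata import metadata

for classifier in metadata("arviz").get_all("Classifier") or []:
    print(classifier)
```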
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -59,4 +59,19 @@\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n+ classifiers=[\n+ \"Development Status :: 4 - Beta\",\n+ \"Framework :: Matplotlib\",\n+ \"Intended Audience :: Science/Research\",\n+ \"Intended Audience :: Education\",\n+ \"License :: OSI Approved :: Apache Software License\",\n+ \"Programming Language :: Python\",\n+ \"Programming Language :: Python :: 3\",\n+ \"Programming Language :: Python :: 3.6\",\n+ \"Programming Language :: Python :: 3.7\",\n+ \"Programming Language :: Python :: 3.8\",\n+ \"Topic :: Scientific/Engineering\",\n+ \"Topic :: Scientific/Engineering :: Visualization\",\n+ \"Topic :: Scientific/Engineering :: Mathematics\",\n+ ],\n )\n", "issue": "Add `Matplotlib` framework classifier to `setup.py`\n`Matplotlib` now has a [trove classifier on pypi](https://twitter.com/matplotlib/status/1235216347925286913). We can add:\r\n\r\n```python\r\nclassifiers = [\r\n 'Framework :: Matplotlib',\r\n ]\r\n```\r\nto `arviz`'s `setup.py` to acknowledge that it is part of `Matplotlib` ecosystem.\r\n\r\nI believe that `arviz` currently doesn't have any classifiers ([there are many!](https://pypi.org/classifiers/)). We could add something like the following to `setup.py`:\r\n\r\n```python\r\nclassifiers = [\r\n 'Framework :: Matplotlib',\r\n 'Intended Audience :: Science/Research',\r\n 'License :: OSI Approved :: Apache Software License'\r\n 'Programming Language :: Python',\r\n 'Programming Language :: Python :: 3',\r\n 'Programming Language :: Python :: 3.5',\r\n 'Programming Language :: Python :: 3.6',\r\n 'Programming Language :: Python :: 3.7',\r\n 'Topic :: Scientific/Engineering :: Visualization',\r\n ]\r\n```\r\n\r\nI'm not sure if you would say if `arviz` is:\r\n```\r\n'Development Status :: 5 - Production/Stable',\r\n```\r\nor\r\n```\r\n'Development Status :: 4 - Beta',\r\n```\r\n\r\nThere may be thoughts on other classifiers to add, but I can quickly put together a PR for this\n", "before_files": [{"content": "import codecs\nimport os\nimport re\n\nimport setuptools\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\n\n\nPROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))\nREQUIREMENTS_FILE = os.path.join(PROJECT_ROOT, \"requirements.txt\")\nREQUIREMENTS_OPTIONAL_FILE = os.path.join(PROJECT_ROOT, \"requirements-optional.txt\")\nREQUIREMENTS_DEV_FILE = os.path.join(PROJECT_ROOT, \"requirements-dev.txt\")\nREADME_FILE = os.path.join(PROJECT_ROOT, \"README.md\")\nVERSION_FILE = os.path.join(PROJECT_ROOT, \"arviz\", \"__init__.py\")\n\n\ndef get_requirements():\n with codecs.open(REQUIREMENTS_FILE) as buff:\n return buff.read().splitlines()\n\n\ndef get_requirements_dev():\n with codecs.open(REQUIREMENTS_DEV_FILE) as buff:\n return buff.read().splitlines()\n\n\ndef get_requirements_optional():\n with codecs.open(REQUIREMENTS_OPTIONAL_FILE) as buff:\n return buff.read().splitlines()\n\n\ndef get_long_description():\n with codecs.open(README_FILE, \"rt\") as buff:\n return buff.read()\n\n\ndef get_version():\n lines = open(VERSION_FILE, \"rt\").readlines()\n version_regex = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n for line in lines:\n mo = re.search(version_regex, line, re.M)\n if mo:\n return mo.group(1)\n raise RuntimeError(\"Unable to find version in %s.\" % (VERSION_FILE,))\n\n\nsetup(\n 
name=\"arviz\",\n license=\"Apache-2.0\",\n version=get_version(),\n description=\"Exploratory analysis of Bayesian models\",\n author=\"ArviZ Developers\",\n url=\"http://github.com/arviz-devs/arviz\",\n packages=find_packages(),\n install_requires=get_requirements(),\n extras_require=dict(all=get_requirements_optional()), # test=get_requirements_dev(),\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n)\n", "path": "setup.py"}], "after_files": [{"content": "import codecs\nimport os\nimport re\n\nimport setuptools\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\n\n\nPROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))\nREQUIREMENTS_FILE = os.path.join(PROJECT_ROOT, \"requirements.txt\")\nREQUIREMENTS_OPTIONAL_FILE = os.path.join(PROJECT_ROOT, \"requirements-optional.txt\")\nREQUIREMENTS_DEV_FILE = os.path.join(PROJECT_ROOT, \"requirements-dev.txt\")\nREADME_FILE = os.path.join(PROJECT_ROOT, \"README.md\")\nVERSION_FILE = os.path.join(PROJECT_ROOT, \"arviz\", \"__init__.py\")\n\n\ndef get_requirements():\n with codecs.open(REQUIREMENTS_FILE) as buff:\n return buff.read().splitlines()\n\n\ndef get_requirements_dev():\n with codecs.open(REQUIREMENTS_DEV_FILE) as buff:\n return buff.read().splitlines()\n\n\ndef get_requirements_optional():\n with codecs.open(REQUIREMENTS_OPTIONAL_FILE) as buff:\n return buff.read().splitlines()\n\n\ndef get_long_description():\n with codecs.open(README_FILE, \"rt\") as buff:\n return buff.read()\n\n\ndef get_version():\n lines = open(VERSION_FILE, \"rt\").readlines()\n version_regex = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n for line in lines:\n mo = re.search(version_regex, line, re.M)\n if mo:\n return mo.group(1)\n raise RuntimeError(\"Unable to find version in %s.\" % (VERSION_FILE,))\n\n\nsetup(\n name=\"arviz\",\n license=\"Apache-2.0\",\n version=get_version(),\n description=\"Exploratory analysis of Bayesian models\",\n author=\"ArviZ Developers\",\n url=\"http://github.com/arviz-devs/arviz\",\n packages=find_packages(),\n install_requires=get_requirements(),\n extras_require=dict(all=get_requirements_optional()), # test=get_requirements_dev(),\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Framework :: Matplotlib\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Education\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n ],\n)\n", "path": "setup.py"}]}
| 1,122 | 204 |
gh_patches_debug_30956
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-3602
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider superdrug is broken
During the global build at 2021-06-30-14-42-26, spider **superdrug** failed with **0 features** and **2 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/logs/superdrug.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/superdrug.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/superdrug.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/superdrug.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import json
3
4 import scrapy
5
6 from locations.items import GeojsonPointItem
7
8
9 class SuperdrugSpider(scrapy.Spider):
10 name = "superdrug"
11 item_attributes = {"brand": "Superdrug", "brand_wikidata": "Q7643261"}
12 allowed_domains = ["superdrug.com"]
13 download_delay = 0.5
14
15 start_urls = ["https://www.superdrug.com/stores/a-to-z"]
16
17 def parse(self, response):
18 urls = response.xpath('//a[@class="row store-link"]/@href').extract()
19
20 for url in urls:
21 yield scrapy.Request(response.urljoin(url), callback=self.parse_location)
22
23 def parse_location(self, response):
24 data = json.loads(
25 response.xpath(
26 '//script[@type="application/ld+json" and contains(text(), "streetAddress")]/text()'
27 ).extract_first()
28 )
29
30 properties = {
31 "name": data["name"],
32 "ref": data["name"],
33 "addr_full": data["address"]["streetAddress"],
34 "city": data["address"]["addressLocality"],
35 "state": data["address"]["addressRegion"],
36 "postcode": data["address"]["postalCode"],
37 "country": data["address"]["addressCountry"],
38 "phone": data.get("telephone"),
39 "website": response.url,
40 "lat": float(
41 response.xpath(
42 '//div[@class="store-locator store-locator__overview"]/@data-lat'
43 ).extract_first()
44 ),
45 "lon": float(
46 response.xpath(
47 '//div[@class="store-locator store-locator__overview"]/@data-lng'
48 ).extract_first()
49 ),
50 }
51 yield GeojsonPointItem(**properties)
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/superdrug.py b/locations/spiders/superdrug.py
--- a/locations/spiders/superdrug.py
+++ b/locations/spiders/superdrug.py
@@ -4,6 +4,7 @@
import scrapy
from locations.items import GeojsonPointItem
+from locations.hours import OpeningHours
class SuperdrugSpider(scrapy.Spider):
@@ -14,6 +15,10 @@
start_urls = ["https://www.superdrug.com/stores/a-to-z"]
+ custom_settings = {
+ "USER_AGENT": "Mozilla/5.0 (X11; Linux x86_64; rv:99.0) Gecko/20100101 Firefox/99.0"
+ }
+
def parse(self, response):
urls = response.xpath('//a[@class="row store-link"]/@href').extract()
@@ -28,9 +33,11 @@
)
properties = {
- "name": data["name"],
- "ref": data["name"],
- "addr_full": data["address"]["streetAddress"],
+ "name": data["name"].replace("Superdrug", "").strip(),
+ "ref": data["@id"],
+ "street_address": data["address"]["streetAddress"]
+ .replace("Superdrug", "")
+ .strip(),
"city": data["address"]["addressLocality"],
"state": data["address"]["addressRegion"],
"postcode": data["address"]["postalCode"],
@@ -48,4 +55,15 @@
).extract_first()
),
}
+
+ oh = OpeningHours()
+
+ for rule in data["OpeningHoursSpecification"]:
+ oh.add_range(
+ day=rule["dayOfWeek"][0:2],
+ open_time=rule["opens"],
+ close_time=rule["closes"],
+ time_format="%I:%M %p",
+ )
+
yield GeojsonPointItem(**properties)
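
For readers unfamiliar with the `OpeningHours` helper used in the patch, the fragment below shows the `add_range` call in isolation on a hand-written rule. The sample rule dictionary and its "9:00 AM" time strings are assumptions chosen to match the `time_format="%I:%M %p"` argument above; in the spider the real values come from the store page's JSON-LD `OpeningHoursSpecification` entries.

```python
# Illustrative sketch only; the rule below is made up to match the parser above.
from locations.hours import OpeningHours

rule = {"dayOfWeek": "Monday", "opens": "9:00 AM", "closes": "5:30 PM"}

oh = OpeningHours()
oh.add_range(
    day=rule["dayOfWeek"][0:2],   # "Mo"
    open_time=rule["opens"],
    close_time=rule["closes"],
    time_format="%I:%M %p",
)
```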
|
{"golden_diff": "diff --git a/locations/spiders/superdrug.py b/locations/spiders/superdrug.py\n--- a/locations/spiders/superdrug.py\n+++ b/locations/spiders/superdrug.py\n@@ -4,6 +4,7 @@\n import scrapy\n \n from locations.items import GeojsonPointItem\n+from locations.hours import OpeningHours\n \n \n class SuperdrugSpider(scrapy.Spider):\n@@ -14,6 +15,10 @@\n \n start_urls = [\"https://www.superdrug.com/stores/a-to-z\"]\n \n+ custom_settings = {\n+ \"USER_AGENT\": \"Mozilla/5.0 (X11; Linux x86_64; rv:99.0) Gecko/20100101 Firefox/99.0\"\n+ }\n+\n def parse(self, response):\n urls = response.xpath('//a[@class=\"row store-link\"]/@href').extract()\n \n@@ -28,9 +33,11 @@\n )\n \n properties = {\n- \"name\": data[\"name\"],\n- \"ref\": data[\"name\"],\n- \"addr_full\": data[\"address\"][\"streetAddress\"],\n+ \"name\": data[\"name\"].replace(\"Superdrug\", \"\").strip(),\n+ \"ref\": data[\"@id\"],\n+ \"street_address\": data[\"address\"][\"streetAddress\"]\n+ .replace(\"Superdrug\", \"\")\n+ .strip(),\n \"city\": data[\"address\"][\"addressLocality\"],\n \"state\": data[\"address\"][\"addressRegion\"],\n \"postcode\": data[\"address\"][\"postalCode\"],\n@@ -48,4 +55,15 @@\n ).extract_first()\n ),\n }\n+\n+ oh = OpeningHours()\n+\n+ for rule in data[\"OpeningHoursSpecification\"]:\n+ oh.add_range(\n+ day=rule[\"dayOfWeek\"][0:2],\n+ open_time=rule[\"opens\"],\n+ close_time=rule[\"closes\"],\n+ time_format=\"%I:%M %p\",\n+ )\n+\n yield GeojsonPointItem(**properties)\n", "issue": "Spider superdrug is broken\nDuring the global build at 2021-06-30-14-42-26, spider **superdrug** failed with **0 features** and **2 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/logs/superdrug.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/superdrug.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/superdrug.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass SuperdrugSpider(scrapy.Spider):\n name = \"superdrug\"\n item_attributes = {\"brand\": \"Superdrug\", \"brand_wikidata\": \"Q7643261\"}\n allowed_domains = [\"superdrug.com\"]\n download_delay = 0.5\n\n start_urls = [\"https://www.superdrug.com/stores/a-to-z\"]\n\n def parse(self, response):\n urls = response.xpath('//a[@class=\"row store-link\"]/@href').extract()\n\n for url in urls:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_location)\n\n def parse_location(self, response):\n data = json.loads(\n response.xpath(\n '//script[@type=\"application/ld+json\" and contains(text(), \"streetAddress\")]/text()'\n ).extract_first()\n )\n\n properties = {\n \"name\": data[\"name\"],\n \"ref\": data[\"name\"],\n \"addr_full\": data[\"address\"][\"streetAddress\"],\n \"city\": data[\"address\"][\"addressLocality\"],\n \"state\": data[\"address\"][\"addressRegion\"],\n \"postcode\": data[\"address\"][\"postalCode\"],\n \"country\": data[\"address\"][\"addressCountry\"],\n \"phone\": data.get(\"telephone\"),\n \"website\": response.url,\n \"lat\": float(\n response.xpath(\n '//div[@class=\"store-locator store-locator__overview\"]/@data-lat'\n ).extract_first()\n ),\n \"lon\": float(\n response.xpath(\n '//div[@class=\"store-locator store-locator__overview\"]/@data-lng'\n ).extract_first()\n ),\n }\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/superdrug.py"}], "after_files": 
[{"content": "# -*- coding: utf-8 -*-\nimport json\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass SuperdrugSpider(scrapy.Spider):\n name = \"superdrug\"\n item_attributes = {\"brand\": \"Superdrug\", \"brand_wikidata\": \"Q7643261\"}\n allowed_domains = [\"superdrug.com\"]\n download_delay = 0.5\n\n start_urls = [\"https://www.superdrug.com/stores/a-to-z\"]\n\n custom_settings = {\n \"USER_AGENT\": \"Mozilla/5.0 (X11; Linux x86_64; rv:99.0) Gecko/20100101 Firefox/99.0\"\n }\n\n def parse(self, response):\n urls = response.xpath('//a[@class=\"row store-link\"]/@href').extract()\n\n for url in urls:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_location)\n\n def parse_location(self, response):\n data = json.loads(\n response.xpath(\n '//script[@type=\"application/ld+json\" and contains(text(), \"streetAddress\")]/text()'\n ).extract_first()\n )\n\n properties = {\n \"name\": data[\"name\"].replace(\"Superdrug\", \"\").strip(),\n \"ref\": data[\"@id\"],\n \"street_address\": data[\"address\"][\"streetAddress\"]\n .replace(\"Superdrug\", \"\")\n .strip(),\n \"city\": data[\"address\"][\"addressLocality\"],\n \"state\": data[\"address\"][\"addressRegion\"],\n \"postcode\": data[\"address\"][\"postalCode\"],\n \"country\": data[\"address\"][\"addressCountry\"],\n \"phone\": data.get(\"telephone\"),\n \"website\": response.url,\n \"lat\": float(\n response.xpath(\n '//div[@class=\"store-locator store-locator__overview\"]/@data-lat'\n ).extract_first()\n ),\n \"lon\": float(\n response.xpath(\n '//div[@class=\"store-locator store-locator__overview\"]/@data-lng'\n ).extract_first()\n ),\n }\n\n oh = OpeningHours()\n\n for rule in data[\"OpeningHoursSpecification\"]:\n oh.add_range(\n day=rule[\"dayOfWeek\"][0:2],\n open_time=rule[\"opens\"],\n close_time=rule[\"closes\"],\n time_format=\"%I:%M %p\",\n )\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/superdrug.py"}]}
| 921 | 444 |
gh_patches_debug_1104
|
rasdani/github-patches
|
git_diff
|
blaze__blaze-872
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Truncate column name is too verbose
Do we have to have a unique name for the result of such operations?
How about having it renamed to the unit, i.e. instead of `when_datetimetruncate` we use `when_day` or `when_week`, etc?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `blaze/expr/datetime.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 from .expressions import Expr, ElemWise
4 from datashape import dshape, Record, DataShape, Unit, Option, date_, datetime_
5 import datashape
6
7 __all__ = ['DateTime', 'Date', 'date', 'Year', 'year', 'Month', 'month', 'Day',
8 'day', 'Hour', 'hour', 'Second', 'second', 'Millisecond',
9 'millisecond', 'Microsecond', 'microsecond', 'Date', 'date', 'Time',
10 'time', 'UTCFromTimestamp', 'DateTimeTruncate']
11
12 class DateTime(ElemWise):
13 """ Superclass for datetime accessors """
14 __slots__ = '_hash', '_child',
15
16 def __str__(self):
17 return '%s.%s' % (str(self._child), type(self).__name__.lower())
18
19 @property
20 def schema(self):
21 return dshape(self._dtype)
22
23 @property
24 def _name(self):
25 return '%s_%s' % (self._child._name, self.attr)
26
27 @property
28 def attr(self):
29 return type(self).__name__.lower()
30
31
32 class Date(DateTime):
33 _dtype = datashape.date_
34
35 def date(expr):
36 return Date(expr)
37
38 class Year(DateTime):
39 _dtype = datashape.int32
40
41 def year(expr):
42 return Year(expr)
43
44 class Month(DateTime):
45 _dtype = datashape.int32
46
47 def month(expr):
48 return Month(expr)
49
50 class Day(DateTime):
51 _dtype = datashape.int32
52
53 def day(expr):
54 return Day(expr)
55
56 class Time(DateTime):
57 _dtype = datashape.time_
58
59 def time(expr):
60 return Time(Expr)
61
62 class Hour(DateTime):
63 _dtype = datashape.int32
64
65 def hour(expr):
66 return Hour(expr)
67
68 class Minute(DateTime):
69 _dtype = datashape.int32
70
71 def minute(expr):
72 return Minute(expr)
73
74 class Second(DateTime):
75 _dtype = datashape.int32
76
77 def second(expr):
78 return Second(expr)
79
80 class Millisecond(DateTime):
81 _dtype = datashape.int64
82
83 def millisecond(expr):
84 return Millisecond(expr)
85
86 class Microsecond(DateTime):
87 _dtype = datashape.int64
88
89 def microsecond(expr):
90 return Microsecond(expr)
91
92 class UTCFromTimestamp(DateTime):
93 _dtype = datashape.datetime_
94
95 def utcfromtimestamp(expr):
96 return UTCFromTimestamp(expr)
97
98 units = ['year', 'month', 'week', 'day', 'hour', 'minute', 'second',
99 'millisecond', 'microsecond', 'nanosecond']
100
101
102 _unit_aliases = {'y': 'year', 'w': 'week', 'd': 'day', 'date': 'day',
103 'h': 'hour', 's': 'second', 'ms': 'millisecond', 'us': 'microsecond',
104 'ns': 'nanosecond'}
105
106 def normalize_time_unit(s):
107 """ Normalize time input to one of 'year', 'second', 'millisecond', etc..
108
109 Example
110 -------
111
112 >>> normalize_time_unit('milliseconds')
113 'millisecond'
114 >>> normalize_time_unit('ms')
115 'millisecond'
116 """
117 s = s.lower().strip()
118 if s in units:
119 return s
120 if s in _unit_aliases:
121 return _unit_aliases[s]
122 if s[-1] == 's':
123 return normalize_time_unit(s.rstrip('s'))
124
125 raise ValueError("Do not understand time unit %s" % s)
126
127
128 class DateTimeTruncate(DateTime):
129 __slots__ = '_hash', '_child', 'measure', 'unit'
130
131 @property
132 def _dtype(self):
133 if units.index('day') >= units.index(self.unit):
134 return datashape.date_
135 else:
136 return datashape.datetime_
137
138
139 def truncate(expr, *args, **kwargs):
140 """ Truncate datetime expression
141
142 Example
143 -------
144
145 >>> from blaze import symbol, compute
146 >>> from datetime import datetime
147 >>> s = symbol('s', 'datetime')
148
149 >>> expr = s.truncate(10, 'minutes')
150 >>> compute(expr, datetime(2000, 6, 25, 12, 35, 10))
151 datetime.datetime(2000, 6, 25, 12, 30)
152
153 >>> expr = s.truncate(1, 'week')
154 >>> compute(expr, datetime(2000, 6, 25, 12, 35, 10))
155 datetime.date(2000, 6, 25)
156
157 Alternatively use keyword arguments to specify unit and measure
158
159 >>> # expr = s.truncate(2, 'weeks')
160 >>> expr = s.truncate(weeks=2)
161 """
162 if args:
163 assert not kwargs
164 measure, unit = args
165 if kwargs:
166 assert not args
167 [(unit, measure)] = kwargs.items()
168 return DateTimeTruncate(expr, measure, normalize_time_unit(unit))
169
170
171 from .expressions import schema_method_list, method_properties
172 from datashape.predicates import isdatelike, isnumeric
173
174 schema_method_list.extend([
175 (isdatelike, set([year, month, day, hour, minute, date, time, second,
176 millisecond, microsecond, truncate])),
177 (isnumeric, set([utcfromtimestamp]))
178 ])
179
180 method_properties |= set([year, month, day, hour, minute, second, millisecond,
181 microsecond, date, time, utcfromtimestamp])
182
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/blaze/expr/datetime.py b/blaze/expr/datetime.py
--- a/blaze/expr/datetime.py
+++ b/blaze/expr/datetime.py
@@ -135,6 +135,10 @@
else:
return datashape.datetime_
+ @property
+ def _name(self):
+ return self._child._name
+
def truncate(expr, *args, **kwargs):
""" Truncate datetime expression
|
{"golden_diff": "diff --git a/blaze/expr/datetime.py b/blaze/expr/datetime.py\n--- a/blaze/expr/datetime.py\n+++ b/blaze/expr/datetime.py\n@@ -135,6 +135,10 @@\n else:\n return datashape.datetime_\n \n+ @property\n+ def _name(self):\n+ return self._child._name\n+\n \n def truncate(expr, *args, **kwargs):\n \"\"\" Truncate datetime expression\n", "issue": "Truncate column name is too verbose\nDo we have to have a unique name for the result of such operations?\n\nHow about having it renamed to the unit, i.e. instead of `when_datetimetruncate` we use `when_day` or `when_week`, etc?\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nfrom .expressions import Expr, ElemWise\nfrom datashape import dshape, Record, DataShape, Unit, Option, date_, datetime_\nimport datashape\n\n__all__ = ['DateTime', 'Date', 'date', 'Year', 'year', 'Month', 'month', 'Day',\n 'day', 'Hour', 'hour', 'Second', 'second', 'Millisecond',\n 'millisecond', 'Microsecond', 'microsecond', 'Date', 'date', 'Time',\n 'time', 'UTCFromTimestamp', 'DateTimeTruncate']\n\nclass DateTime(ElemWise):\n \"\"\" Superclass for datetime accessors \"\"\"\n __slots__ = '_hash', '_child',\n\n def __str__(self):\n return '%s.%s' % (str(self._child), type(self).__name__.lower())\n\n @property\n def schema(self):\n return dshape(self._dtype)\n\n @property\n def _name(self):\n return '%s_%s' % (self._child._name, self.attr)\n\n @property\n def attr(self):\n return type(self).__name__.lower()\n\n\nclass Date(DateTime):\n _dtype = datashape.date_\n\ndef date(expr):\n return Date(expr)\n\nclass Year(DateTime):\n _dtype = datashape.int32\n\ndef year(expr):\n return Year(expr)\n\nclass Month(DateTime):\n _dtype = datashape.int32\n\ndef month(expr):\n return Month(expr)\n\nclass Day(DateTime):\n _dtype = datashape.int32\n\ndef day(expr):\n return Day(expr)\n\nclass Time(DateTime):\n _dtype = datashape.time_\n\ndef time(expr):\n return Time(Expr)\n\nclass Hour(DateTime):\n _dtype = datashape.int32\n\ndef hour(expr):\n return Hour(expr)\n\nclass Minute(DateTime):\n _dtype = datashape.int32\n\ndef minute(expr):\n return Minute(expr)\n\nclass Second(DateTime):\n _dtype = datashape.int32\n\ndef second(expr):\n return Second(expr)\n\nclass Millisecond(DateTime):\n _dtype = datashape.int64\n\ndef millisecond(expr):\n return Millisecond(expr)\n\nclass Microsecond(DateTime):\n _dtype = datashape.int64\n\ndef microsecond(expr):\n return Microsecond(expr)\n\nclass UTCFromTimestamp(DateTime):\n _dtype = datashape.datetime_\n\ndef utcfromtimestamp(expr):\n return UTCFromTimestamp(expr)\n\nunits = ['year', 'month', 'week', 'day', 'hour', 'minute', 'second',\n'millisecond', 'microsecond', 'nanosecond']\n\n\n_unit_aliases = {'y': 'year', 'w': 'week', 'd': 'day', 'date': 'day',\n 'h': 'hour', 's': 'second', 'ms': 'millisecond', 'us': 'microsecond',\n 'ns': 'nanosecond'}\n\ndef normalize_time_unit(s):\n \"\"\" Normalize time input to one of 'year', 'second', 'millisecond', etc..\n\n Example\n -------\n\n >>> normalize_time_unit('milliseconds')\n 'millisecond'\n >>> normalize_time_unit('ms')\n 'millisecond'\n \"\"\"\n s = s.lower().strip()\n if s in units:\n return s\n if s in _unit_aliases:\n return _unit_aliases[s]\n if s[-1] == 's':\n return normalize_time_unit(s.rstrip('s'))\n\n raise ValueError(\"Do not understand time unit %s\" % s)\n\n\nclass DateTimeTruncate(DateTime):\n __slots__ = '_hash', '_child', 'measure', 'unit'\n\n @property\n def _dtype(self):\n if units.index('day') >= units.index(self.unit):\n return 
datashape.date_\n else:\n return datashape.datetime_\n\n\ndef truncate(expr, *args, **kwargs):\n \"\"\" Truncate datetime expression\n\n Example\n -------\n\n >>> from blaze import symbol, compute\n >>> from datetime import datetime\n >>> s = symbol('s', 'datetime')\n\n >>> expr = s.truncate(10, 'minutes')\n >>> compute(expr, datetime(2000, 6, 25, 12, 35, 10))\n datetime.datetime(2000, 6, 25, 12, 30)\n\n >>> expr = s.truncate(1, 'week')\n >>> compute(expr, datetime(2000, 6, 25, 12, 35, 10))\n datetime.date(2000, 6, 25)\n\n Alternatively use keyword arguments to specify unit and measure\n\n >>> # expr = s.truncate(2, 'weeks')\n >>> expr = s.truncate(weeks=2)\n \"\"\"\n if args:\n assert not kwargs\n measure, unit = args\n if kwargs:\n assert not args\n [(unit, measure)] = kwargs.items()\n return DateTimeTruncate(expr, measure, normalize_time_unit(unit))\n\n\nfrom .expressions import schema_method_list, method_properties\nfrom datashape.predicates import isdatelike, isnumeric\n\nschema_method_list.extend([\n (isdatelike, set([year, month, day, hour, minute, date, time, second,\n millisecond, microsecond, truncate])),\n (isnumeric, set([utcfromtimestamp]))\n ])\n\nmethod_properties |= set([year, month, day, hour, minute, second, millisecond,\n microsecond, date, time, utcfromtimestamp])\n", "path": "blaze/expr/datetime.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nfrom .expressions import Expr, ElemWise\nfrom datashape import dshape, Record, DataShape, Unit, Option, date_, datetime_\nimport datashape\n\n__all__ = ['DateTime', 'Date', 'date', 'Year', 'year', 'Month', 'month', 'Day',\n 'day', 'Hour', 'hour', 'Second', 'second', 'Millisecond',\n 'millisecond', 'Microsecond', 'microsecond', 'Date', 'date', 'Time',\n 'time', 'UTCFromTimestamp', 'DateTimeTruncate']\n\nclass DateTime(ElemWise):\n \"\"\" Superclass for datetime accessors \"\"\"\n __slots__ = '_hash', '_child',\n\n def __str__(self):\n return '%s.%s' % (str(self._child), type(self).__name__.lower())\n\n @property\n def schema(self):\n return dshape(self._dtype)\n\n @property\n def _name(self):\n return '%s_%s' % (self._child._name, self.attr)\n\n @property\n def attr(self):\n return type(self).__name__.lower()\n\n\nclass Date(DateTime):\n _dtype = datashape.date_\n\ndef date(expr):\n return Date(expr)\n\nclass Year(DateTime):\n _dtype = datashape.int32\n\ndef year(expr):\n return Year(expr)\n\nclass Month(DateTime):\n _dtype = datashape.int32\n\ndef month(expr):\n return Month(expr)\n\nclass Day(DateTime):\n _dtype = datashape.int32\n\ndef day(expr):\n return Day(expr)\n\nclass Time(DateTime):\n _dtype = datashape.time_\n\ndef time(expr):\n return Time(Expr)\n\nclass Hour(DateTime):\n _dtype = datashape.int32\n\ndef hour(expr):\n return Hour(expr)\n\nclass Minute(DateTime):\n _dtype = datashape.int32\n\ndef minute(expr):\n return Minute(expr)\n\nclass Second(DateTime):\n _dtype = datashape.int32\n\ndef second(expr):\n return Second(expr)\n\nclass Millisecond(DateTime):\n _dtype = datashape.int64\n\ndef millisecond(expr):\n return Millisecond(expr)\n\nclass Microsecond(DateTime):\n _dtype = datashape.int64\n\ndef microsecond(expr):\n return Microsecond(expr)\n\nclass UTCFromTimestamp(DateTime):\n _dtype = datashape.datetime_\n\ndef utcfromtimestamp(expr):\n return UTCFromTimestamp(expr)\n\nunits = ['year', 'month', 'week', 'day', 'hour', 'minute', 'second',\n'millisecond', 'microsecond', 'nanosecond']\n\n\n_unit_aliases = {'y': 'year', 'w': 'week', 'd': 'day', 'date': 'day',\n 
'h': 'hour', 's': 'second', 'ms': 'millisecond', 'us': 'microsecond',\n 'ns': 'nanosecond'}\n\ndef normalize_time_unit(s):\n \"\"\" Normalize time input to one of 'year', 'second', 'millisecond', etc..\n\n Example\n -------\n\n >>> normalize_time_unit('milliseconds')\n 'millisecond'\n >>> normalize_time_unit('ms')\n 'millisecond'\n \"\"\"\n s = s.lower().strip()\n if s in units:\n return s\n if s in _unit_aliases:\n return _unit_aliases[s]\n if s[-1] == 's':\n return normalize_time_unit(s.rstrip('s'))\n\n raise ValueError(\"Do not understand time unit %s\" % s)\n\n\nclass DateTimeTruncate(DateTime):\n __slots__ = '_hash', '_child', 'measure', 'unit'\n\n @property\n def _dtype(self):\n if units.index('day') >= units.index(self.unit):\n return datashape.date_\n else:\n return datashape.datetime_\n\n @property\n def _name(self):\n return self._child._name\n\n\ndef truncate(expr, *args, **kwargs):\n \"\"\" Truncate datetime expression\n\n Example\n -------\n\n >>> from blaze import symbol, compute\n >>> from datetime import datetime\n >>> s = symbol('s', 'datetime')\n\n >>> expr = s.truncate(10, 'minutes')\n >>> compute(expr, datetime(2000, 6, 25, 12, 35, 10))\n datetime.datetime(2000, 6, 25, 12, 30)\n\n >>> expr = s.truncate(1, 'week')\n >>> compute(expr, datetime(2000, 6, 25, 12, 35, 10))\n datetime.date(2000, 6, 25)\n\n Alternatively use keyword arguments to specify unit and measure\n\n >>> # expr = s.truncate(2, 'weeks')\n >>> expr = s.truncate(weeks=2)\n \"\"\"\n if args:\n assert not kwargs\n measure, unit = args\n if kwargs:\n assert not args\n [(unit, measure)] = kwargs.items()\n return DateTimeTruncate(expr, measure, normalize_time_unit(unit))\n\n\nfrom .expressions import schema_method_list, method_properties\nfrom datashape.predicates import isdatelike, isnumeric\n\nschema_method_list.extend([\n (isdatelike, set([year, month, day, hour, minute, date, time, second,\n millisecond, microsecond, truncate])),\n (isnumeric, set([utcfromtimestamp]))\n ])\n\nmethod_properties |= set([year, month, day, hour, minute, second, millisecond,\n microsecond, date, time, utcfromtimestamp])\n", "path": "blaze/expr/datetime.py"}]}
| 1,999 | 106 |
gh_patches_debug_59056
|
rasdani/github-patches
|
git_diff
|
google__jax-19166
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unexpected behavior of `jax.scipy.stats.binom.pmf`
### Description
pmf of a random variable should be zero outside of its range. While plotting the graph for `jax.scipy.stats.binom.pmf`, I notice that for $n>5$ and $p>0.5$, there are some oscillations in the values of the pmf, which should not be there. For evidence, I am attaching a plot too.
```python
import jax
from jax import numpy as jnp
from matplotlib import pyplot as plt
x = jnp.linspace(-1, 10, 1000)
xxf = jax.scipy.stats.binom.pmf(k=x, n=5, p=0.8)
plt.plot(x, xxf)
plt.tight_layout()
plt.show()
```

The side to the left of zero is as expected.
### What jax/jaxlib version are you using?
jax v0.4.23
### Which accelerator(s) are you using?
CPU
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jax/_src/scipy/stats/binom.py`
Content:
```
1 # Copyright 2023 The JAX Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License
14
15
16 import scipy.stats as osp_stats
17
18 from jax import lax
19 import jax.numpy as jnp
20 from jax._src.numpy.util import _wraps, promote_args_inexact
21 from jax._src.scipy.special import gammaln, xlogy, xlog1py
22 from jax._src.typing import Array, ArrayLike
23
24
25 @_wraps(osp_stats.nbinom.logpmf, update_doc=False)
26 def logpmf(k: ArrayLike, n: ArrayLike, p: ArrayLike, loc: ArrayLike = 0) -> Array:
27 """JAX implementation of scipy.stats.binom.logpmf."""
28 k, n, p, loc = promote_args_inexact("binom.logpmf", k, n, p, loc)
29 y = lax.sub(k, loc)
30 comb_term = lax.sub(
31 gammaln(n + 1),
32 lax.add(gammaln(y + 1), gammaln(n - y + 1))
33 )
34 log_linear_term = lax.add(xlogy(y, p), xlog1py(lax.sub(n, y), lax.neg(p)))
35 log_probs = lax.add(comb_term, log_linear_term)
36 return jnp.where(lax.lt(k, loc), -jnp.inf, log_probs)
37
38
39 @_wraps(osp_stats.nbinom.pmf, update_doc=False)
40 def pmf(k: ArrayLike, n: ArrayLike, p: ArrayLike, loc: ArrayLike = 0) -> Array:
41 """JAX implementation of scipy.stats.binom.pmf."""
42 return lax.exp(logpmf(k, n, p, loc))
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/jax/_src/scipy/stats/binom.py b/jax/_src/scipy/stats/binom.py
--- a/jax/_src/scipy/stats/binom.py
+++ b/jax/_src/scipy/stats/binom.py
@@ -33,7 +33,7 @@
)
log_linear_term = lax.add(xlogy(y, p), xlog1py(lax.sub(n, y), lax.neg(p)))
log_probs = lax.add(comb_term, log_linear_term)
- return jnp.where(lax.lt(k, loc), -jnp.inf, log_probs)
+ return jnp.where(lax.ge(k, loc) & lax.lt(k, loc + n + 1), log_probs, -jnp.inf)
@_wraps(osp_stats.nbinom.pmf, update_doc=False)
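
A quick way to check the intended behaviour of the patched masking is to evaluate the pmf at points below, inside, and above the support; values outside `[loc, loc + n]` should now come out as exactly zero. This is a verification sketch, not part of the patch, and the expected zeros assume the fix above is applied.

```python
# Sketch: pmf should vanish for k < 0 and k > n once the mask covers both sides.
import jax.numpy as jnp
from jax.scipy.stats import binom

k = jnp.array([-1.0, 0.0, 3.0, 5.0, 6.0, 9.0])
print(binom.pmf(k=k, n=5, p=0.8))  # expected: zeros at k = -1, 6 and 9
```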
|
{"golden_diff": "diff --git a/jax/_src/scipy/stats/binom.py b/jax/_src/scipy/stats/binom.py\n--- a/jax/_src/scipy/stats/binom.py\n+++ b/jax/_src/scipy/stats/binom.py\n@@ -33,7 +33,7 @@\n )\n log_linear_term = lax.add(xlogy(y, p), xlog1py(lax.sub(n, y), lax.neg(p)))\n log_probs = lax.add(comb_term, log_linear_term)\n- return jnp.where(lax.lt(k, loc), -jnp.inf, log_probs)\n+ return jnp.where(lax.ge(k, loc) & lax.lt(k, loc + n + 1), log_probs, -jnp.inf)\n \n \n @_wraps(osp_stats.nbinom.pmf, update_doc=False)\n", "issue": "Unexpected behavior of `jax.scipy.stats.binom.pmf`\n### Description\r\n\r\npmf of a random variable should be zero outside of its range. While plotting the graph for `jax.scipy.stats.binom.pmf`, I notice that for $n>5$ and $p>0.5$, there are some oscillations in the values of the pmf, which should not be there. For evidence, I am attaching a plot too.\r\n\r\n```python\r\nimport jax\r\nfrom jax import numpy as jnp\r\nfrom matplotlib import pyplot as plt\r\n\r\nx = jnp.linspace(-1, 10, 1000)\r\nxxf = jax.scipy.stats.binom.pmf(k=x, n=5, p=0.8)\r\n\r\nplt.plot(x, xxf)\r\nplt.tight_layout()\r\nplt.show()\r\n```\r\n\r\nThe side left to the zero is as expected.\r\n\r\n### What jax/jaxlib version are you using?\r\n\r\njax v0.4.23\r\n\r\n### Which accelerator(s) are you using?\r\n\r\nCPU\n", "before_files": [{"content": "# Copyright 2023 The JAX Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\n\nimport scipy.stats as osp_stats\n\nfrom jax import lax\nimport jax.numpy as jnp\nfrom jax._src.numpy.util import _wraps, promote_args_inexact\nfrom jax._src.scipy.special import gammaln, xlogy, xlog1py\nfrom jax._src.typing import Array, ArrayLike\n\n\n@_wraps(osp_stats.nbinom.logpmf, update_doc=False)\ndef logpmf(k: ArrayLike, n: ArrayLike, p: ArrayLike, loc: ArrayLike = 0) -> Array:\n \"\"\"JAX implementation of scipy.stats.binom.logpmf.\"\"\"\n k, n, p, loc = promote_args_inexact(\"binom.logpmf\", k, n, p, loc)\n y = lax.sub(k, loc)\n comb_term = lax.sub(\n gammaln(n + 1),\n lax.add(gammaln(y + 1), gammaln(n - y + 1))\n )\n log_linear_term = lax.add(xlogy(y, p), xlog1py(lax.sub(n, y), lax.neg(p)))\n log_probs = lax.add(comb_term, log_linear_term)\n return jnp.where(lax.lt(k, loc), -jnp.inf, log_probs)\n\n\n@_wraps(osp_stats.nbinom.pmf, update_doc=False)\ndef pmf(k: ArrayLike, n: ArrayLike, p: ArrayLike, loc: ArrayLike = 0) -> Array:\n \"\"\"JAX implementation of scipy.stats.binom.pmf.\"\"\"\n return lax.exp(logpmf(k, n, p, loc))\n", "path": "jax/_src/scipy/stats/binom.py"}], "after_files": [{"content": "# Copyright 2023 The JAX Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\n\nimport scipy.stats as osp_stats\n\nfrom jax import lax\nimport jax.numpy as jnp\nfrom jax._src.numpy.util import _wraps, promote_args_inexact\nfrom jax._src.scipy.special import gammaln, xlogy, xlog1py\nfrom jax._src.typing import Array, ArrayLike\n\n\n@_wraps(osp_stats.nbinom.logpmf, update_doc=False)\ndef logpmf(k: ArrayLike, n: ArrayLike, p: ArrayLike, loc: ArrayLike = 0) -> Array:\n \"\"\"JAX implementation of scipy.stats.binom.logpmf.\"\"\"\n k, n, p, loc = promote_args_inexact(\"binom.logpmf\", k, n, p, loc)\n y = lax.sub(k, loc)\n comb_term = lax.sub(\n gammaln(n + 1),\n lax.add(gammaln(y + 1), gammaln(n - y + 1))\n )\n log_linear_term = lax.add(xlogy(y, p), xlog1py(lax.sub(n, y), lax.neg(p)))\n log_probs = lax.add(comb_term, log_linear_term)\n return jnp.where(lax.ge(k, loc) & lax.lt(k, loc + n + 1), log_probs, -jnp.inf)\n\n\n@_wraps(osp_stats.nbinom.pmf, update_doc=False)\ndef pmf(k: ArrayLike, n: ArrayLike, p: ArrayLike, loc: ArrayLike = 0) -> Array:\n \"\"\"JAX implementation of scipy.stats.binom.pmf.\"\"\"\n return lax.exp(logpmf(k, n, p, loc))\n", "path": "jax/_src/scipy/stats/binom.py"}]}
| 1,092 | 178 |
gh_patches_debug_1261
|
rasdani/github-patches
|
git_diff
|
swcarpentry__python-novice-inflammation-736
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Lesson 10 - numpy.mean(data) and data.mean
In lesson 10, when the lesson refers to readings_03.py, the code shows that to calculate the mean over 'data' across all days, numpy.mean is used: numpy.mean(data, axis=1). However, the file readings_03.py (at least the version I downloaded recently) uses the instruction data.mean(axis=1). Both lead to the same result, but for consistency I would suggest either modifying readings_*.py to use numpy.mean (as this is what has been used throughout the entire lesson), or explaining explicitly that both expressions lead to the same result (it would be a good time to remind students about object attributes).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `code/readings_03.py`
Content:
```
1 import sys
2 import numpy
3
4
5 def main():
6 script = sys.argv[0]
7 for filename in sys.argv[1:]:
8 data = numpy.loadtxt(filename, delimiter=',')
9 for m in data.mean(axis=1):
10 print(m)
11
12
13 if __name__ == '__main__':
14 main()
15
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/code/readings_03.py b/code/readings_03.py
--- a/code/readings_03.py
+++ b/code/readings_03.py
@@ -6,7 +6,7 @@
script = sys.argv[0]
for filename in sys.argv[1:]:
data = numpy.loadtxt(filename, delimiter=',')
- for m in data.mean(axis=1):
+ for m in numpy.mean(data, axis=1):
print(m)
|
{"golden_diff": "diff --git a/code/readings_03.py b/code/readings_03.py\n--- a/code/readings_03.py\n+++ b/code/readings_03.py\n@@ -6,7 +6,7 @@\n script = sys.argv[0]\n for filename in sys.argv[1:]:\n data = numpy.loadtxt(filename, delimiter=',')\n- for m in data.mean(axis=1):\n+ for m in numpy.mean(data, axis=1):\n print(m)\n", "issue": "Lesson 10 - numpy.mean(data) and data.mean\nIn lesson 10, when the lesson refers to readings_03.py, the code shows that to calculate the mean over 'data' across all days, numpy.mean is used: numpy.mean(data, axis=1). However when looking at the file readings_03.py (at least the version I downloaded recently) uses the instruction data.mean(axis=1). Both lead to the same result, but for consistency I would suggest to either modify the readings_*.py to use numpy.mean (as this is what it has been used throughout the entire lesson), or explain explicitly that both expressions lead to the same result (it would be a good time to remind students about object attributes). \n", "before_files": [{"content": "import sys\nimport numpy\n\n\ndef main():\n script = sys.argv[0]\n for filename in sys.argv[1:]:\n data = numpy.loadtxt(filename, delimiter=',')\n for m in data.mean(axis=1):\n print(m)\n\n\nif __name__ == '__main__':\n main()\n", "path": "code/readings_03.py"}], "after_files": [{"content": "import sys\nimport numpy\n\n\ndef main():\n script = sys.argv[0]\n for filename in sys.argv[1:]:\n data = numpy.loadtxt(filename, delimiter=',')\n for m in numpy.mean(data, axis=1):\n print(m)\n\n\nif __name__ == '__main__':\n main()\n", "path": "code/readings_03.py"}]}
| 502 | 105 |
gh_patches_debug_6547
|
rasdani/github-patches
|
git_diff
|
lk-geimfari__mimesis-376
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
I can't compile my project by pyinstaller
I have a script with code:
```
from mimesis import Personal
person = Personal('en')
person.full_name()
```
and it works well, but after compiling this code to .exe via pyinstaller I have an error **FileNotFoundError: [Errno 2] No such file or directory: 'B:\\_MEI131682\\mimesis\\data/es\\personal.json'
[20624] Failed to execute script myproject**
So, I think that problem in path (`data/es\\personal`). What ways of solving this problem can you recommend?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mimesis/utils.py`
Content:
```
1 """This module is provide internal util functions."""
2
3 import collections
4 import functools
5 import json
6 import ssl
7 from os import path
8 from typing import Mapping, Optional, Union
9 from urllib import request
10
11 from mimesis import config
12 from mimesis.exceptions import UnsupportedLocale
13 from mimesis.typing import JSON
14
15 __all__ = ['download_image', 'locale_info',
16 'luhn_checksum', 'setup_locale', 'pull']
17
18 DATA_DIR = path.abspath(path.join(path.dirname(__file__), 'data'))
19
20
21 def locale_info(locale: str) -> str:
22 """Check information about locale.
23
24 :param locale: Locale abbreviation.
25 :return: Locale name.
26 :raises UnsupportedLocale: if locale is not supported.
27 """
28 locale = locale.lower()
29 supported = config.SUPPORTED_LOCALES
30
31 if locale not in supported:
32 raise UnsupportedLocale(locale)
33
34 return supported[locale]['name']
35
36
37 def luhn_checksum(num: str) -> str:
38 """Calculate a checksum for num using the Luhn algorithm.
39
40 :param num: The number to calculate a checksum for as a string.
41 :return: Checksum for number.
42 """
43 check = 0
44 for i, s in enumerate(reversed(num)):
45 sx = int(s)
46 sx = sx * 2 if i % 2 == 0 else sx
47 sx = sx - 9 if sx > 9 else sx
48 check += sx
49 return str(check * 9 % 10)
50
51
52 def update_dict(initial: JSON, other: Mapping) -> JSON:
53 """Recursively update a dictionary.
54
55 :param initial: Dict to update.
56 :type initial: dict or list
57 :param other: Dict to update from.
58 :type other: Mapping
59 :return: Updated dict.
60 :rtype: dict
61 """
62 for key, value in other.items():
63 if isinstance(value, collections.Mapping):
64 r = update_dict(initial.get(key, {}), value)
65 initial[key] = r
66 else:
67 initial[key] = other[key]
68 return initial
69
70
71 @functools.lru_cache(maxsize=None)
72 def pull(file: str, locale: str = 'en') -> JSON:
73 """Pull the content from the JSON and memorize one.
74
75 Opens JSON file ``file`` in the folder ``data/locale``
76 and get content from the file and memorize ones using lru_cache.
77
78 :param file: The name of file.
79 :param locale: Locale.
80 :return: The content of the file.
81 :rtype: dict
82 :raises UnsupportedLocale: if locale is not supported.
83
84 :Example:
85
86 >>> from mimesis.utils import pull
87 >>> en = pull(file='datetime.json', locale='en')
88 >>> isinstance(en, dict)
89 True
90 >>> en['day']['abbr'][0]
91 'Mon.'
92 """
93 def get_data(locale_name: str) -> JSON:
94 """Pull JSON data from file.
95
96 :param locale_name: Locale name.
97 :return: Content of JSON file as dict.
98 """
99 file_path = path.join(DATA_DIR + '/' + locale_name, file)
100 # Needs explicit encoding for Windows
101 with open(file_path, 'r', encoding='utf8') as f:
102 return json.load(f)
103
104 locale = locale.lower()
105
106 if locale not in config.SUPPORTED_LOCALES:
107 raise UnsupportedLocale(locale)
108
109 master_locale = locale.split('-')[0]
110 data = get_data(master_locale)
111
112 # Handle sub-locales
113 if '-' in locale:
114 data = update_dict(data, get_data(locale))
115
116 return data
117
118
119 def download_image(url: str = '', save_path: str = '',
120 unverified_ctx: bool = False) -> Union[None, str]:
121 """Download image and save in current directory on local machine.
122
123 :param url: URL to image.
124 :param save_path: Saving path.
125 :param unverified_ctx: Create unverified context.
126 :return: Image name.
127 :rtype: str or None
128 """
129 if unverified_ctx:
130 ssl._create_default_https_context = ssl._create_unverified_context
131
132 if url is not None:
133 image_name = url.rsplit('/')[-1]
134 request.urlretrieve(url, save_path + image_name)
135 return image_name
136 return None
137
138
139 def setup_locale(locale: Optional[str] = None) -> str:
140 """Set up locale after pre-check.
141
142 :param str locale: Locale
143 :return: Locale in lowercase.
144 :raises UnsupportedLocale: if locales is not supported.
145 """
146 if not locale:
147 return config.DEFAULT_LOCALE
148
149 locale = locale.lower()
150 if locale not in config.SUPPORTED_LOCALES:
151 raise UnsupportedLocale(locale)
152
153 return locale
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mimesis/utils.py b/mimesis/utils.py
--- a/mimesis/utils.py
+++ b/mimesis/utils.py
@@ -96,7 +96,7 @@
:param locale_name: Locale name.
:return: Content of JSON file as dict.
"""
- file_path = path.join(DATA_DIR + '/' + locale_name, file)
+ file_path = path.join(DATA_DIR, locale_name, file)
# Needs explicit encoding for Windows
with open(file_path, 'r', encoding='utf8') as f:
return json.load(f)
|
{"golden_diff": "diff --git a/mimesis/utils.py b/mimesis/utils.py\n--- a/mimesis/utils.py\n+++ b/mimesis/utils.py\n@@ -96,7 +96,7 @@\n :param locale_name: Locale name.\n :return: Content of JSON file as dict.\n \"\"\"\n- file_path = path.join(DATA_DIR + '/' + locale_name, file)\n+ file_path = path.join(DATA_DIR, locale_name, file)\n # Needs explicit encoding for Windows\n with open(file_path, 'r', encoding='utf8') as f:\n return json.load(f)\n", "issue": "I can't compile my project by pyinstaller\nI have a script with code:\r\n```\r\nfrom mimesis import Personal\r\nperson = Personal('en')\r\nperson.full_name()\r\n```\r\nand it works well, but after compiling this code to .exe via pyinstaller I have an error **FileNotFoundError: [Errno 2] No such file or directory: 'B:\\\\_MEI131682\\\\mimesis\\\\data/es\\\\personal.json'\r\n[20624] Failed to execute script myproject**\r\nSo, I think that problem in path (`data/es\\\\personal`). What ways of solving this problem can you recommend?\n", "before_files": [{"content": "\"\"\"This module is provide internal util functions.\"\"\"\n\nimport collections\nimport functools\nimport json\nimport ssl\nfrom os import path\nfrom typing import Mapping, Optional, Union\nfrom urllib import request\n\nfrom mimesis import config\nfrom mimesis.exceptions import UnsupportedLocale\nfrom mimesis.typing import JSON\n\n__all__ = ['download_image', 'locale_info',\n 'luhn_checksum', 'setup_locale', 'pull']\n\nDATA_DIR = path.abspath(path.join(path.dirname(__file__), 'data'))\n\n\ndef locale_info(locale: str) -> str:\n \"\"\"Check information about locale.\n\n :param locale: Locale abbreviation.\n :return: Locale name.\n :raises UnsupportedLocale: if locale is not supported.\n \"\"\"\n locale = locale.lower()\n supported = config.SUPPORTED_LOCALES\n\n if locale not in supported:\n raise UnsupportedLocale(locale)\n\n return supported[locale]['name']\n\n\ndef luhn_checksum(num: str) -> str:\n \"\"\"Calculate a checksum for num using the Luhn algorithm.\n\n :param num: The number to calculate a checksum for as a string.\n :return: Checksum for number.\n \"\"\"\n check = 0\n for i, s in enumerate(reversed(num)):\n sx = int(s)\n sx = sx * 2 if i % 2 == 0 else sx\n sx = sx - 9 if sx > 9 else sx\n check += sx\n return str(check * 9 % 10)\n\n\ndef update_dict(initial: JSON, other: Mapping) -> JSON:\n \"\"\"Recursively update a dictionary.\n\n :param initial: Dict to update.\n :type initial: dict or list\n :param other: Dict to update from.\n :type other: Mapping\n :return: Updated dict.\n :rtype: dict\n \"\"\"\n for key, value in other.items():\n if isinstance(value, collections.Mapping):\n r = update_dict(initial.get(key, {}), value)\n initial[key] = r\n else:\n initial[key] = other[key]\n return initial\n\n\[email protected]_cache(maxsize=None)\ndef pull(file: str, locale: str = 'en') -> JSON:\n \"\"\"Pull the content from the JSON and memorize one.\n\n Opens JSON file ``file`` in the folder ``data/locale``\n and get content from the file and memorize ones using lru_cache.\n\n :param file: The name of file.\n :param locale: Locale.\n :return: The content of the file.\n :rtype: dict\n :raises UnsupportedLocale: if locale is not supported.\n\n :Example:\n\n >>> from mimesis.utils import pull\n >>> en = pull(file='datetime.json', locale='en')\n >>> isinstance(en, dict)\n True\n >>> en['day']['abbr'][0]\n 'Mon.'\n \"\"\"\n def get_data(locale_name: str) -> JSON:\n \"\"\"Pull JSON data from file.\n\n :param locale_name: Locale name.\n :return: Content of JSON file as dict.\n 
\"\"\"\n file_path = path.join(DATA_DIR + '/' + locale_name, file)\n # Needs explicit encoding for Windows\n with open(file_path, 'r', encoding='utf8') as f:\n return json.load(f)\n\n locale = locale.lower()\n\n if locale not in config.SUPPORTED_LOCALES:\n raise UnsupportedLocale(locale)\n\n master_locale = locale.split('-')[0]\n data = get_data(master_locale)\n\n # Handle sub-locales\n if '-' in locale:\n data = update_dict(data, get_data(locale))\n\n return data\n\n\ndef download_image(url: str = '', save_path: str = '',\n unverified_ctx: bool = False) -> Union[None, str]:\n \"\"\"Download image and save in current directory on local machine.\n\n :param url: URL to image.\n :param save_path: Saving path.\n :param unverified_ctx: Create unverified context.\n :return: Image name.\n :rtype: str or None\n \"\"\"\n if unverified_ctx:\n ssl._create_default_https_context = ssl._create_unverified_context\n\n if url is not None:\n image_name = url.rsplit('/')[-1]\n request.urlretrieve(url, save_path + image_name)\n return image_name\n return None\n\n\ndef setup_locale(locale: Optional[str] = None) -> str:\n \"\"\"Set up locale after pre-check.\n\n :param str locale: Locale\n :return: Locale in lowercase.\n :raises UnsupportedLocale: if locales is not supported.\n \"\"\"\n if not locale:\n return config.DEFAULT_LOCALE\n\n locale = locale.lower()\n if locale not in config.SUPPORTED_LOCALES:\n raise UnsupportedLocale(locale)\n\n return locale\n", "path": "mimesis/utils.py"}], "after_files": [{"content": "\"\"\"This module is provide internal util functions.\"\"\"\n\nimport collections\nimport functools\nimport json\nimport ssl\nfrom os import path\nfrom typing import Mapping, Optional, Union\nfrom urllib import request\n\nfrom mimesis import config\nfrom mimesis.exceptions import UnsupportedLocale\nfrom mimesis.typing import JSON\n\n__all__ = ['download_image', 'locale_info',\n 'luhn_checksum', 'setup_locale', 'pull']\n\nDATA_DIR = path.abspath(path.join(path.dirname(__file__), 'data'))\n\n\ndef locale_info(locale: str) -> str:\n \"\"\"Check information about locale.\n\n :param locale: Locale abbreviation.\n :return: Locale name.\n :raises UnsupportedLocale: if locale is not supported.\n \"\"\"\n locale = locale.lower()\n supported = config.SUPPORTED_LOCALES\n\n if locale not in supported:\n raise UnsupportedLocale(locale)\n\n return supported[locale]['name']\n\n\ndef luhn_checksum(num: str) -> str:\n \"\"\"Calculate a checksum for num using the Luhn algorithm.\n\n :param num: The number to calculate a checksum for as a string.\n :return: Checksum for number.\n \"\"\"\n check = 0\n for i, s in enumerate(reversed(num)):\n sx = int(s)\n sx = sx * 2 if i % 2 == 0 else sx\n sx = sx - 9 if sx > 9 else sx\n check += sx\n return str(check * 9 % 10)\n\n\ndef update_dict(initial: JSON, other: Mapping) -> JSON:\n \"\"\"Recursively update a dictionary.\n\n :param initial: Dict to update.\n :type initial: dict or list\n :param other: Dict to update from.\n :type other: Mapping\n :return: Updated dict.\n :rtype: dict\n \"\"\"\n for key, value in other.items():\n if isinstance(value, collections.Mapping):\n r = update_dict(initial.get(key, {}), value)\n initial[key] = r\n else:\n initial[key] = other[key]\n return initial\n\n\[email protected]_cache(maxsize=None)\ndef pull(file: str, locale: str = 'en') -> JSON:\n \"\"\"Pull the content from the JSON and memorize one.\n\n Opens JSON file ``file`` in the folder ``data/locale``\n and get content from the file and memorize ones using lru_cache.\n\n :param file: The 
name of file.\n :param locale: Locale.\n :return: The content of the file.\n :rtype: dict\n :raises UnsupportedLocale: if locale is not supported.\n\n :Example:\n\n >>> from mimesis.utils import pull\n >>> en = pull(file='datetime.json', locale='en')\n >>> isinstance(en, dict)\n True\n >>> en['day']['abbr'][0]\n 'Mon.'\n \"\"\"\n def get_data(locale_name: str) -> JSON:\n \"\"\"Pull JSON data from file.\n\n :param locale_name: Locale name.\n :return: Content of JSON file as dict.\n \"\"\"\n file_path = path.join(DATA_DIR, locale_name, file)\n # Needs explicit encoding for Windows\n with open(file_path, 'r', encoding='utf8') as f:\n return json.load(f)\n\n locale = locale.lower()\n\n if locale not in config.SUPPORTED_LOCALES:\n raise UnsupportedLocale(locale)\n\n master_locale = locale.split('-')[0]\n data = get_data(master_locale)\n\n # Handle sub-locales\n if '-' in locale:\n data = update_dict(data, get_data(locale))\n\n return data\n\n\ndef download_image(url: str = '', save_path: str = '',\n unverified_ctx: bool = False) -> Union[None, str]:\n \"\"\"Download image and save in current directory on local machine.\n\n :param url: URL to image.\n :param save_path: Saving path.\n :param unverified_ctx: Create unverified context.\n :return: Image name.\n :rtype: str or None\n \"\"\"\n if unverified_ctx:\n ssl._create_default_https_context = ssl._create_unverified_context\n\n if url is not None:\n image_name = url.rsplit('/')[-1]\n request.urlretrieve(url, save_path + image_name)\n return image_name\n return None\n\n\ndef setup_locale(locale: Optional[str] = None) -> str:\n \"\"\"Set up locale after pre-check.\n\n :param str locale: Locale\n :return: Locale in lowercase.\n :raises UnsupportedLocale: if locales is not supported.\n \"\"\"\n if not locale:\n return config.DEFAULT_LOCALE\n\n locale = locale.lower()\n if locale not in config.SUPPORTED_LOCALES:\n raise UnsupportedLocale(locale)\n\n return locale\n", "path": "mimesis/utils.py"}]}
| 1,790 | 129 |
gh_patches_debug_11035
|
rasdani/github-patches
|
git_diff
|
python-pillow__Pillow-821
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PyPy performance on test_image_point is awful
Hoisted from #476, test_image_point.py takes ~ 2 minutes to run, vs < 1 sec for cpython.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `profile-installed.py`
Content:
```
1 #!/usr/bin/env python
2 import nose
3 import os
4 import sys
5 import glob
6
7 import profile
8
9 # monkey with the path, removing the local directory but adding the Tests/
10 # directory for helper.py and the other local imports there.
11
12 del(sys.path[0])
13 sys.path.insert(0, os.path.abspath('./Tests'))
14
15 # if there's no test selected (mostly) choose a working default.
16 # Something is required, because if we import the tests from the local
17 # directory, once again, we've got the non-installed PIL in the way
18 if len(sys.argv) == 1:
19 sys.argv.extend(glob.glob('Tests/test*.py'))
20
21 # Make sure that nose doesn't muck with our paths.
22 if ('--no-path-adjustment' not in sys.argv) and ('-P' not in sys.argv):
23 sys.argv.insert(1, '--no-path-adjustment')
24
25 if 'NOSE_PROCESSES' not in os.environ:
26 for arg in sys.argv:
27 if '--processes' in arg:
28 break
29 else: # for
30 sys.argv.insert(1, '--processes=-1') # -1 == number of cores
31 sys.argv.insert(1, '--process-timeout=30')
32
33 if __name__ == '__main__':
34 profile.run("nose.main()", sort=2)
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/profile-installed.py b/profile-installed.py
--- a/profile-installed.py
+++ b/profile-installed.py
@@ -21,14 +21,6 @@
# Make sure that nose doesn't muck with our paths.
if ('--no-path-adjustment' not in sys.argv) and ('-P' not in sys.argv):
sys.argv.insert(1, '--no-path-adjustment')
-
-if 'NOSE_PROCESSES' not in os.environ:
- for arg in sys.argv:
- if '--processes' in arg:
- break
- else: # for
- sys.argv.insert(1, '--processes=-1') # -1 == number of cores
- sys.argv.insert(1, '--process-timeout=30')
if __name__ == '__main__':
profile.run("nose.main()", sort=2)
|
{"golden_diff": "diff --git a/profile-installed.py b/profile-installed.py\n--- a/profile-installed.py\n+++ b/profile-installed.py\n@@ -21,14 +21,6 @@\n # Make sure that nose doesn't muck with our paths.\n if ('--no-path-adjustment' not in sys.argv) and ('-P' not in sys.argv):\n sys.argv.insert(1, '--no-path-adjustment')\n-\n-if 'NOSE_PROCESSES' not in os.environ:\n- for arg in sys.argv:\n- if '--processes' in arg:\n- break\n- else: # for\n- sys.argv.insert(1, '--processes=-1') # -1 == number of cores\n- sys.argv.insert(1, '--process-timeout=30') \n \n if __name__ == '__main__':\n profile.run(\"nose.main()\", sort=2)\n", "issue": "PyPy performance on test_image_point is awful\nHoisted from #476, test_image_point.py takes ~ 2 minutes to run, vs < 1 sec for cpython.\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport nose\nimport os\nimport sys\nimport glob\n\nimport profile\n\n# monkey with the path, removing the local directory but adding the Tests/\n# directory for helper.py and the other local imports there.\n\ndel(sys.path[0])\nsys.path.insert(0, os.path.abspath('./Tests'))\n\n# if there's no test selected (mostly) choose a working default.\n# Something is required, because if we import the tests from the local\n# directory, once again, we've got the non-installed PIL in the way\nif len(sys.argv) == 1:\n sys.argv.extend(glob.glob('Tests/test*.py'))\n\n# Make sure that nose doesn't muck with our paths.\nif ('--no-path-adjustment' not in sys.argv) and ('-P' not in sys.argv):\n sys.argv.insert(1, '--no-path-adjustment')\n\nif 'NOSE_PROCESSES' not in os.environ:\n for arg in sys.argv:\n if '--processes' in arg:\n break\n else: # for\n sys.argv.insert(1, '--processes=-1') # -1 == number of cores\n sys.argv.insert(1, '--process-timeout=30') \n \nif __name__ == '__main__':\n profile.run(\"nose.main()\", sort=2)\n", "path": "profile-installed.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport nose\nimport os\nimport sys\nimport glob\n\nimport profile\n\n# monkey with the path, removing the local directory but adding the Tests/\n# directory for helper.py and the other local imports there.\n\ndel(sys.path[0])\nsys.path.insert(0, os.path.abspath('./Tests'))\n\n# if there's no test selected (mostly) choose a working default.\n# Something is required, because if we import the tests from the local\n# directory, once again, we've got the non-installed PIL in the way\nif len(sys.argv) == 1:\n sys.argv.extend(glob.glob('Tests/test*.py'))\n\n# Make sure that nose doesn't muck with our paths.\nif ('--no-path-adjustment' not in sys.argv) and ('-P' not in sys.argv):\n sys.argv.insert(1, '--no-path-adjustment')\n \nif __name__ == '__main__':\n profile.run(\"nose.main()\", sort=2)\n", "path": "profile-installed.py"}]}
| 647 | 194 |
gh_patches_debug_2448
|
rasdani/github-patches
|
git_diff
|
docker__docker-py-3200
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't create config object
Much like https://github.com/docker/docker-py/issues/2025 the config model is failing to create a new object due to 'name' KeyError
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "docker\models\configs.py", line 10, in __repr__
return f"<{self.__class__.__name__}: '{self.name}'>"
File "docker\models\configs.py", line 14, in name
return self.attrs['Spec']['Name']
```
This https://github.com/docker/docker-py/pull/2793 appears to be the fix that was implemented and should likely be implements for configs as well (if not other models that might have this issue)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/models/configs.py`
Content:
```
1 from ..api import APIClient
2 from .resource import Model, Collection
3
4
5 class Config(Model):
6 """A config."""
7 id_attribute = 'ID'
8
9 def __repr__(self):
10 return f"<{self.__class__.__name__}: '{self.name}'>"
11
12 @property
13 def name(self):
14 return self.attrs['Spec']['Name']
15
16 def remove(self):
17 """
18 Remove this config.
19
20 Raises:
21 :py:class:`docker.errors.APIError`
22 If config failed to remove.
23 """
24 return self.client.api.remove_config(self.id)
25
26
27 class ConfigCollection(Collection):
28 """Configs on the Docker server."""
29 model = Config
30
31 def create(self, **kwargs):
32 obj = self.client.api.create_config(**kwargs)
33 return self.prepare_model(obj)
34 create.__doc__ = APIClient.create_config.__doc__
35
36 def get(self, config_id):
37 """
38 Get a config.
39
40 Args:
41 config_id (str): Config ID.
42
43 Returns:
44 (:py:class:`Config`): The config.
45
46 Raises:
47 :py:class:`docker.errors.NotFound`
48 If the config does not exist.
49 :py:class:`docker.errors.APIError`
50 If the server returns an error.
51 """
52 return self.prepare_model(self.client.api.inspect_config(config_id))
53
54 def list(self, **kwargs):
55 """
56 List configs. Similar to the ``docker config ls`` command.
57
58 Args:
59 filters (dict): Server-side list filtering options.
60
61 Returns:
62 (list of :py:class:`Config`): The configs.
63
64 Raises:
65 :py:class:`docker.errors.APIError`
66 If the server returns an error.
67 """
68 resp = self.client.api.configs(**kwargs)
69 return [self.prepare_model(obj) for obj in resp]
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docker/models/configs.py b/docker/models/configs.py
--- a/docker/models/configs.py
+++ b/docker/models/configs.py
@@ -30,6 +30,7 @@
def create(self, **kwargs):
obj = self.client.api.create_config(**kwargs)
+ obj.setdefault("Spec", {})["Name"] = kwargs.get("name")
return self.prepare_model(obj)
create.__doc__ = APIClient.create_config.__doc__
|
{"golden_diff": "diff --git a/docker/models/configs.py b/docker/models/configs.py\n--- a/docker/models/configs.py\n+++ b/docker/models/configs.py\n@@ -30,6 +30,7 @@\n \n def create(self, **kwargs):\n obj = self.client.api.create_config(**kwargs)\n+ obj.setdefault(\"Spec\", {})[\"Name\"] = kwargs.get(\"name\")\n return self.prepare_model(obj)\n create.__doc__ = APIClient.create_config.__doc__\n", "issue": "Can't create config object\nMuch like https://github.com/docker/docker-py/issues/2025 the config model is failing to create a new object due to 'name' KeyError\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"docker\\models\\configs.py\", line 10, in __repr__\r\n return f\"<{self.__class__.__name__}: '{self.name}'>\"\r\n File \"docker\\models\\configs.py\", line 14, in name\r\n return self.attrs['Spec']['Name']\r\n```\r\n\r\nThis https://github.com/docker/docker-py/pull/2793 appears to be the fix that was implemented and should likely be implements for configs as well (if not other models that might have this issue)\n", "before_files": [{"content": "from ..api import APIClient\nfrom .resource import Model, Collection\n\n\nclass Config(Model):\n \"\"\"A config.\"\"\"\n id_attribute = 'ID'\n\n def __repr__(self):\n return f\"<{self.__class__.__name__}: '{self.name}'>\"\n\n @property\n def name(self):\n return self.attrs['Spec']['Name']\n\n def remove(self):\n \"\"\"\n Remove this config.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If config failed to remove.\n \"\"\"\n return self.client.api.remove_config(self.id)\n\n\nclass ConfigCollection(Collection):\n \"\"\"Configs on the Docker server.\"\"\"\n model = Config\n\n def create(self, **kwargs):\n obj = self.client.api.create_config(**kwargs)\n return self.prepare_model(obj)\n create.__doc__ = APIClient.create_config.__doc__\n\n def get(self, config_id):\n \"\"\"\n Get a config.\n\n Args:\n config_id (str): Config ID.\n\n Returns:\n (:py:class:`Config`): The config.\n\n Raises:\n :py:class:`docker.errors.NotFound`\n If the config does not exist.\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.prepare_model(self.client.api.inspect_config(config_id))\n\n def list(self, **kwargs):\n \"\"\"\n List configs. 
Similar to the ``docker config ls`` command.\n\n Args:\n filters (dict): Server-side list filtering options.\n\n Returns:\n (list of :py:class:`Config`): The configs.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n resp = self.client.api.configs(**kwargs)\n return [self.prepare_model(obj) for obj in resp]\n", "path": "docker/models/configs.py"}], "after_files": [{"content": "from ..api import APIClient\nfrom .resource import Model, Collection\n\n\nclass Config(Model):\n \"\"\"A config.\"\"\"\n id_attribute = 'ID'\n\n def __repr__(self):\n return f\"<{self.__class__.__name__}: '{self.name}'>\"\n\n @property\n def name(self):\n return self.attrs['Spec']['Name']\n\n def remove(self):\n \"\"\"\n Remove this config.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If config failed to remove.\n \"\"\"\n return self.client.api.remove_config(self.id)\n\n\nclass ConfigCollection(Collection):\n \"\"\"Configs on the Docker server.\"\"\"\n model = Config\n\n def create(self, **kwargs):\n obj = self.client.api.create_config(**kwargs)\n obj.setdefault(\"Spec\", {})[\"Name\"] = kwargs.get(\"name\")\n return self.prepare_model(obj)\n create.__doc__ = APIClient.create_config.__doc__\n\n def get(self, config_id):\n \"\"\"\n Get a config.\n\n Args:\n config_id (str): Config ID.\n\n Returns:\n (:py:class:`Config`): The config.\n\n Raises:\n :py:class:`docker.errors.NotFound`\n If the config does not exist.\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.prepare_model(self.client.api.inspect_config(config_id))\n\n def list(self, **kwargs):\n \"\"\"\n List configs. Similar to the ``docker config ls`` command.\n\n Args:\n filters (dict): Server-side list filtering options.\n\n Returns:\n (list of :py:class:`Config`): The configs.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n resp = self.client.api.configs(**kwargs)\n return [self.prepare_model(obj) for obj in resp]\n", "path": "docker/models/configs.py"}]}
| 948 | 101 |