Dataset columns:
- repo: string (856 distinct values)
- pull_number: int64 (3 to 127k)
- instance_id: string (length 12 to 58)
- issue_numbers: sequence (length 1 to 5)
- base_commit: string (length 40)
- patch: string (length 67 to 1.54M)
- test_patch: string (length 0 to 107M)
- problem_statement: string (length 3 to 307k)
- hints_text: string (length 0 to 908k)
- created_at: timestamp[s]
open-telemetry/opentelemetry-python-contrib
1,553
open-telemetry__opentelemetry-python-contrib-1553
[ "1041" ]
092d8c86bb8b66c959d898e7e74d1efa02a76b15
diff --git a/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py b/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py --- a/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py @@ -63,10 +63,9 @@ def response_hook(span, request_obj, response) import functools import types import typing - -# from urllib import response from http import client -from typing import Collection +from timeit import default_timer +from typing import Collection, Dict from urllib.request import ( # pylint: disable=no-name-in-module,import-error OpenerDirector, Request, @@ -83,7 +82,9 @@ def response_hook(span, request_obj, response) _SUPPRESS_INSTRUMENTATION_KEY, http_status_to_status_code, ) +from opentelemetry.metrics import Histogram, get_meter from opentelemetry.propagate import inject +from opentelemetry.semconv.metrics import MetricInstruments from opentelemetry.semconv.trace import SpanAttributes from opentelemetry.trace import Span, SpanKind, get_tracer from opentelemetry.trace.status import Status @@ -114,8 +115,15 @@ def _instrument(self, **kwargs): """ tracer_provider = kwargs.get("tracer_provider") tracer = get_tracer(__name__, __version__, tracer_provider) + + meter_provider = kwargs.get("meter_provider") + meter = get_meter(__name__, __version__, meter_provider) + + histograms = _create_client_histograms(meter) + _instrument( tracer, + histograms, request_hook=kwargs.get("request_hook"), response_hook=kwargs.get("response_hook"), ) @@ -132,6 +140,7 @@ def uninstrument_opener( def _instrument( tracer, + histograms: Dict[str, Histogram], request_hook: _RequestHookT = None, response_hook: _ResponseHookT = None, ): @@ -192,11 +201,13 @@ def _instrumented_open_call( context.set_value(_SUPPRESS_HTTP_INSTRUMENTATION_KEY, True) ) try: + start_time = default_timer() result = call_wrapped() # *** PROCEED except Exception as exc: # pylint: disable=W0703 exception = exc result = getattr(exc, "file", None) finally: + elapsed_time = round((default_timer() - start_time) * 1000) context.detach(token) if result is not None: @@ -214,6 +225,10 @@ def _instrumented_open_call( SpanAttributes.HTTP_FLAVOR ] = f"{ver_[:1]}.{ver_[:-1]}" + _record_histograms( + histograms, labels, request, result, elapsed_time + ) + if callable(response_hook): response_hook(span, request, result) @@ -248,3 +263,45 @@ def _uninstrument_from(instr_root, restore_as_bound_func=False): if restore_as_bound_func: original = types.MethodType(original, instr_root) setattr(instr_root, instr_func_name, original) + + +def _create_client_histograms(meter) -> Dict[str, Histogram]: + histograms = { + MetricInstruments.HTTP_CLIENT_DURATION: meter.create_histogram( + name=MetricInstruments.HTTP_CLIENT_DURATION, + unit="ms", + description="measures the duration outbound HTTP requests", + ), + MetricInstruments.HTTP_CLIENT_REQUEST_SIZE: meter.create_histogram( + name=MetricInstruments.HTTP_CLIENT_REQUEST_SIZE, + unit="By", + description="measures the size of HTTP request messages (compressed)", + ), + MetricInstruments.HTTP_CLIENT_RESPONSE_SIZE: meter.create_histogram( + name=MetricInstruments.HTTP_CLIENT_RESPONSE_SIZE, + unit="By", + description="measures the size of HTTP response messages (compressed)", + ), + } + + return histograms + + +def _record_histograms( + histograms, 
metric_attributes, request, response, elapsed_time +): + histograms[MetricInstruments.HTTP_CLIENT_DURATION].record( + elapsed_time, attributes=metric_attributes + ) + + data = getattr(request, "data", None) + request_size = 0 if data is None else len(data) + histograms[MetricInstruments.HTTP_CLIENT_REQUEST_SIZE].record( + request_size, attributes=metric_attributes + ) + + if response is not None: + response_size = int(response.headers.get("Content-Length", 0)) + histograms[MetricInstruments.HTTP_CLIENT_RESPONSE_SIZE].record( + response_size, attributes=metric_attributes + ) diff --git a/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/package.py b/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/package.py --- a/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/package.py +++ b/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/package.py @@ -14,3 +14,5 @@ _instruments = tuple() + +_supports_metrics = True
diff --git a/instrumentation/opentelemetry-instrumentation-urllib/tests/test_metrics_instrumentation.py b/instrumentation/opentelemetry-instrumentation-urllib/tests/test_metrics_instrumentation.py new file mode 100644 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-urllib/tests/test_metrics_instrumentation.py @@ -0,0 +1,246 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from timeit import default_timer +from typing import Optional, Union +from urllib import request +from urllib.parse import urlencode + +import httpretty + +from opentelemetry.instrumentation.urllib import ( # pylint: disable=no-name-in-module,import-error + URLLibInstrumentor, +) +from opentelemetry.sdk.metrics._internal.point import Metric +from opentelemetry.sdk.metrics.export import ( + HistogramDataPoint, + NumberDataPoint, +) +from opentelemetry.semconv.metrics import MetricInstruments +from opentelemetry.test.test_base import TestBase + + +class TestRequestsIntegration(TestBase): + URL = "http://httpbin.org/status/200" + URL_POST = "http://httpbin.org/post" + + def setUp(self): + super().setUp() + URLLibInstrumentor().instrument() + httpretty.enable() + httpretty.register_uri(httpretty.GET, self.URL, body=b"Hello!") + httpretty.register_uri( + httpretty.POST, self.URL_POST, body=b"Hello World!" + ) + + def tearDown(self): + super().tearDown() + URLLibInstrumentor().uninstrument() + httpretty.disable() + + def get_sorted_metrics(self): + resource_metrics = ( + self.memory_metrics_reader.get_metrics_data().resource_metrics + ) + + all_metrics = [] + for metrics in resource_metrics: + for scope_metrics in metrics.scope_metrics: + all_metrics.extend(scope_metrics.metrics) + + return self.sorted_metrics(all_metrics) + + @staticmethod + def sorted_metrics(metrics): + """ + Sorts metrics by metric name. 
+ """ + return sorted( + metrics, + key=lambda m: m.name, + ) + + def assert_metric_expected( + self, + metric: Metric, + expected_value: Union[int, float], + expected_attributes: dict, + est_delta: Optional[float] = None, + ): + data_point = next(iter(metric.data.data_points)) + + if isinstance(data_point, HistogramDataPoint): + self.assertEqual( + data_point.count, + 1, + ) + if est_delta is None: + self.assertEqual( + data_point.sum, + expected_value, + ) + else: + self.assertAlmostEqual( + data_point.sum, + expected_value, + delta=est_delta, + ) + elif isinstance(data_point, NumberDataPoint): + self.assertEqual( + data_point.value, + expected_value, + ) + + self.assertDictEqual( + expected_attributes, + dict(data_point.attributes), + ) + + def test_basic_metric(self): + start_time = default_timer() + with request.urlopen(self.URL) as result: + client_duration_estimated = (default_timer() - start_time) * 1000 + + metrics = self.get_sorted_metrics() + self.assertEqual(len(metrics), 3) + + ( + client_duration, + client_request_size, + client_response_size, + ) = metrics[:3] + + self.assertEqual( + client_duration.name, MetricInstruments.HTTP_CLIENT_DURATION + ) + self.assert_metric_expected( + client_duration, + client_duration_estimated, + { + "http.status_code": str(result.code), + "http.method": "GET", + "http.url": str(result.url), + "http.flavor": "1.1", + }, + est_delta=200, + ) + + # net.peer.name + + self.assertEqual( + client_request_size.name, + MetricInstruments.HTTP_CLIENT_REQUEST_SIZE, + ) + self.assert_metric_expected( + client_request_size, + 0, + { + "http.status_code": str(result.code), + "http.method": "GET", + "http.url": str(result.url), + "http.flavor": "1.1", + }, + ) + + self.assertEqual( + client_response_size.name, + MetricInstruments.HTTP_CLIENT_RESPONSE_SIZE, + ) + self.assert_metric_expected( + client_response_size, + result.length, + { + "http.status_code": str(result.code), + "http.method": "GET", + "http.url": str(result.url), + "http.flavor": "1.1", + }, + ) + + def test_basic_metric_request_not_empty(self): + data = {"header1": "value1", "header2": "value2"} + data_encoded = urlencode(data).encode() + + start_time = default_timer() + with request.urlopen(self.URL_POST, data=data_encoded) as result: + client_duration_estimated = (default_timer() - start_time) * 1000 + + metrics = self.get_sorted_metrics() + self.assertEqual(len(metrics), 3) + + ( + client_duration, + client_request_size, + client_response_size, + ) = metrics[:3] + + self.assertEqual( + client_duration.name, MetricInstruments.HTTP_CLIENT_DURATION + ) + self.assert_metric_expected( + client_duration, + client_duration_estimated, + { + "http.status_code": str(result.code), + "http.method": "POST", + "http.url": str(result.url), + "http.flavor": "1.1", + }, + est_delta=200, + ) + + self.assertEqual( + client_request_size.name, + MetricInstruments.HTTP_CLIENT_REQUEST_SIZE, + ) + self.assert_metric_expected( + client_request_size, + len(data_encoded), + { + "http.status_code": str(result.code), + "http.method": "POST", + "http.url": str(result.url), + "http.flavor": "1.1", + }, + ) + + self.assertEqual( + client_response_size.name, + MetricInstruments.HTTP_CLIENT_RESPONSE_SIZE, + ) + self.assert_metric_expected( + client_response_size, + result.length, + { + "http.status_code": str(result.code), + "http.method": "POST", + "http.url": str(result.url), + "http.flavor": "1.1", + }, + ) + + def test_metric_uninstrument(self): + with request.urlopen(self.URL): + metrics = self.get_sorted_metrics() + 
self.assertEqual(len(metrics), 3) + + URLLibInstrumentor().uninstrument() + with request.urlopen(self.URL): + metrics = self.get_sorted_metrics() + self.assertEqual(len(metrics), 3) + + for metric in metrics: + for point in list(metric.data.data_points): + self.assertEqual(point.count, 1)
[instrumentation-urllib] Restore metrics The urllib instrumentation used to produce metrics: https://github.com/open-telemetry/opentelemetry-python-contrib/blob/metrics/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py This issue is to restore that functionality.
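For context, a minimal sketch of how the restored metrics could be exercised once a patch like the one above is in place. The SDK wiring (a MeterProvider with an InMemoryMetricReader) and the target URL are assumptions for illustration, not part of the original issue:
```python
# Sketch only: assumes opentelemetry-sdk and the patched
# opentelemetry-instrumentation-urllib are installed; the URL is illustrative.
from urllib import request

from opentelemetry.instrumentation.urllib import URLLibInstrumentor
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import InMemoryMetricReader

reader = InMemoryMetricReader()
URLLibInstrumentor().instrument(
    meter_provider=MeterProvider(metric_readers=[reader])
)

with request.urlopen("http://httpbin.org/status/200") as response:
    response.read()

# The reader should now hold the duration, request-size and response-size
# histograms recorded for the instrumented urlopen call.
for resource_metrics in reader.get_metrics_data().resource_metrics:
    for scope_metrics in resource_metrics.scope_metrics:
        print([metric.name for metric in scope_metrics.metrics])
```
The three histogram names mirror the instruments created in the patch (HTTP client duration, request size, and response size).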
Can you please assign this to me? :)
2022-12-28T10:46:06
open-telemetry/opentelemetry-python-contrib
1,555
open-telemetry__opentelemetry-python-contrib-1555
[ "1539" ]
d1dec9220b620cfaf4a4f04fde3d5bfc6e183a27
diff --git a/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py b/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py --- a/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py @@ -126,6 +126,7 @@ def started(self, event: monitoring.CommandStartedEvent): statement = event.command_name if command: statement += " " + str(command) + collection = event.command.get(event.command_name) try: span = self._tracer.start_span(name, kind=SpanKind.CLIENT) @@ -135,6 +136,10 @@ def started(self, event: monitoring.CommandStartedEvent): ) span.set_attribute(SpanAttributes.DB_NAME, event.database_name) span.set_attribute(SpanAttributes.DB_STATEMENT, statement) + if collection: + span.set_attribute( + SpanAttributes.DB_MONGODB_COLLECTION, collection + ) if event.connection_id is not None: span.set_attribute( SpanAttributes.NET_PEER_NAME, event.connection_id[0]
diff --git a/tests/opentelemetry-docker-tests/tests/pymongo/test_pymongo_functional.py b/tests/opentelemetry-docker-tests/tests/pymongo/test_pymongo_functional.py --- a/tests/opentelemetry-docker-tests/tests/pymongo/test_pymongo_functional.py +++ b/tests/opentelemetry-docker-tests/tests/pymongo/test_pymongo_functional.py @@ -68,6 +68,10 @@ def validate_spans(self): self.assertEqual( pymongo_span.attributes[SpanAttributes.NET_PEER_PORT], MONGODB_PORT ) + self.assertEqual( + pymongo_span.attributes[SpanAttributes.DB_MONGODB_COLLECTION], + MONGODB_COLLECTION_NAME, + ) def test_insert(self): """Should create a child span for insert"""
pymongo is not collecting the property: db.mongodb.collection
According to the [specs](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md#call-level-attributes-for-specific-technologies), mongodb should capture "The collection being accessed within the database stated in db.name." and save it in `db.mongodb.collection`.
**Steps to reproduce** Instrument a client using PymongoInstrumentor(). Send a request to the db.
**What is the expected behavior?** Produce a span with a `db.mongodb.collection` value containing the collection name.
**What is the actual behavior?** Produce a span without `db.mongodb.collection`.
**Example:** Here is a simple code example:
```
PymongoInstrumentor().instrument()
client = MongoClient()

RECORD = {"test": "123"}
db = client["MongoDB_Database"]
collection = db["MongoDB_Collection"]
collection.find_one(RECORD)
```
and the result is missing the collection:
```
"attributes": {
    "db.system": "mongodb",
    "db.name": "MongoDB_Database",
    "db.statement": "find",
    "net.peer.name": "localhost",
    "net.peer.port": 27017
}
```
If you can - assign this to me, thanks :)
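As a side note, a minimal sketch of the extraction idea the patch above relies on: for MongoDB commands, the collection name is the value stored under the command-name key of the command document. The listener class below is illustrative and not part of the instrumentation:
```python
# Sketch only: uses pymongo's monitoring API; the listener name is hypothetical.
from pymongo import monitoring


class CollectionNameListener(monitoring.CommandListener):
    def started(self, event: monitoring.CommandStartedEvent) -> None:
        # e.g. {"find": "MongoDB_Collection", ...} -> "MongoDB_Collection"
        collection = event.command.get(event.command_name)
        if collection:
            # The instrumentation would record this as the
            # db.mongodb.collection span attribute.
            print(event.database_name, event.command_name, collection)

    def succeeded(self, event: monitoring.CommandSucceededEvent) -> None:
        pass

    def failed(self, event: monitoring.CommandFailedEvent) -> None:
        pass


monitoring.register(CollectionNameListener())
```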
2022-12-29T08:42:41
open-telemetry/opentelemetry-python-contrib
1,572
open-telemetry__opentelemetry-python-contrib-1572
[ "1548" ]
3770e574eb0497551eea9306e865046f93c55f3b
diff --git a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/__init__.py b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/__init__.py --- a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/__init__.py @@ -64,6 +64,8 @@ async def redis_get(): response_hook (Callable) - a function with extra user-defined logic to be performed after performing the request this function signature is: def response_hook(span: Span, instance: redis.connection.Connection, response) -> None +sanitize_query (Boolean) - default False, enable the Redis query sanitization + for example: .. code: python @@ -139,9 +141,11 @@ def _instrument( tracer, request_hook: _RequestHookT = None, response_hook: _ResponseHookT = None, + sanitize_query: bool = False, ): def _traced_execute_command(func, instance, args, kwargs): - query = _format_command_args(args) + query = _format_command_args(args, sanitize_query) + if len(args) > 0 and args[0]: name = args[0] else: @@ -169,7 +173,9 @@ def _traced_execute_pipeline(func, instance, args, kwargs): ) cmds = [ - _format_command_args(c.args if hasattr(c, "args") else c[0]) + _format_command_args( + c.args if hasattr(c, "args") else c[0], sanitize_query + ) for c in command_stack ] resource = "\n".join(cmds) @@ -281,6 +287,7 @@ def _instrument(self, **kwargs): tracer, request_hook=kwargs.get("request_hook"), response_hook=kwargs.get("response_hook"), + sanitize_query=kwargs.get("sanitize_query", False), ) def _uninstrument(self, **kwargs): diff --git a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py --- a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py +++ b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py @@ -48,11 +48,27 @@ def _extract_conn_attributes(conn_kwargs): return attributes -def _format_command_args(args): - """Format command arguments and trim them as needed""" - value_max_len = 100 - value_too_long_mark = "..." +def _format_command_args(args, sanitize_query): + """Format and sanitize command arguments, and trim them as needed""" cmd_max_len = 1000 + value_too_long_mark = "..." + if sanitize_query: + # Sanitized query format: "COMMAND ? ?" + args_length = len(args) + if args_length > 0: + out = [str(args[0])] + ["?"] * (args_length - 1) + out_str = " ".join(out) + + if len(out_str) > cmd_max_len: + out_str = ( + out_str[: cmd_max_len - len(value_too_long_mark)] + + value_too_long_mark + ) + else: + out_str = "" + return out_str + + value_max_len = 100 length = 0 out = [] for arg in args:
diff --git a/instrumentation/opentelemetry-instrumentation-redis/tests/test_redis.py b/instrumentation/opentelemetry-instrumentation-redis/tests/test_redis.py --- a/instrumentation/opentelemetry-instrumentation-redis/tests/test_redis.py +++ b/instrumentation/opentelemetry-instrumentation-redis/tests/test_redis.py @@ -148,6 +148,40 @@ def request_hook(span, conn, args, kwargs): span = spans[0] self.assertEqual(span.attributes.get(custom_attribute_name), "GET") + def test_query_sanitizer_enabled(self): + redis_client = redis.Redis() + connection = redis.connection.Connection() + redis_client.connection = connection + + RedisInstrumentor().uninstrument() + RedisInstrumentor().instrument( + tracer_provider=self.tracer_provider, + sanitize_query=True, + ) + + with mock.patch.object(redis_client, "connection"): + redis_client.set("key", "value") + + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 1) + + span = spans[0] + self.assertEqual(span.attributes.get("db.statement"), "SET ? ?") + + def test_query_sanitizer_disabled(self): + redis_client = redis.Redis() + connection = redis.connection.Connection() + redis_client.connection = connection + + with mock.patch.object(redis_client, "connection"): + redis_client.set("key", "value") + + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 1) + + span = spans[0] + self.assertEqual(span.attributes.get("db.statement"), "SET key value") + def test_no_op_tracer_provider(self): RedisInstrumentor().uninstrument() tracer_provider = trace.NoOpTracerProvider() diff --git a/tests/opentelemetry-docker-tests/tests/redis/test_redis_functional.py b/tests/opentelemetry-docker-tests/tests/redis/test_redis_functional.py --- a/tests/opentelemetry-docker-tests/tests/redis/test_redis_functional.py +++ b/tests/opentelemetry-docker-tests/tests/redis/test_redis_functional.py @@ -45,6 +45,27 @@ def _check_span(self, span, name): ) self.assertEqual(span.attributes[SpanAttributes.NET_PEER_PORT], 6379) + def test_long_command_sanitized(self): + RedisInstrumentor().uninstrument() + RedisInstrumentor().instrument( + tracer_provider=self.tracer_provider, sanitize_query=True + ) + + self.redis_client.mget(*range(2000)) + + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self._check_span(span, "MGET") + self.assertTrue( + span.attributes.get(SpanAttributes.DB_STATEMENT).startswith( + "MGET ? ? ? ?" + ) + ) + self.assertTrue( + span.attributes.get(SpanAttributes.DB_STATEMENT).endswith("...") + ) + def test_long_command(self): self.redis_client.mget(*range(1000)) @@ -61,6 +82,22 @@ def test_long_command(self): span.attributes.get(SpanAttributes.DB_STATEMENT).endswith("...") ) + def test_basics_sanitized(self): + RedisInstrumentor().uninstrument() + RedisInstrumentor().instrument( + tracer_provider=self.tracer_provider, sanitize_query=True + ) + + self.assertIsNone(self.redis_client.get("cheese")) + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self._check_span(span, "GET") + self.assertEqual( + span.attributes.get(SpanAttributes.DB_STATEMENT), "GET ?" 
+ ) + self.assertEqual(span.attributes.get("db.redis.args_length"), 2) + def test_basics(self): self.assertIsNone(self.redis_client.get("cheese")) spans = self.memory_exporter.get_finished_spans() @@ -72,6 +109,28 @@ def test_basics(self): ) self.assertEqual(span.attributes.get("db.redis.args_length"), 2) + def test_pipeline_traced_sanitized(self): + RedisInstrumentor().uninstrument() + RedisInstrumentor().instrument( + tracer_provider=self.tracer_provider, sanitize_query=True + ) + + with self.redis_client.pipeline(transaction=False) as pipeline: + pipeline.set("blah", 32) + pipeline.rpush("foo", "éé") + pipeline.hgetall("xxx") + pipeline.execute() + + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self._check_span(span, "SET RPUSH HGETALL") + self.assertEqual( + span.attributes.get(SpanAttributes.DB_STATEMENT), + "SET ? ?\nRPUSH ? ?\nHGETALL ?", + ) + self.assertEqual(span.attributes.get("db.redis.pipeline_length"), 3) + def test_pipeline_traced(self): with self.redis_client.pipeline(transaction=False) as pipeline: pipeline.set("blah", 32) @@ -89,6 +148,27 @@ def test_pipeline_traced(self): ) self.assertEqual(span.attributes.get("db.redis.pipeline_length"), 3) + def test_pipeline_immediate_sanitized(self): + RedisInstrumentor().uninstrument() + RedisInstrumentor().instrument( + tracer_provider=self.tracer_provider, sanitize_query=True + ) + + with self.redis_client.pipeline() as pipeline: + pipeline.set("a", 1) + pipeline.immediate_execute_command("SET", "b", 2) + pipeline.execute() + + spans = self.memory_exporter.get_finished_spans() + # expecting two separate spans here, rather than a + # single span for the whole pipeline + self.assertEqual(len(spans), 2) + span = spans[0] + self._check_span(span, "SET") + self.assertEqual( + span.attributes.get(SpanAttributes.DB_STATEMENT), "SET ? ?" + ) + def test_pipeline_immediate(self): with self.redis_client.pipeline() as pipeline: pipeline.set("a", 1)
Implement sensitive data sanitization for redis instrumentation
Aggregated by #1543
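For reference, a minimal usage sketch of the sanitization option added in the patch above; it assumes a Redis server reachable on localhost and an already configured tracer provider:
```python
# Sketch only: the Redis connection defaults and tracer setup are assumed.
import redis

from opentelemetry.instrumentation.redis import RedisInstrumentor

# Opt in to query sanitization (defaults to False in the patch above).
RedisInstrumentor().instrument(sanitize_query=True)

client = redis.Redis()
client.set("key", "value")
# The span's db.statement attribute is recorded as "SET ? ?" rather than
# "SET key value".
```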
2023-01-10T15:59:02
open-telemetry/opentelemetry-python-contrib
1,580
open-telemetry__opentelemetry-python-contrib-1580
[ "1437" ]
e23dd5c25ad01bfc7d36f1862a3abc70901a5780
diff --git a/scripts/generate_instrumentation_metapackage.py b/scripts/generate_instrumentation_metapackage.py --- a/scripts/generate_instrumentation_metapackage.py +++ b/scripts/generate_instrumentation_metapackage.py @@ -71,7 +71,9 @@ def main(): root_path, "opentelemetry-contrib-instrumentations", "pyproject.toml" ) - deps = [f"{pkg}=={version}" for pkg, version in dependencies] + deps = [ + f"{pkg.strip()}=={version.strip()}" for pkg, version in dependencies + ] with open(pyproject_toml_path, "rb") as file: pyproject_toml = tomli.load(file) diff --git a/scripts/update_sha.py b/scripts/update_sha.py new file mode 100644 --- /dev/null +++ b/scripts/update_sha.py @@ -0,0 +1,60 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# pylint: disable=import-error,unspecified-encoding + +import argparse + +import requests +from ruamel.yaml import YAML + +API_URL = ( + "https://api.github.com/repos/open-telemetry/opentelemetry-python/commits/" +) +WORKFLOW_FILE = ".github/workflows/test.yml" + + +def get_sha(branch): + url = API_URL + branch + response = requests.get(url) + response.raise_for_status() + return response.json()["sha"] + + +def update_sha(sha): + yaml = YAML() + yaml.preserve_quotes = True + with open(WORKFLOW_FILE, "r") as file: + workflow = yaml.load(file) + workflow["env"]["CORE_REPO_SHA"] = sha + with open(WORKFLOW_FILE, "w") as file: + yaml.dump(workflow, file) + + +def main(): + args = parse_args() + sha = get_sha(args.branch) + update_sha(sha) + + +def parse_args(): + parser = argparse.ArgumentParser( + description="Updates the SHA in the workflow file" + ) + parser.add_argument("-b", "--branch", help="branch to use") + return parser.parse_args() + + +if __name__ == "__main__": + main()
Automate release workflow Similar to [this](https://github.com/open-telemetry/opentelemetry-python/pull/2649) for the core repo, automate the release workflow for contrib. This might be a bit tricky if we still want to require builds to pass for release branches (since the relationship between the core and contrib releases requires changing SHAs). We can either 1. not require builds to pass for releases, or 2. change the SHAs automatically as part of the release workflow. @srikanthccv @ocelotl
@lzchen, here is what I am thinking about updating the SHAs. We will create a new workflow that runs a script to update the SHA automatically. This script takes the branch name for core/contrib and updates `test.yml` with the latest commit ID. First, we will let the bot create the release branches and then run another workflow to update the SHA. What do you think? @srikanthccv
Would a new PR be created to update the SHA on the release branches, or will that be committed automatically?
The way I was thinking was the prepare-release workflow creates PRs in core and contrib. And then you would add a label `update-sha` which runs another workflow, and the bot updates the SHA for its corresponding core/contrib repo.
Adding a label makes sense to me. So the flow for maintainers when releasing would be:
1. Run the release workflow for core; 2 PRs will be created (one against the release branch, one against main)
2. Run the release workflow for contrib (when we automate it eventually); 2 PRs will be created as well
3. Add the `update-sha` label to the core repo release branch PR; the OT bot will pull the latest SHA from the contrib release branch PR and update `test.yml`. Builds should pass now for core.
4. Add the `update-sha` label to the contrib repo release branch PR; the OT bot will pull the latest SHA from the core release branch PR and update `test.yml`. Builds should pass now for contrib.
5. Merge both PRs.
6. Run the release workflow and continue the workflow as [is](https://github.com/open-telemetry/opentelemetry-python/blob/main/RELEASING.md#making-the-release)
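To make the SHA-update step concrete, here is a minimal sketch of what the `update_sha.py` script in the patch above does; it assumes the `requests` and `ruamel.yaml` dependencies, that it runs from the contrib repo root, and the branch name is illustrative:
```python
# Sketch only: condensed from scripts/update_sha.py in the patch above.
import requests
from ruamel.yaml import YAML

BRANCH = "main"  # illustrative; the real script takes -b/--branch
WORKFLOW_FILE = ".github/workflows/test.yml"

# Look up the latest commit SHA of the core repo on the given branch.
response = requests.get(
    "https://api.github.com/repos/open-telemetry/opentelemetry-python/commits/"
    + BRANCH
)
response.raise_for_status()
sha = response.json()["sha"]

# Rewrite env.CORE_REPO_SHA in the contrib test workflow, preserving quoting.
yaml = YAML()
yaml.preserve_quotes = True
with open(WORKFLOW_FILE) as file:
    workflow = yaml.load(file)
workflow["env"]["CORE_REPO_SHA"] = sha
with open(WORKFLOW_FILE, "w") as file:
    yaml.dump(workflow, file)
```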
2023-01-15T04:14:43
open-telemetry/opentelemetry-python-contrib
1,581
open-telemetry__opentelemetry-python-contrib-1581
[ "1163" ]
a300d65ccfac0eba0b5c54d6bdc05fa4207dd812
diff --git a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py --- a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py @@ -186,3 +186,4 @@ def _uninstrument(self, **kwargs): unwrap(Engine, "connect") if parse_version(sqlalchemy.__version__).release >= (1, 4): unwrap(sqlalchemy.ext.asyncio, "create_async_engine") + EngineTracer.remove_all_event_listeners() diff --git a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py --- a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py +++ b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py @@ -14,7 +14,10 @@ import os import re -from sqlalchemy.event import listen # pylint: disable=no-name-in-module +from sqlalchemy.event import ( # pylint: disable=no-name-in-module + listen, + remove, +) from opentelemetry import trace from opentelemetry.instrumentation.sqlalchemy.package import ( @@ -95,6 +98,8 @@ def _wrap_connect_internal(func, module, args, kwargs): class EngineTracer: + _remove_event_listener_params = [] + def __init__( self, tracer, engine, enable_commenter=False, commenter_options=None ): @@ -105,11 +110,24 @@ def __init__( self.commenter_options = commenter_options if commenter_options else {} self._leading_comment_remover = re.compile(r"^/\*.*?\*/") - listen( + self._register_event_listener( engine, "before_cursor_execute", self._before_cur_exec, retval=True ) - listen(engine, "after_cursor_execute", _after_cur_exec) - listen(engine, "handle_error", _handle_error) + self._register_event_listener( + engine, "after_cursor_execute", _after_cur_exec + ) + self._register_event_listener(engine, "handle_error", _handle_error) + + @classmethod + def _register_event_listener(cls, target, identifier, func, *args, **kw): + listen(target, identifier, func, *args, **kw) + cls._remove_event_listener_params.append((target, identifier, func)) + + @classmethod + def remove_all_event_listeners(cls): + for remove_params in cls._remove_event_listener_params: + remove(*remove_params) + cls._remove_event_listener_params.clear() def _operation_name(self, db_name, statement): parts = []
diff --git a/instrumentation/opentelemetry-instrumentation-sqlalchemy/tests/test_sqlalchemy.py b/instrumentation/opentelemetry-instrumentation-sqlalchemy/tests/test_sqlalchemy.py --- a/instrumentation/opentelemetry-instrumentation-sqlalchemy/tests/test_sqlalchemy.py +++ b/instrumentation/opentelemetry-instrumentation-sqlalchemy/tests/test_sqlalchemy.py @@ -248,12 +248,41 @@ def test_uninstrument(self): self.memory_exporter.clear() SQLAlchemyInstrumentor().uninstrument() + cnx.execute("SELECT 1 + 1;").fetchall() engine2 = create_engine("sqlite:///:memory:") cnx2 = engine2.connect() cnx2.execute("SELECT 2 + 2;").fetchall() spans = self.memory_exporter.get_finished_spans() self.assertEqual(len(spans), 0) + SQLAlchemyInstrumentor().instrument( + engine=engine, + tracer_provider=self.tracer_provider, + ) + cnx = engine.connect() + cnx.execute("SELECT 1 + 1;").fetchall() + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 2) + + def test_uninstrument_without_engine(self): + SQLAlchemyInstrumentor().instrument( + tracer_provider=self.tracer_provider + ) + from sqlalchemy import create_engine + + engine = create_engine("sqlite:///:memory:") + + cnx = engine.connect() + cnx.execute("SELECT 1 + 1;").fetchall() + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 2) + + self.memory_exporter.clear() + SQLAlchemyInstrumentor().uninstrument() + cnx.execute("SELECT 1 + 1;").fetchall() + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 0) + def test_no_op_tracer_provider(self): engine = create_engine("sqlite:///:memory:") SQLAlchemyInstrumentor().instrument(
Sqlalchemy uninstrument function doesn't work as expected
**Describe your environment** Python version: 3.9
**Steps to reproduce** Create a SQLAlchemy instrumentation and call the `uninstrument` function. Example:
```python
db_url = os.environ['DATABASE_URI']
engine = create_engine(db_url)
instrumentation = SQLAlchemyInstrumentor()
instrumentation.instrument(
    engine=engine
)

def set_interval(func, sec):
    def func_wrapper():
        # set_interval(func, sec)
        func()
    t = threading.Timer(sec, func_wrapper)
    t.start()
    return t

def instrument():
    print("instrument")
    instrumentation.instrument_app(app, engine)

def uninstrument():
    print("uninstrument")
    instrumentation.uninstrument()

set_interval(instrument, 1)
set_interval(uninstrument, 20)
```
**What is the expected behavior?** No spans are collected after uninstrumenting.
**What is the actual behavior?** Spans are still recorded and collected.
I don't think this is still relevant; validated by this PR: #1471. On a side note, you should call `instrument()` instead of `instrument_app()` when instrumenting. @liorzmetis please share a fully reproducible example that still has the issue. I think I get the problem. The problem is with the Engine: inside the Engine we call listen on events: https://github.com/open-telemetry/opentelemetry-python-contrib/blob/c92ba14316a90db2a8e8a7702aa21ce114a581a5/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py#L108 But nothing calls remove on the events after uninstrument, and in the uninstrument test I created a new engine after uninstrumenting, so the test missed this. I will fix this.
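A minimal sketch of the listen/remove pairing the fix builds on, assuming SQLAlchemy 1.4-style string execution and an in-memory SQLite engine; the handler below is illustrative, not the instrumentation's real hook:
```python
# Sketch only: listeners registered with listen() keep firing until
# remove() is called with the same target/identifier/function.
from sqlalchemy import create_engine
from sqlalchemy.event import listen, remove


def on_before_cursor_execute(
    conn, cursor, statement, parameters, context, executemany
):
    print("listener fired for:", statement)


engine = create_engine("sqlite:///:memory:")
listen(engine, "before_cursor_execute", on_before_cursor_execute)

engine.connect().execute("SELECT 1 + 1;")  # listener fires

# Without a matching remove(), "uninstrumenting" leaves the listener active.
remove(engine, "before_cursor_execute", on_before_cursor_execute)
engine.connect().execute("SELECT 1 + 1;")  # listener no longer fires
```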
2023-01-15T14:52:54
open-telemetry/opentelemetry-python-contrib
1,584
open-telemetry__opentelemetry-python-contrib-1584
[ "1372" ]
afd842899700af49fe2825e4afd8fa615f5cf8d5
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -54,7 +54,13 @@ if isdir(join(sdk_ext, f)) ] -sys.path[:0] = exp_dirs + instr_dirs + sdk_ext_dirs + prop_dirs +resource = "../resource" +resource_dirs = [ + os.path.abspath("/".join(["../resource", f, "src"])) + for f in listdir(resource) + if isdir(join(resource, f)) +] +sys.path[:0] = exp_dirs + instr_dirs + sdk_ext_dirs + prop_dirs + resource_dirs # -- Project information ----------------------------------------------------- diff --git a/resource/opentelemetry-resource-detector-container/src/opentelemetry/resource/detector/container/__init__.py b/resource/opentelemetry-resource-detector-container/src/opentelemetry/resource/detector/container/__init__.py new file mode 100644 --- /dev/null +++ b/resource/opentelemetry-resource-detector-container/src/opentelemetry/resource/detector/container/__init__.py @@ -0,0 +1,95 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from logging import getLogger + +from opentelemetry.sdk.resources import Resource, ResourceDetector +from opentelemetry.semconv.resource import ResourceAttributes + +logger = getLogger(__name__) +_DEFAULT_CGROUP_V1_PATH = "/proc/self/cgroup" +_DEFAULT_CGROUP_V2_PATH = "/proc/self/mountinfo" +_CONTAINER_ID_LENGTH = 64 + + +def _get_container_id_v1(): + container_id = None + try: + with open( + _DEFAULT_CGROUP_V1_PATH, encoding="utf8" + ) as container_info_file: + for raw_line in container_info_file.readlines(): + line = raw_line.strip() + if len(line) > _CONTAINER_ID_LENGTH: + container_id = line[-_CONTAINER_ID_LENGTH:] + break + except FileNotFoundError as exception: + logger.warning("Failed to get container id. Exception: %s", exception) + return container_id + + +def _get_container_id_v2(): + container_id = None + try: + with open( + _DEFAULT_CGROUP_V2_PATH, encoding="utf8" + ) as container_info_file: + for raw_line in container_info_file.readlines(): + line = raw_line.strip() + if any( + key_word in line for key_word in ["containers", "hostname"] + ): + container_id_list = [ + id_ + for id_ in line.split("/") + if len(id_) == _CONTAINER_ID_LENGTH + ] + if len(container_id_list) > 0: + container_id = container_id_list[0] + break + + except FileNotFoundError as exception: + logger.warning("Failed to get container id. 
Exception: %s", exception) + return container_id + + +def _get_container_id(): + return _get_container_id_v1() or _get_container_id_v2() + + +class ContainerResourceDetector(ResourceDetector): + """Detects container.id only available when app is running inside the + docker container and return it in a Resource + """ + + def detect(self) -> "Resource": + try: + container_id = _get_container_id() + resource = Resource.get_empty() + if container_id: + resource = resource.merge( + Resource({ResourceAttributes.CONTAINER_ID: container_id}) + ) + return resource + + # pylint: disable=broad-except + except Exception as exception: + logger.warning( + "%s Resource Detection failed silently: %s", + self.__class__.__name__, + exception, + ) + if self.raise_on_error: + raise exception + return Resource.get_empty() diff --git a/resource/opentelemetry-resource-detector-container/src/opentelemetry/resource/detector/container/version.py b/resource/opentelemetry-resource-detector-container/src/opentelemetry/resource/detector/container/version.py new file mode 100644 --- /dev/null +++ b/resource/opentelemetry-resource-detector-container/src/opentelemetry/resource/detector/container/version.py @@ -0,0 +1,15 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__version__ = "0.38b0.dev"
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -24,7 +24,7 @@ jobs: fail-fast: false # ensures the entire test matrix is run, even if one permutation fails matrix: python-version: [ py37, py38, py39, py310, py311, pypy3 ] - package: ["instrumentation", "distro", "exporter", "sdkextension", "propagator"] + package: ["instrumentation", "distro", "exporter", "sdkextension", "propagator", "resource"] os: [ ubuntu-20.04 ] steps: - name: Checkout Contrib Repo @ SHA - ${{ github.sha }} diff --git a/resource/opentelemetry-resource-detector-container/tests/__init__.py b/resource/opentelemetry-resource-detector-container/tests/__init__.py new file mode 100644 diff --git a/resource/opentelemetry-resource-detector-container/tests/test_container.py b/resource/opentelemetry-resource-detector-container/tests/test_container.py new file mode 100644 --- /dev/null +++ b/resource/opentelemetry-resource-detector-container/tests/test_container.py @@ -0,0 +1,146 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from unittest.mock import mock_open, patch + +from opentelemetry import trace as trace_api +from opentelemetry.resource.detector.container import ContainerResourceDetector +from opentelemetry.sdk.resources import get_aggregated_resources +from opentelemetry.semconv.resource import ResourceAttributes +from opentelemetry.test.test_base import TestBase + +MockContainerResourceAttributes = { + ResourceAttributes.CONTAINER_ID: "7be92808767a667f35c8505cbf40d14e931ef6db5b0210329cf193b15ba9d605", +} + + +class ContainerResourceDetectorTest(TestBase): + @patch( + "builtins.open", + new_callable=mock_open, + read_data=f"""14:name=systemd:/docker/{MockContainerResourceAttributes[ResourceAttributes.CONTAINER_ID]} + 13:rdma:/ + 12:pids:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked + 11:hugetlb:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked + 10:net_prio:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked + 9:perf_event:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked + 8:net_cls:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked + 7:freezer:/docker/ + 6:devices:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked + 5:memory:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked + 4:blkio:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked + 3:cpuacct:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked + 2:cpu:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked + 1:cpuset:/docker/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked + """, + ) + def test_container_id_detect_from_cgroup_file(self, mock_cgroup_file): + actual = ContainerResourceDetector().detect() + self.assertDictEqual( + actual.attributes.copy(), MockContainerResourceAttributes + ) + + @patch( + 
"opentelemetry.resource.detector.container._get_container_id_v1", + return_value=None, + ) + @patch( + "builtins.open", + new_callable=mock_open, + read_data=f""" + 608 607 0:183 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw + 609 607 0:184 / /dev rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 + 610 609 0:185 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666 + 611 607 0:186 / /sys ro,nosuid,nodev,noexec,relatime - sysfs sysfs ro + 612 611 0:29 / /sys/fs/cgroup ro,nosuid,nodev,noexec,relatime - cgroup2 cgroup rw + 613 609 0:182 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw + 614 609 0:187 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k + 615 607 254:1 /docker/containers/{MockContainerResourceAttributes[ResourceAttributes.CONTAINER_ID]}/resolv.conf /etc/resolv.conf rw,relatime - ext4 /dev/vda1 rw + 616 607 254:1 /docker/containers/{MockContainerResourceAttributes[ResourceAttributes.CONTAINER_ID]}/hostname /etc/hostname rw,relatime - ext4 /dev/vda1 rw + 617 607 254:1 /docker/containers/bogusContainerIdThatShouldNotBeOneSetBecauseTheFirstOneWasPicked/hosts /etc/hosts rw,relatime - ext4 /dev/vda1 rw + 618 607 0:131 /Users/sankmeht/development/otel/opentelemetry-python /development/otel/opentelemetry-python rw,nosuid,nodev,relatime - fuse.grpcfuse grpcfuse rw,user_id=0,group_id=0,allow_other,max_read=1048576 + 619 607 0:131 /Users/sankmeht/development/otel/opentelemetry-python-contrib /development/otel/opentelemetry-python-contrib rw,nosuid,nodev,relatime - fuse.grpcfuse grpcfuse rw,user_id=0,group_id=0,allow_other,max_read=1048576 + 519 609 0:185 /0 /dev/console rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666 + 520 608 0:183 /bus /proc/bus ro,nosuid,nodev,noexec,relatime - proc proc rw + 521 608 0:183 /fs /proc/fs ro,nosuid,nodev,noexec,relatime - proc proc rw + 522 608 0:183 /irq /proc/irq ro,nosuid,nodev,noexec,relatime - proc proc rw + 523 608 0:183 /sys /proc/sys ro,nosuid,nodev,noexec,relatime - proc proc rw + 524 608 0:183 /sysrq-trigger /proc/sysrq-trigger ro,nosuid,nodev,noexec,relatime - proc proc rw + 525 608 0:212 / /proc/acpi ro,relatime - tmpfs tmpfs ro + 526 608 0:184 /null /proc/kcore rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 + 527 608 0:184 /null /proc/keys rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 + 528 608 0:184 /null /proc/timer_list rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 + 529 611 0:213 / /sys/firmware ro,relatime - tmpfs tmpfs ro + """, + ) + def test_container_id_detect_from_mountinfo_file( + self, mock_get_container_id_v1, mock_cgroup_file + ): + actual = ContainerResourceDetector().detect() + self.assertDictEqual( + actual.attributes.copy(), MockContainerResourceAttributes + ) + + @patch( + "opentelemetry.resource.detector.container._get_container_id", + return_value=MockContainerResourceAttributes[ + ResourceAttributes.CONTAINER_ID + ], + ) + def test_container_id_as_span_attribute(self, mock_cgroup_file): + tracer_provider, exporter = self.create_tracer_provider( + resource=get_aggregated_resources([ContainerResourceDetector()]) + ) + tracer = tracer_provider.get_tracer(__name__) + + with tracer.start_as_current_span( + "test", kind=trace_api.SpanKind.SERVER + ) as _: + pass + + span_list = exporter.get_finished_spans() + self.assertEqual( + span_list[0].resource.attributes["container.id"], + MockContainerResourceAttributes[ResourceAttributes.CONTAINER_ID], + ) + + @patch( + "opentelemetry.resource.detector.container._get_container_id", + 
return_value=MockContainerResourceAttributes[ + ResourceAttributes.CONTAINER_ID + ], + ) + def test_container_id_detect_from_cgroup(self, mock_get_container_id): + actual = ContainerResourceDetector().detect() + self.assertDictEqual( + actual.attributes.copy(), MockContainerResourceAttributes + ) + + @patch( + "opentelemetry.resource.detector.container._get_container_id_v1", + return_value=None, + ) + @patch( + "opentelemetry.resource.detector.container._get_container_id_v2", + return_value=MockContainerResourceAttributes[ + ResourceAttributes.CONTAINER_ID + ], + ) + def test_container_id_detect_from_mount_info( + self, mock_get_container_id_v1, mock_get_container_id_v2 + ): + actual = ContainerResourceDetector().detect() + self.assertDictEqual( + actual.attributes.copy(), MockContainerResourceAttributes + )
Resource Detection for container properties (e.g. `container.id`)
**Is your feature request related to a problem?** Detecting the id of the container that holds the service instrumented with otel allows end-users to correlate container issues with service issues.
**Describe the solution you'd like** I see some of that implemented in the AWS Extension, but I was wondering if there's a way to have this standalone.
**Describe alternatives you've considered** Injecting the container id via an environment variable is always possible.
**Additional context** Semantic conventions for container resource: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/container.md A similar implementation exists for Java, JS, .NET & Go already. The JS one is the most advanced since it also works with containers when cgroup v2 is used: https://github.com/open-telemetry/opentelemetry-js-contrib/blob/main/detectors/node/opentelemetry-resource-detector-container/src/detectors/ContainerDetector.ts We are also currently trying at opentelemetry.io to have per-language pages on resources and used container detection as an example besides using the environment variable: https://github.com/open-telemetry/opentelemetry.io/issues/1773 and https://opentelemetry.io/docs/instrumentation/js/resources/
This seems to be a good feature request. Here is my approach to implement this feature:
- create a `ContainerResourceDetector` class in a separate module which extends the `ResourceDetector` class from the API and overrides the `detect()` method, which returns a Resource object.
- in each framework, set the tracer_provider like this:
```
trace.set_tracer_provider(
    TracerProvider(
        resource=get_aggregated_resources(
            [
                ContainerResourceDetector(),
            ]
        ),
    )
)
```
There is an issue with this approach:
- It should work when a tracer_provider is not provided before `set_tracer_provider()` is called in a framework. But in the case where a `tracer_provider` is created outside and then passed to the Instrumentor, there is no way to set the tracer's resource with my resources (collected by ContainerResourceDetector). Can anyone help me with this issue?
This is the comment I have received from @srikanthccv for the above issue: `It is not allowed to be set at a later point in time, but you could implement an entry point kind of thing. I realized this is not supported yet by SDK and spec so I opened [this issue](https://github.com/open-telemetry/opentelemetry-specification/issues/2948) I think this is an important discussion that should take place in the issue itself for broad visibility.` @ocelotl : tagging you here as well
The ResourceDetection is independent of library instrumentation. You don't need all these issues opened for each framework. Users don't set the tracer provider in each framework separately. It happens once, and the user is expected to provide the `ContainerResourceDetector` when setting the provider manually, and via the env (achieved through the entry point) when done through auto instrumentation.
Hi @srikanthccv , in case the user is not providing the tracer_provider, I was thinking to create one inside the `_instrument()` function with `ContainerResourceDetector` passed as an argument so that this `container.id` will automatically be available in spans. I understand that the ResourceDetector module is independent of library instrumentation, but I wanted to use this module in each framework inside instrumentation only. That was the reason why I had created separate issues for each framework. Also, in case of the ENV variable approach there are 2 problems:
- specification may/may not approve it as it should be compatible to other SIGs as well or it can take a long time before they do it. (Diego also agreed to this point) What is the alternate option in that case?
- if we specify the `OTEL_RESOURCE_DETECTORS=ContainerResourceDetector` in that case, we have to parse this ENV variable to create object of that class and call its detect() in Resource.Create() in SDK when the actual implementation lies in contrib repo. I believe this is not the right approach.
Please do let me know if you think I have misunderstood anything here.
> I was thinking to create one inside _instrument() function with ContainerResourceDetector passed as argument so that this container.id will automatically be available in spans

No, the instrumentation will not set any (tracer/meter/logger) provider on its own for any reason. In the absence of a tracer provider, it will be all no-op, and it's a design decision.
> but I wanted to use this module in each framework inside instrumentation only. That was the reason why I had created separate issues for each framework.

`Resource` is an SDK component, and no SDK related things will be part of the instrumentation.
> specification may/may not approve it as it should be compatible to other SIGs as well or it can take a long time before they do it. If you are referring `OTEL_RESOURCE_DETECTORS`, I can't think of a reason why it will not be accepted. > What is the alternate option in that case? I don't know of any alternative at the moment. And we don't want to do something that is not spec-compliant. You are free to propose a solution. > if we specify the OTEL_RESOURCE_DETECTORS=ContainerResourceDetector in that case, we have to parse this ENV variable to create object of that class and call its detect() in Resource.Create() in SDK when the actual implementation lies in contrib repo. I believe this is not the right approach Why do you believe this is not the right approach? Hey @sanketmehta28 , @srikanthccv , thanks for discussing this. Since I raised this issue a few comments on that matter: A few other language implementations have the support for resource detectors already in place, and took the following approaches to get them loaded: * Java: It's part of the javaagent (auto instrumentation), so it is loaded OOTB (under the hood it's probably similar to what JS & .NET are doing, see next 2 items) * Node.JS has a resourceDetectors options when initialising the SDK, outlined here: https://opentelemetry.io/docs/instrumentation/js/resources/#container-resource-detection * https://github.com/open-telemetry/opentelemetry-dotnet-contrib/tree/main/src/OpenTelemetry.Extensions.Docker also provides means to add the ResourceDetector on initalisation. So independent of the OTEL_RESOURCE_DETECTORS, I see two things that python SIG could implement *right now* and being spec compliant & consistent with other languages: * Have a way to add resource detectors to the SDK initialisations * Have a bunch of resource detectors included in the distro/auto instrumentation Regarding OTEL_RESOURCE_DETECTORS: I assume that Sanket's concern is the following: If I define resource detectors in that list, they have to be part of the application I am using, e.g. the developer made the deliberate choice to add the ContainerResourceDetector (or others) to the application while creating it. I will comment on the same matter at the spec issue: I think it's worth to go back a few steps and re-think how resource detectors are added. > Have a way to add resource detectors to the SDK initialisations This is already supported. > Have a bunch of resource detectors included in the distro/auto instrumentation By _included_, you mean they get installed as a part of the distro; it's fine we do this already for instrumentations but including everything, as in all attribute data users have not opted for, is not a good thing. There will also be third-party resource detectors outside this repo which we have no control over, and there needs to be a mechanism to configure them, which is where I bring the feature request to add new env. > Regarding OTEL_RESOURCE_DETECTORS: I assume that Sanket's concern is the following: If I define resource detectors in that list, they have to be part of the application I am using, e.g. the developer made the deliberate choice to add the ContainerResourceDetector (or others) to the application while creating it. I don't get this argument. Let's take what's already spec'd out, When the user configures `OTEL_TRACES_EXPORTER=otlp,jarger,zipkin` then all these exporters _have to_ be part of the application, and it's the deliberate choice they are making. 
The feature request to add `OTEL_RESOURCE_DETECTORS` is nothing different. It's also the same thing that makes `OTEL_PROPAGATORS` work. > > Have a bunch of resource detectors included in the distro/auto instrumentation > > By _included_, you mean they get installed as a part of the distro; it's fine we do this already for instrumentations but including everything, as in all attribute data users have not opted for, is not a good thing. There will also be third-party resource detectors outside this repo which we have no control over, and there needs to be a mechanism to configure them, which is where I bring the feature request to add new env. I don't care that much about third-party (aka cloud vendors) resource detectors and more about "standard" ones like container , k8s or other common infrastructure. > I don't get this argument. Let's take what's already spec'd out, When the user configures `OTEL_TRACES_EXPORTER=otlp,jarger,zipkin` then all these exporters _have to_ be part of the application, and it's the deliberate choice they are making. The feature request to add `OTEL_RESOURCE_DETECTORS` is nothing different. It's also the same thing that makes `OTEL_PROPAGATORS` work. I see that you commented on my more lengthy comment over at the spec as well, so let's move that conversation there? >I don't care that much about third-party (aka cloud vendors) resource detectors and more about "standard" ones like container , k8s or other common infrastructure. From a design perspective, we want a solution that works well for the "standard", or otherwise, that doesn't require rethinking when users come to ask for support for the vendor or even their custom detectors. >I see that you commented on my more lengthy comment over at the spec as well, so let's move that conversation there? Sure, I am conveying this is not a new problem - we already have this addressed for other components and let's follow the pattern. > Have a bunch of resource detectors included in the distro/auto instrumentation @srikanthccv , @svrnm : Independent of the ENV var `OTEL_RESOURCE_DETECTORS`, how do you plan to use these detectors in auto-instrumentation? an example will be of great help here. 
Just for the record, as discussed in our last SIG: @sanketmehta28 will look into other SIGs' implementations; maybe they have a way to solve this problem that can work for us.

Hi All, here is the status of the containerDetector feature for other SIGs:

NodeJS: `Not available in auto instrumentation`
- https://github.com/open-telemetry/opentelemetry-js-contrib/tree/main/detectors/node/opentelemetry-resource-detector-container
- https://github.com/open-telemetry/opentelemetry-js-contrib/blob/main/detectors/node/opentelemetry-resource-detector-container/src/detectors/ContainerDetector.ts

DotNet: `Not available in auto instrumentation`
- https://github.com/open-telemetry/opentelemetry-dotnet-contrib/tree/main/src/OpenTelemetry.Extensions.Docker

Java: `available in auto instrumentation`
- https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/instrumentation/resources/library/src/main/java/io/opentelemetry/instrumentation/resources/ContainerResource.java
- They call all the resourceDetectors available at the time of agent loading and add all the collected resources to spans

GoLang: `Not available in auto instrumentation`
- https://github.com/hypertrace/goagent/blob/main/sdk/internal/container/container.go

By looking at the implementations and having discussions with some SIG contributors, it looks like none of them provide any way to make this feature available in auto instrumentation, only in manual instrumentation using the SDK.

It looks like the Java agent is able to collect container.id out of the box (I have updated my last comment accordingly). They load all available ResourceDetectors and collect resources from them (just like all instrumentors are loaded in sitecustomize.py). This way container.id is available in the span resource if it is available; otherwise it fails silently. I guess until we finalize the ENV variable approach, we can use this approach to make this feature available for auto instrumentation. Thoughts?

I have checked with @svrnm and came to know that in the Java agent, they basically load all installed `ResourceDetectors` and get all resources from them. This way they get the container.id in the span resources if the `ContainerResourceDetector` module is installed, else it fails silently. I guess we can use the same approach to implement this feature in the OTel Python agent. Thoughts, anyone @open-telemetry/opentelemetry-python-contrib-approvers?

> else fails silently.

What is it that you refer to here that fails silently?

> this way they get the container.id in the span resources

Whatever we do, it will not be limited just to the traces. The same aggregated resource must be used for all three "signals". I am okay with the idea of using all installed resource detectors, but I think the user should have the option to suppress it if they want to.

> What is it that you refer to here that fails silently?

It means it will not fetch the container.id; it will continue execution without any error or warning.

> Whatever we do, it will not be limited just to the traces. The same aggregated resource must be used for all three "signals". I am okay with the idea of using all installed resource detectors, but I think the user should have the option to suppress it if they want to.

@srikanthccv: How do you suggest we provide the option to suppress it?
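To make the "load every installed detector" idea discussed above concrete, here is a minimal sketch of how it could look in Python. This is only an illustration of the approach, not the agreed design: the entry-point group name `opentelemetry_resource_detector` and the suppression environment variable below are assumptions.

```python
# Illustrative sketch only: discover installed resource detectors via an
# entry-point group and merge whatever they detect into a single Resource.
# The group name and the suppression env var are assumed, not real settings.
import os
from importlib.metadata import entry_points  # group= keyword needs Python 3.10+

from opentelemetry.sdk.resources import Resource


def aggregate_detected_resource() -> Resource:
    resource = Resource.create({})
    if os.environ.get("OTEL_PYTHON_DISABLE_RESOURCE_DETECTORS"):
        return resource  # assumed opt-out knob for users who want to suppress detection
    for entry_point in entry_points(group="opentelemetry_resource_detector"):
        try:
            detector = entry_point.load()()  # instantiate the detector class
            resource = resource.merge(detector.detect())
        except Exception:  # mirror the "fail silently" behaviour described above
            pass
    return resource
```

The same aggregated resource would then be handed to the tracer, meter, and logger providers so that all three signals share it.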
2023-01-15T22:35:00
open-telemetry/opentelemetry-python-contrib
1,598
open-telemetry__opentelemetry-python-contrib-1598
[ "1545" ]
7af87e1becfe303e555347c8ed7055a08ffe8895
diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py --- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py @@ -44,6 +44,7 @@ The instrument() method accepts the following keyword args: tracer_provider (TracerProvider) - an optional tracer provider +sanitize_query (bool) - an optional query sanitization flag request_hook (Callable) - a function with extra user-defined logic to be performed before performing the request this function signature is: def request_hook(span: Span, method: str, url: str, kwargs) @@ -96,6 +97,8 @@ def response_hook(span, response): from opentelemetry.semconv.trace import SpanAttributes from opentelemetry.trace import SpanKind, get_tracer +from .utils import sanitize_body + logger = getLogger(__name__) @@ -135,11 +138,16 @@ def _instrument(self, **kwargs): tracer = get_tracer(__name__, __version__, tracer_provider) request_hook = kwargs.get("request_hook") response_hook = kwargs.get("response_hook") + sanitize_query = kwargs.get("sanitize_query", False) _wrap( elasticsearch, "Transport.perform_request", _wrap_perform_request( - tracer, self._span_name_prefix, request_hook, response_hook + tracer, + sanitize_query, + self._span_name_prefix, + request_hook, + response_hook, ), ) @@ -154,7 +162,11 @@ def _uninstrument(self, **kwargs): def _wrap_perform_request( - tracer, span_name_prefix, request_hook=None, response_hook=None + tracer, + sanitize_query, + span_name_prefix, + request_hook=None, + response_hook=None, ): # pylint: disable=R0912,R0914 def wrapper(wrapped, _, args, kwargs): @@ -213,7 +225,10 @@ def wrapper(wrapped, _, args, kwargs): if method: attributes["elasticsearch.method"] = method if body: - attributes[SpanAttributes.DB_STATEMENT] = str(body) + statement = str(body) + if sanitize_query: + statement = sanitize_body(body) + attributes[SpanAttributes.DB_STATEMENT] = statement if params: attributes["elasticsearch.params"] = str(params) if doc_id: diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py new file mode 100644 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py @@ -0,0 +1,59 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +sanitized_keys = ( + "message", + "should", + "filter", + "query", + "queries", + "intervals", + "match", +) +sanitized_value = "?" 
+ + +# pylint: disable=C0103 +def _flatten_dict(d, parent_key=""): + items = [] + for k, v in d.items(): + new_key = parent_key + "." + k if parent_key else k + if isinstance(v, dict): + items.extend(_flatten_dict(v, new_key).items()) + else: + items.append((new_key, v)) + return dict(items) + + +def _unflatten_dict(d): + res = {} + for k, v in d.items(): + keys = k.split(".") + d = res + for key in keys[:-1]: + if key not in d: + d[key] = {} + d = d[key] + d[keys[-1]] = v + return res + + +def sanitize_body(body) -> str: + flatten_body = _flatten_dict(body) + + for key in flatten_body: + if key.endswith(sanitized_keys): + flatten_body[key] = sanitized_value + + return str(_unflatten_dict(flatten_body))
diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/sanitization_queries.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/sanitization_queries.py new file mode 100644 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/sanitization_queries.py @@ -0,0 +1,65 @@ +interval_query = { + "query": { + "intervals": { + "my_text": { + "all_of": { + "ordered": True, + "intervals": [ + { + "match": { + "query": "my favorite food", + "max_gaps": 0, + "ordered": True, + } + }, + { + "any_of": { + "intervals": [ + {"match": {"query": "hot water"}}, + {"match": {"query": "cold porridge"}}, + ] + } + }, + ], + } + } + } + } +} + +match_query = {"query": {"match": {"message": {"query": "this is a test"}}}} + +filter_query = { + "query": { + "bool": { + "must": [ + {"match": {"title": "Search"}}, + {"match": {"content": "Elasticsearch"}}, + ], + "filter": [ + {"term": {"status": "published"}}, + {"range": {"publish_date": {"gte": "2015-01-01"}}}, + ], + } + } +} + +interval_query_sanitized = { + "query": { + "intervals": { + "my_text": {"all_of": {"ordered": True, "intervals": "?"}} + } + } +} +match_query_sanitized = {"query": {"match": {"message": {"query": "?"}}}} +filter_query_sanitized = { + "query": { + "bool": { + "must": [ + {"match": {"title": "Search"}}, + {"match": {"content": "Elasticsearch"}}, + ], + "filter": "?", + } + } +} diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/test_elasticsearch.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/test_elasticsearch.py --- a/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/test_elasticsearch.py +++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/test_elasticsearch.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + import json import os import threading @@ -27,10 +28,13 @@ from opentelemetry.instrumentation.elasticsearch import ( ElasticsearchInstrumentor, ) +from opentelemetry.instrumentation.elasticsearch.utils import sanitize_body from opentelemetry.semconv.trace import SpanAttributes from opentelemetry.test.test_base import TestBase from opentelemetry.trace import StatusCode +from . import sanitization_queries # pylint: disable=no-name-in-module + major_version = elasticsearch.VERSION[0] if major_version == 7: @@ -42,7 +46,6 @@ else: from . 
import helpers_es2 as helpers # pylint: disable=no-name-in-module - Article = helpers.Article @@ -50,6 +53,22 @@ "elasticsearch.connection.http_urllib3.Urllib3HttpConnection.perform_request" ) class TestElasticsearchIntegration(TestBase): + search_attributes = { + SpanAttributes.DB_SYSTEM: "elasticsearch", + "elasticsearch.url": "/test-index/_search", + "elasticsearch.method": helpers.dsl_search_method, + "elasticsearch.target": "test-index", + SpanAttributes.DB_STATEMENT: str( + {"query": {"bool": {"filter": [{"term": {"author": "testing"}}]}}} + ), + } + + create_attributes = { + SpanAttributes.DB_SYSTEM: "elasticsearch", + "elasticsearch.url": "/test-index", + "elasticsearch.method": "HEAD", + } + def setUp(self): super().setUp() self.tracer = self.tracer_provider.get_tracer(__name__) @@ -241,21 +260,36 @@ def test_dsl_search(self, request_mock): self.assertIsNotNone(span.end_time) self.assertEqual( span.attributes, + self.search_attributes, + ) + + def test_dsl_search_sanitized(self, request_mock): + # Reset instrumentation to use sanitized query (default) + ElasticsearchInstrumentor().uninstrument() + ElasticsearchInstrumentor().instrument(sanitize_query=True) + + # update expected attributes to match sanitized query + sanitized_search_attributes = self.search_attributes.copy() + sanitized_search_attributes.update( { - SpanAttributes.DB_SYSTEM: "elasticsearch", - "elasticsearch.url": "/test-index/_search", - "elasticsearch.method": helpers.dsl_search_method, - "elasticsearch.target": "test-index", - SpanAttributes.DB_STATEMENT: str( - { - "query": { - "bool": { - "filter": [{"term": {"author": "testing"}}] - } - } - } - ), - }, + SpanAttributes.DB_STATEMENT: "{'query': {'bool': {'filter': '?'}}}" + } + ) + + request_mock.return_value = (1, {}, '{"hits": {"hits": []}}') + client = Elasticsearch() + search = Search(using=client, index="test-index").filter( + "term", author="testing" + ) + search.execute() + spans = self.get_finished_spans() + span = spans[0] + self.assertEqual(1, len(spans)) + self.assertEqual(span.name, "Elasticsearch/<target>/_search") + self.assertIsNotNone(span.end_time) + self.assertEqual( + span.attributes, + sanitized_search_attributes, ) def test_dsl_create(self, request_mock): @@ -264,17 +298,14 @@ def test_dsl_create(self, request_mock): Article.init(using=client) spans = self.get_finished_spans() + assert spans self.assertEqual(2, len(spans)) span1 = spans.by_attr(key="elasticsearch.method", value="HEAD") span2 = spans.by_attr(key="elasticsearch.method", value="PUT") self.assertEqual( span1.attributes, - { - SpanAttributes.DB_SYSTEM: "elasticsearch", - "elasticsearch.url": "/test-index", - "elasticsearch.method": "HEAD", - }, + self.create_attributes, ) attributes = { @@ -288,6 +319,25 @@ def test_dsl_create(self, request_mock): helpers.dsl_create_statement, ) + def test_dsl_create_sanitized(self, request_mock): + # Reset instrumentation to explicitly use sanitized query + ElasticsearchInstrumentor().uninstrument() + ElasticsearchInstrumentor().instrument(sanitize_query=True) + request_mock.return_value = (1, {}, {}) + client = Elasticsearch() + Article.init(using=client) + + spans = self.get_finished_spans() + assert spans + + self.assertEqual(2, len(spans)) + span = spans.by_attr(key="elasticsearch.method", value="HEAD") + + self.assertEqual( + span.attributes, + self.create_attributes, + ) + def test_dsl_index(self, request_mock): request_mock.return_value = helpers.dsl_index_result @@ -412,3 +462,17 @@ def response_hook(span, response): 
json.dumps(response_payload), spans[0].attributes[response_attribute_name], ) + + def test_body_sanitization(self, _): + self.assertEqual( + sanitize_body(sanitization_queries.interval_query), + str(sanitization_queries.interval_query_sanitized), + ) + self.assertEqual( + sanitize_body(sanitization_queries.match_query), + str(sanitization_queries.match_query_sanitized), + ) + self.assertEqual( + sanitize_body(sanitization_queries.filter_query), + str(sanitization_queries.filter_query_sanitized), + )
Implement sensitive data sanitization for elasticsearch instrumentation Aggregated by #1543
Starting to work on this one
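For context, a minimal usage sketch of the opt-in flag this patch introduces (assuming the elasticsearch-py 7.x client API; the endpoint, index name, and query are placeholders):

```python
# Sketch: enabling the opt-in query sanitization added by this patch.
from elasticsearch import Elasticsearch

from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor

ElasticsearchInstrumentor().instrument(sanitize_query=True)

client = Elasticsearch(["http://localhost:9200"])
# With sanitization on, the recorded db.statement for this match query becomes
# "{'query': {'match': {'message': {'query': '?'}}}}" instead of the raw body.
client.search(
    index="test-index",
    body={"query": {"match": {"message": {"query": "this is a test"}}}},
)
```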
2023-01-24T10:28:35
open-telemetry/opentelemetry-python-contrib
1,645
open-telemetry__opentelemetry-python-contrib-1645
[ "1143" ]
0417141a703b2d6f3c9b6afa5b18db5bb59fbaa3
diff --git a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py --- a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py @@ -105,13 +105,16 @@ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor from opentelemetry.instrumentation.sqlalchemy.engine import ( EngineTracer, - _get_tracer, _wrap_connect, _wrap_create_async_engine, _wrap_create_engine, ) from opentelemetry.instrumentation.sqlalchemy.package import _instruments +from opentelemetry.instrumentation.sqlalchemy.version import __version__ from opentelemetry.instrumentation.utils import unwrap +from opentelemetry.metrics import get_meter +from opentelemetry.semconv.metrics import MetricInstruments +from opentelemetry.trace import get_tracer class SQLAlchemyInstrumentor(BaseInstrumentor): @@ -136,32 +139,47 @@ def _instrument(self, **kwargs): An instrumented engine if passed in as an argument or list of instrumented engines, None otherwise. """ tracer_provider = kwargs.get("tracer_provider") + tracer = get_tracer(__name__, __version__, tracer_provider) + + meter_provider = kwargs.get("meter_provider") + meter = get_meter(__name__, __version__, meter_provider) + + connections_usage = meter.create_up_down_counter( + name=MetricInstruments.DB_CLIENT_CONNECTIONS_USAGE, + unit="connections", + description="The number of connections that are currently in state described by the state attribute.", + ) + enable_commenter = kwargs.get("enable_commenter", False) + _w( "sqlalchemy", "create_engine", - _wrap_create_engine(tracer_provider, enable_commenter), + _wrap_create_engine(tracer, connections_usage, enable_commenter), ) _w( "sqlalchemy.engine", "create_engine", - _wrap_create_engine(tracer_provider, enable_commenter), + _wrap_create_engine(tracer, connections_usage, enable_commenter), ) _w( "sqlalchemy.engine.base", "Engine.connect", - _wrap_connect(tracer_provider), + _wrap_connect(tracer), ) if parse_version(sqlalchemy.__version__).release >= (1, 4): _w( "sqlalchemy.ext.asyncio", "create_async_engine", - _wrap_create_async_engine(tracer_provider, enable_commenter), + _wrap_create_async_engine( + tracer, connections_usage, enable_commenter + ), ) if kwargs.get("engine") is not None: return EngineTracer( - _get_tracer(tracer_provider), + tracer, kwargs.get("engine"), + connections_usage, kwargs.get("enable_commenter", False), kwargs.get("commenter_options", {}), ) @@ -170,8 +188,9 @@ def _instrument(self, **kwargs): ): return [ EngineTracer( - _get_tracer(tracer_provider), + tracer, engine, + connections_usage, kwargs.get("enable_commenter", False), kwargs.get("commenter_options", {}), ) diff --git a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py --- a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py +++ b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py @@ -20,9 +20,6 @@ ) from opentelemetry import trace -from opentelemetry.instrumentation.sqlalchemy.package import ( - 
_instrumenting_module_name, -) from opentelemetry.instrumentation.sqlalchemy.version import __version__ from opentelemetry.instrumentation.sqlcommenter_utils import _add_sql_comment from opentelemetry.instrumentation.utils import _get_opentelemetry_values @@ -44,15 +41,9 @@ def _normalize_vendor(vendor): return vendor -def _get_tracer(tracer_provider=None): - return trace.get_tracer( - _instrumenting_module_name, - __version__, - tracer_provider=tracer_provider, - ) - - -def _wrap_create_async_engine(tracer_provider=None, enable_commenter=False): +def _wrap_create_async_engine( + tracer, connections_usage, enable_commenter=False +): # pylint: disable=unused-argument def _wrap_create_async_engine_internal(func, module, args, kwargs): """Trace the SQLAlchemy engine, creating an `EngineTracer` @@ -60,33 +51,26 @@ def _wrap_create_async_engine_internal(func, module, args, kwargs): """ engine = func(*args, **kwargs) EngineTracer( - _get_tracer(tracer_provider), engine.sync_engine, enable_commenter + tracer, engine.sync_engine, connections_usage, enable_commenter ) return engine return _wrap_create_async_engine_internal -def _wrap_create_engine(tracer_provider=None, enable_commenter=False): - # pylint: disable=unused-argument - def _wrap_create_engine_internal(func, module, args, kwargs): +def _wrap_create_engine(tracer, connections_usage, enable_commenter=False): + def _wrap_create_engine_internal(func, _module, args, kwargs): """Trace the SQLAlchemy engine, creating an `EngineTracer` object that will listen to SQLAlchemy events. """ engine = func(*args, **kwargs) - EngineTracer(_get_tracer(tracer_provider), engine, enable_commenter) + EngineTracer(tracer, engine, connections_usage, enable_commenter) return engine return _wrap_create_engine_internal -def _wrap_connect(tracer_provider=None): - tracer = trace.get_tracer( - _instrumenting_module_name, - __version__, - tracer_provider=tracer_provider, - ) - +def _wrap_connect(tracer): # pylint: disable=unused-argument def _wrap_connect_internal(func, module, args, kwargs): with tracer.start_as_current_span( @@ -107,10 +91,16 @@ class EngineTracer: _remove_event_listener_params = [] def __init__( - self, tracer, engine, enable_commenter=False, commenter_options=None + self, + tracer, + engine, + connections_usage, + enable_commenter=False, + commenter_options=None, ): self.tracer = tracer self.engine = engine + self.connections_usage = connections_usage self.vendor = _normalize_vendor(engine.name) self.enable_commenter = enable_commenter self.commenter_options = commenter_options if commenter_options else {} @@ -123,6 +113,49 @@ def __init__( engine, "after_cursor_execute", _after_cur_exec ) self._register_event_listener(engine, "handle_error", _handle_error) + self._register_event_listener(engine, "connect", self._pool_connect) + self._register_event_listener(engine, "close", self._pool_close) + self._register_event_listener(engine, "checkin", self._pool_checkin) + self._register_event_listener(engine, "checkout", self._pool_checkout) + + def _get_pool_name(self): + return self.engine.pool.logging_name or "" + + def _add_idle_to_connection_usage(self, value): + self.connections_usage.add( + value, + attributes={ + "pool.name": self._get_pool_name(), + "state": "idle", + }, + ) + + def _add_used_to_connection_usage(self, value): + self.connections_usage.add( + value, + attributes={ + "pool.name": self._get_pool_name(), + "state": "used", + }, + ) + + def _pool_connect(self, _dbapi_connection, _connection_record): + 
self._add_idle_to_connection_usage(1) + + def _pool_close(self, _dbapi_connection, _connection_record): + self._add_idle_to_connection_usage(-1) + + # Called when a connection returns to the pool. + def _pool_checkin(self, _dbapi_connection, _connection_record): + self._add_used_to_connection_usage(-1) + self._add_idle_to_connection_usage(1) + + # Called when a connection is retrieved from the Pool. + def _pool_checkout( + self, _dbapi_connection, _connection_record, _connection_proxy + ): + self._add_idle_to_connection_usage(-1) + self._add_used_to_connection_usage(1) @classmethod def _register_event_listener(cls, target, identifier, func, *args, **kw): @@ -153,9 +186,8 @@ def _operation_name(self, db_name, statement): return self.vendor return " ".join(parts) - # pylint: disable=unused-argument def _before_cur_exec( - self, conn, cursor, statement, params, context, executemany + self, conn, cursor, statement, params, context, _executemany ): attrs, found = _get_attributes_from_url(conn.engine.url) if not found: diff --git a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/package.py b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/package.py --- a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/package.py +++ b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/package.py @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -_instrumenting_module_name = "opentelemetry.instrumentation.sqlalchemy" - _instruments = ("sqlalchemy",) + +_supports_metrics = True
diff --git a/instrumentation/opentelemetry-instrumentation-sqlalchemy/tests/test_sqlalchemy_metrics.py b/instrumentation/opentelemetry-instrumentation-sqlalchemy/tests/test_sqlalchemy_metrics.py new file mode 100644 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-sqlalchemy/tests/test_sqlalchemy_metrics.py @@ -0,0 +1,159 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sqlalchemy +from sqlalchemy.pool import QueuePool + +from opentelemetry.instrumentation.sqlalchemy import SQLAlchemyInstrumentor +from opentelemetry.test.test_base import TestBase + + +class TestSqlalchemyMetricsInstrumentation(TestBase): + def setUp(self): + super().setUp() + SQLAlchemyInstrumentor().instrument( + tracer_provider=self.tracer_provider, + ) + + def tearDown(self): + super().tearDown() + SQLAlchemyInstrumentor().uninstrument() + + def assert_pool_idle_used_expected(self, pool_name, idle, used): + metrics = self.get_sorted_metrics() + self.assertEqual(len(metrics), 1) + self.assert_metric_expected( + metrics[0], + [ + self.create_number_data_point( + value=idle, + attributes={"pool.name": pool_name, "state": "idle"}, + ), + self.create_number_data_point( + value=used, + attributes={"pool.name": pool_name, "state": "used"}, + ), + ], + ) + + def test_metrics_one_connection(self): + pool_name = "pool_test_name" + engine = sqlalchemy.create_engine( + "sqlite:///:memory:", + pool_size=5, + poolclass=QueuePool, + pool_logging_name=pool_name, + ) + + metrics = self.get_sorted_metrics() + self.assertEqual(len(metrics), 0) + + with engine.connect(): + self.assert_pool_idle_used_expected( + pool_name=pool_name, idle=0, used=1 + ) + + # After the connection is closed + self.assert_pool_idle_used_expected( + pool_name=pool_name, idle=1, used=0 + ) + + def test_metrics_without_pool_name(self): + pool_name = "" + engine = sqlalchemy.create_engine( + "sqlite:///:memory:", + pool_size=5, + poolclass=QueuePool, + ) + + metrics = self.get_sorted_metrics() + self.assertEqual(len(metrics), 0) + + with engine.connect(): + self.assert_pool_idle_used_expected( + pool_name=pool_name, idle=0, used=1 + ) + + # After the connection is closed + self.assert_pool_idle_used_expected( + pool_name=pool_name, idle=1, used=0 + ) + + def test_metrics_two_connections(self): + pool_name = "pool_test_name" + engine = sqlalchemy.create_engine( + "sqlite:///:memory:", + pool_size=5, + poolclass=QueuePool, + pool_logging_name=pool_name, + ) + + metrics = self.get_sorted_metrics() + self.assertEqual(len(metrics), 0) + + with engine.connect(): + with engine.connect(): + self.assert_pool_idle_used_expected(pool_name, idle=0, used=2) + + # After the first connection is closed + self.assert_pool_idle_used_expected(pool_name, idle=1, used=1) + + # After the two connections are closed + self.assert_pool_idle_used_expected(pool_name, idle=2, used=0) + + def test_metrics_connections(self): + pool_name = "pool_test_name" + engine = sqlalchemy.create_engine( + "sqlite:///:memory:", + pool_size=5, + 
poolclass=QueuePool, + pool_logging_name=pool_name, + ) + + metrics = self.get_sorted_metrics() + self.assertEqual(len(metrics), 0) + + with engine.connect(): + with engine.connect(): + self.assert_pool_idle_used_expected( + pool_name=pool_name, idle=0, used=2 + ) + + # After the first connection is closed + self.assert_pool_idle_used_expected( + pool_name=pool_name, idle=1, used=1 + ) + + # Resume from idle to used + with engine.connect(): + self.assert_pool_idle_used_expected( + pool_name=pool_name, idle=0, used=2 + ) + + # After the two connections are closed + self.assert_pool_idle_used_expected( + pool_name=pool_name, idle=2, used=0 + ) + + def test_metric_uninstrument(self): + SQLAlchemyInstrumentor().uninstrument() + engine = sqlalchemy.create_engine( + "sqlite:///:memory:", + poolclass=QueuePool, + ) + + engine.connect() + + metrics = self.get_sorted_metrics() + self.assertEqual(len(metrics), 0)
Metrics instrumentation sqlalchemy Spec https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/database-metrics.md
Hi, I'd like to work on this issue.

Hi [awuorm](https://github.com/awuorm), are you working on this, or can I take this issue?

@awuorm since there is no (draft) work so far and no response to the previous comment, I am unassigning you. Feel free to come back and pick something else when you get some time to contribute.
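To make the user-facing effect of the patch above concrete, here is an illustrative sketch (the connection string and pool name are placeholders): once instrumented, connection checkouts and checkins on a `QueuePool` are reported on the `db.client.connections.usage` up/down counter with `pool.name` and `state` (idle/used) attributes.

```python
# Illustrative sketch of the connection-usage metric wired up by this patch.
import sqlalchemy
from sqlalchemy.pool import QueuePool

from opentelemetry.instrumentation.sqlalchemy import SQLAlchemyInstrumentor

SQLAlchemyInstrumentor().instrument()  # uses the global tracer/meter providers

engine = sqlalchemy.create_engine(
    "sqlite:///:memory:",
    poolclass=QueuePool,
    pool_logging_name="demo-pool",  # surfaces as the pool.name attribute
)

with engine.connect():
    pass  # while open: state=used -> 1; after the block exits: state=idle -> 1
```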
2023-02-06T15:27:57
open-telemetry/opentelemetry-python-contrib
1,660
open-telemetry__opentelemetry-python-contrib-1660
[ "1658" ]
7aa4aec3a6d916f1de41ce4c64a5ae044d4c7276
diff --git a/instrumentation/opentelemetry-instrumentation-dbapi/src/opentelemetry/instrumentation/dbapi/__init__.py b/instrumentation/opentelemetry-instrumentation-dbapi/src/opentelemetry/instrumentation/dbapi/__init__.py --- a/instrumentation/opentelemetry-instrumentation-dbapi/src/opentelemetry/instrumentation/dbapi/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-dbapi/src/opentelemetry/instrumentation/dbapi/__init__.py @@ -203,7 +203,7 @@ def instrument_connection( Returns: An instrumented connection. """ - if isinstance(connection, _TracedConnectionProxy): + if isinstance(connection, wrapt.ObjectProxy): _logger.warning("Connection already instrumented") return connection @@ -230,8 +230,8 @@ def uninstrument_connection(connection): Returns: An uninstrumented connection. """ - if isinstance(connection, _TracedConnectionProxy): - return connection._connection + if isinstance(connection, wrapt.ObjectProxy): + return connection.__wrapped__ _logger.warning("Connection is not instrumented") return connection @@ -320,22 +320,14 @@ def get_connection_attributes(self, connection): self.span_attributes[SpanAttributes.NET_PEER_PORT] = port -class _TracedConnectionProxy: - pass - - def get_traced_connection_proxy( connection, db_api_integration, *args, **kwargs ): # pylint: disable=abstract-method - class TracedConnectionProxy(type(connection), _TracedConnectionProxy): - def __init__(self, connection): - self._connection = connection - - def __getattr__(self, name): - return object.__getattribute__( - object.__getattribute__(self, "_connection"), name - ) + class TracedConnectionProxy(wrapt.ObjectProxy): + # pylint: disable=unused-argument + def __init__(self, connection, *args, **kwargs): + wrapt.ObjectProxy.__init__(self, connection) def __getattribute__(self, name): if object.__getattribute__(self, name): @@ -347,16 +339,17 @@ def __getattribute__(self, name): def cursor(self, *args, **kwargs): return get_traced_cursor_proxy( - self._connection.cursor(*args, **kwargs), db_api_integration + self.__wrapped__.cursor(*args, **kwargs), db_api_integration ) - # For some reason this is necessary as trying to access the close - # method of self._connection via __getattr__ leads to unexplained - # errors. - def close(self): - self._connection.close() + def __enter__(self): + self.__wrapped__.__enter__() + return self + + def __exit__(self, *args, **kwargs): + self.__wrapped__.__exit__(*args, **kwargs) - return TracedConnectionProxy(connection) + return TracedConnectionProxy(connection, *args, **kwargs) class CursorTracer:
diff --git a/instrumentation/opentelemetry-instrumentation-dbapi/tests/test_dbapi_integration.py b/instrumentation/opentelemetry-instrumentation-dbapi/tests/test_dbapi_integration.py --- a/instrumentation/opentelemetry-instrumentation-dbapi/tests/test_dbapi_integration.py +++ b/instrumentation/opentelemetry-instrumentation-dbapi/tests/test_dbapi_integration.py @@ -325,14 +325,14 @@ def test_callproc(self): @mock.patch("opentelemetry.instrumentation.dbapi") def test_wrap_connect(self, mock_dbapi): - dbapi.wrap_connect(self.tracer, MockConnectionEmpty(), "connect", "-") + dbapi.wrap_connect(self.tracer, mock_dbapi, "connect", "-") connection = mock_dbapi.connect() self.assertEqual(mock_dbapi.connect.call_count, 1) - self.assertIsInstance(connection._connection, mock.Mock) + self.assertIsInstance(connection.__wrapped__, mock.Mock) @mock.patch("opentelemetry.instrumentation.dbapi") def test_unwrap_connect(self, mock_dbapi): - dbapi.wrap_connect(self.tracer, MockConnectionEmpty(), "connect", "-") + dbapi.wrap_connect(self.tracer, mock_dbapi, "connect", "-") connection = mock_dbapi.connect() self.assertEqual(mock_dbapi.connect.call_count, 1) @@ -342,21 +342,19 @@ def test_unwrap_connect(self, mock_dbapi): self.assertIsInstance(connection, mock.Mock) def test_instrument_connection(self): - connection = MockConnectionEmpty() + connection = mock.Mock() # Avoid get_attributes failing because can't concatenate mock - # pylint: disable=attribute-defined-outside-init connection.database = "-" connection2 = dbapi.instrument_connection(self.tracer, connection, "-") - self.assertIs(connection2._connection, connection) + self.assertIs(connection2.__wrapped__, connection) def test_uninstrument_connection(self): - connection = MockConnectionEmpty() + connection = mock.Mock() # Set connection.database to avoid a failure because mock can't # be concatenated - # pylint: disable=attribute-defined-outside-init connection.database = "-" connection2 = dbapi.instrument_connection(self.tracer, connection, "-") - self.assertIs(connection2._connection, connection) + self.assertIs(connection2.__wrapped__, connection) connection3 = dbapi.uninstrument_connection(connection2) self.assertIs(connection3, connection) @@ -372,12 +370,10 @@ def mock_connect(*args, **kwargs): server_host = kwargs.get("server_host") server_port = kwargs.get("server_port") user = kwargs.get("user") - return MockConnectionWithAttributes( - database, server_port, server_host, user - ) + return MockConnection(database, server_port, server_host, user) -class MockConnectionWithAttributes: +class MockConnection: def __init__(self, database, server_port, server_host, user): self.database = database self.server_port = server_port @@ -410,7 +406,3 @@ def executemany(self, query, params=None, throw_exception=False): def callproc(self, query, params=None, throw_exception=False): if throw_exception: raise Exception("Test Exception") - - -class MockConnectionEmpty: - pass diff --git a/instrumentation/opentelemetry-instrumentation-mysql/tests/test_mysql_integration.py b/instrumentation/opentelemetry-instrumentation-mysql/tests/test_mysql_integration.py --- a/instrumentation/opentelemetry-instrumentation-mysql/tests/test_mysql_integration.py +++ b/instrumentation/opentelemetry-instrumentation-mysql/tests/test_mysql_integration.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from unittest.mock import Mock, patch +from unittest import mock import mysql.connector @@ -23,15 +23,6 @@ from opentelemetry.test.test_base import TestBase -def mock_connect(*args, **kwargs): - class MockConnection: - def cursor(self): - # pylint: disable=no-self-use - return Mock() - - return MockConnection() - - def connect_and_execute_query(): cnx = mysql.connector.connect(database="test") cursor = cnx.cursor() @@ -47,9 +38,9 @@ def tearDown(self): with self.disable_logging(): MySQLInstrumentor().uninstrument() - @patch("mysql.connector.connect", new=mock_connect) + @mock.patch("mysql.connector.connect") # pylint: disable=unused-argument - def test_instrumentor(self): + def test_instrumentor(self, mock_connect): MySQLInstrumentor().instrument() connect_and_execute_query() @@ -71,8 +62,9 @@ def test_instrumentor(self): spans_list = self.memory_exporter.get_finished_spans() self.assertEqual(len(spans_list), 1) - @patch("mysql.connector.connect", new=mock_connect) - def test_custom_tracer_provider(self): + @mock.patch("mysql.connector.connect") + # pylint: disable=unused-argument + def test_custom_tracer_provider(self, mock_connect): resource = resources.Resource.create({}) result = self.create_tracer_provider(resource=resource) tracer_provider, exporter = result @@ -86,9 +78,9 @@ def test_custom_tracer_provider(self): self.assertIs(span.resource, resource) - @patch("mysql.connector.connect", new=mock_connect) + @mock.patch("mysql.connector.connect") # pylint: disable=unused-argument - def test_instrument_connection(self): + def test_instrument_connection(self, mock_connect): cnx, query = connect_and_execute_query() spans_list = self.memory_exporter.get_finished_spans() @@ -101,8 +93,8 @@ def test_instrument_connection(self): spans_list = self.memory_exporter.get_finished_spans() self.assertEqual(len(spans_list), 1) - @patch("mysql.connector.connect", new=mock_connect) - def test_instrument_connection_no_op_tracer_provider(self): + @mock.patch("mysql.connector.connect") + def test_instrument_connection_no_op_tracer_provider(self, mock_connect): tracer_provider = trace_api.NoOpTracerProvider() MySQLInstrumentor().instrument(tracer_provider=tracer_provider) connect_and_execute_query() @@ -110,9 +102,9 @@ def test_instrument_connection_no_op_tracer_provider(self): spans_list = self.memory_exporter.get_finished_spans() self.assertEqual(len(spans_list), 0) - @patch("mysql.connector.connect", new=mock_connect) + @mock.patch("mysql.connector.connect") # pylint: disable=unused-argument - def test_uninstrument_connection(self): + def test_uninstrument_connection(self, mock_connect): MySQLInstrumentor().instrument() cnx, query = connect_and_execute_query() diff --git a/instrumentation/opentelemetry-instrumentation-pymysql/tests/test_pymysql_integration.py b/instrumentation/opentelemetry-instrumentation-pymysql/tests/test_pymysql_integration.py --- a/instrumentation/opentelemetry-instrumentation-pymysql/tests/test_pymysql_integration.py +++ b/instrumentation/opentelemetry-instrumentation-pymysql/tests/test_pymysql_integration.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from unittest.mock import Mock, patch +from unittest import mock import pymysql @@ -22,24 +22,15 @@ from opentelemetry.test.test_base import TestBase -def mock_connect(*args, **kwargs): - class MockConnection: - def cursor(self): - # pylint: disable=no-self-use - return Mock() - - return MockConnection() - - class TestPyMysqlIntegration(TestBase): def tearDown(self): super().tearDown() with self.disable_logging(): PyMySQLInstrumentor().uninstrument() - @patch("pymysql.connect", new=mock_connect) + @mock.patch("pymysql.connect") # pylint: disable=unused-argument - def test_instrumentor(self): + def test_instrumentor(self, mock_connect): PyMySQLInstrumentor().instrument() cnx = pymysql.connect(database="test") @@ -67,9 +58,9 @@ def test_instrumentor(self): spans_list = self.memory_exporter.get_finished_spans() self.assertEqual(len(spans_list), 1) - @patch("pymysql.connect", new=mock_connect) + @mock.patch("pymysql.connect") # pylint: disable=unused-argument - def test_custom_tracer_provider(self): + def test_custom_tracer_provider(self, mock_connect): resource = resources.Resource.create({}) result = self.create_tracer_provider(resource=resource) tracer_provider, exporter = result @@ -87,9 +78,9 @@ def test_custom_tracer_provider(self): self.assertIs(span.resource, resource) - @patch("pymysql.connect", new=mock_connect) + @mock.patch("pymysql.connect") # pylint: disable=unused-argument - def test_instrument_connection(self): + def test_instrument_connection(self, mock_connect): cnx = pymysql.connect(database="test") query = "SELECT * FROM test" cursor = cnx.cursor() @@ -105,9 +96,9 @@ def test_instrument_connection(self): spans_list = self.memory_exporter.get_finished_spans() self.assertEqual(len(spans_list), 1) - @patch("pymysql.connect", new=mock_connect) + @mock.patch("pymysql.connect") # pylint: disable=unused-argument - def test_uninstrument_connection(self): + def test_uninstrument_connection(self, mock_connect): PyMySQLInstrumentor().instrument() cnx = pymysql.connect(database="test") query = "SELECT * FROM test"
Revert fix for #1077

#1097 fixed #1077 but introduced another issue; reverting to find a fix that does not cause this problem.
2023-02-10T22:21:37
open-telemetry/opentelemetry-python-contrib
1,664
open-telemetry__opentelemetry-python-contrib-1664
[ "1663" ]
3bcc9fb5173600572479edae1c3071aa4bce6ee1
diff --git a/instrumentation/opentelemetry-instrumentation-fastapi/src/opentelemetry/instrumentation/fastapi/package.py b/instrumentation/opentelemetry-instrumentation-fastapi/src/opentelemetry/instrumentation/fastapi/package.py --- a/instrumentation/opentelemetry-instrumentation-fastapi/src/opentelemetry/instrumentation/fastapi/package.py +++ b/instrumentation/opentelemetry-instrumentation-fastapi/src/opentelemetry/instrumentation/fastapi/package.py @@ -13,6 +13,6 @@ # limitations under the License. -_instruments = ("fastapi ~= 0.58",) +_instruments = ("fastapi <= 0.90.1",) _supports_metrics = True diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py @@ -69,7 +69,7 @@ "instrumentation": "opentelemetry-instrumentation-falcon==0.37b0.dev", }, "fastapi": { - "library": "fastapi ~= 0.58", + "library": "fastapi <= 0.90.1", "instrumentation": "opentelemetry-instrumentation-fastapi==0.37b0.dev", }, "flask": {
Fix fastapi version

CI in `main` is failing right now because of `opentelemetry-instrumentation-fastapi` failures; fix the `fastapi` version.
2023-02-12T20:13:00
open-telemetry/opentelemetry-python-contrib
1,666
open-telemetry__opentelemetry-python-contrib-1666
[ "1495" ]
aa6397ad5957dbbb3d6db2598861ad3b8a012f5e
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -126,7 +126,7 @@ def getlistcfg(strval): ] -ignore_categories = ["py-class", "py-func", "py-exc", "any"] +ignore_categories = ["py-class", "py-func", "py-exc", "py-obj", "any"] for category in ignore_categories: if category in mcfg: diff --git a/instrumentation/opentelemetry-instrumentation-boto3sqs/src/opentelemetry/instrumentation/boto3sqs/__init__.py b/instrumentation/opentelemetry-instrumentation-boto3sqs/src/opentelemetry/instrumentation/boto3sqs/__init__.py --- a/instrumentation/opentelemetry-instrumentation-boto3sqs/src/opentelemetry/instrumentation/boto3sqs/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-boto3sqs/src/opentelemetry/instrumentation/boto3sqs/__init__.py @@ -16,17 +16,17 @@ .. _boto3sqs: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sqs.html - Usage ----- -.. code:: python +.. code-block:: python import boto3 from opentelemetry.instrumentation.boto3sqs import Boto3SQSInstrumentor - Boto3SQSInstrumentor().instrument() + +--- """ import logging from typing import Any, Collection, Dict, Generator, List, Mapping, Optional
Add readthedocs documentation for boto3sqs instrumentation Part of [1491](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1491)
2023-02-13T19:25:45
open-telemetry/opentelemetry-python-contrib
1,668
open-telemetry__opentelemetry-python-contrib-1668
[ "1496" ]
5e4766ed660553f4e0ad818a53a1ee38fbf95e1b
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -126,25 +126,14 @@ def getlistcfg(strval): ] -if "class_references" in mcfg: - class_references = getlistcfg(mcfg["class_references"]) - for class_reference in class_references: - nitpick_ignore.append( - ( - "py:class", - class_reference, - ) - ) - -if "anys" in mcfg: - anys = getlistcfg(mcfg["anys"]) - for _any in anys: - nitpick_ignore.append( - ( - "any", - _any, - ) - ) +ignore_categories = ["py-class", "py-func", "py-exc", "any"] + +for category in ignore_categories: + if category in mcfg: + items = getlistcfg(mcfg[category]) + for item in items: + nitpick_ignore.append((category.replace("-", ":"), item)) + # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] diff --git a/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py b/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py --- a/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py @@ -13,12 +13,12 @@ # limitations under the License. """ -Instrument `confluent-kafka-python` to report instrumentation-confluent-kafka produced and consumed messages +Instrument confluent-kafka-python to report instrumentation-confluent-kafka produced and consumed messages Usage ----- -..code:: python +.. code-block:: python from opentelemetry.instrumentation.confluent_kafka import ConfluentKafkaInstrumentor from confluent_kafka import Producer, Consumer @@ -30,12 +30,10 @@ conf1 = {'bootstrap.servers': "localhost:9092"} producer = Producer(conf1) producer.produce('my-topic',b'raw_bytes') - - conf2 = {'bootstrap.servers': "localhost:9092", - 'group.id': "foo", - 'auto.offset.reset': 'smallest'} + conf2 = {'bootstrap.servers': "localhost:9092", 'group.id': "foo", 'auto.offset.reset': 'smallest'} # report a span of type consumer with the default settings consumer = Consumer(conf2) + def basic_consume_loop(consumer, topics): try: consumer.subscribe(topics) @@ -43,11 +41,10 @@ def basic_consume_loop(consumer, topics): while running: msg = consumer.poll(timeout=1.0) if msg is None: continue - if msg.error(): if msg.error().code() == KafkaError._PARTITION_EOF: # End of partition event - sys.stderr.write(f"{msg.topic()} [{msg.partition()}] reached end at offset {msg.offset()}}\n") + sys.stderr.write(f"{msg.topic() [{msg.partition()}] reached end at offset {msg.offset()}}") elif msg.error(): raise KafkaException(msg.error()) else: @@ -57,19 +54,26 @@ def basic_consume_loop(consumer, topics): consumer.close() basic_consume_loop(consumer, "my-topic") + --- + +The _instrument method accepts the following keyword args: + tracer_provider (TracerProvider) - an optional tracer provider + + instrument_producer (Callable) - a function with extra user-defined logic to be performed before sending the message + this function signature is: + + def instrument_producer(producer: Producer, tracer_provider=None) + instrument_consumer (Callable) - a function with extra user-defined logic to be performed after consuming a message + this function signature is: + + def instrument_consumer(consumer: Consumer, tracer_provider=None) + for example: + +.. 
code:: python -The `_instrument` method accepts the following keyword args: -tracer_provider (TracerProvider) - an optional tracer provider -instrument_producer (Callable) - a function with extra user-defined logic to be performed before sending the message - this function signature is: - def instrument_producer(producer: Producer, tracer_provider=None) -instrument_consumer (Callable) - a function with extra user-defined logic to be performed after consuming a message - this function signature is: - def instrument_consumer(consumer: Consumer, tracer_provider=None) -for example: -.. code: python from opentelemetry.instrumentation.confluent_kafka import ConfluentKafkaInstrumentor + from confluent_kafka import Producer, Consumer inst = ConfluentKafkaInstrumentor() @@ -85,15 +89,12 @@ def instrument_consumer(consumer: Consumer, tracer_provider=None) p = inst.instrument_producer(p, tracer_provider) c = inst.instrument_consumer(c, tracer_provider=tracer_provider) - # Using kafka as normal now will automatically generate spans, # including user custom attributes added from the hooks conf = {'bootstrap.servers': "localhost:9092"} p.produce('my-topic',b'raw_bytes') msg = c.poll() - -API ___ """ from typing import Collection
Add readthedocs documentation for confluent kafka instrumentation Part of [1491](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1491)
2023-02-14T12:25:51
open-telemetry/opentelemetry-python-contrib
1,679
open-telemetry__opentelemetry-python-contrib-1679
[ "1159" ]
78040836d2d401c9a0ba5013e73140b768fb58ee
diff --git a/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py b/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py --- a/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py @@ -60,6 +60,7 @@ def add(x, y): """ import logging +from timeit import default_timer from typing import Collection, Iterable from celery import signals # pylint: disable=no-name-in-module @@ -69,6 +70,7 @@ def add(x, y): from opentelemetry.instrumentation.celery.package import _instruments from opentelemetry.instrumentation.celery.version import __version__ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor +from opentelemetry.metrics import get_meter from opentelemetry.propagate import extract, inject from opentelemetry.propagators.textmap import Getter from opentelemetry.semconv.trace import SpanAttributes @@ -104,6 +106,11 @@ def keys(self, carrier): class CeleryInstrumentor(BaseInstrumentor): + def __init__(self): + super().__init__() + self.metrics = None + self.task_id_to_start_time = {} + def instrumentation_dependencies(self) -> Collection[str]: return _instruments @@ -113,6 +120,11 @@ def _instrument(self, **kwargs): # pylint: disable=attribute-defined-outside-init self._tracer = trace.get_tracer(__name__, __version__, tracer_provider) + meter_provider = kwargs.get("meter_provider") + meter = get_meter(__name__, __version__, meter_provider) + + self.create_celery_metrics(meter) + signals.task_prerun.connect(self._trace_prerun, weak=False) signals.task_postrun.connect(self._trace_postrun, weak=False) signals.before_task_publish.connect( @@ -139,6 +151,7 @@ def _trace_prerun(self, *args, **kwargs): if task is None or task_id is None: return + self.update_task_duration_time(task_id) request = task.request tracectx = extract(request, getter=celery_getter) or None @@ -153,8 +166,7 @@ def _trace_prerun(self, *args, **kwargs): activation.__enter__() # pylint: disable=E1101 utils.attach_span(task, task_id, (span, activation)) - @staticmethod - def _trace_postrun(*args, **kwargs): + def _trace_postrun(self, *args, **kwargs): task = utils.retrieve_task(kwargs) task_id = utils.retrieve_task_id(kwargs) @@ -178,6 +190,9 @@ def _trace_postrun(*args, **kwargs): activation.__exit__(None, None, None) utils.detach_span(task, task_id) + self.update_task_duration_time(task_id) + labels = {"task": task.name, "worker": task.request.hostname} + self._record_histograms(task_id, labels) def _trace_before_publish(self, *args, **kwargs): task = utils.retrieve_task_from_sender(kwargs) @@ -277,3 +292,30 @@ def _trace_retry(*args, **kwargs): # Use `str(reason)` instead of `reason.message` in case we get # something that isn't an `Exception` span.set_attribute(_TASK_RETRY_REASON_KEY, str(reason)) + + def update_task_duration_time(self, task_id): + cur_time = default_timer() + task_duration_time_until_now = ( + cur_time - self.task_id_to_start_time[task_id] + if task_id in self.task_id_to_start_time + else cur_time + ) + self.task_id_to_start_time[task_id] = task_duration_time_until_now + + def _record_histograms(self, task_id, metric_attributes): + if task_id is None: + return + + self.metrics["flower.task.runtime.seconds"].record( + self.task_id_to_start_time.get(task_id), + attributes=metric_attributes, + ) + + def create_celery_metrics(self, 
meter) -> None: + self.metrics = { + "flower.task.runtime.seconds": meter.create_histogram( + name="flower.task.runtime.seconds", + unit="seconds", + description="The time it took to run the task.", + ) + }
diff --git a/instrumentation/opentelemetry-instrumentation-celery/tests/test_metrics.py b/instrumentation/opentelemetry-instrumentation-celery/tests/test_metrics.py new file mode 100644 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-celery/tests/test_metrics.py @@ -0,0 +1,76 @@ +import threading +import time +from timeit import default_timer + +from opentelemetry.instrumentation.celery import CeleryInstrumentor +from opentelemetry.test.test_base import TestBase + +from .celery_test_tasks import app, task_add + + +class TestMetrics(TestBase): + def setUp(self): + super().setUp() + self._worker = app.Worker( + app=app, pool="solo", concurrency=1, hostname="celery@akochavi" + ) + self._thread = threading.Thread(target=self._worker.start) + self._thread.daemon = True + self._thread.start() + + def tearDown(self): + super().tearDown() + self._worker.stop() + self._thread.join() + + def get_metrics(self): + result = task_add.delay(1, 2) + + timeout = time.time() + 60 * 1 # 1 minutes from now + while not result.ready(): + if time.time() > timeout: + break + time.sleep(0.05) + return self.get_sorted_metrics() + + def test_basic_metric(self): + CeleryInstrumentor().instrument() + start_time = default_timer() + task_runtime_estimated = (default_timer() - start_time) * 1000 + + metrics = self.get_metrics() + CeleryInstrumentor().uninstrument() + self.assertEqual(len(metrics), 1) + + task_runtime = metrics[0] + print(task_runtime) + self.assertEqual(task_runtime.name, "flower.task.runtime.seconds") + self.assert_metric_expected( + task_runtime, + [ + self.create_histogram_data_point( + count=1, + sum_data_point=task_runtime_estimated, + max_data_point=task_runtime_estimated, + min_data_point=task_runtime_estimated, + attributes={ + "task": "tests.celery_test_tasks.task_add", + "worker": "celery@akochavi", + }, + ) + ], + est_value_delta=200, + ) + + def test_metric_uninstrument(self): + CeleryInstrumentor().instrument() + metrics = self.get_metrics() + self.assertEqual(len(metrics), 1) + CeleryInstrumentor().uninstrument() + + metrics = self.get_metrics() + self.assertEqual(len(metrics), 1) + + for metric in metrics: + for point in list(metric.data.data_points): + self.assertEqual(point.count, 1)
Metrics instrumentation celery Relevant semconv: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/instrumentation/kafka.md
Hi, can you assign it to me?

Hello @sanketmehta28, if you are stuck or have not started working on it yet, can you please assign this to me? CC @srikanthccv

Hello @srikanthccv @TheAnshul756, does this mean that I can't use OpenTelemetry Metrics under Celery? Right now I'm trying to use it but some metrics are getting lost; I assume it's because the normal exporter doesn't work correctly in celery/worker-based projects, right?

@levivm I didn't understand your question. This issue is for adding metrics instrumentation for `opentelemetry-instrumentation-celery`. Today it only supports tracing.

@srikanthccv I want to send metrics, not spans, from a celery worker using OpenTelemetry, but it seems some metrics are getting lost, so I don't know if it is because OpenTelemetry Metrics is not yet supported for use along with Celery? Thanks in advance for your help.

The celery instrumentation doesn't have metrics instrumentation yet, and this is a tracker issue for it.

Hello @srikanthccv, I wanted to know what metric semantic convention to follow for task queues like Celery?

We will do something similar to this: https://flower.readthedocs.io/en/latest/prometheus-integration.html#available-metrics. Since this framework is specific to Python projects it probably won't be added to the spec, but we can provide these metrics using the OTEL Metrics API.

Hi, can I work on it, or are you already working on it?

@TheAnshul756 Sure, you can work on it.

@srikanthccv Hi, I'm trying to add Gauge metrics and I see that there isn't a record option for Gauge (for example, when I use Histogram, the option exists). Do you know if this option exists in some form? Is there a situation where they didn't implement it until they needed to? Or maybe you don't need to call anything at all and it should work that way? Because no one has used it before, there is no reference at all.
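On the Gauge question at the end of the thread: in the OpenTelemetry Python metrics API a gauge is an asynchronous (observable) instrument, so there is no `record()` call as with `Histogram`; the SDK pulls values from a callback at collection time. A minimal sketch follows (the instrument name and the value being observed are made up for illustration):

```python
# Sketch of an observable gauge; values are reported from a callback rather
# than via a record() method. Instrument name and value source are made up.
from opentelemetry.metrics import CallbackOptions, Observation, get_meter

meter = get_meter(__name__)

_active_tasks = 0  # stand-in for whatever state the instrumentation tracks


def _observe_active_tasks(options: CallbackOptions):
    yield Observation(_active_tasks, {"worker": "celery@akochavi"})


meter.create_observable_gauge(
    name="celery.active_tasks",
    callbacks=[_observe_active_tasks],
    description="Number of tasks currently being executed by the worker.",
)
```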
2023-02-16T11:31:13
open-telemetry/opentelemetry-python-contrib
1,690
open-telemetry__opentelemetry-python-contrib-1690
[ "1689" ]
85ae95c88ffbae59f34f41051ca96fc7cad97ccc
diff --git a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/__init__.py b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/__init__.py --- a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/__init__.py @@ -88,10 +88,27 @@ def response_hook(span, instance, response): client = redis.StrictRedis(host="localhost", port=6379) client.get("my-key") +Configuration +------------- + +Query sanitization +****************** +To enable query sanitization with an environment variable, set +``OTEL_PYTHON_INSTRUMENTATION_SANITIZE_REDIS`` to "true". + +For example, + +:: + + export OTEL_PYTHON_INSTRUMENTATION_SANITIZE_REDIS="true" + +will result in traced queries like "SET ? ?". + API --- """ import typing +from os import environ from typing import Any, Collection import redis @@ -99,6 +116,9 @@ def response_hook(span, instance, response): from opentelemetry import trace from opentelemetry.instrumentation.instrumentor import BaseInstrumentor +from opentelemetry.instrumentation.redis.environment_variables import ( + OTEL_PYTHON_INSTRUMENTATION_SANITIZE_REDIS, +) from opentelemetry.instrumentation.redis.package import _instruments from opentelemetry.instrumentation.redis.util import ( _extract_conn_attributes, @@ -287,7 +307,15 @@ def _instrument(self, **kwargs): tracer, request_hook=kwargs.get("request_hook"), response_hook=kwargs.get("response_hook"), - sanitize_query=kwargs.get("sanitize_query", False), + sanitize_query=kwargs.get( + "sanitize_query", + environ.get( + OTEL_PYTHON_INSTRUMENTATION_SANITIZE_REDIS, "false" + ) + .lower() + .strip() + == "true", + ), ) def _uninstrument(self, **kwargs): diff --git a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/environment_variables.py b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/environment_variables.py new file mode 100644 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/environment_variables.py @@ -0,0 +1,17 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +OTEL_PYTHON_INSTRUMENTATION_SANITIZE_REDIS = ( + "OTEL_PYTHON_INSTRUMENTATION_SANITIZE_REDIS" +)
diff --git a/instrumentation/opentelemetry-instrumentation-redis/tests/test_redis.py b/instrumentation/opentelemetry-instrumentation-redis/tests/test_redis.py --- a/instrumentation/opentelemetry-instrumentation-redis/tests/test_redis.py +++ b/instrumentation/opentelemetry-instrumentation-redis/tests/test_redis.py @@ -168,6 +168,32 @@ def test_query_sanitizer_enabled(self): span = spans[0] self.assertEqual(span.attributes.get("db.statement"), "SET ? ?") + def test_query_sanitizer_enabled_env(self): + redis_client = redis.Redis() + connection = redis.connection.Connection() + redis_client.connection = connection + + RedisInstrumentor().uninstrument() + + env_patch = mock.patch.dict( + "os.environ", + {"OTEL_PYTHON_INSTRUMENTATION_SANITIZE_REDIS": "true"}, + ) + env_patch.start() + RedisInstrumentor().instrument( + tracer_provider=self.tracer_provider, + ) + + with mock.patch.object(redis_client, "connection"): + redis_client.set("key", "value") + + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 1) + + span = spans[0] + self.assertEqual(span.attributes.get("db.statement"), "SET ? ?") + env_patch.stop() + def test_query_sanitizer_disabled(self): redis_client = redis.Redis() connection = redis.connection.Connection()
[Redis] Add support for sanitisation from environment variable The Redis instrumentation now supports sanitisation. I'd like to use this via auto-instrumentation, so an environment variable must be available to configure it. When running auto-instrumentation with the environment variable `OTEL_PYTHON_INSTRUMENTATION_SANITIZE_REDIS` set to `true`, I expect sanitisation to be enabled. A more interesting change could be to support kwargs being passed to instrumentations from environment variables.
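A small usage sketch based on the environment variable introduced in the patch above; the key and value here are arbitrary. Because the variable is read when `instrument()` runs, it has to be set beforehand (for auto-instrumentation, exporting it in the process environment is enough).

```python
import os

# Assumption: enables query sanitization per the environment variable added above.
os.environ["OTEL_PYTHON_INSTRUMENTATION_SANITIZE_REDIS"] = "true"

import redis
from opentelemetry.instrumentation.redis import RedisInstrumentor

RedisInstrumentor().instrument()

client = redis.StrictRedis(host="localhost", port=6379)
client.set("my-key", "my-value")  # recorded as "SET ? ?" when sanitization is on
```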
2023-02-23T11:48:39
open-telemetry/opentelemetry-python-contrib
1,695
open-telemetry__opentelemetry-python-contrib-1695
[ "1694" ]
e5d9ac5755f53113c87f0e383d6c158d1b9fb077
diff --git a/instrumentation/opentelemetry-instrumentation-httpx/src/opentelemetry/instrumentation/httpx/__init__.py b/instrumentation/opentelemetry-instrumentation-httpx/src/opentelemetry/instrumentation/httpx/__init__.py --- a/instrumentation/opentelemetry-instrumentation-httpx/src/opentelemetry/instrumentation/httpx/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-httpx/src/opentelemetry/instrumentation/httpx/__init__.py @@ -162,6 +162,7 @@ def response_hook(span, request, response): """ import logging import typing +from types import TracebackType import httpx @@ -293,6 +294,18 @@ def __init__( self._request_hook = request_hook self._response_hook = response_hook + def __enter__(self) -> "SyncOpenTelemetryTransport": + self._transport.__enter__() + return self + + def __exit__( + self, + exc_type: typing.Optional[typing.Type[BaseException]] = None, + exc_value: typing.Optional[BaseException] = None, + traceback: typing.Optional[TracebackType] = None, + ) -> None: + self._transport.__exit__(exc_type, exc_value, traceback) + def handle_request( self, *args, @@ -343,6 +356,9 @@ def handle_request( return response + def close(self) -> None: + self._transport.close() + class AsyncOpenTelemetryTransport(httpx.AsyncBaseTransport): """Async transport class that will trace all requests made with a client. @@ -372,6 +388,18 @@ def __init__( self._request_hook = request_hook self._response_hook = response_hook + async def __aenter__(self) -> "AsyncOpenTelemetryTransport": + await self._transport.__aenter__() + return self + + async def __aexit__( + self, + exc_type: typing.Optional[typing.Type[BaseException]] = None, + exc_value: typing.Optional[BaseException] = None, + traceback: typing.Optional[TracebackType] = None, + ) -> None: + await self._transport.__aexit__(exc_type, exc_value, traceback) + async def handle_async_request( self, *args, **kwargs ) -> typing.Union[ @@ -423,6 +451,9 @@ async def handle_async_request( return response + async def aclose(self) -> None: + await self._transport.aclose() + class _InstrumentedClient(httpx.Client): _tracer_provider = None
Httpx instrumentation results in a connection leak

**Describe your environment**
python 3.10.10
httpx==0.23.3
opentelemetry-instrumentation-httpx==0.36b0

**Steps to reproduce**

```python
import httpx
from opentelemetry.instrumentation.httpx import HTTPXClientInstrumentor

with httpx.Client() as client:
    HTTPXClientInstrumentor.instrument_client(client)
    print(client.get("https://wtfismyip.com/json").json())
```

**What is the expected behavior?**
I expected it to run without any ResourceWarning.

**What is the actual behavior?**
A ResourceWarning is raised for each client instance.
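For context, a sketch of the transport-level usage that the patch above fixes: with the added `__enter__`/`__exit__`/`close` delegation, closing the client (or the wrapper itself) now also closes the wrapped transport, so connections are released. The URL is a placeholder.

```python
import httpx
from opentelemetry.instrumentation.httpx import SyncOpenTelemetryTransport

telemetry_transport = SyncOpenTelemetryTransport(httpx.HTTPTransport())

# Exiting the client context manager now propagates to the wrapped transport;
# previously the inner transport was never closed, leaking connections.
with httpx.Client(transport=telemetry_transport) as client:
    client.get("https://example.com")
```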
2023-02-26T00:55:22
open-telemetry/opentelemetry-python-contrib
1,717
open-telemetry__opentelemetry-python-contrib-1717
[ "411" ]
fbec281553b1983cb14d61cee05ed8c0efe80fbd
diff --git a/instrumentation/opentelemetry-instrumentation-requests/src/opentelemetry/instrumentation/requests/__init__.py b/instrumentation/opentelemetry-instrumentation-requests/src/opentelemetry/instrumentation/requests/__init__.py --- a/instrumentation/opentelemetry-instrumentation-requests/src/opentelemetry/instrumentation/requests/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-requests/src/opentelemetry/instrumentation/requests/__init__.py @@ -54,7 +54,7 @@ from typing import Callable, Collection, Iterable, Optional from urllib.parse import urlparse -from requests.models import Response +from requests.models import PreparedRequest, Response from requests.sessions import Session from requests.structures import CaseInsensitiveDict @@ -85,14 +85,17 @@ _excluded_urls_from_env = get_excluded_urls("REQUESTS") +_RequestHookT = Optional[Callable[[Span, PreparedRequest], None]] +_ResponseHookT = Optional[Callable[[Span, PreparedRequest], None]] + # pylint: disable=unused-argument # pylint: disable=R0915 def _instrument( tracer: Tracer, duration_histogram: Histogram, - span_callback: Optional[Callable[[Span, Response], str]] = None, - name_callback: Optional[Callable[[str, str], str]] = None, + request_hook: _RequestHookT = None, + response_hook: _ResponseHookT = None, excluded_urls: Iterable[str] = None, ): """Enables tracing of all requests calls that go through @@ -106,29 +109,9 @@ def _instrument( # before v1.0.0, Dec 17, 2012, see # https://github.com/psf/requests/commit/4e5c4a6ab7bb0195dececdd19bb8505b872fe120) - wrapped_request = Session.request wrapped_send = Session.send - @functools.wraps(wrapped_request) - def instrumented_request(self, method, url, *args, **kwargs): - if excluded_urls and excluded_urls.url_disabled(url): - return wrapped_request(self, method, url, *args, **kwargs) - - def get_or_create_headers(): - headers = kwargs.get("headers") - if headers is None: - headers = {} - kwargs["headers"] = headers - - return headers - - def call_wrapped(): - return wrapped_request(self, method, url, *args, **kwargs) - - return _instrumented_requests_call( - method, url, call_wrapped, get_or_create_headers - ) - + # pylint: disable-msg=too-many-locals,too-many-branches @functools.wraps(wrapped_send) def instrumented_send(self, request, **kwargs): if excluded_urls and excluded_urls.url_disabled(request.url): @@ -142,32 +125,17 @@ def get_or_create_headers(): ) return request.headers - def call_wrapped(): - return wrapped_send(self, request, **kwargs) - - return _instrumented_requests_call( - request.method, request.url, call_wrapped, get_or_create_headers - ) - - # pylint: disable-msg=too-many-locals,too-many-branches - def _instrumented_requests_call( - method: str, url: str, call_wrapped, get_or_create_headers - ): if context.get_value( _SUPPRESS_INSTRUMENTATION_KEY ) or context.get_value(_SUPPRESS_HTTP_INSTRUMENTATION_KEY): - return call_wrapped() + return wrapped_send(self, request, **kwargs) # See # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#http-client - method = method.upper() - span_name = "" - if name_callback is not None: - span_name = name_callback(method, url) - if not span_name or not isinstance(span_name, str): - span_name = get_default_span_name(method) + method = request.method.upper() + span_name = get_default_span_name(method) - url = remove_url_credentials(url) + url = remove_url_credentials(request.url) span_attributes = { SpanAttributes.HTTP_METHOD: method, @@ -195,6 
+163,8 @@ def _instrumented_requests_call( span_name, kind=SpanKind.CLIENT, attributes=span_attributes ) as span, set_ip_on_next_http_connection(span): exception = None + if callable(request_hook): + request_hook(span, request) headers = get_or_create_headers() inject(headers) @@ -206,7 +176,7 @@ def _instrumented_requests_call( start_time = default_timer() try: - result = call_wrapped() # *** PROCEED + result = wrapped_send(self, request, **kwargs) # *** PROCEED except Exception as exc: # pylint: disable=W0703 exception = exc result = getattr(exc, "response", None) @@ -236,8 +206,8 @@ def _instrumented_requests_call( "1.1" if version == 11 else "1.0" ) - if span_callback is not None: - span_callback(span, result) + if callable(response_hook): + response_hook(span, request, result) duration_histogram.record(elapsed_time, attributes=metric_labels) @@ -246,9 +216,6 @@ def _instrumented_requests_call( return result - instrumented_request.opentelemetry_instrumentation_requests_applied = True - Session.request = instrumented_request - instrumented_send.opentelemetry_instrumentation_requests_applied = True Session.send = instrumented_send @@ -295,10 +262,8 @@ def _instrument(self, **kwargs): Args: **kwargs: Optional arguments ``tracer_provider``: a TracerProvider, defaults to global - ``span_callback``: An optional callback invoked before returning the http response. Invoked with Span and requests.Response - ``name_callback``: Callback which calculates a generic span name for an - outgoing HTTP request based on the method and url. - Optional: Defaults to get_default_span_name. + ``request_hook``: An optional callback that is invoked right after a span is created. + ``response_hook``: An optional callback which is invoked right before the span is finished processing a response. ``excluded_urls``: A string containing a comma-delimited list of regexes used to exclude URLs from tracking """ @@ -319,8 +284,8 @@ def _instrument(self, **kwargs): _instrument( tracer, duration_histogram, - span_callback=kwargs.get("span_callback"), - name_callback=kwargs.get("name_callback"), + request_hook=kwargs.get("request_hook"), + response_hook=kwargs.get("response_hook"), excluded_urls=_excluded_urls_from_env if excluded_urls is None else parse_excluded_urls(excluded_urls),
diff --git a/instrumentation/opentelemetry-instrumentation-requests/tests/test_requests_integration.py b/instrumentation/opentelemetry-instrumentation-requests/tests/test_requests_integration.py --- a/instrumentation/opentelemetry-instrumentation-requests/tests/test_requests_integration.py +++ b/instrumentation/opentelemetry-instrumentation-requests/tests/test_requests_integration.py @@ -133,17 +133,23 @@ def test_basic(self): span, opentelemetry.instrumentation.requests ) - def test_name_callback(self): - def name_callback(method, url): - return "GET" + url + def test_hooks(self): + def request_hook(span, request_obj): + span.update_name("name set from hook") + + def response_hook(span, request_obj, response): + span.set_attribute("response_hook_attr", "value") RequestsInstrumentor().uninstrument() - RequestsInstrumentor().instrument(name_callback=name_callback) + RequestsInstrumentor().instrument( + request_hook=request_hook, response_hook=response_hook + ) result = self.perform_request(self.URL) self.assertEqual(result.text, "Hello!") span = self.assert_span() - self.assertEqual(span.name, "GET" + self.URL) + self.assertEqual(span.name, "name set from hook") + self.assertEqual(span.attributes["response_hook_attr"], "value") def test_excluded_urls_explicit(self): url_404 = "http://httpbin.org/status/404" @@ -300,17 +306,21 @@ def test_distributed_context(self): finally: set_global_textmap(previous_propagator) - def test_span_callback(self): + def test_response_hook(self): RequestsInstrumentor().uninstrument() - def span_callback(span, result: requests.Response): + def response_hook( + span, + request: requests.PreparedRequest, + response: requests.Response, + ): span.set_attribute( - "http.response.body", result.content.decode("utf-8") + "http.response.body", response.content.decode("utf-8") ) RequestsInstrumentor().instrument( tracer_provider=self.tracer_provider, - span_callback=span_callback, + response_hook=response_hook, ) result = self.perform_request(self.URL) @@ -449,21 +459,6 @@ def perform_request(url: str, session: requests.Session = None): return requests.get(url) return session.get(url) - def test_invalid_url(self): - url = "http://[::1/nope" - - with self.assertRaises(ValueError): - requests.post(url) - - span = self.assert_span() - - self.assertEqual(span.name, "HTTP POST") - self.assertEqual( - span.attributes, - {SpanAttributes.HTTP_METHOD: "POST", SpanAttributes.HTTP_URL: url}, - ) - self.assertEqual(span.status.status_code, StatusCode.ERROR) - def test_credential_removal(self): new_url = "http://username:[email protected]/status/200" self.perform_request(new_url)
[Requests] Replace span name callback with request and response hooks Requests instrumentation accepts a span name callback which should be replaced with more generic request/response callbacks (hooks). Details: https://github.com/open-telemetry/opentelemetry-python-contrib/issues/408
I can take on this issue. cc @alolita @NickSulistio Are you still interested in taking this on? @lzchen Yeah! Sorry for the delays, I can get it done this week. Hey @lzchen, just wanted to confirm that this is still open since I'm cleaning up the remaining instrumentations without hooks :) (https://github.com/open-telemetry/opentelemetry-python-contrib/issues/412). cc: @alolita If you are working on all of the instrumentations, that is great! We just have individual tasks for the hooks, so be sure to link your PR to each when you submit it. @ryokather Do you have plans to add hooks for the requests instrumentation? Hi @lzchen. I believe in my previous PR I investigated this and found that implementing hooks on the requests instrumentation isn't easily achievable, because the wrapper for Session.request provides no way to access a request-like object, which is necessary for a request_hook. I left it in case anyone else has a better idea! Hi, I have been investigating this issue, and apparently we could remove the instrumentation of Session.request and leave only Session.send ([request goes through send anyway](https://github.com/psf/requests/blob/e90852d20c9bc44c465e55234c01432711627d94/requests/sessions.py#LL587C8-L587C46)). When you remove the instrumentation for request, only one test fails: `test_invalid_url`. I went to look at how other instrumentations handle this case and, at least for the ones I checked (aiohttp and urllib), they do not create a span if they can't build a request due to an invalid URL. Thus, by the time we reach send we always have a PreparedRequest object we can use for the request_hook callback argument.
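A sketch of the hook-based API introduced by the patch above, following the signatures in the updated tests (`request_hook(span, request)` with a `PreparedRequest`, `response_hook(span, request, response)`); the attribute names used here are made up.

```python
import requests
from opentelemetry.instrumentation.requests import RequestsInstrumentor


def request_hook(span, request):
    # request is the requests.PreparedRequest about to be sent.
    span.set_attribute("example.request.has_body", request.body is not None)


def response_hook(span, request, response):
    # response is the requests.Response returned by Session.send.
    span.set_attribute("example.response.length", len(response.content))


RequestsInstrumentor().instrument(
    request_hook=request_hook, response_hook=response_hook
)

requests.get("https://example.com")
```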
2023-03-10T19:50:59
open-telemetry/opentelemetry-python-contrib
1,735
open-telemetry__opentelemetry-python-contrib-1735
[ "1722" ]
20d2cc311a7a0634ff6c9059d9acce49545b5938
diff --git a/instrumentation/opentelemetry-instrumentation-system-metrics/src/opentelemetry/instrumentation/system_metrics/__init__.py b/instrumentation/opentelemetry-instrumentation-system-metrics/src/opentelemetry/instrumentation/system_metrics/__init__.py --- a/instrumentation/opentelemetry-instrumentation-system-metrics/src/opentelemetry/instrumentation/system_metrics/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-system-metrics/src/opentelemetry/instrumentation/system_metrics/__init__.py @@ -34,8 +34,8 @@ "system.network.io": ["transmit", "receive"], "system.network.connections": ["family", "type"], "system.thread_count": None - "runtime.memory": ["rss", "vms"], - "runtime.cpu.time": ["user", "system"], + "process.runtime.memory": ["rss", "vms"], + "process.runtime.cpu.time": ["user", "system"], } Usage @@ -61,8 +61,8 @@ "system.memory.usage": ["used", "free", "cached"], "system.cpu.time": ["idle", "user", "system", "irq"], "system.network.io": ["transmit", "receive"], - "runtime.memory": ["rss", "vms"], - "runtime.cpu.time": ["user", "system"], + "process.runtime.memory": ["rss", "vms"], + "process.runtime.cpu.time": ["user", "system"], } SystemMetricsInstrumentor(config=configuration).instrument() @@ -102,9 +102,9 @@ "system.network.io": ["transmit", "receive"], "system.network.connections": ["family", "type"], "system.thread_count": None, - "runtime.memory": ["rss", "vms"], - "runtime.cpu.time": ["user", "system"], - "runtime.gc_count": None, + "process.runtime.memory": ["rss", "vms"], + "process.runtime.cpu.time": ["user", "system"], + "process.runtime.gc_count": None, } @@ -323,25 +323,25 @@ def _instrument(self, **kwargs): description="System active threads count", ) - if "runtime.memory" in self._config: - self._meter.create_observable_counter( - name=f"runtime.{self._python_implementation}.memory", + if "process.runtime.memory" in self._config: + self._meter.create_observable_up_down_counter( + name=f"process.runtime.{self._python_implementation}.memory", callbacks=[self._get_runtime_memory], description=f"Runtime {self._python_implementation} memory", unit="bytes", ) - if "runtime.cpu.time" in self._config: + if "process.runtime.cpu.time" in self._config: self._meter.create_observable_counter( - name=f"runtime.{self._python_implementation}.cpu_time", + name=f"process.runtime.{self._python_implementation}.cpu_time", callbacks=[self._get_runtime_cpu_time], description=f"Runtime {self._python_implementation} CPU time", unit="seconds", ) - if "runtime.gc_count" in self._config: + if "process.runtime.gc_count" in self._config: self._meter.create_observable_counter( - name=f"runtime.{self._python_implementation}.gc_count", + name=f"process.runtime.{self._python_implementation}.gc_count", callbacks=[self._get_runtime_gc_count], description=f"Runtime {self._python_implementation} GC count", unit="bytes", @@ -618,7 +618,7 @@ def _get_runtime_memory( ) -> Iterable[Observation]: """Observer callback for runtime memory""" proc_memory = self._proc.memory_info() - for metric in self._config["runtime.memory"]: + for metric in self._config["process.runtime.memory"]: if hasattr(proc_memory, metric): self._runtime_memory_labels["type"] = metric yield Observation( @@ -631,7 +631,7 @@ def _get_runtime_cpu_time( ) -> Iterable[Observation]: """Observer callback for runtime CPU time""" proc_cpu = self._proc.cpu_times() - for metric in self._config["runtime.cpu.time"]: + for metric in self._config["process.runtime.cpu.time"]: if hasattr(proc_cpu, metric): 
self._runtime_cpu_time_labels["type"] = metric yield Observation(
diff --git a/instrumentation/opentelemetry-instrumentation-system-metrics/tests/test_system_metrics.py b/instrumentation/opentelemetry-instrumentation-system-metrics/tests/test_system_metrics.py --- a/instrumentation/opentelemetry-instrumentation-system-metrics/tests/test_system_metrics.py +++ b/instrumentation/opentelemetry-instrumentation-system-metrics/tests/test_system_metrics.py @@ -114,9 +114,9 @@ def test_system_metrics_instrument(self): "system.network.io", "system.network.connections", "system.thread_count", - f"runtime.{self.implementation}.memory", - f"runtime.{self.implementation}.cpu_time", - f"runtime.{self.implementation}.gc_count", + f"process.runtime.{self.implementation}.memory", + f"process.runtime.{self.implementation}.cpu_time", + f"process.runtime.{self.implementation}.gc_count", ] for observer in metric_names: @@ -125,9 +125,9 @@ def test_system_metrics_instrument(self): def test_runtime_metrics_instrument(self): runtime_config = { - "runtime.memory": ["rss", "vms"], - "runtime.cpu.time": ["user", "system"], - "runtime.gc_count": None, + "process.runtime.memory": ["rss", "vms"], + "process.runtime.cpu.time": ["user", "system"], + "process.runtime.gc_count": None, } reader = InMemoryMetricReader() @@ -143,9 +143,9 @@ def test_runtime_metrics_instrument(self): self.assertEqual(len(metric_names), 3) observer_names = [ - f"runtime.{self.implementation}.memory", - f"runtime.{self.implementation}.cpu_time", - f"runtime.{self.implementation}.gc_count", + f"process.runtime.{self.implementation}.memory", + f"process.runtime.{self.implementation}.cpu_time", + f"process.runtime.{self.implementation}.gc_count", ] for observer in metric_names: @@ -750,7 +750,9 @@ def test_runtime_memory(self, mock_process_memory_info): _SystemMetricsResult({"type": "rss"}, 1), _SystemMetricsResult({"type": "vms"}, 2), ] - self._test_metrics(f"runtime.{self.implementation}.memory", expected) + self._test_metrics( + f"process.runtime.{self.implementation}.memory", expected + ) @mock.patch("psutil.Process.cpu_times") def test_runtime_cpu_time(self, mock_process_cpu_times): @@ -764,7 +766,9 @@ def test_runtime_cpu_time(self, mock_process_cpu_times): _SystemMetricsResult({"type": "user"}, 1.1), _SystemMetricsResult({"type": "system"}, 2.2), ] - self._test_metrics(f"runtime.{self.implementation}.cpu_time", expected) + self._test_metrics( + f"process.runtime.{self.implementation}.cpu_time", expected + ) @mock.patch("gc.get_count") def test_runtime_get_count(self, mock_gc_get_count): @@ -775,4 +779,6 @@ def test_runtime_get_count(self, mock_gc_get_count): _SystemMetricsResult({"count": "1"}, 2), _SystemMetricsResult({"count": "2"}, 3), ] - self._test_metrics(f"runtime.{self.implementation}.gc_count", expected) + self._test_metrics( + f"process.runtime.{self.implementation}.gc_count", expected + )
Runtime metrics don't follow semantic conventions I noticed that [these runtime metrics](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/instrumentation/opentelemetry-instrumentation-system-metrics/src/opentelemetry/instrumentation/system_metrics/__init__.py#L326-L348) (`runtime.memory`, `runtime.cpu.time`, `runtime.gc_count`) don't follow the semantic conventions listed here: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/process-metrics.md#metric-instruments Should we update these metrics? If so, I'm happy to open a PR to fix this
Please send a fix; we appreciate the help.
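A configuration sketch using the renamed `process.runtime.*` keys from the patch above (taken from the updated docstring in the diff); which metrics to enable is up to the application.

```python
from opentelemetry.instrumentation.system_metrics import SystemMetricsInstrumentor

configuration = {
    "system.memory.usage": ["used", "free", "cached"],
    "system.cpu.time": ["idle", "user", "system", "irq"],
    "process.runtime.memory": ["rss", "vms"],
    "process.runtime.cpu.time": ["user", "system"],
    "process.runtime.gc_count": None,
}

SystemMetricsInstrumentor(config=configuration).instrument()
```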
2023-03-30T14:45:35
open-telemetry/opentelemetry-python-contrib
1,748
open-telemetry__opentelemetry-python-contrib-1748
[ "1463" ]
37d85f07458900762f75ec6e4942c5dec2f93694
diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py @@ -81,7 +81,7 @@ "instrumentation": "opentelemetry-instrumentation-grpc==0.40b0.dev", }, "httpx": { - "library": "httpx >= 0.18.0, <= 0.23.0", + "library": "httpx >= 0.18.0", "instrumentation": "opentelemetry-instrumentation-httpx==0.40b0.dev", }, "jinja2": {
diff --git a/instrumentation/opentelemetry-instrumentation-httpx/tests/test_httpx_integration.py b/instrumentation/opentelemetry-instrumentation-httpx/tests/test_httpx_integration.py --- a/instrumentation/opentelemetry-instrumentation-httpx/tests/test_httpx_integration.py +++ b/instrumentation/opentelemetry-instrumentation-httpx/tests/test_httpx_integration.py @@ -59,7 +59,7 @@ def _async_call(coro: typing.Coroutine) -> asyncio.Task: def _response_hook(span, request: "RequestInfo", response: "ResponseInfo"): span.set_attribute( HTTP_RESPONSE_BODY, - response[2].read(), + b"".join(response[2]), ) @@ -68,7 +68,7 @@ async def _async_response_hook( ): span.set_attribute( HTTP_RESPONSE_BODY, - await response[2].aread(), + b"".join([part async for part in response[2]]), )
httpx tests failing on httpx==0.23.1 Can we leave https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1459 open until we actually fix the instrumentation to work with that version? _Originally posted by @aabmass in https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1460#pullrequestreview-1186403709_
2023-04-12T06:26:46
open-telemetry/opentelemetry-python-contrib
1,758
open-telemetry__opentelemetry-python-contrib-1758
[ "1755" ]
2d4e6c9ac775347e04ed04d4898f9ad6b0372503
diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py --- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py @@ -44,7 +44,6 @@ The instrument() method accepts the following keyword args: tracer_provider (TracerProvider) - an optional tracer provider -sanitize_query (bool) - an optional query sanitization flag request_hook (Callable) - a function with extra user-defined logic to be performed before performing the request this function signature is: def request_hook(span: Span, method: str, url: str, kwargs) @@ -138,13 +137,11 @@ def _instrument(self, **kwargs): tracer = get_tracer(__name__, __version__, tracer_provider) request_hook = kwargs.get("request_hook") response_hook = kwargs.get("response_hook") - sanitize_query = kwargs.get("sanitize_query", False) _wrap( elasticsearch, "Transport.perform_request", _wrap_perform_request( tracer, - sanitize_query, self._span_name_prefix, request_hook, response_hook, @@ -163,7 +160,6 @@ def _uninstrument(self, **kwargs): def _wrap_perform_request( tracer, - sanitize_query, span_name_prefix, request_hook=None, response_hook=None, @@ -225,10 +221,9 @@ def wrapper(wrapped, _, args, kwargs): if method: attributes["elasticsearch.method"] = method if body: - statement = str(body) - if sanitize_query: - statement = sanitize_body(body) - attributes[SpanAttributes.DB_STATEMENT] = statement + attributes[SpanAttributes.DB_STATEMENT] = sanitize_body( + body + ) if params: attributes["elasticsearch.params"] = str(params) if doc_id: diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py --- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py +++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py @@ -29,7 +29,8 @@ def _flatten_dict(d, parent_key=""): items = [] for k, v in d.items(): new_key = parent_key + "." + k if parent_key else k - if isinstance(v, dict): + # recursive call _flatten_dict for a non-empty dict value + if isinstance(v, dict) and v: items.extend(_flatten_dict(v, new_key).items()) else: items.append((new_key, v))
diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/test_elasticsearch.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/test_elasticsearch.py --- a/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/test_elasticsearch.py +++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/test_elasticsearch.py @@ -58,9 +58,7 @@ class TestElasticsearchIntegration(TestBase): "elasticsearch.url": "/test-index/_search", "elasticsearch.method": helpers.dsl_search_method, "elasticsearch.target": "test-index", - SpanAttributes.DB_STATEMENT: str( - {"query": {"bool": {"filter": [{"term": {"author": "testing"}}]}}} - ), + SpanAttributes.DB_STATEMENT: str({"query": {"bool": {"filter": "?"}}}), } create_attributes = { @@ -264,18 +262,6 @@ def test_dsl_search(self, request_mock): ) def test_dsl_search_sanitized(self, request_mock): - # Reset instrumentation to use sanitized query (default) - ElasticsearchInstrumentor().uninstrument() - ElasticsearchInstrumentor().instrument(sanitize_query=True) - - # update expected attributes to match sanitized query - sanitized_search_attributes = self.search_attributes.copy() - sanitized_search_attributes.update( - { - SpanAttributes.DB_STATEMENT: "{'query': {'bool': {'filter': '?'}}}" - } - ) - request_mock.return_value = (1, {}, '{"hits": {"hits": []}}') client = Elasticsearch() search = Search(using=client, index="test-index").filter( @@ -289,7 +275,7 @@ def test_dsl_search_sanitized(self, request_mock): self.assertIsNotNone(span.end_time) self.assertEqual( span.attributes, - sanitized_search_attributes, + self.search_attributes, ) def test_dsl_create(self, request_mock): @@ -320,9 +306,6 @@ def test_dsl_create(self, request_mock): ) def test_dsl_create_sanitized(self, request_mock): - # Reset instrumentation to explicitly use sanitized query - ElasticsearchInstrumentor().uninstrument() - ElasticsearchInstrumentor().instrument(sanitize_query=True) request_mock.return_value = (1, {}, {}) client = Elasticsearch() Article.init(using=client)
Only collect `db.statement` if there is sanitization Spec https://github.com/open-telemetry/opentelemetry-specification/pull/3127 - [ ] [aiopg](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-aiopg) - [ ] [asyncpg](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-asyncpg) - [ ] [dbapi](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-dbapi) - [ ] [elasticsearch](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-elasticsearch) - [ ] [mysql](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-mysql) - [ ] [pymemcache](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-pymemcache) - [ ] [pymongo](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-pymongo) - [ ] [pymysql](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-pymysql) - [ ] [redis](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-redis) - [ ] [sqlalchemy](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-sqlalchemy) - [ ] [sqlite3](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-sqlite3)
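As an illustration of what this means for the Elasticsearch instrumentation after the patch above, here is a sketch of the sanitized statement; it assumes `sanitize_body` lives in the instrumentation's `utils` module touched by the diff, and the query itself is arbitrary.

```python
from opentelemetry.instrumentation.elasticsearch.utils import sanitize_body

body = {"query": {"bool": {"filter": [{"term": {"author": "testing"}}]}}}

# Value parts of the query are replaced with "?", matching the updated test:
# {'query': {'bool': {'filter': '?'}}}
print(sanitize_body(body))
```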
Can I add db instrumentation with sanitization to this issue? All of them need to be aligned with the new specifications.
2023-04-16T10:07:14
open-telemetry/opentelemetry-python-contrib
1,771
open-telemetry__opentelemetry-python-contrib-1771
[ "1761", "1761" ]
a45c9c37924d78419d3482781d17086fad5cc3a8
diff --git a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py --- a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py +++ b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py @@ -13,6 +13,7 @@ # limitations under the License. import os import re +import weakref from sqlalchemy.event import ( # pylint: disable=no-name-in-module listen, @@ -99,11 +100,11 @@ def __init__( commenter_options=None, ): self.tracer = tracer - self.engine = engine self.connections_usage = connections_usage self.vendor = _normalize_vendor(engine.name) self.enable_commenter = enable_commenter self.commenter_options = commenter_options if commenter_options else {} + self._engine_attrs = _get_attributes_from_engine(engine) self._leading_comment_remover = re.compile(r"^/\*.*?\*/") self._register_event_listener( @@ -118,23 +119,11 @@ def __init__( self._register_event_listener(engine, "checkin", self._pool_checkin) self._register_event_listener(engine, "checkout", self._pool_checkout) - def _get_connection_string(self): - drivername = self.engine.url.drivername or "" - host = self.engine.url.host or "" - port = self.engine.url.port or "" - database = self.engine.url.database or "" - return f"{drivername}://{host}:{port}/{database}" - - def _get_pool_name(self): - if self.engine.pool.logging_name is not None: - return self.engine.pool.logging_name - return self._get_connection_string() - def _add_idle_to_connection_usage(self, value): self.connections_usage.add( value, attributes={ - "pool.name": self._get_pool_name(), + **self._engine_attrs, "state": "idle", }, ) @@ -143,7 +132,7 @@ def _add_used_to_connection_usage(self, value): self.connections_usage.add( value, attributes={ - "pool.name": self._get_pool_name(), + **self._engine_attrs, "state": "used", }, ) @@ -169,12 +158,21 @@ def _pool_checkout( @classmethod def _register_event_listener(cls, target, identifier, func, *args, **kw): listen(target, identifier, func, *args, **kw) - cls._remove_event_listener_params.append((target, identifier, func)) + cls._remove_event_listener_params.append( + (weakref.ref(target), identifier, func) + ) @classmethod def remove_all_event_listeners(cls): - for remove_params in cls._remove_event_listener_params: - remove(*remove_params) + for ( + weak_ref_target, + identifier, + func, + ) in cls._remove_event_listener_params: + # Remove an event listener only if saved weak reference points to an object + # which has not been garbage collected + if weak_ref_target() is not None: + remove(weak_ref_target(), identifier, func) cls._remove_event_listener_params.clear() def _operation_name(self, db_name, statement): @@ -300,3 +298,22 @@ def _get_attributes_from_cursor(vendor, cursor, attrs): if info.port: attrs[SpanAttributes.NET_PEER_PORT] = int(info.port) return attrs + + +def _get_connection_string(engine): + drivername = engine.url.drivername or "" + host = engine.url.host or "" + port = engine.url.port or "" + database = engine.url.database or "" + return f"{drivername}://{host}:{port}/{database}" + + +def _get_attributes_from_engine(engine): + """Set metadata attributes of the database engine""" + attrs = {} + + attrs["pool.name"] = getattr( + getattr(engine, "pool", None), "logging_name", None + ) or _get_connection_string(engine) + + return 
attrs
diff --git a/instrumentation/opentelemetry-instrumentation-sqlalchemy/tests/test_sqlalchemy.py b/instrumentation/opentelemetry-instrumentation-sqlalchemy/tests/test_sqlalchemy.py --- a/instrumentation/opentelemetry-instrumentation-sqlalchemy/tests/test_sqlalchemy.py +++ b/instrumentation/opentelemetry-instrumentation-sqlalchemy/tests/test_sqlalchemy.py @@ -307,3 +307,26 @@ def test_no_op_tracer_provider(self): cnx.execute("SELECT 1 + 1;").fetchall() spans = self.memory_exporter.get_finished_spans() self.assertEqual(len(spans), 0) + + def test_no_memory_leakage_if_engine_diposed(self): + SQLAlchemyInstrumentor().instrument() + import gc + import weakref + + from sqlalchemy import create_engine + + callback = mock.Mock() + + def make_shortlived_engine(): + engine = create_engine("sqlite:///:memory:") + # Callback will be called if engine is deallocated during garbage + # collection + weakref.finalize(engine, callback) + with engine.connect() as conn: + conn.execute("SELECT 1 + 1;").fetchall() + + for _ in range(0, 5): + make_shortlived_engine() + + gc.collect() + assert callback.call_count == 5
`sqlalchemy` memory leak with Engine cleanup introduced in 0.37b0 **Describe your environment** python 3.9.7 ``` Deprecated==1.2.13 graphviz==0.20.1 importlib-metadata==6.0.1 mysqlclient==1.4.4 objgraph==3.5.0 opentelemetry-api==1.16.0 opentelemetry-instrumentation==0.37b0 opentelemetry-instrumentation-sqlalchemy==0.37b0 opentelemetry-sdk==1.16.0 opentelemetry-semantic-conventions==0.37b0 packaging==23.1 SQLAlchemy==1.3.24 typing_extensions==4.5.0 wrapt==1.15.0 zipp==3.15.0 ``` **Steps to reproduce** ``` from opentelemetry.instrumentation.sqlalchemy import SQLAlchemyInstrumentor SQLAlchemyInstrumentor().instrument() from sqlalchemy import create_engine import objgraph import gc def leak_memory(): engine = create_engine('mysql://[email protected]:3306') with engine.connect() as conn: rs = conn.execute('SELECT 1') for row in rs: print(row) engine.dispose() gc.collect() snapshot = objgraph.growth() leak_memory() gc.collect() snapshot2 = objgraph.growth(limit=None) print(snapshot2) for x in range(0,10): leak_memory() gc.collect() snapshot3 = objgraph.growth(limit=None) print(snapshot3) ``` output: ``` (1,) [('function', 8357, 438), ('dict', 4705, 339), ('tuple', 4055, 302), ('weakref', 2024, 168), ('cell', 1223, 107), ('deque', 67, 58), ('type', 958, 53), ('_EmptyListener', 50, 50), ('method_descriptor', 1082, 45), ('VisitableType', 245, 43), ('module', 355, 34), ('ModuleSpec', 353, 34), ('SourceFileLoader', 289, 32), ('attrgetter', 157, 29), ('getset_descriptor', 1005, 27), ('member_descriptor', 530, 27), ('list', 1821, 25), ('builtin_function_or_method', 1007, 22), ('set', 303, 20), ('classmethod', 239, 12), ('property', 448, 11), ('wrapper_descriptor', 1354, 10), ('PlaceHolder', 14, 8), ('memoized_property', 93, 7), ('DBAPISet', 7, 7), ('FileFinder', 43, 5), ('_ListenerCollection', 5, 5), ('Logger', 26, 4), ('ExtensionFileLoader', 31, 2), ('Condition', 3, 2), ('Context', 4, 2), ('frozenset', 166, 1), ('ABCMeta', 129, 1), ('_abc_data', 138, 1), ('method', 88, 1), ('RegexFlag', 15, 1), ('WeakKeyDictionary', 46, 1), ('NullType', 2, 1), ('EnsureKWArgType', 4, 1), ('PoolEventsDispatch', 2, 1), ('ConnectionEventsDispatch', 2, 1), ('ProxyTracer', 2, 1), ('NoOpTracer', 2, 1), ('ContextVarsRuntimeContext', 1, 1), ('hamt', 1, 1), ('hamt_bitmap_node', 1, 1), ('EngineTracer', 1, 1), ('ContextVar', 1, 1), ('Engine', 1, 1), ('QueuePool', 1, 1), ('URL', 1, 1), ('MySQLDialect_mysqldb', 1, 1), ('_local', 1, 1), ('Queue', 1, 1), ('MySQLIdentifierPreparer_mysqldb', 1, 1), ('MySQLTypeCompiler', 1, 1)] (1,) (1,) (1,) (1,) (1,) (1,) (1,) (1,) (1,) (1,) [('dict', 4966, 261), ('builtin_function_or_method', 1127, 120), ('weakref', 2144, 120), ('tuple', 4145, 90), ('cell', 1313, 90), ('deque', 147, 80), ('function', 8417, 60), ('set', 363, 60), ('_ListenerCollection', 55, 50), ('list', 1853, 32), ('Condition', 23, 20), ('method', 98, 10), ('WeakKeyDictionary', 56, 10), ('NullType', 12, 10), ('PoolEventsDispatch', 12, 10), ('ConnectionEventsDispatch', 12, 10), ('ProxyTracer', 12, 10), ('NoOpTracer', 12, 10), ('EngineTracer', 11, 10), ('Engine', 11, 10), ('QueuePool', 11, 10), ('URL', 11, 10), ('MySQLDialect_mysqldb', 11, 10), ('_local', 11, 10), ('Queue', 11, 10), ('MySQLIdentifierPreparer_mysqldb', 11, 10), ('MySQLTypeCompiler', 11, 10)] ``` **What is the expected behavior?** After fetching the data the engine is disposed to ensure all of the pooled connections are fully closed. 
Related objects should be garbage collected after they fall out of scope **What is the actual behavior?** Despite manually invoking the garbage collector interface, one instance of the sqlalchemy or sqlalchemy instrumentation related objects remains allocated in memory for each time the `leak_memory` function was executed. If the `leak_memory` function is run in an infinite loop the memory allocated to the script will grow linearly and unbounded until available memory is consumed. **Additional context** This issue was introduced in 0.37b0 and is also present in 0.38b0
If @adamgregory isn't interested, I would like to take this up. I noticed that this also happens with a `sqlite:///:memory:` engine, so it is easy to reproduce locally. A quick check indicates it's probably related to [this commit](https://github.com/open-telemetry/opentelemetry-python-contrib/commit/bbe7578d1741d2103876fd1360301f62ef114730). Please go ahead. I haven't been able to make much progress here beyond identifying the issue. Compared to 0.36b0, these additional listeners introduce more circular memory references between Engine and EngineTracer, but this is definitely not my area of expertise and it wasn't clear to me why the garbage collector wasn't able to detect and free these circular references. It looks like the gist of the issue is [this line](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/bbe7578d1741d2103876fd1360301f62ef114730/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py#L124). Each new engine gets appended to `cls._remove_event_listener_params` via the `target` parameter, and so this list keeps holding references until `uninstrument()` is called. We need to check the `sqlalchemy` code for another alternative.
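A condensed sketch of the approach the patch above takes to break this reference chain: keep only weak references to engines when registering event listeners, so the listener bookkeeping no longer keeps disposed engines alive. Names are simplified relative to the actual implementation.

```python
import weakref

from sqlalchemy.event import listen, remove

_registered_listeners = []


def register_listener(engine, identifier, func):
    listen(engine, identifier, func)
    # Store only a weak reference so this list does not keep the engine alive.
    _registered_listeners.append((weakref.ref(engine), identifier, func))


def remove_all_listeners():
    for engine_ref, identifier, func in _registered_listeners:
        engine = engine_ref()
        # Skip engines that have already been garbage collected.
        if engine is not None:
            remove(engine, identifier, func)
    _registered_listeners.clear()
```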
2023-04-22T12:36:42
open-telemetry/opentelemetry-python-contrib
1,773
open-telemetry__opentelemetry-python-contrib-1773
[ "1765" ]
46e4b1da44c534fed8e1002899e9e41e6d668018
diff --git a/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/__init__.py b/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/__init__.py --- a/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/__init__.py @@ -94,6 +94,7 @@ def record_factory(*args, **kwargs): record.otelSpanID = "0" record.otelTraceID = "0" + record.otelTraceSampled = False nonlocal service_name if service_name is None: @@ -113,6 +114,7 @@ def record_factory(*args, **kwargs): if ctx != INVALID_SPAN_CONTEXT: record.otelSpanID = format(ctx.span_id, "016x") record.otelTraceID = format(ctx.trace_id, "032x") + record.otelTraceSampled = ctx.trace_flags.sampled if callable(LoggingInstrumentor._log_hook): try: LoggingInstrumentor._log_hook( # pylint: disable=E1102 diff --git a/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/constants.py b/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/constants.py --- a/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/constants.py +++ b/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/constants.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -DEFAULT_LOGGING_FORMAT = "%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] [trace_id=%(otelTraceID)s span_id=%(otelSpanID)s resource.service.name=%(otelServiceName)s] - %(message)s" +DEFAULT_LOGGING_FORMAT = "%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] [trace_id=%(otelTraceID)s span_id=%(otelSpanID)s resource.service.name=%(otelServiceName)s trace_sampled=%(otelTraceSampled)s] - %(message)s" _MODULE_DOC = """ @@ -27,6 +27,7 @@ - ``otelSpanID`` - ``otelTraceID`` - ``otelServiceName`` +- ``otelTraceSampled`` The integration uses the following logging format by default: @@ -113,7 +114,7 @@ .. code-block:: - %(otelSpanID)s %(otelTraceID)s %(otelServiceName)s + %(otelSpanID)s %(otelTraceID)s %(otelServiceName)s %(otelTraceSampled)s
diff --git a/instrumentation/opentelemetry-instrumentation-logging/tests/test_logging.py b/instrumentation/opentelemetry-instrumentation-logging/tests/test_logging.py --- a/instrumentation/opentelemetry-instrumentation-logging/tests/test_logging.py +++ b/instrumentation/opentelemetry-instrumentation-logging/tests/test_logging.py @@ -62,6 +62,7 @@ def test_trace_context_injection(self): self.assertEqual(record.otelSpanID, "0") self.assertEqual(record.otelTraceID, "0") self.assertEqual(record.otelServiceName, "") + self.assertEqual(record.otelTraceSampled, False) def log_hook(span, record): @@ -82,7 +83,7 @@ def tearDown(self): super().tearDown() LoggingInstrumentor().uninstrument() - def assert_trace_context_injected(self, span_id, trace_id): + def assert_trace_context_injected(self, span_id, trace_id, trace_sampled): with self.caplog.at_level(level=logging.INFO): logger = logging.getLogger("test logger") logger.info("hello") @@ -90,16 +91,20 @@ def assert_trace_context_injected(self, span_id, trace_id): record = self.caplog.records[0] self.assertEqual(record.otelSpanID, span_id) self.assertEqual(record.otelTraceID, trace_id) + self.assertEqual(record.otelTraceSampled, trace_sampled) self.assertEqual(record.otelServiceName, "unknown_service") def test_trace_context_injection(self): with self.tracer.start_as_current_span("s1") as span: span_id = format(span.get_span_context().span_id, "016x") trace_id = format(span.get_span_context().trace_id, "032x") - self.assert_trace_context_injected(span_id, trace_id) + trace_sampled = span.get_span_context().trace_flags.sampled + self.assert_trace_context_injected( + span_id, trace_id, trace_sampled + ) def test_trace_context_injection_without_span(self): - self.assert_trace_context_injected("0", "0") + self.assert_trace_context_injected("0", "0", False) @mock.patch("logging.basicConfig") def test_basic_config_called(self, basic_config_mock): @@ -163,6 +168,7 @@ def test_log_hook(self): with self.tracer.start_as_current_span("s1") as span: span_id = format(span.get_span_context().span_id, "016x") trace_id = format(span.get_span_context().trace_id, "032x") + trace_sampled = span.get_span_context().trace_flags.sampled with self.caplog.at_level(level=logging.INFO): logger = logging.getLogger("test logger") logger.info("hello") @@ -171,6 +177,7 @@ def test_log_hook(self): self.assertEqual(record.otelSpanID, span_id) self.assertEqual(record.otelTraceID, trace_id) self.assertEqual(record.otelServiceName, "unknown_service") + self.assertEqual(record.otelTraceSampled, trace_sampled) self.assertEqual( record.custom_user_attribute_from_log_hook, "some-value" ) @@ -179,7 +186,10 @@ def test_uninstrumented(self): with self.tracer.start_as_current_span("s1") as span: span_id = format(span.get_span_context().span_id, "016x") trace_id = format(span.get_span_context().trace_id, "032x") - self.assert_trace_context_injected(span_id, trace_id) + trace_sampled = span.get_span_context().trace_flags.sampled + self.assert_trace_context_injected( + span_id, trace_id, trace_sampled + ) LoggingInstrumentor().uninstrument() @@ -187,6 +197,7 @@ def test_uninstrumented(self): with self.tracer.start_as_current_span("s1") as span: span_id = format(span.get_span_context().span_id, "016x") trace_id = format(span.get_span_context().trace_id, "032x") + trace_sampled = span.get_span_context().trace_flags.sampled with self.caplog.at_level(level=logging.INFO): logger = logging.getLogger("test logger") logger.info("hello") @@ -195,3 +206,4 @@ def test_uninstrumented(self): 
self.assertFalse(hasattr(record, "otelSpanID")) self.assertFalse(hasattr(record, "otelTraceID")) self.assertFalse(hasattr(record, "otelServiceName")) + self.assertFalse(hasattr(record, "otelTraceSampled"))
Add `otelTraceSampled` field to LogEntry for OLTP Logging Instrumentation module Before opening a feature request against this repo, consider whether the feature should/could be implemented in the [other OpenTelemetry client libraries](https://github.com/open-telemetry/). If so, please [open an issue on opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification/issues/new) first. **Is your feature request related to a problem?** Getting the span id and trace id in the log record is a must. Cloud provider libraries, e.g. Google Cloud Logging, also provide a `logging.googleapis.com/trace_sampled` field under structured logging, which can be populated using this library. **Describe the solution you'd like** Add a `record.otelTraceSampled` field, similar to `record.otelSpanID` and `record.otelTraceID`, to the log entry using the `trace_flags` property in `SpanContext`. **Describe alternatives you've considered** Manually injecting the value of the `trace_flags` property into the log record by using the current `SpanContext`.
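A minimal sketch of how the new field can appear in log output, adapted from the `DEFAULT_LOGGING_FORMAT` change in the patch above; the logger name and message are placeholders.

```python
import logging

from opentelemetry.instrumentation.logging import LoggingInstrumentor

# Injects otelTraceID, otelSpanID, otelServiceName and otelTraceSampled
# into every LogRecord via a custom record factory.
LoggingInstrumentor().instrument()

logging.basicConfig(
    format=(
        "%(asctime)s %(levelname)s [%(name)s] "
        "[trace_id=%(otelTraceID)s span_id=%(otelSpanID)s "
        "trace_sampled=%(otelTraceSampled)s] - %(message)s"
    )
)

logging.getLogger(__name__).info("hello")
```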
Feel free to send a pull request. Hi there, I would like to help out with this feature request. As a contributor, I can see the importance of including the span id and trace id in the log record. I have experience with OpenTelemetry and would be happy to work on implementing this feature. Please let me know if this issue is still available and if there are any guidelines or resources I should follow while working on it. Thank you! Hi @msaad777, I’ve made some changes and will be pushing a PR soon. I’ll update here if I’m unable to make a PR. Thanks!
2023-04-25T17:06:47
open-telemetry/opentelemetry-python-contrib
1,776
open-telemetry__opentelemetry-python-contrib-1776
[ "1755" ]
818ef4322303dabc066a7f84e0f8879e5f558df9
diff --git a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/__init__.py b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/__init__.py --- a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/__init__.py @@ -64,8 +64,6 @@ async def redis_get(): response_hook (Callable) - a function with extra user-defined logic to be performed after performing the request this function signature is: def response_hook(span: Span, instance: redis.connection.Connection, response) -> None -sanitize_query (Boolean) - default False, enable the Redis query sanitization - for example: .. code: python @@ -88,27 +86,11 @@ def response_hook(span, instance, response): client = redis.StrictRedis(host="localhost", port=6379) client.get("my-key") -Configuration -------------- - -Query sanitization -****************** -To enable query sanitization with an environment variable, set -``OTEL_PYTHON_INSTRUMENTATION_SANITIZE_REDIS`` to "true". - -For example, - -:: - - export OTEL_PYTHON_INSTRUMENTATION_SANITIZE_REDIS="true" - -will result in traced queries like "SET ? ?". API --- """ import typing -from os import environ from typing import Any, Collection import redis @@ -116,9 +98,6 @@ def response_hook(span, instance, response): from opentelemetry import trace from opentelemetry.instrumentation.instrumentor import BaseInstrumentor -from opentelemetry.instrumentation.redis.environment_variables import ( - OTEL_PYTHON_INSTRUMENTATION_SANITIZE_REDIS, -) from opentelemetry.instrumentation.redis.package import _instruments from opentelemetry.instrumentation.redis.util import ( _extract_conn_attributes, @@ -161,10 +140,9 @@ def _instrument( tracer, request_hook: _RequestHookT = None, response_hook: _ResponseHookT = None, - sanitize_query: bool = False, ): def _traced_execute_command(func, instance, args, kwargs): - query = _format_command_args(args, sanitize_query) + query = _format_command_args(args) if len(args) > 0 and args[0]: name = args[0] @@ -194,7 +172,7 @@ def _traced_execute_pipeline(func, instance, args, kwargs): cmds = [ _format_command_args( - c.args if hasattr(c, "args") else c[0], sanitize_query + c.args if hasattr(c, "args") else c[0], ) for c in command_stack ] @@ -307,15 +285,6 @@ def _instrument(self, **kwargs): tracer, request_hook=kwargs.get("request_hook"), response_hook=kwargs.get("response_hook"), - sanitize_query=kwargs.get( - "sanitize_query", - environ.get( - OTEL_PYTHON_INSTRUMENTATION_SANITIZE_REDIS, "false" - ) - .lower() - .strip() - == "true", - ), ) def _uninstrument(self, **kwargs): diff --git a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/environment_variables.py b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/environment_variables.py deleted file mode 100644 --- a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/environment_variables.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -OTEL_PYTHON_INSTRUMENTATION_SANITIZE_REDIS = ( - "OTEL_PYTHON_INSTRUMENTATION_SANITIZE_REDIS" -) diff --git a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py --- a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py +++ b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/util.py @@ -48,41 +48,23 @@ def _extract_conn_attributes(conn_kwargs): return attributes -def _format_command_args(args, sanitize_query): +def _format_command_args(args): """Format and sanitize command arguments, and trim them as needed""" cmd_max_len = 1000 value_too_long_mark = "..." - if sanitize_query: - # Sanitized query format: "COMMAND ? ?" - args_length = len(args) - if args_length > 0: - out = [str(args[0])] + ["?"] * (args_length - 1) - out_str = " ".join(out) - if len(out_str) > cmd_max_len: - out_str = ( - out_str[: cmd_max_len - len(value_too_long_mark)] - + value_too_long_mark - ) - else: - out_str = "" - return out_str + # Sanitized query format: "COMMAND ? ?" + args_length = len(args) + if args_length > 0: + out = [str(args[0])] + ["?"] * (args_length - 1) + out_str = " ".join(out) - value_max_len = 100 - length = 0 - out = [] - for arg in args: - cmd = str(arg) + if len(out_str) > cmd_max_len: + out_str = ( + out_str[: cmd_max_len - len(value_too_long_mark)] + + value_too_long_mark + ) + else: + out_str = "" - if len(cmd) > value_max_len: - cmd = cmd[:value_max_len] + value_too_long_mark - - if length + len(cmd) > cmd_max_len: - prefix = cmd[: cmd_max_len - length] - out.append(f"{prefix}{value_too_long_mark}") - break - - out.append(cmd) - length += len(cmd) - - return " ".join(out) + return out_str
diff --git a/instrumentation/opentelemetry-instrumentation-redis/tests/test_redis.py b/instrumentation/opentelemetry-instrumentation-redis/tests/test_redis.py --- a/instrumentation/opentelemetry-instrumentation-redis/tests/test_redis.py +++ b/instrumentation/opentelemetry-instrumentation-redis/tests/test_redis.py @@ -168,22 +168,11 @@ def test_query_sanitizer_enabled(self): span = spans[0] self.assertEqual(span.attributes.get("db.statement"), "SET ? ?") - def test_query_sanitizer_enabled_env(self): + def test_query_sanitizer(self): redis_client = redis.Redis() connection = redis.connection.Connection() redis_client.connection = connection - RedisInstrumentor().uninstrument() - - env_patch = mock.patch.dict( - "os.environ", - {"OTEL_PYTHON_INSTRUMENTATION_SANITIZE_REDIS": "true"}, - ) - env_patch.start() - RedisInstrumentor().instrument( - tracer_provider=self.tracer_provider, - ) - with mock.patch.object(redis_client, "connection"): redis_client.set("key", "value") @@ -192,21 +181,6 @@ def test_query_sanitizer_enabled_env(self): span = spans[0] self.assertEqual(span.attributes.get("db.statement"), "SET ? ?") - env_patch.stop() - - def test_query_sanitizer_disabled(self): - redis_client = redis.Redis() - connection = redis.connection.Connection() - redis_client.connection = connection - - with mock.patch.object(redis_client, "connection"): - redis_client.set("key", "value") - - spans = self.memory_exporter.get_finished_spans() - self.assertEqual(len(spans), 1) - - span = spans[0] - self.assertEqual(span.attributes.get("db.statement"), "SET key value") def test_no_op_tracer_provider(self): RedisInstrumentor().uninstrument() diff --git a/tests/opentelemetry-docker-tests/tests/redis/test_redis_functional.py b/tests/opentelemetry-docker-tests/tests/redis/test_redis_functional.py --- a/tests/opentelemetry-docker-tests/tests/redis/test_redis_functional.py +++ b/tests/opentelemetry-docker-tests/tests/redis/test_redis_functional.py @@ -47,9 +47,7 @@ def _check_span(self, span, name): def test_long_command_sanitized(self): RedisInstrumentor().uninstrument() - RedisInstrumentor().instrument( - tracer_provider=self.tracer_provider, sanitize_query=True - ) + RedisInstrumentor().instrument(tracer_provider=self.tracer_provider) self.redis_client.mget(*range(2000)) @@ -75,7 +73,7 @@ def test_long_command(self): self._check_span(span, "MGET") self.assertTrue( span.attributes.get(SpanAttributes.DB_STATEMENT).startswith( - "MGET 0 1 2 3" + "MGET ? ? ? ?" ) ) self.assertTrue( @@ -84,9 +82,7 @@ def test_long_command(self): def test_basics_sanitized(self): RedisInstrumentor().uninstrument() - RedisInstrumentor().instrument( - tracer_provider=self.tracer_provider, sanitize_query=True - ) + RedisInstrumentor().instrument(tracer_provider=self.tracer_provider) self.assertIsNone(self.redis_client.get("cheese")) spans = self.memory_exporter.get_finished_spans() @@ -105,15 +101,13 @@ def test_basics(self): span = spans[0] self._check_span(span, "GET") self.assertEqual( - span.attributes.get(SpanAttributes.DB_STATEMENT), "GET cheese" + span.attributes.get(SpanAttributes.DB_STATEMENT), "GET ?" 
) self.assertEqual(span.attributes.get("db.redis.args_length"), 2) def test_pipeline_traced_sanitized(self): RedisInstrumentor().uninstrument() - RedisInstrumentor().instrument( - tracer_provider=self.tracer_provider, sanitize_query=True - ) + RedisInstrumentor().instrument(tracer_provider=self.tracer_provider) with self.redis_client.pipeline(transaction=False) as pipeline: pipeline.set("blah", 32) @@ -144,15 +138,13 @@ def test_pipeline_traced(self): self._check_span(span, "SET RPUSH HGETALL") self.assertEqual( span.attributes.get(SpanAttributes.DB_STATEMENT), - "SET blah 32\nRPUSH foo éé\nHGETALL xxx", + "SET ? ?\nRPUSH ? ?\nHGETALL ?", ) self.assertEqual(span.attributes.get("db.redis.pipeline_length"), 3) def test_pipeline_immediate_sanitized(self): RedisInstrumentor().uninstrument() - RedisInstrumentor().instrument( - tracer_provider=self.tracer_provider, sanitize_query=True - ) + RedisInstrumentor().instrument(tracer_provider=self.tracer_provider) with self.redis_client.pipeline() as pipeline: pipeline.set("a", 1) @@ -182,7 +174,7 @@ def test_pipeline_immediate(self): span = spans[0] self._check_span(span, "SET") self.assertEqual( - span.attributes.get(SpanAttributes.DB_STATEMENT), "SET b 2" + span.attributes.get(SpanAttributes.DB_STATEMENT), "SET ? ?" ) def test_parent(self): @@ -230,7 +222,7 @@ def test_basics(self): span = spans[0] self._check_span(span, "GET") self.assertEqual( - span.attributes.get(SpanAttributes.DB_STATEMENT), "GET cheese" + span.attributes.get(SpanAttributes.DB_STATEMENT), "GET ?" ) self.assertEqual(span.attributes.get("db.redis.args_length"), 2) @@ -247,7 +239,7 @@ def test_pipeline_traced(self): self._check_span(span, "SET RPUSH HGETALL") self.assertEqual( span.attributes.get(SpanAttributes.DB_STATEMENT), - "SET blah 32\nRPUSH foo éé\nHGETALL xxx", + "SET ? ?\nRPUSH ? ?\nHGETALL ?", ) self.assertEqual(span.attributes.get("db.redis.pipeline_length"), 3) @@ -308,7 +300,7 @@ def test_long_command(self): self._check_span(span, "MGET") self.assertTrue( span.attributes.get(SpanAttributes.DB_STATEMENT).startswith( - "MGET 0 1 2 3" + "MGET ? ? ? ?" ) ) self.assertTrue( @@ -322,7 +314,7 @@ def test_basics(self): span = spans[0] self._check_span(span, "GET") self.assertEqual( - span.attributes.get(SpanAttributes.DB_STATEMENT), "GET cheese" + span.attributes.get(SpanAttributes.DB_STATEMENT), "GET ?" ) self.assertEqual(span.attributes.get("db.redis.args_length"), 2) @@ -344,7 +336,7 @@ async def pipeline_simple(): self._check_span(span, "SET RPUSH HGETALL") self.assertEqual( span.attributes.get(SpanAttributes.DB_STATEMENT), - "SET blah 32\nRPUSH foo éé\nHGETALL xxx", + "SET ? ?\nRPUSH ? ?\nHGETALL ?", ) self.assertEqual(span.attributes.get("db.redis.pipeline_length"), 3) @@ -364,7 +356,7 @@ async def pipeline_immediate(): span = spans[0] self._check_span(span, "SET") self.assertEqual( - span.attributes.get(SpanAttributes.DB_STATEMENT), "SET b 2" + span.attributes.get(SpanAttributes.DB_STATEMENT), "SET ? ?" ) def test_parent(self): @@ -412,7 +404,7 @@ def test_basics(self): span = spans[0] self._check_span(span, "GET") self.assertEqual( - span.attributes.get(SpanAttributes.DB_STATEMENT), "GET cheese" + span.attributes.get(SpanAttributes.DB_STATEMENT), "GET ?" ) self.assertEqual(span.attributes.get("db.redis.args_length"), 2) @@ -434,7 +426,7 @@ async def pipeline_simple(): self._check_span(span, "SET RPUSH HGETALL") self.assertEqual( span.attributes.get(SpanAttributes.DB_STATEMENT), - "SET blah 32\nRPUSH foo éé\nHGETALL xxx", + "SET ? ?\nRPUSH ? 
?\nHGETALL ?", ) self.assertEqual(span.attributes.get("db.redis.pipeline_length"), 3) @@ -488,5 +480,5 @@ def test_get(self): span = spans[0] self._check_span(span, "GET") self.assertEqual( - span.attributes.get(SpanAttributes.DB_STATEMENT), "GET foo" + span.attributes.get(SpanAttributes.DB_STATEMENT), "GET ?" )
Only collect `db.statement` if there is sanitization

Spec: https://github.com/open-telemetry/opentelemetry-specification/pull/3127

- [ ] [aiopg](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-aiopg)
- [ ] [asyncpg](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-asyncpg)
- [ ] [dbapi](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-dbapi)
- [ ] [elasticsearch](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-elasticsearch)
- [ ] [mysql](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-mysql)
- [ ] [pymemcache](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-pymemcache)
- [ ] [pymongo](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-pymongo)
- [ ] [pymysql](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-pymysql)
- [ ] [redis](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-redis)
- [ ] [sqlalchemy](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-sqlalchemy)
- [ ] [sqlite3](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-sqlite3)
Can I add db instrumentation with sanitization to this issue? All of them need to be aligned with the new specifications. Yes, thanks, that would be great.
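For illustration, here is a minimal standalone sketch of the always-on sanitization behaviour that the Redis patch above applies in `_format_command_args`: keep only the command name, replace every argument with `?`, and trim overly long statements. The function name and sample values below are illustrative and not part of the instrumentation's public API.

```python
# Standalone sketch of the sanitization applied to Redis commands:
# keep the command name, mask every argument, trim long statements.
def sanitize_command(args, max_len=1000, marker="..."):
    if not args:
        return ""
    out = " ".join([str(args[0])] + ["?"] * (len(args) - 1))
    if len(out) > max_len:
        out = out[: max_len - len(marker)] + marker
    return out


print(sanitize_command(["SET", "key", "value"]))       # -> "SET ? ?"
print(sanitize_command(["MGET", *range(2000)])[-3:])   # long commands end with "..."
```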
2023-05-01T11:28:06
open-telemetry/opentelemetry-python-contrib
1,778
open-telemetry__opentelemetry-python-contrib-1778
[ "1777" ]
890e5dd9b8a4ec263f183fe9d0362ffb907847a4
diff --git a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py --- a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py +++ b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py @@ -118,8 +118,17 @@ def __init__( self._register_event_listener(engine, "checkin", self._pool_checkin) self._register_event_listener(engine, "checkout", self._pool_checkout) + def _get_connection_string(self): + drivername = self.engine.url.drivername or "" + host = self.engine.url.host or "" + port = self.engine.url.port or "" + database = self.engine.url.database or "" + return f"{drivername}://{host}:{port}/{database}" + def _get_pool_name(self): - return self.engine.pool.logging_name or "" + if self.engine.pool.logging_name is not None: + return self.engine.pool.logging_name + return self._get_connection_string() def _add_idle_to_connection_usage(self, value): self.connections_usage.add(
diff --git a/instrumentation/opentelemetry-instrumentation-sqlalchemy/tests/test_sqlalchemy_metrics.py b/instrumentation/opentelemetry-instrumentation-sqlalchemy/tests/test_sqlalchemy_metrics.py --- a/instrumentation/opentelemetry-instrumentation-sqlalchemy/tests/test_sqlalchemy_metrics.py +++ b/instrumentation/opentelemetry-instrumentation-sqlalchemy/tests/test_sqlalchemy_metrics.py @@ -70,11 +70,12 @@ def test_metrics_one_connection(self): ) def test_metrics_without_pool_name(self): - pool_name = "" + pool_name = "pool_test_name" engine = sqlalchemy.create_engine( "sqlite:///:memory:", pool_size=5, poolclass=QueuePool, + pool_logging_name=pool_name, ) metrics = self.get_sorted_metrics() diff --git a/tests/opentelemetry-docker-tests/tests/sqlalchemy_tests/test_postgres.py b/tests/opentelemetry-docker-tests/tests/sqlalchemy_tests/test_postgres.py --- a/tests/opentelemetry-docker-tests/tests/sqlalchemy_tests/test_postgres.py +++ b/tests/opentelemetry-docker-tests/tests/sqlalchemy_tests/test_postgres.py @@ -95,3 +95,40 @@ class PostgresCreatorTestCase(PostgresTestCase): "url": "postgresql://", "creator": lambda: psycopg2.connect(**POSTGRES_CONFIG), } + + +class PostgresMetricsTestCase(PostgresTestCase): + __test__ = True + + VENDOR = "postgresql" + SQL_DB = "opentelemetry-tests" + ENGINE_ARGS = { + "url": "postgresql://%(user)s:%(password)s@%(host)s:%(port)s/%(dbname)s" + % POSTGRES_CONFIG + } + + def test_metrics_pool_name(self): + with self.connection() as conn: + conn.execute("SELECT 1 + 1").fetchall() + + pool_name = "{}://{}:{}/{}".format( + self.VENDOR, + POSTGRES_CONFIG["host"], + POSTGRES_CONFIG["port"], + self.SQL_DB, + ) + metrics = self.get_sorted_metrics() + self.assertEqual(len(metrics), 1) + self.assert_metric_expected( + metrics[0], + [ + self.create_number_data_point( + value=0, + attributes={"pool.name": pool_name, "state": "idle"}, + ), + self.create_number_data_point( + value=0, + attributes={"pool.name": pool_name, "state": "used"}, + ), + ], + )
Expand sqlalchemy pool.name to follow the semantic conventions

Spec ref: https://github.com/open-telemetry/opentelemetry-specification/pull/3050
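For illustration, a small sketch of the fallback pool name the patch above derives from the engine URL when no `pool_logging_name` is configured. The connection URL below is a placeholder, and this only reproduces the string-building step, not the metrics wiring:

```python
from sqlalchemy.engine import make_url

# Build the "driver://host:port/database" pool name the instrumentation falls back to.
url = make_url("postgresql://user:pass@localhost:5432/opentelemetry-tests")
pool_name = (
    f"{url.drivername or ''}://{url.host or ''}:{url.port or ''}/{url.database or ''}"
)
print(pool_name)  # -> "postgresql://localhost:5432/opentelemetry-tests"
```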
2023-05-03T08:38:39
open-telemetry/opentelemetry-python-contrib
1,785
open-telemetry__opentelemetry-python-contrib-1785
[ "1780" ]
a3a0b2409cc1fe9a03bb73da9c8bbf47b9d3d5ea
diff --git a/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py b/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py --- a/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py @@ -201,30 +201,35 @@ def _set_api_gateway_v1_proxy_attributes( span.set_attribute( SpanAttributes.HTTP_METHOD, lambda_event.get("httpMethod") ) - span.set_attribute(SpanAttributes.HTTP_ROUTE, lambda_event.get("resource")) if lambda_event.get("headers"): - span.set_attribute( - SpanAttributes.HTTP_USER_AGENT, - lambda_event["headers"].get("User-Agent"), - ) - span.set_attribute( - SpanAttributes.HTTP_SCHEME, - lambda_event["headers"].get("X-Forwarded-Proto"), - ) - span.set_attribute( - SpanAttributes.NET_HOST_NAME, lambda_event["headers"].get("Host") - ) + if "User-Agent" in lambda_event["headers"]: + span.set_attribute( + SpanAttributes.HTTP_USER_AGENT, + lambda_event["headers"]["User-Agent"], + ) + if "X-Forwarded-Proto" in lambda_event["headers"]: + span.set_attribute( + SpanAttributes.HTTP_SCHEME, + lambda_event["headers"]["X-Forwarded-Proto"], + ) + if "Host" in lambda_event["headers"]: + span.set_attribute( + SpanAttributes.NET_HOST_NAME, + lambda_event["headers"]["Host"], + ) + if "resource" in lambda_event: + span.set_attribute(SpanAttributes.HTTP_ROUTE, lambda_event["resource"]) - if lambda_event.get("queryStringParameters"): - span.set_attribute( - SpanAttributes.HTTP_TARGET, - f"{lambda_event.get('resource')}?{urlencode(lambda_event.get('queryStringParameters'))}", - ) - else: - span.set_attribute( - SpanAttributes.HTTP_TARGET, lambda_event.get("resource") - ) + if lambda_event.get("queryStringParameters"): + span.set_attribute( + SpanAttributes.HTTP_TARGET, + f"{lambda_event['resource']}?{urlencode(lambda_event['queryStringParameters'])}", + ) + else: + span.set_attribute( + SpanAttributes.HTTP_TARGET, lambda_event["resource"] + ) return span @@ -237,35 +242,38 @@ def _set_api_gateway_v2_proxy_attributes( More info: https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-integrations-lambda.html """ - span.set_attribute( - SpanAttributes.NET_HOST_NAME, - lambda_event["requestContext"].get("domainName"), - ) - - if lambda_event["requestContext"].get("http"): + if "domainName" in lambda_event["requestContext"]: span.set_attribute( - SpanAttributes.HTTP_METHOD, - lambda_event["requestContext"]["http"].get("method"), - ) - span.set_attribute( - SpanAttributes.HTTP_USER_AGENT, - lambda_event["requestContext"]["http"].get("userAgent"), - ) - span.set_attribute( - SpanAttributes.HTTP_ROUTE, - lambda_event["requestContext"]["http"].get("path"), + SpanAttributes.NET_HOST_NAME, + lambda_event["requestContext"]["domainName"], ) - if lambda_event.get("rawQueryString"): + if lambda_event["requestContext"].get("http"): + if "method" in lambda_event["requestContext"]["http"]: span.set_attribute( - SpanAttributes.HTTP_TARGET, - f"{lambda_event['requestContext']['http'].get('path')}?{lambda_event.get('rawQueryString')}", + SpanAttributes.HTTP_METHOD, + lambda_event["requestContext"]["http"]["method"], ) - else: + if "userAgent" in lambda_event["requestContext"]["http"]: span.set_attribute( - SpanAttributes.HTTP_TARGET, - lambda_event["requestContext"]["http"].get("path"), + 
SpanAttributes.HTTP_USER_AGENT, + lambda_event["requestContext"]["http"]["userAgent"], + ) + if "path" in lambda_event["requestContext"]["http"]: + span.set_attribute( + SpanAttributes.HTTP_ROUTE, + lambda_event["requestContext"]["http"]["path"], ) + if lambda_event.get("rawQueryString"): + span.set_attribute( + SpanAttributes.HTTP_TARGET, + f"{lambda_event['requestContext']['http']['path']}?{lambda_event['rawQueryString']}", + ) + else: + span.set_attribute( + SpanAttributes.HTTP_TARGET, + lambda_event["requestContext"]["http"]["path"], + ) return span
opentelemetry-instrumentation-aws-lambda | Invalid type NoneType for attribute 'http.user_agent' value.

Environment: I'm using `opentelemetry-instrumentation-aws-lambda` to instrument a Lambda function behind the API Gateway (V2).

Python==3.9
AWS-OTEL Lambda Layer == arn:aws:lambda:us-east-1:901920570463:layer:aws-otel-python-amd64-ver-1-17-0:1
fastapi==0.73.0
opentelemetry-sdk==1.17.0

Environment variables:
```bash
AWS_LAMBDA_EXEC_WRAPPER=/opt/otel-instrument
OPENTELEMETRY_COLLECTOR_CONFIG_FILE=/var/task/src/otel-config.yaml
OTEL_METRICS_EXPORTER=otlp
OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf
OPENTELEMETRY_EXTENSION_LOG_LEVEL=error
OTEL_PYTHON_DISABLED_INSTRUMENTATIONS=fastapi
```

OTEL Config yaml:
```yaml
receivers:
  otlp:
    protocols:
      http:
exporters:
  otlp:
    endpoint: https://<ANOTHER_SERVICE>:4317
service:
  pipelines:
    metrics:
      receivers: [otlp]
      exporters: [otlp]
```

**Steps to reproduce**
1. Instrument your Lambda as described here: https://aws-otel.github.io/docs/getting-started/lambda/lambda-python
2. The Lambda should be behind the API Gateway
3. Send a request

**What is the expected behavior?**
There shouldn't be any Warning logs in the Lambda logs.

**What is the actual behavior?**
I'm seeing a couple of Warnings in the Lambda logs.

<img width="1171" alt="Screenshot 2023-05-03 at 9 30 27 AM" src="https://user-images.githubusercontent.com/16422720/235990288-c0f8cef8-b85e-47ac-a717-744571945654.png">

```bash
[WARNING] 2023-05-02T23:36:45.395Z ec784472-c5bb-4f74-867d-e45d39e0fcca Invalid type NoneType for attribute 'http.user_agent' value. Expected one of ['bool', 'str', 'bytes', 'int', 'float'] or a sequence of those types
```

**Additional context**
I'm interested in opening a PR, if possible, to add some logic here preventing None values from being set as attributes: https://github.com/open-telemetry/opentelemetry-python-contrib/blob/46e4b1da44c534fed8e1002899e9e41e6d668018/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py#LL232C3-L232C3

Also, if there is a way to disable tracing/warning logs it would be helpful; I couldn't find one.

Another related discussion: https://github.com/open-telemetry/opentelemetry-python/discussions/3293

Thanks.
>I'm interested in opening a PR, if possible, to add some logic here preventing None values from being set as attributes

Feel free to send a pull request.

>Also, if there is a way to disable tracing/warning logs it would be helpful; I couldn't find one.

See https://github.com/open-telemetry/opentelemetry-python/issues/1059

@srikanthccv I need some insight into the best approach here. After further investigation, I realized that my Lambda behind the API Gateway works with no problem; the problem is for a Lambda behind the ALB. The current logic sets the V2 attributes if the version equals `2.0`, and the V1 attributes otherwise:

```python
if lambda_event.get("version") == "2.0":
    _set_api_gateway_v2_proxy_attributes(lambda_event, span)
else:
    _set_api_gateway_v1_proxy_attributes(lambda_event, span)
```

Do you think it's best that I keep this behavior and only add some checks inside `_set_api_gateway_v1_proxy_attributes`? Or should I change the behavior by turning the `else` into `elif lambda_event.get("version") == "1.0"`? I could also add an `else` branch with some kind of default behavior, but I don't think I could make it generic enough. The problem with the second way is that existing Lambda functions behind the ALB would not get any attributes; even attributes like `X-Forwarded-Proto` would be removed. The first way is more stable, but I need to add a couple of `if` statements to the logic. Let me know what you think. Thanks.
2023-05-05T17:44:18
open-telemetry/opentelemetry-python-contrib
1,786
open-telemetry__opentelemetry-python-contrib-1786
[ "1747" ]
0871dd455c0adfa125a2f258a0b55c47a5da5227
diff --git a/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py b/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py --- a/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py @@ -112,6 +112,8 @@ def instrument_consumer(consumer: Consumer, tracer_provider=None) from .package import _instruments from .utils import ( KafkaPropertiesExtractor, + _end_current_consume_span, + _create_new_consume_span, _enrich_span, _get_span_name, _kafka_getter, @@ -137,6 +139,12 @@ def __init__(self, config): def poll(self, timeout=-1): # pylint: disable=useless-super-delegation return super().poll(timeout) + # This method is deliberately implemented in order to allow wrapt to wrap this function + def consume( + self, *args, **kwargs + ): # pylint: disable=useless-super-delegation + return super().consume(*args, **kwargs) + class ProxiedProducer(Producer): def __init__(self, producer: Producer, tracer: Tracer): @@ -177,10 +185,14 @@ def committed(self, partitions, timeout=-1): def commit(self, *args, **kwargs): return self._consumer.commit(*args, **kwargs) - def consume( - self, num_messages=1, *args, **kwargs - ): # pylint: disable=keyword-arg-before-vararg - return self._consumer.consume(num_messages, *args, **kwargs) + def consume(self, *args, **kwargs): + return ConfluentKafkaInstrumentor.wrap_consume( + self._consumer.consume, + self, + self._tracer, + args, + kwargs, + ) def get_watermark_offsets( self, partition, timeout=-1, *args, **kwargs @@ -275,6 +287,11 @@ def _inner_wrap_poll(func, instance, args, kwargs): func, instance, self._tracer, args, kwargs ) + def _inner_wrap_consume(func, instance, args, kwargs): + return ConfluentKafkaInstrumentor.wrap_consume( + func, instance, self._tracer, args, kwargs + ) + wrapt.wrap_function_wrapper( AutoInstrumentedProducer, "produce", @@ -287,6 +304,12 @@ def _inner_wrap_poll(func, instance, args, kwargs): _inner_wrap_poll, ) + wrapt.wrap_function_wrapper( + AutoInstrumentedConsumer, + "consume", + _inner_wrap_consume, + ) + def _uninstrument(self, **kwargs): confluent_kafka.Producer = self._original_kafka_producer confluent_kafka.Consumer = self._original_kafka_consumer @@ -326,29 +349,14 @@ def wrap_produce(func, instance, tracer, args, kwargs): @staticmethod def wrap_poll(func, instance, tracer, args, kwargs): if instance._current_consume_span: - context.detach(instance._current_context_token) - instance._current_context_token = None - instance._current_consume_span.end() - instance._current_consume_span = None + _end_current_consume_span(instance) with tracer.start_as_current_span( "recv", end_on_exit=True, kind=trace.SpanKind.CONSUMER ): record = func(*args, **kwargs) if record: - links = [] - ctx = propagate.extract(record.headers(), getter=_kafka_getter) - if ctx: - for item in ctx.values(): - if hasattr(item, "get_span_context"): - links.append(Link(context=item.get_span_context())) - - instance._current_consume_span = tracer.start_span( - name=f"{record.topic()} process", - links=links, - kind=SpanKind.CONSUMER, - ) - + _create_new_consume_span(instance, tracer, [record]) _enrich_span( instance._current_consume_span, record.topic(), @@ -361,3 +369,26 @@ def wrap_poll(func, instance, tracer, args, kwargs): ) 
return record + + @staticmethod + def wrap_consume(func, instance, tracer, args, kwargs): + if instance._current_consume_span: + _end_current_consume_span(instance) + + with tracer.start_as_current_span( + "recv", end_on_exit=True, kind=trace.SpanKind.CONSUMER + ): + records = func(*args, **kwargs) + if len(records) > 0: + _create_new_consume_span(instance, tracer, records) + _enrich_span( + instance._current_consume_span, + records[0].topic(), + operation=MessagingOperationValues.PROCESS, + ) + + instance._current_context_token = context.attach( + trace.set_span_in_context(instance._current_consume_span) + ) + + return records diff --git a/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/utils.py b/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/utils.py --- a/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/utils.py +++ b/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/utils.py @@ -1,6 +1,8 @@ from logging import getLogger from typing import List, Optional +from opentelemetry import context, propagate +from opentelemetry.trace import SpanKind, Link from opentelemetry.propagators import textmap from opentelemetry.semconv.trace import ( MessagingDestinationKindValues, @@ -81,6 +83,34 @@ def set(self, carrier: textmap.CarrierT, key: str, value: str) -> None: _kafka_getter = KafkaContextGetter() +def _end_current_consume_span(instance): + context.detach(instance._current_context_token) + instance._current_context_token = None + instance._current_consume_span.end() + instance._current_consume_span = None + + +def _create_new_consume_span(instance, tracer, records): + links = _get_links_from_records(records) + instance._current_consume_span = tracer.start_span( + name=f"{records[0].topic()} process", + links=links, + kind=SpanKind.CONSUMER, + ) + + +def _get_links_from_records(records): + links = [] + for record in records: + ctx = propagate.extract(record.headers(), getter=_kafka_getter) + if ctx: + for item in ctx.values(): + if hasattr(item, "get_span_context"): + links.append(Link(context=item.get_span_context())) + + return links + + def _enrich_span( span, topic, @@ -94,7 +124,7 @@ def _enrich_span( span.set_attribute(SpanAttributes.MESSAGING_SYSTEM, "kafka") span.set_attribute(SpanAttributes.MESSAGING_DESTINATION, topic) - if partition: + if partition is not None: span.set_attribute(SpanAttributes.MESSAGING_KAFKA_PARTITION, partition) span.set_attribute( @@ -109,7 +139,7 @@ def _enrich_span( # https://stackoverflow.com/questions/65935155/identify-and-find-specific-message-in-kafka-topic # A message within Kafka is uniquely defined by its topic name, topic partition and offset. - if partition and offset and topic: + if partition is not None and offset is not None and topic: span.set_attribute( SpanAttributes.MESSAGING_MESSAGE_ID, f"{topic}.{partition}.{offset}",
diff --git a/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/test_instrumentation.py b/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/test_instrumentation.py --- a/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/test_instrumentation.py +++ b/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/test_instrumentation.py @@ -14,7 +14,12 @@ # pylint: disable=no-name-in-module -from unittest import TestCase +from opentelemetry.semconv.trace import ( + SpanAttributes, + MessagingDestinationKindValues, +) +from opentelemetry.test.test_base import TestBase +from .utils import MockConsumer, MockedMessage from confluent_kafka import Consumer, Producer @@ -29,7 +34,7 @@ ) -class TestConfluentKafka(TestCase): +class TestConfluentKafka(TestBase): def test_instrument_api(self) -> None: instrumentation = ConfluentKafkaInstrumentor() @@ -104,3 +109,140 @@ def test_context_getter(self) -> None: context_setter.set(carrier_list, "key1", "val1") self.assertEqual(context_getter.get(carrier_list, "key1"), ["val1"]) self.assertEqual(["key1"], context_getter.keys(carrier_list)) + + def test_poll(self) -> None: + instrumentation = ConfluentKafkaInstrumentor() + mocked_messages = [ + MockedMessage("topic-10", 0, 0, []), + MockedMessage("topic-20", 2, 4, []), + MockedMessage("topic-30", 1, 3, []), + ] + expected_spans = [ + {"name": "recv", "attributes": {}}, + { + "name": "topic-10 process", + "attributes": { + SpanAttributes.MESSAGING_OPERATION: "process", + SpanAttributes.MESSAGING_KAFKA_PARTITION: 0, + SpanAttributes.MESSAGING_SYSTEM: "kafka", + SpanAttributes.MESSAGING_DESTINATION: "topic-10", + SpanAttributes.MESSAGING_DESTINATION_KIND: MessagingDestinationKindValues.QUEUE.value, + SpanAttributes.MESSAGING_MESSAGE_ID: "topic-10.0.0", + }, + }, + {"name": "recv", "attributes": {}}, + { + "name": "topic-20 process", + "attributes": { + SpanAttributes.MESSAGING_OPERATION: "process", + SpanAttributes.MESSAGING_KAFKA_PARTITION: 2, + SpanAttributes.MESSAGING_SYSTEM: "kafka", + SpanAttributes.MESSAGING_DESTINATION: "topic-20", + SpanAttributes.MESSAGING_DESTINATION_KIND: MessagingDestinationKindValues.QUEUE.value, + SpanAttributes.MESSAGING_MESSAGE_ID: "topic-20.2.4", + }, + }, + {"name": "recv", "attributes": {}}, + { + "name": "topic-30 process", + "attributes": { + SpanAttributes.MESSAGING_OPERATION: "process", + SpanAttributes.MESSAGING_KAFKA_PARTITION: 1, + SpanAttributes.MESSAGING_SYSTEM: "kafka", + SpanAttributes.MESSAGING_DESTINATION: "topic-30", + SpanAttributes.MESSAGING_DESTINATION_KIND: MessagingDestinationKindValues.QUEUE.value, + SpanAttributes.MESSAGING_MESSAGE_ID: "topic-30.1.3", + }, + }, + {"name": "recv", "attributes": {}}, + ] + + consumer = MockConsumer( + mocked_messages, + { + "bootstrap.servers": "localhost:29092", + "group.id": "mygroup", + "auto.offset.reset": "earliest", + }, + ) + self.memory_exporter.clear() + consumer = instrumentation.instrument_consumer(consumer) + consumer.poll() + consumer.poll() + consumer.poll() + consumer.poll() + + span_list = self.memory_exporter.get_finished_spans() + self._compare_spans(span_list, expected_spans) + + def test_consume(self) -> None: + instrumentation = ConfluentKafkaInstrumentor() + mocked_messages = [ + MockedMessage("topic-1", 0, 0, []), + MockedMessage("topic-1", 2, 1, []), + MockedMessage("topic-1", 3, 2, []), + MockedMessage("topic-2", 0, 0, []), + MockedMessage("topic-3", 0, 3, []), + MockedMessage("topic-2", 0, 1, []), + ] + expected_spans = [ + {"name": "recv", 
"attributes": {}}, + { + "name": "topic-1 process", + "attributes": { + SpanAttributes.MESSAGING_OPERATION: "process", + SpanAttributes.MESSAGING_SYSTEM: "kafka", + SpanAttributes.MESSAGING_DESTINATION: "topic-1", + SpanAttributes.MESSAGING_DESTINATION_KIND: MessagingDestinationKindValues.QUEUE.value, + }, + }, + {"name": "recv", "attributes": {}}, + { + "name": "topic-2 process", + "attributes": { + SpanAttributes.MESSAGING_OPERATION: "process", + SpanAttributes.MESSAGING_SYSTEM: "kafka", + SpanAttributes.MESSAGING_DESTINATION: "topic-2", + SpanAttributes.MESSAGING_DESTINATION_KIND: MessagingDestinationKindValues.QUEUE.value, + }, + }, + {"name": "recv", "attributes": {}}, + { + "name": "topic-3 process", + "attributes": { + SpanAttributes.MESSAGING_OPERATION: "process", + SpanAttributes.MESSAGING_SYSTEM: "kafka", + SpanAttributes.MESSAGING_DESTINATION: "topic-3", + SpanAttributes.MESSAGING_DESTINATION_KIND: MessagingDestinationKindValues.QUEUE.value, + }, + }, + {"name": "recv", "attributes": {}}, + ] + + consumer = MockConsumer( + mocked_messages, + { + "bootstrap.servers": "localhost:29092", + "group.id": "mygroup", + "auto.offset.reset": "earliest", + }, + ) + + self.memory_exporter.clear() + consumer = instrumentation.instrument_consumer(consumer) + consumer.consume(3) + consumer.consume(1) + consumer.consume(2) + consumer.consume(1) + span_list = self.memory_exporter.get_finished_spans() + self._compare_spans(span_list, expected_spans) + + def _compare_spans(self, spans, expected_spans): + for span, expected_span in zip(spans, expected_spans): + self.assertEqual(expected_span["name"], span.name) + for attribute_key, expected_attribute_value in expected_span[ + "attributes" + ].items(): + self.assertEqual( + expected_attribute_value, span.attributes[attribute_key] + ) diff --git a/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/utils.py b/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/utils.py new file mode 100644 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/utils.py @@ -0,0 +1,39 @@ +from confluent_kafka import Consumer + + +class MockConsumer(Consumer): + def __init__(self, queue, config): + self._queue = queue + super().__init__(config) + + def consume( + self, num_messages=1, *args, **kwargs + ): # pylint: disable=keyword-arg-before-vararg + messages = self._queue[:num_messages] + self._queue = self._queue[num_messages:] + return messages + + def poll(self, timeout=None): + if len(self._queue) > 0: + return self._queue.pop(0) + return None + + +class MockedMessage: + def __init__(self, topic: str, partition: int, offset: int, headers): + self._topic = topic + self._partition = partition + self._offset = offset + self._headers = headers + + def topic(self): + return self._topic + + def partition(self): + return self._partition + + def offset(self): + return self._offset + + def headers(self): + return self._headers
[confluent-kafka] Add wrapper to 'consume' method for batch messages

**Is your feature request related to a problem?**
If we use `consumer.consume(num_messages=100)` ([src](https://github.com/confluentinc/confluent-kafka-python/blob/8fbc98198ca7d9a9748ec5148be711779b74aa62/src/confluent_kafka/src/Consumer.c#L1209)) to receive messages, [ConfluentKafkaInstrumentor](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py#L209) doesn't send spans for it.

**Describe the solution you'd like**
[ConfluentKafkaInstrumentor](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py#L209) should wrap the `consume` method the same way as the [poll](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py#L298) method.

**Additional context**
We could add `messaging.batch.message_count` to [semconv](https://github.com/open-telemetry/opentelemetry-python/blob/main/opentelemetry-semantic-conventions/src/opentelemetry/semconv/trace/__init__.py) and use it as a span attribute. Relates to [batch-receiving](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/messaging.md#batch-receiving) in [opentelemetry-semantic-conventions](https://github.com/open-telemetry/opentelemetry-python/tree/main/opentelemetry-semantic-conventions).
Hi! It seems like a good issue to start contributing on. Can I take it? Adding `messaging.batch.message_count` requires [v1.17.0](https://github.com/open-telemetry/opentelemetry-specification/releases/tag/v1.17.0) of the semantic conventions. I cannot add it until this [PR](https://github.com/open-telemetry/opentelemetry-python/pull/3251) is merged, but I can wrap the `consume` method using the same implementation as the `poll` wrapper, if that's okay.
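As a rough illustration of the gap being closed, here is a sketch of a consumer using both receive paths. With the change above, `consume()` also produces a `recv` span plus a process span linked to each batched message, as `poll()` already did. The broker address and topic name below are placeholders:

```python
from confluent_kafka import Consumer

from opentelemetry.instrumentation.confluent_kafka import ConfluentKafkaInstrumentor

consumer = Consumer(
    {"bootstrap.servers": "localhost:9092", "group.id": "mygroup"}
)
# Wrap the consumer so both receive paths below are traced.
consumer = ConfluentKafkaInstrumentor().instrument_consumer(consumer)
consumer.subscribe(["my-topic"])

single_message = consumer.poll(timeout=1.0)                       # traced before this change
message_batch = consumer.consume(num_messages=100, timeout=1.0)   # traced after this change
```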
2023-05-06T13:39:43
open-telemetry/opentelemetry-python-contrib
1,788
open-telemetry__opentelemetry-python-contrib-1788
[ "1098" ]
db90ce38a2a9f48f225ea63bab1d295deeaff764
diff --git a/instrumentation/opentelemetry-instrumentation-django/src/opentelemetry/instrumentation/django/middleware/otel_middleware.py b/instrumentation/opentelemetry-instrumentation-django/src/opentelemetry/instrumentation/django/middleware/otel_middleware.py --- a/instrumentation/opentelemetry-instrumentation-django/src/opentelemetry/instrumentation/django/middleware/otel_middleware.py +++ b/instrumentation/opentelemetry-instrumentation-django/src/opentelemetry/instrumentation/django/middleware/otel_middleware.py @@ -172,9 +172,12 @@ def _get_span_name(request): else: match = resolve(request.path) - if hasattr(match, "route"): + if hasattr(match, "route") and match.route: return f"{request.method} {match.route}" + if hasattr(match, "url_name") and match.url_name: + return f"{request.method} {match.url_name}" + return request.method except Resolver404:
diff --git a/instrumentation/opentelemetry-instrumentation-django/tests/test_middleware.py b/instrumentation/opentelemetry-instrumentation-django/tests/test_middleware.py --- a/instrumentation/opentelemetry-instrumentation-django/tests/test_middleware.py +++ b/instrumentation/opentelemetry-instrumentation-django/tests/test_middleware.py @@ -74,10 +74,14 @@ DJANGO_3_0 = VERSION >= (3, 0) if DJANGO_2_0: - from django.urls import re_path + from django.urls import path, re_path else: from django.conf.urls import url as re_path + def path(path_argument, *args, **kwargs): + return re_path(rf"^{path_argument}$", *args, **kwargs) + + urlpatterns = [ re_path(r"^traced/", traced), re_path(r"^traced_custom_header/", response_with_custom_header), @@ -87,6 +91,7 @@ re_path(r"^excluded_noarg/", excluded_noarg), re_path(r"^excluded_noarg2/", excluded_noarg2), re_path(r"^span_name/([0-9]{4})/$", route_span_name), + path("", traced, name="empty"), ] _django_instrumentor = DjangoInstrumentor() @@ -205,6 +210,16 @@ def test_not_recording(self): self.assertFalse(mock_span.set_attribute.called) self.assertFalse(mock_span.set_status.called) + def test_empty_path(self): + Client().get("/") + + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 1) + + span = spans[0] + + self.assertEqual(span.name, "GET empty") + def test_traced_post(self): Client().post("/traced/")
[instrumentation-django] - empty span name

**Describe your environment**
opentelemetry-instrumentation-django==0.30b1
django==4.0.4

**Steps to reproduce**
Having the following URL patterns:

```python
from django.urls import path

from . import views

urlpatterns = [
    path('', views.index, name='index'),
    path('list', views.list, name='list'),
]
```

Please note that the first path has an empty route.

**What is the expected behavior?**
The span name for this endpoint should not be empty. Also, consider returning the http.route attribute with the value '/'.

**What is the actual behavior?**
* Got an empty span name
* No http.route attribute returned
What do you expect the span name to be, if not empty? If an empty route has the same meaning as a route with a value of '/', then I would consider setting both the route and the name to '/'.
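A stripped-down sketch of the fallback the patch above settles on: prefer the resolved route, then the view's URL name, then the bare HTTP method. This is simplified from the middleware, and `SimpleNamespace` only stands in for Django's `ResolverMatch` here:

```python
from types import SimpleNamespace


def get_span_name(method, match):
    # Prefer the resolved route pattern, fall back to the view's URL name,
    # then to the bare HTTP method.
    if getattr(match, "route", None):
        return f"{method} {match.route}"
    if getattr(match, "url_name", None):
        return f"{method} {match.url_name}"
    return method


empty_root = SimpleNamespace(route="", url_name="index")
print(get_span_name("GET", empty_root))  # -> "GET index" instead of an empty span name
```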
2023-05-08T18:06:49
open-telemetry/opentelemetry-python-contrib
1,789
open-telemetry__opentelemetry-python-contrib-1789
[ "1782" ]
8cc10a0859d0da4773985e1025d261c3e334ea28
diff --git a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py --- a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py @@ -506,6 +506,11 @@ def __init__( unit="ms", description="measures the duration of the inbound HTTP request", ) + self.server_response_size_histogram = self.meter.create_histogram( + name=MetricInstruments.HTTP_SERVER_RESPONSE_SIZE, + unit="By", + description="measures the size of HTTP response messages (compressed).", + ) self.active_requests_counter = self.meter.create_up_down_counter( name=MetricInstruments.HTTP_SERVER_ACTIVE_REQUESTS, unit="requests", @@ -518,6 +523,7 @@ def __init__( self.server_request_hook = server_request_hook self.client_request_hook = client_request_hook self.client_response_hook = client_response_hook + self.content_length_header = None async def __call__(self, scope, receive, send): """The ASGI application @@ -593,6 +599,10 @@ async def __call__(self, scope, receive, send): self.active_requests_counter.add( -1, active_requests_count_attrs ) + if self.content_length_header: + self.server_response_size_histogram.record( + self.content_length_header, duration_attrs + ) if token: context.detach(token) @@ -660,6 +670,13 @@ async def otel_send(message): setter=asgi_setter, ) + content_length = asgi_getter.get(message, "content-length") + if content_length: + try: + self.content_length_header = int(content_length[0]) + except ValueError: + pass + await send(message) return otel_send
diff --git a/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py b/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py --- a/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py +++ b/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py @@ -46,10 +46,12 @@ _expected_metric_names = [ "http.server.active_requests", "http.server.duration", + "http.server.response.size", ] _recommended_attrs = { "http.server.active_requests": _active_requests_count_attrs, "http.server.duration": _duration_attrs, + "http.server.response.size": _duration_attrs, } @@ -61,7 +63,10 @@ async def http_app(scope, receive, send): { "type": "http.response.start", "status": 200, - "headers": [[b"Content-Type", b"text/plain"]], + "headers": [ + [b"Content-Type", b"text/plain"], + [b"content-length", b"1024"], + ], } ) await send({"type": "http.response.body", "body": b"*"}) @@ -103,7 +108,10 @@ async def error_asgi(scope, receive, send): { "type": "http.response.start", "status": 200, - "headers": [[b"Content-Type", b"text/plain"]], + "headers": [ + [b"Content-Type", b"text/plain"], + [b"content-length", b"1024"], + ], } ) await send({"type": "http.response.body", "body": b"*"}) @@ -126,7 +134,8 @@ def validate_outputs(self, outputs, error=None, modifiers=None): # Check http response start self.assertEqual(response_start["status"], 200) self.assertEqual( - response_start["headers"], [[b"Content-Type", b"text/plain"]] + response_start["headers"], + [[b"Content-Type", b"text/plain"], [b"content-length", b"1024"]], ) exc_info = self.scope.get("hack_exc_info") @@ -352,6 +361,7 @@ def test_traceresponse_header(self): response_start["headers"], [ [b"Content-Type", b"text/plain"], + [b"content-length", b"1024"], [b"traceresponse", f"{traceresponse}".encode()], [b"access-control-expose-headers", b"traceresponse"], ], @@ -565,6 +575,7 @@ def test_basic_metric_success(self): "http.flavor": "1.0", } metrics_list = self.memory_metrics_reader.get_metrics_data() + # pylint: disable=too-many-nested-blocks for resource_metric in metrics_list.resource_metrics: for scope_metrics in resource_metric.scope_metrics: for metric in scope_metrics.metrics: @@ -575,9 +586,12 @@ def test_basic_metric_success(self): dict(point.attributes), ) self.assertEqual(point.count, 1) - self.assertAlmostEqual( - duration, point.sum, delta=5 - ) + if metric.name == "http.server.duration": + self.assertAlmostEqual( + duration, point.sum, delta=5 + ) + elif metric.name == "http.server.response.size": + self.assertEqual(1024, point.sum) elif isinstance(point, NumberDataPoint): self.assertDictEqual( expected_requests_count_attributes, @@ -602,13 +616,12 @@ async def target_asgi(scope, receive, send): app = otel_asgi.OpenTelemetryMiddleware(target_asgi) self.seed_app(app) self.send_default_request() - metrics_list = self.memory_metrics_reader.get_metrics_data() assertions = 0 for resource_metric in metrics_list.resource_metrics: for scope_metrics in resource_metric.scope_metrics: for metric in scope_metrics.metrics: - if metric.name != "http.server.duration": + if metric.name == "http.server.active_requests": continue for point in metric.data.data_points: if isinstance(point, HistogramDataPoint): @@ -617,7 +630,7 @@ async def target_asgi(scope, receive, send): expected_target, ) assertions += 1 - self.assertEqual(assertions, 1) + self.assertEqual(assertions, 2) def test_no_metric_for_websockets(self): self.scope = { diff --git 
a/instrumentation/opentelemetry-instrumentation-fastapi/tests/test_fastapi_instrumentation.py b/instrumentation/opentelemetry-instrumentation-fastapi/tests/test_fastapi_instrumentation.py --- a/instrumentation/opentelemetry-instrumentation-fastapi/tests/test_fastapi_instrumentation.py +++ b/instrumentation/opentelemetry-instrumentation-fastapi/tests/test_fastapi_instrumentation.py @@ -44,10 +44,15 @@ _expected_metric_names = [ "http.server.active_requests", "http.server.duration", + "http.server.response.size", ] _recommended_attrs = { "http.server.active_requests": _active_requests_count_attrs, "http.server.duration": {*_duration_attrs, SpanAttributes.HTTP_TARGET}, + "http.server.response.size": { + *_duration_attrs, + SpanAttributes.HTTP_TARGET, + }, } @@ -187,7 +192,7 @@ def test_fastapi_metrics(self): for resource_metric in metrics_list.resource_metrics: self.assertTrue(len(resource_metric.scope_metrics) == 1) for scope_metric in resource_metric.scope_metrics: - self.assertTrue(len(scope_metric.metrics) == 2) + self.assertTrue(len(scope_metric.metrics) == 3) for metric in scope_metric.metrics: self.assertIn(metric.name, _expected_metric_names) data_points = list(metric.data.data_points) diff --git a/instrumentation/opentelemetry-instrumentation-starlette/tests/test_starlette_instrumentation.py b/instrumentation/opentelemetry-instrumentation-starlette/tests/test_starlette_instrumentation.py --- a/instrumentation/opentelemetry-instrumentation-starlette/tests/test_starlette_instrumentation.py +++ b/instrumentation/opentelemetry-instrumentation-starlette/tests/test_starlette_instrumentation.py @@ -49,10 +49,12 @@ _expected_metric_names = [ "http.server.active_requests", "http.server.duration", + "http.server.response.size", ] _recommended_attrs = { "http.server.active_requests": _active_requests_count_attrs, "http.server.duration": _duration_attrs, + "http.server.response.size": _duration_attrs, } @@ -128,7 +130,7 @@ def test_starlette_metrics(self): for resource_metric in metrics_list.resource_metrics: self.assertTrue(len(resource_metric.scope_metrics) == 1) for scope_metric in resource_metric.scope_metrics: - self.assertTrue(len(scope_metric.metrics) == 2) + self.assertTrue(len(scope_metric.metrics) == 3) for metric in scope_metric.metrics: self.assertIn(metric.name, _expected_metric_names) data_points = list(metric.data.data_points)
response_size_histogram for ASGI instrumentation

**Describe the solution you'd like**
Can we have a `response_size_histogram` inside the `opentelemetry-instrumentation-asgi` logic? We already have access to the `content-length` header of the response inside the `OpenTelemetryMiddleware`, so it would be useful to record the response size the same way the duration is recorded.

**Describe alternatives you've considered**
Currently, I'm thinking about disabling the FastAPI auto instrumentation and implementing both response size and duration myself.

**Additional context**
The only instrumentor I could find that has the ability to record the response size is `opentelemetry-instrumentation-tornado` (`HTTP_SERVER_RESPONSE_SIZE`). Let me know if that makes sense. Thanks.
According to the [specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/http-metrics.md#metric-httpserverresponsesize) it seems possible to add http.server.response.size if there is access to this value. The metric is optional, which may be why it hasn't been implemented yet. Do you want to work on this issue? Hello @shalevr, thank you for your response. I'm happy to work on it and will create a pull request.
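For illustration, here is a minimal, hypothetical standalone ASGI middleware that does what the request asks for: read `Content-Length` from the `http.response.start` message and record it on a histogram. This is a sketch of the idea under assumed names, not the middleware the instrumentation actually ships; only the metric name follows the semantic conventions:

```python
from opentelemetry import metrics

# Histogram for the (compressed) response size, named per the HTTP semantic conventions.
response_size = metrics.get_meter(__name__).create_histogram(
    name="http.server.response.size",
    unit="By",
    description="measures the size of HTTP response messages (compressed)",
)


class ResponseSizeMiddleware:
    def __init__(self, app):
        self.app = app

    async def __call__(self, scope, receive, send):
        async def wrapped_send(message):
            # ASGI headers are (bytes, bytes) pairs on the response start message.
            if message.get("type") == "http.response.start":
                for name, value in message.get("headers", []):
                    if name.lower() == b"content-length":
                        try:
                            response_size.record(int(value))
                        except ValueError:
                            pass
            await send(message)

        await self.app(scope, receive, wrapped_send)
```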
2023-05-08T23:06:25
open-telemetry/opentelemetry-python-contrib
1,791
open-telemetry__opentelemetry-python-contrib-1791
[ "1790" ]
13ce910f143fdebf21c8bdf307b3d236d867ecf8
diff --git a/instrumentation/opentelemetry-instrumentation-asyncpg/src/opentelemetry/instrumentation/asyncpg/__init__.py b/instrumentation/opentelemetry-instrumentation-asyncpg/src/opentelemetry/instrumentation/asyncpg/__init__.py --- a/instrumentation/opentelemetry-instrumentation-asyncpg/src/opentelemetry/instrumentation/asyncpg/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-asyncpg/src/opentelemetry/instrumentation/asyncpg/__init__.py @@ -96,11 +96,13 @@ def _hydrate_span_from_args(connection, query, parameters) -> dict: class AsyncPGInstrumentor(BaseInstrumentor): + + _leading_comment_remover = re.compile(r"^/\*.*?\*/") + _tracer = None + def __init__(self, capture_parameters=False): super().__init__() self.capture_parameters = capture_parameters - self._tracer = None - self._leading_comment_remover = re.compile(r"^/\*.*?\*/") def instrumentation_dependencies(self) -> Collection[str]: return _instruments
diff --git a/instrumentation/opentelemetry-instrumentation-asyncpg/tests/test_asyncpg_wrapper.py b/instrumentation/opentelemetry-instrumentation-asyncpg/tests/test_asyncpg_wrapper.py --- a/instrumentation/opentelemetry-instrumentation-asyncpg/tests/test_asyncpg_wrapper.py +++ b/instrumentation/opentelemetry-instrumentation-asyncpg/tests/test_asyncpg_wrapper.py @@ -5,7 +5,7 @@ class TestAsyncPGInstrumentation(TestBase): - def test_duplicated_instrumentation(self): + def test_duplicated_instrumentation_can_be_uninstrumented(self): AsyncPGInstrumentor().instrument() AsyncPGInstrumentor().instrument() AsyncPGInstrumentor().instrument() @@ -16,6 +16,14 @@ def test_duplicated_instrumentation(self): hasattr(method, "_opentelemetry_ext_asyncpg_applied") ) + def test_duplicated_instrumentation_works(self): + first = AsyncPGInstrumentor() + first.instrument() + second = AsyncPGInstrumentor() + second.instrument() + self.assertIsNotNone(first._tracer) + self.assertIsNotNone(second._tracer) + def test_duplicated_uninstrumentation(self): AsyncPGInstrumentor().instrument() AsyncPGInstrumentor().uninstrument()
Instantiating AsyncPGInstrumentor after .instrument has been called causes tracing to fail

**Describe your environment**
Python 3.11 on an M2 Mac.

**Steps to reproduce**
Run the following code (with `python -m asyncio` to allow top-level `async`/`await`):

```
import asyncpg
from opentelemetry.instrumentation.asyncpg import AsyncPGInstrumentor
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter
from opentelemetry.trace import set_tracer_provider

provider = TracerProvider()
processor = BatchSpanProcessor(ConsoleSpanExporter())
provider.add_span_processor(processor)
set_tracer_provider(provider)

dsn = "postgres://postgres:password@localhost:54320/postgres"

AsyncPGInstrumentor().instrument()
AsyncPGInstrumentor()

connection = await asyncpg.connect(dsn)
await connection.execute("SELECT 1")
```

**What is the expected behavior?**
The SQL query runs successfully and a span is exported to the console.

**What is the actual behavior?**
```
AttributeError: 'NoneType' object has no attribute 'start_as_current_span'
```

**Additional context**
Each instantiation of `AsyncPGInstrumentor` runs `__init__`, which sets `self._tracer` to `None`. However, `BaseInstrumentor` overrides `__new__` to implement the singleton pattern, so only one instance of `AsyncPGInstrumentor` is ever created. Instantiating `AsyncPGInstrumentor` after `instrument` has been called (which sets `self._tracer`) therefore sets `self._tracer` back to `None`, which is a state inconsistent with `_is_instrumented_by_opentelemetry` (which will still be `True`). A simple solution is to remove the line `self._tracer = None`.
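To illustrate the interaction described above in isolation, here is a stripped-down sketch of how a singleton whose `__init__` resets instance state loses its configuration on re-instantiation. The class names are made up; the fix in the patch above moves `_tracer` to a class attribute instead:

```python
class Singleton:
    _instance = None

    def __new__(cls, *args, **kwargs):
        # Mimics BaseInstrumentor: every call returns the same instance.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance


class BrokenInstrumentor(Singleton):
    def __init__(self):
        self._tracer = None  # runs on *every* instantiation of the singleton


first = BrokenInstrumentor()
first._tracer = "configured tracer"  # roughly what instrument() does
BrokenInstrumentor()                 # re-runs __init__ on the same object
print(first._tracer)                 # -> None, so tracing silently breaks
```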
2023-05-09T02:58:10
open-telemetry/opentelemetry-python-contrib
1,823
open-telemetry__opentelemetry-python-contrib-1823
[ "1822" ]
dadcd01524449ddee07a8d8405890a60caeb8c8e
diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/_load.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/_load.py new file mode 100644 --- /dev/null +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/_load.py @@ -0,0 +1,124 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from logging import getLogger +from os import environ + +from pkg_resources import iter_entry_points + +from opentelemetry.instrumentation.dependencies import ( + get_dist_dependency_conflicts, +) +from opentelemetry.instrumentation.distro import BaseDistro, DefaultDistro +from opentelemetry.instrumentation.environment_variables import ( + OTEL_PYTHON_CONFIGURATOR, + OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, + OTEL_PYTHON_DISTRO, +) +from opentelemetry.instrumentation.version import __version__ + +_logger = getLogger(__name__) + + +def _load_distro() -> BaseDistro: + distro_name = environ.get(OTEL_PYTHON_DISTRO, None) + for entry_point in iter_entry_points("opentelemetry_distro"): + try: + # If no distro is specified, use first to come up. + if distro_name is None or distro_name == entry_point.name: + distro = entry_point.load()() + if not isinstance(distro, BaseDistro): + _logger.debug( + "%s is not an OpenTelemetry Distro. 
Skipping", + entry_point.name, + ) + continue + _logger.debug( + "Distribution %s will be configured", entry_point.name + ) + return distro + except Exception as exc: # pylint: disable=broad-except + _logger.exception( + "Distribution %s configuration failed", entry_point.name + ) + raise exc + return DefaultDistro() + + +def _load_instrumentors(distro): + package_to_exclude = environ.get(OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, []) + if isinstance(package_to_exclude, str): + package_to_exclude = package_to_exclude.split(",") + # to handle users entering "requests , flask" or "requests, flask" with spaces + package_to_exclude = [x.strip() for x in package_to_exclude] + + for entry_point in iter_entry_points("opentelemetry_pre_instrument"): + entry_point.load()() + + for entry_point in iter_entry_points("opentelemetry_instrumentor"): + if entry_point.name in package_to_exclude: + _logger.debug( + "Instrumentation skipped for library %s", entry_point.name + ) + continue + + try: + conflict = get_dist_dependency_conflicts(entry_point.dist) + if conflict: + _logger.debug( + "Skipping instrumentation %s: %s", + entry_point.name, + conflict, + ) + continue + + # tell instrumentation to not run dep checks again as we already did it above + distro.load_instrumentor(entry_point, skip_dep_check=True) + _logger.debug("Instrumented %s", entry_point.name) + except Exception as exc: # pylint: disable=broad-except + _logger.exception("Instrumenting of %s failed", entry_point.name) + raise exc + + for entry_point in iter_entry_points("opentelemetry_post_instrument"): + entry_point.load()() + + +def _load_configurators(): + configurator_name = environ.get(OTEL_PYTHON_CONFIGURATOR, None) + configured = None + for entry_point in iter_entry_points("opentelemetry_configurator"): + if configured is not None: + _logger.warning( + "Configuration of %s not loaded, %s already loaded", + entry_point.name, + configured, + ) + continue + try: + if ( + configurator_name is None + or configurator_name == entry_point.name + ): + entry_point.load()().configure(auto_instrumentation_version=__version__) # type: ignore + configured = entry_point.name + else: + _logger.warning( + "Configuration of %s not loaded because %s is set by %s", + entry_point.name, + configurator_name, + OTEL_PYTHON_CONFIGURATOR, + ) + except Exception as exc: # pylint: disable=broad-except + _logger.exception("Configuration of %s failed", entry_point.name) + raise exc diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py @@ -16,99 +16,16 @@ from os import environ from os.path import abspath, dirname, pathsep -from pkg_resources import iter_entry_points - -from opentelemetry.instrumentation.dependencies import ( - get_dist_dependency_conflicts, -) -from opentelemetry.instrumentation.distro import BaseDistro, DefaultDistro -from opentelemetry.instrumentation.environment_variables import ( - OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, +from opentelemetry.instrumentation.auto_instrumentation._load import ( + _load_configurators, + _load_distro, + _load_instrumentors, ) from opentelemetry.instrumentation.utils import _python_path_without_directory -from 
opentelemetry.instrumentation.version import __version__ logger = getLogger(__name__) -def _load_distros() -> BaseDistro: - for entry_point in iter_entry_points("opentelemetry_distro"): - try: - distro = entry_point.load()() - if not isinstance(distro, BaseDistro): - logger.debug( - "%s is not an OpenTelemetry Distro. Skipping", - entry_point.name, - ) - continue - logger.debug( - "Distribution %s will be configured", entry_point.name - ) - return distro - except Exception as exc: # pylint: disable=broad-except - logger.exception( - "Distribution %s configuration failed", entry_point.name - ) - raise exc - return DefaultDistro() - - -def _load_instrumentors(distro): - package_to_exclude = environ.get(OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, []) - if isinstance(package_to_exclude, str): - package_to_exclude = package_to_exclude.split(",") - # to handle users entering "requests , flask" or "requests, flask" with spaces - package_to_exclude = [x.strip() for x in package_to_exclude] - - for entry_point in iter_entry_points("opentelemetry_pre_instrument"): - entry_point.load()() - - for entry_point in iter_entry_points("opentelemetry_instrumentor"): - if entry_point.name in package_to_exclude: - logger.debug( - "Instrumentation skipped for library %s", entry_point.name - ) - continue - - try: - conflict = get_dist_dependency_conflicts(entry_point.dist) - if conflict: - logger.debug( - "Skipping instrumentation %s: %s", - entry_point.name, - conflict, - ) - continue - - # tell instrumentation to not run dep checks again as we already did it above - distro.load_instrumentor(entry_point, skip_dep_check=True) - logger.debug("Instrumented %s", entry_point.name) - except Exception as exc: # pylint: disable=broad-except - logger.exception("Instrumenting of %s failed", entry_point.name) - raise exc - - for entry_point in iter_entry_points("opentelemetry_post_instrument"): - entry_point.load()() - - -def _load_configurators(): - configured = None - for entry_point in iter_entry_points("opentelemetry_configurator"): - if configured is not None: - logger.warning( - "Configuration of %s not loaded, %s already loaded", - entry_point.name, - configured, - ) - continue - try: - entry_point.load()().configure(auto_instrumentation_version=__version__) # type: ignore - configured = entry_point.name - except Exception as exc: # pylint: disable=broad-except - logger.exception("Configuration of %s failed", entry_point.name) - raise exc - - def initialize(): # prevents auto-instrumentation of subprocesses if code execs another python process environ["PYTHONPATH"] = _python_path_without_directory( @@ -116,7 +33,7 @@ def initialize(): ) try: - distro = _load_distros() + distro = _load_distro() distro.configure() _load_configurators() _load_instrumentors(distro) diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/environment_variables.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/environment_variables.py --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/environment_variables.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/environment_variables.py @@ -16,3 +16,13 @@ """ .. envvar:: OTEL_PYTHON_DISABLED_INSTRUMENTATIONS """ + +OTEL_PYTHON_DISTRO = "OTEL_PYTHON_DISTRO" +""" +.. envvar:: OTEL_PYTHON_DISTRO +""" + +OTEL_PYTHON_CONFIGURATOR = "OTEL_PYTHON_CONFIGURATOR" +""" +.. envvar:: OTEL_PYTHON_CONFIGURATOR +"""
diff --git a/opentelemetry-instrumentation/tests/auto_instrumentation/test_load.py b/opentelemetry-instrumentation/tests/auto_instrumentation/test_load.py new file mode 100644 --- /dev/null +++ b/opentelemetry-instrumentation/tests/auto_instrumentation/test_load.py @@ -0,0 +1,312 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# type: ignore + +from unittest import TestCase +from unittest.mock import Mock, call, patch + +from opentelemetry.instrumentation.auto_instrumentation import _load +from opentelemetry.instrumentation.environment_variables import ( + OTEL_PYTHON_CONFIGURATOR, + OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, + OTEL_PYTHON_DISTRO, +) +from opentelemetry.instrumentation.version import __version__ + + +class TestLoad(TestCase): + @patch.dict( + "os.environ", {OTEL_PYTHON_CONFIGURATOR: "custom_configurator2"} + ) + @patch( + "opentelemetry.instrumentation.auto_instrumentation._load.iter_entry_points" + ) + def test_load_configurators(self, iter_mock): + # Add multiple entry points but only specify the 2nd in the environment variable. + ep_mock1 = Mock() + ep_mock1.name = "custom_configurator1" + configurator_mock1 = Mock() + ep_mock1.load.return_value = configurator_mock1 + ep_mock2 = Mock() + ep_mock2.name = "custom_configurator2" + configurator_mock2 = Mock() + ep_mock2.load.return_value = configurator_mock2 + ep_mock3 = Mock() + ep_mock3.name = "custom_configurator3" + configurator_mock3 = Mock() + ep_mock3.load.return_value = configurator_mock3 + + iter_mock.return_value = (ep_mock1, ep_mock2, ep_mock3) + _load._load_configurators() + configurator_mock1.assert_not_called() + configurator_mock2().configure.assert_called_once_with( + auto_instrumentation_version=__version__ + ) + configurator_mock3.assert_not_called() + + @patch.dict( + "os.environ", {OTEL_PYTHON_CONFIGURATOR: "custom_configurator2"} + ) + @patch( + "opentelemetry.instrumentation.auto_instrumentation._load.iter_entry_points" + ) + def test_load_configurators_no_ep( + self, + iter_mock, + ): + iter_mock.return_value = () + # Confirm method does not crash if not entry points exist. + _load._load_configurators() + + @patch.dict( + "os.environ", {OTEL_PYTHON_CONFIGURATOR: "custom_configurator2"} + ) + @patch( + "opentelemetry.instrumentation.auto_instrumentation._load.iter_entry_points" + ) + def test_load_configurators_error(self, iter_mock): + # Add multiple entry points but only specify the 2nd in the environment variable. 
+ ep_mock1 = Mock() + ep_mock1.name = "custom_configurator1" + configurator_mock1 = Mock() + ep_mock1.load.return_value = configurator_mock1 + ep_mock2 = Mock() + ep_mock2.name = "custom_configurator2" + configurator_mock2 = Mock() + configurator_mock2().configure.side_effect = Exception() + ep_mock2.load.return_value = configurator_mock2 + ep_mock3 = Mock() + ep_mock3.name = "custom_configurator3" + configurator_mock3 = Mock() + ep_mock3.load.return_value = configurator_mock3 + + iter_mock.return_value = (ep_mock1, ep_mock2, ep_mock3) + # Confirm failed configuration raises exception. + self.assertRaises(Exception, _load._load_configurators) + + @patch.dict("os.environ", {OTEL_PYTHON_DISTRO: "custom_distro2"}) + @patch( + "opentelemetry.instrumentation.auto_instrumentation._load.isinstance" + ) + @patch( + "opentelemetry.instrumentation.auto_instrumentation._load.iter_entry_points" + ) + def test_load_distro(self, iter_mock, isinstance_mock): + # Add multiple entry points but only specify the 2nd in the environment variable. + ep_mock1 = Mock() + ep_mock1.name = "custom_distro1" + distro_mock1 = Mock() + ep_mock1.load.return_value = distro_mock1 + ep_mock2 = Mock() + ep_mock2.name = "custom_distro2" + distro_mock2 = Mock() + ep_mock2.load.return_value = distro_mock2 + ep_mock3 = Mock() + ep_mock3.name = "custom_distro3" + distro_mock3 = Mock() + ep_mock3.load.return_value = distro_mock3 + + iter_mock.return_value = (ep_mock1, ep_mock2, ep_mock3) + # Mock entry points to be instances of BaseDistro. + isinstance_mock.return_value = True + self.assertEqual( + _load._load_distro(), + distro_mock2(), + ) + + @patch.dict("os.environ", {OTEL_PYTHON_DISTRO: "custom_distro2"}) + @patch( + "opentelemetry.instrumentation.auto_instrumentation._load.isinstance" + ) + @patch( + "opentelemetry.instrumentation.auto_instrumentation._load.DefaultDistro" + ) + @patch( + "opentelemetry.instrumentation.auto_instrumentation._load.iter_entry_points" + ) + def test_load_distro_not_distro( + self, iter_mock, default_distro_mock, isinstance_mock + ): + # Add multiple entry points but only specify the 2nd in the environment variable. + ep_mock1 = Mock() + ep_mock1.name = "custom_distro1" + distro_mock1 = Mock() + ep_mock1.load.return_value = distro_mock1 + ep_mock2 = Mock() + ep_mock2.name = "custom_distro2" + distro_mock2 = Mock() + ep_mock2.load.return_value = distro_mock2 + ep_mock3 = Mock() + ep_mock3.name = "custom_distro3" + distro_mock3 = Mock() + ep_mock3.load.return_value = distro_mock3 + + iter_mock.return_value = (ep_mock1, ep_mock2, ep_mock3) + # Confirm default distro is used if specified entry point is not a BaseDistro + isinstance_mock.return_value = False + self.assertEqual( + _load._load_distro(), + default_distro_mock(), + ) + + @patch.dict("os.environ", {OTEL_PYTHON_DISTRO: "custom_distro2"}) + @patch( + "opentelemetry.instrumentation.auto_instrumentation._load.DefaultDistro" + ) + @patch( + "opentelemetry.instrumentation.auto_instrumentation._load.iter_entry_points" + ) + def test_load_distro_no_ep(self, iter_mock, default_distro_mock): + iter_mock.return_value = () + # Confirm default distro is used if there are no entry points. 
+ self.assertEqual( + _load._load_distro(), + default_distro_mock(), + ) + + @patch.dict("os.environ", {OTEL_PYTHON_DISTRO: "custom_distro2"}) + @patch( + "opentelemetry.instrumentation.auto_instrumentation._load.isinstance" + ) + @patch( + "opentelemetry.instrumentation.auto_instrumentation._load.iter_entry_points" + ) + def test_load_distro_error(self, iter_mock, isinstance_mock): + ep_mock1 = Mock() + ep_mock1.name = "custom_distro1" + distro_mock1 = Mock() + ep_mock1.load.return_value = distro_mock1 + ep_mock2 = Mock() + ep_mock2.name = "custom_distro2" + distro_mock2 = Mock() + distro_mock2.side_effect = Exception() + ep_mock2.load.return_value = distro_mock2 + ep_mock3 = Mock() + ep_mock3.name = "custom_distro3" + distro_mock3 = Mock() + ep_mock3.load.return_value = distro_mock3 + + iter_mock.return_value = (ep_mock1, ep_mock2, ep_mock3) + isinstance_mock.return_value = True + # Confirm method raises exception if it fails to load a distro. + self.assertRaises(Exception, _load._load_distro) + + @patch.dict( + "os.environ", + {OTEL_PYTHON_DISABLED_INSTRUMENTATIONS: " instr1 , instr3 "}, + ) + @patch( + "opentelemetry.instrumentation.auto_instrumentation._load.get_dist_dependency_conflicts" + ) + @patch( + "opentelemetry.instrumentation.auto_instrumentation._load.iter_entry_points" + ) + def test_load_instrumentors(self, iter_mock, dep_mock): + # Mock opentelemetry_pre_instrument entry points + pre_ep_mock1 = Mock() + pre_ep_mock1.name = "pre1" + pre_mock1 = Mock() + pre_ep_mock1.load.return_value = pre_mock1 + + pre_ep_mock2 = Mock() + pre_ep_mock2.name = "pre2" + pre_mock2 = Mock() + pre_ep_mock2.load.return_value = pre_mock2 + + # Mock opentelemetry_instrumentor entry points + ep_mock1 = Mock() + ep_mock1.name = "instr1" + + ep_mock2 = Mock() + ep_mock2.name = "instr2" + + ep_mock3 = Mock() + ep_mock3.name = "instr3" + + ep_mock4 = Mock() + ep_mock4.name = "instr4" + + # Mock opentelemetry_instrumentor entry points + post_ep_mock1 = Mock() + post_ep_mock1.name = "post1" + post_mock1 = Mock() + post_ep_mock1.load.return_value = post_mock1 + + post_ep_mock2 = Mock() + post_ep_mock2.name = "post2" + post_mock2 = Mock() + post_ep_mock2.load.return_value = post_mock2 + + distro_mock = Mock() + + # Mock entry points in order + iter_mock.side_effect = [ + (pre_ep_mock1, pre_ep_mock2), + (ep_mock1, ep_mock2, ep_mock3, ep_mock4), + (post_ep_mock1, post_ep_mock2), + ] + # No dependency conflict + dep_mock.return_value = None + _load._load_instrumentors(distro_mock) + # All opentelemetry_pre_instrument entry points should be loaded + pre_mock1.assert_called_once() + pre_mock2.assert_called_once() + self.assertEqual(iter_mock.call_count, 3) + # Only non-disabled instrumentations should be loaded + distro_mock.load_instrumentor.assert_has_calls( + [ + call(ep_mock2, skip_dep_check=True), + call(ep_mock4, skip_dep_check=True), + ] + ) + self.assertEqual(distro_mock.load_instrumentor.call_count, 2) + # All opentelemetry_post_instrument entry points should be loaded + post_mock1.assert_called_once() + post_mock2.assert_called_once() + + @patch.dict( + "os.environ", + {OTEL_PYTHON_DISABLED_INSTRUMENTATIONS: " instr1 , instr3 "}, + ) + @patch( + "opentelemetry.instrumentation.auto_instrumentation._load.get_dist_dependency_conflicts" + ) + @patch( + "opentelemetry.instrumentation.auto_instrumentation._load.iter_entry_points" + ) + def test_load_instrumentors_dep_conflict(self, iter_mock, dep_mock): + ep_mock1 = Mock() + ep_mock1.name = "instr1" + + ep_mock2 = Mock() + ep_mock2.name = "instr2" + + 
ep_mock3 = Mock() + ep_mock3.name = "instr3" + + ep_mock4 = Mock() + ep_mock4.name = "instr4" + + distro_mock = Mock() + + iter_mock.return_value = (ep_mock1, ep_mock2, ep_mock3, ep_mock4) + # If a dependency conflict is raised, that instrumentation should not be loaded, but others still should. + dep_mock.side_effect = [None, "DependencyConflict"] + _load._load_instrumentors(distro_mock) + distro_mock.load_instrumentor.assert_has_calls( + [ + call(ep_mock2, skip_dep_check=True), + ] + ) + distro_mock.load_instrumentor.assert_called_once() diff --git a/opentelemetry-instrumentation/tests/test_run.py b/opentelemetry-instrumentation/tests/auto_instrumentation/test_run.py similarity index 100% rename from opentelemetry-instrumentation/tests/test_run.py rename to opentelemetry-instrumentation/tests/auto_instrumentation/test_run.py
Allow users to specify which distro and configurator to use in auto instrumentation **Is your feature request related to a problem?** Yes. Currently, if a user has multiple packages with distro or configurator entry points installed, they have no control over which are used for the auto instrumentation flow. **Describe the solution you'd like** Copying what we do for exporters, I am adding 2 env vars where users can specify the entry point names of the distros and configurators they want used. I added warnings for each distro and configurator not used. **Describe alternatives you've considered** I considered requiring these environment variables to be used but instead chose to make them optional. If the env vars are not set, then the existing behavior continues: the first distro and configurator found will be used. **Additional context** With the way the package is set up, this also adds the --distro and --configurator command line args. Also, we currently have no tests for the sitecustomize file. I found it tricky to implement because importing the module triggers instrumentation. Open to suggestions.
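To make the proposed selection logic concrete, here is a minimal sketch, assuming pkg_resources-style entry points as used in the patch above. `pick_distro` is a hypothetical helper for illustration, not the actual `_load_distro` implementation; only the `OTEL_PYTHON_DISTRO` variable and the `opentelemetry_distro` entry-point group are taken from the patch.

```python
# Simplified sketch: select the distro entry point named in
# OTEL_PYTHON_DISTRO, or fall back to the first one discovered.
from os import environ

from pkg_resources import iter_entry_points


def pick_distro():
    wanted = environ.get("OTEL_PYTHON_DISTRO")  # e.g. "my_custom_distro" (hypothetical name)
    for entry_point in iter_entry_points("opentelemetry_distro"):
        # No value set means "no preference", keeping the old first-found behavior.
        if wanted is None or entry_point.name == wanted:
            return entry_point.load()()  # load and instantiate the selected distro
    return None  # the real loader falls back to a default distro here
```

A user would then opt in with something like `OTEL_PYTHON_DISTRO=my_custom_distro opentelemetry-instrument python app.py`; leaving the variable unset preserves the first-found behavior, and `OTEL_PYTHON_CONFIGURATOR` works the same way for configurators.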
2023-05-23T22:40:30
open-telemetry/opentelemetry-python-contrib
1,824
open-telemetry__opentelemetry-python-contrib-1824
[ "1726" ]
60753e2a5528ac34aa415f1daa2fc2db53fc5eb4
diff --git a/instrumentation/opentelemetry-instrumentation-falcon/src/opentelemetry/instrumentation/falcon/__init__.py b/instrumentation/opentelemetry-instrumentation-falcon/src/opentelemetry/instrumentation/falcon/__init__.py --- a/instrumentation/opentelemetry-instrumentation-falcon/src/opentelemetry/instrumentation/falcon/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-falcon/src/opentelemetry/instrumentation/falcon/__init__.py @@ -428,7 +428,6 @@ def process_resource(self, req, resp, resource, params): resource_name = resource.__class__.__name__ span.set_attribute("falcon.resource", resource_name) - span.update_name(f"{resource_name}.on_{req.method.lower()}") def process_response( self, req, resp, resource, req_succeeded=None @@ -483,6 +482,12 @@ def process_response( response_headers = resp.headers if span.is_recording() and span.kind == trace.SpanKind.SERVER: + # Check if low-cardinality route is available as per semantic-conventions + if req.uri_template: + span.update_name(f"{req.method} {req.uri_template}") + else: + span.update_name(f"{req.method}") + custom_attributes = ( otel_wsgi.collect_custom_response_headers_attributes( response_headers.items()
diff --git a/instrumentation/opentelemetry-instrumentation-falcon/tests/app.py b/instrumentation/opentelemetry-instrumentation-falcon/tests/app.py --- a/instrumentation/opentelemetry-instrumentation-falcon/tests/app.py +++ b/instrumentation/opentelemetry-instrumentation-falcon/tests/app.py @@ -61,6 +61,13 @@ def on_get(self, _, resp): resp.set_header("my-secret-header", "my-secret-value") +class UserResource: + def on_get(self, req, resp, user_id): + # pylint: disable=no-member + resp.status = falcon.HTTP_200 + resp.body = f"Hello user {user_id}" + + def make_app(): _parsed_falcon_version = package_version.parse(falcon.__version__) if _parsed_falcon_version < package_version.parse("3.0.0"): @@ -76,4 +83,6 @@ def make_app(): app.add_route( "/test_custom_response_headers", CustomResponseHeaderResource() ) + app.add_route("/user/{user_id}", UserResource()) + return app diff --git a/instrumentation/opentelemetry-instrumentation-falcon/tests/test_falcon.py b/instrumentation/opentelemetry-instrumentation-falcon/tests/test_falcon.py --- a/instrumentation/opentelemetry-instrumentation-falcon/tests/test_falcon.py +++ b/instrumentation/opentelemetry-instrumentation-falcon/tests/test_falcon.py @@ -110,7 +110,7 @@ def _test_method(self, method): spans = self.memory_exporter.get_finished_spans() self.assertEqual(len(spans), 1) span = spans[0] - self.assertEqual(span.name, f"HelloWorldResource.on_{method.lower()}") + self.assertEqual(span.name, f"{method} /hello") self.assertEqual(span.status.status_code, StatusCode.UNSET) self.assertEqual( span.status.description, @@ -145,7 +145,7 @@ def test_404(self): spans = self.memory_exporter.get_finished_spans() self.assertEqual(len(spans), 1) span = spans[0] - self.assertEqual(span.name, "GET /does-not-exist") + self.assertEqual(span.name, "GET") self.assertEqual(span.status.status_code, StatusCode.UNSET) self.assertSpanHasAttributes( span, @@ -177,7 +177,7 @@ def test_500(self): spans = self.memory_exporter.get_finished_spans() self.assertEqual(len(spans), 1) span = spans[0] - self.assertEqual(span.name, "ErrorResource.on_get") + self.assertEqual(span.name, "GET /error") self.assertFalse(span.status.is_ok) self.assertEqual(span.status.status_code, StatusCode.ERROR) self.assertEqual( @@ -206,6 +206,33 @@ def test_500(self): span.attributes[SpanAttributes.NET_PEER_IP], "127.0.0.1" ) + def test_url_template(self): + self.client().simulate_get("/user/123") + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.name, "GET /user/{user_id}") + self.assertEqual(span.status.status_code, StatusCode.UNSET) + self.assertEqual( + span.status.description, + None, + ) + self.assertSpanHasAttributes( + span, + { + SpanAttributes.HTTP_METHOD: "GET", + SpanAttributes.HTTP_SERVER_NAME: "falconframework.org", + SpanAttributes.HTTP_SCHEME: "http", + SpanAttributes.NET_HOST_PORT: 80, + SpanAttributes.HTTP_HOST: "falconframework.org", + SpanAttributes.HTTP_TARGET: "/", + SpanAttributes.NET_PEER_PORT: "65133", + SpanAttributes.HTTP_FLAVOR: "1.1", + "falcon.resource": "UserResource", + SpanAttributes.HTTP_STATUS_CODE: 200, + }, + ) + def test_uninstrument(self): self.client().simulate_get(path="/hello") spans = self.memory_exporter.get_finished_spans()
Improve Falcon OpenTelemetry Instrumentation for Hug Web Framework **Is your feature request related to a problem?** We are extensively using the [Hug](https://hugapi.github.io/hug/) web framework in our organization. Since Hug is built on top of Falcon, we have been using the `falcon-instrumentation` library to instrument our applications. However, this approach results in spans that are difficult to interpret, as shown in the screenshot below: <img width="536" alt="Screen Shot 2023-03-22 at 11 03 16" src="https://user-images.githubusercontent.com/83533845/226852876-6456db91-e9fc-481d-9fc9-c195e2c49d39.png"> **Describe the solution you'd like** We believe that better instrumentation support for Hug can be achieved through one of the following options: - Develop a dedicated `hug-instrumentation` library, which would be largely similar (about 90%) to the existing falcon-instrumentation . - Create a `hug-instrumentation` library that depends on falcon-instrumentation and overrides some functions. However, this solution might be less maintainable and less readable. - Introduce a configuration option in the `falcon-instrumentation` library that enhances its compatibility with Hug. **Additional context** I have not personally tried the Falcon framework to see how spans are named there, but I assume they are similar to those of other frameworks. I would appreciate any input from project contributors on this matter and am willing to explore the issue further. What are your thoughts on the best approach to improve OpenTelemetry instrumentation for the Hug web framework?
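For context on how the patch above addresses this: the span name is derived from the HTTP method plus Falcon's low-cardinality route template instead of the resource's handler name. A minimal sketch of that naming rule, assuming a Falcon-style request object with `method` and `uri_template` attributes:

```python
# Sketch of the low-cardinality span naming adopted in the patch above.
def _span_name(req) -> str:
    # e.g. "GET /user/{user_id}" when a route template matched;
    # just "GET" when no route matched (404s and similar).
    if req.uri_template:
        return f"{req.method} {req.uri_template}"
    return f"{req.method}"
```

Since Hug registers its routes through Falcon, names like `GET /user/{user_id}` should replace the hard-to-read handler names shown in the screenshot without requiring a separate `hug-instrumentation` package.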
2023-05-24T19:28:39
open-telemetry/opentelemetry-python-contrib
1,830
open-telemetry__opentelemetry-python-contrib-1830
[ "1829" ]
e70437a36ea8d153c0fa11b9ceb575f7001ad744
diff --git a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/__init__.py b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/__init__.py --- a/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-redis/src/opentelemetry/instrumentation/redis/__init__.py @@ -136,6 +136,43 @@ def _set_connection_attributes(span, conn): span.set_attribute(key, value) +def _build_span_name(instance, cmd_args): + if len(cmd_args) > 0 and cmd_args[0]: + name = cmd_args[0] + else: + name = instance.connection_pool.connection_kwargs.get("db", 0) + return name + + +def _build_span_meta_data_for_pipeline(instance): + try: + command_stack = ( + instance.command_stack + if hasattr(instance, "command_stack") + else instance._command_stack + ) + + cmds = [ + _format_command_args(c.args if hasattr(c, "args") else c[0]) + for c in command_stack + ] + resource = "\n".join(cmds) + + span_name = " ".join( + [ + (c.args[0] if hasattr(c, "args") else c[0][0]) + for c in command_stack + ] + ) + except (AttributeError, IndexError): + command_stack = [] + resource = "" + span_name = "" + + return command_stack, resource, span_name + + +# pylint: disable=R0915 def _instrument( tracer, request_hook: _RequestHookT = None, @@ -143,11 +180,8 @@ def _instrument( ): def _traced_execute_command(func, instance, args, kwargs): query = _format_command_args(args) + name = _build_span_name(instance, args) - if len(args) > 0 and args[0]: - name = args[0] - else: - name = instance.connection_pool.connection_kwargs.get("db", 0) with tracer.start_as_current_span( name, kind=trace.SpanKind.CLIENT ) as span: @@ -163,31 +197,11 @@ def _traced_execute_command(func, instance, args, kwargs): return response def _traced_execute_pipeline(func, instance, args, kwargs): - try: - command_stack = ( - instance.command_stack - if hasattr(instance, "command_stack") - else instance._command_stack - ) - - cmds = [ - _format_command_args( - c.args if hasattr(c, "args") else c[0], - ) - for c in command_stack - ] - resource = "\n".join(cmds) - - span_name = " ".join( - [ - (c.args[0] if hasattr(c, "args") else c[0][0]) - for c in command_stack - ] - ) - except (AttributeError, IndexError): - command_stack = [] - resource = "" - span_name = "" + ( + command_stack, + resource, + span_name, + ) = _build_span_meta_data_for_pipeline(instance) with tracer.start_as_current_span( span_name, kind=trace.SpanKind.CLIENT @@ -232,32 +246,72 @@ def _traced_execute_pipeline(func, instance, args, kwargs): "ClusterPipeline.execute", _traced_execute_pipeline, ) + + async def _async_traced_execute_command(func, instance, args, kwargs): + query = _format_command_args(args) + name = _build_span_name(instance, args) + + with tracer.start_as_current_span( + name, kind=trace.SpanKind.CLIENT + ) as span: + if span.is_recording(): + span.set_attribute(SpanAttributes.DB_STATEMENT, query) + _set_connection_attributes(span, instance) + span.set_attribute("db.redis.args_length", len(args)) + if callable(request_hook): + request_hook(span, instance, args, kwargs) + response = await func(*args, **kwargs) + if callable(response_hook): + response_hook(span, instance, response) + return response + + async def _async_traced_execute_pipeline(func, instance, args, kwargs): + ( + command_stack, + resource, + span_name, + ) = _build_span_meta_data_for_pipeline(instance) + + with tracer.start_as_current_span( + 
span_name, kind=trace.SpanKind.CLIENT + ) as span: + if span.is_recording(): + span.set_attribute(SpanAttributes.DB_STATEMENT, resource) + _set_connection_attributes(span, instance) + span.set_attribute( + "db.redis.pipeline_length", len(command_stack) + ) + response = await func(*args, **kwargs) + if callable(response_hook): + response_hook(span, instance, response) + return response + if redis.VERSION >= _REDIS_ASYNCIO_VERSION: wrap_function_wrapper( "redis.asyncio", f"{redis_class}.execute_command", - _traced_execute_command, + _async_traced_execute_command, ) wrap_function_wrapper( "redis.asyncio.client", f"{pipeline_class}.execute", - _traced_execute_pipeline, + _async_traced_execute_pipeline, ) wrap_function_wrapper( "redis.asyncio.client", f"{pipeline_class}.immediate_execute_command", - _traced_execute_command, + _async_traced_execute_command, ) if redis.VERSION >= _REDIS_ASYNCIO_CLUSTER_VERSION: wrap_function_wrapper( "redis.asyncio.cluster", "RedisCluster.execute_command", - _traced_execute_command, + _async_traced_execute_command, ) wrap_function_wrapper( "redis.asyncio.cluster", "ClusterPipeline.execute", - _traced_execute_pipeline, + _async_traced_execute_pipeline, )
diff --git a/instrumentation/opentelemetry-instrumentation-redis/tests/test_redis.py b/instrumentation/opentelemetry-instrumentation-redis/tests/test_redis.py --- a/instrumentation/opentelemetry-instrumentation-redis/tests/test_redis.py +++ b/instrumentation/opentelemetry-instrumentation-redis/tests/test_redis.py @@ -11,9 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import asyncio from unittest import mock import redis +import redis.asyncio from opentelemetry import trace from opentelemetry.instrumentation.redis import RedisInstrumentor @@ -21,6 +23,24 @@ from opentelemetry.trace import SpanKind +class AsyncMock: + """A sufficient async mock implementation. + + Python 3.7 doesn't have an inbuilt async mock class, so this is used. + """ + + def __init__(self): + self.mock = mock.Mock() + + async def __call__(self, *args, **kwargs): + future = asyncio.Future() + future.set_result("random") + return future + + def __getattr__(self, item): + return AsyncMock() + + class TestRedis(TestBase): def setUp(self): super().setUp() @@ -87,6 +107,35 @@ def test_instrument_uninstrument(self): spans = self.memory_exporter.get_finished_spans() self.assertEqual(len(spans), 1) + def test_instrument_uninstrument_async_client_command(self): + redis_client = redis.asyncio.Redis() + + with mock.patch.object(redis_client, "connection", AsyncMock()): + asyncio.run(redis_client.get("key")) + + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 1) + self.memory_exporter.clear() + + # Test uninstrument + RedisInstrumentor().uninstrument() + + with mock.patch.object(redis_client, "connection", AsyncMock()): + asyncio.run(redis_client.get("key")) + + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 0) + self.memory_exporter.clear() + + # Test instrument again + RedisInstrumentor().instrument() + + with mock.patch.object(redis_client, "connection", AsyncMock()): + asyncio.run(redis_client.get("key")) + + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 1) + def test_response_hook(self): redis_client = redis.Redis() connection = redis.connection.Connection() diff --git a/tests/opentelemetry-docker-tests/tests/redis/test_redis_functional.py b/tests/opentelemetry-docker-tests/tests/redis/test_redis_functional.py --- a/tests/opentelemetry-docker-tests/tests/redis/test_redis_functional.py +++ b/tests/opentelemetry-docker-tests/tests/redis/test_redis_functional.py @@ -13,6 +13,7 @@ # limitations under the License. 
import asyncio +from time import time_ns import redis import redis.asyncio @@ -318,6 +319,29 @@ def test_basics(self): ) self.assertEqual(span.attributes.get("db.redis.args_length"), 2) + def test_execute_command_traced_full_time(self): + """Command should be traced for coroutine execution time, not creation time.""" + coro_created_time = None + finish_time = None + + async def pipeline_simple(): + nonlocal coro_created_time + nonlocal finish_time + + # delay coroutine creation from coroutine execution + coro = self.redis_client.get("foo") + coro_created_time = time_ns() + await coro + finish_time = time_ns() + + async_call(pipeline_simple()) + + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertTrue(span.start_time > coro_created_time) + self.assertTrue(span.end_time < finish_time) + def test_pipeline_traced(self): async def pipeline_simple(): async with self.redis_client.pipeline( @@ -340,6 +364,35 @@ async def pipeline_simple(): ) self.assertEqual(span.attributes.get("db.redis.pipeline_length"), 3) + def test_pipeline_traced_full_time(self): + """Command should be traced for coroutine execution time, not creation time.""" + coro_created_time = None + finish_time = None + + async def pipeline_simple(): + async with self.redis_client.pipeline( + transaction=False + ) as pipeline: + nonlocal coro_created_time + nonlocal finish_time + pipeline.set("blah", 32) + pipeline.rpush("foo", "éé") + pipeline.hgetall("xxx") + + # delay coroutine creation from coroutine execution + coro = pipeline.execute() + coro_created_time = time_ns() + await coro + finish_time = time_ns() + + async_call(pipeline_simple()) + + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertTrue(span.start_time > coro_created_time) + self.assertTrue(span.end_time < finish_time) + def test_pipeline_immediate(self): async def pipeline_immediate(): async with self.redis_client.pipeline() as pipeline: @@ -359,6 +412,33 @@ async def pipeline_immediate(): span.attributes.get(SpanAttributes.DB_STATEMENT), "SET ? ?" 
) + def test_pipeline_immediate_traced_full_time(self): + """Command should be traced for coroutine execution time, not creation time.""" + coro_created_time = None + finish_time = None + + async def pipeline_simple(): + async with self.redis_client.pipeline( + transaction=False + ) as pipeline: + nonlocal coro_created_time + nonlocal finish_time + pipeline.set("a", 1) + + # delay coroutine creation from coroutine execution + coro = pipeline.immediate_execute_command("SET", "b", 2) + coro_created_time = time_ns() + await coro + finish_time = time_ns() + + async_call(pipeline_simple()) + + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertTrue(span.start_time > coro_created_time) + self.assertTrue(span.end_time < finish_time) + def test_parent(self): """Ensure OpenTelemetry works with redis.""" ot_tracer = trace.get_tracer("redis_svc") @@ -408,6 +488,29 @@ def test_basics(self): ) self.assertEqual(span.attributes.get("db.redis.args_length"), 2) + def test_execute_command_traced_full_time(self): + """Command should be traced for coroutine execution time, not creation time.""" + coro_created_time = None + finish_time = None + + async def pipeline_simple(): + nonlocal coro_created_time + nonlocal finish_time + + # delay coroutine creation from coroutine execution + coro = self.redis_client.get("foo") + coro_created_time = time_ns() + await coro + finish_time = time_ns() + + async_call(pipeline_simple()) + + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertTrue(span.start_time > coro_created_time) + self.assertTrue(span.end_time < finish_time) + def test_pipeline_traced(self): async def pipeline_simple(): async with self.redis_client.pipeline( @@ -430,6 +533,35 @@ async def pipeline_simple(): ) self.assertEqual(span.attributes.get("db.redis.pipeline_length"), 3) + def test_pipeline_traced_full_time(self): + """Command should be traced for coroutine execution time, not creation time.""" + coro_created_time = None + finish_time = None + + async def pipeline_simple(): + async with self.redis_client.pipeline( + transaction=False + ) as pipeline: + nonlocal coro_created_time + nonlocal finish_time + pipeline.set("blah", 32) + pipeline.rpush("foo", "éé") + pipeline.hgetall("xxx") + + # delay coroutine creation from coroutine execution + coro = pipeline.execute() + coro_created_time = time_ns() + await coro + finish_time = time_ns() + + async_call(pipeline_simple()) + + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertTrue(span.start_time > coro_created_time) + self.assertTrue(span.end_time < finish_time) + def test_parent(self): """Ensure OpenTelemetry works with redis.""" ot_tracer = trace.get_tracer("redis_svc")
Redis asyncio clients not correctly traced Currently the redis asyncio clients (Redis, Pipeline etc) are not being traced correctly. Every trace is a tiny fragment of the actual time, occurring around the start time of the redis command being called. I narrowed this down to the instrumentor for redis not wrapping the async variants as expected (it uses the same sync approach that it uses for the sync redis - and as a result the span only traces the coroutine creation, but not the coroutine actually being awaited). The result is a very short trace (< 1ms) at the start of the redis call that doesn't actually show the full time of the redis command. I have fixed this in a follow up PR, which I will put up shortly and link. **Describe your environment** Python version: 3.11 redis-py version: 4.5.5 opentelemetry-instrumentation-redis version: current master or 0.39b0 OS: Ubuntu **Steps to reproduce** Here is an example script, which calls an async redis command, and an async redis pipeline. For each async call: - A parent span is started - asyncio.sleep for 50 milliseconds (to create a time delta between the parent span and the actual redis span for visual clarity) - call a redis command (debug-sleep) for 100 milliseconds The data can then be visualised and the parent span compared to the redis-instrumentor generated span to see that the span doesn't encompass the full time of the command. ``` import asyncio from opentelemetry.instrumentation.redis import RedisInstrumentor from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter from redis.asyncio import Redis tracer_provider = TracerProvider( resource=Resource.create({ "service.name": "my-app", "service.instance.id": "my-app-4", }), ) tracer = tracer_provider.get_tracer(__name__) async def call_redis_command() -> None: redis_pool = Redis(host="0.0.0.0", port=6379) with tracer.start_as_current_span("redis-command-wrapper"): await asyncio.sleep(0.05) await redis_pool.execute_command("DEBUG", "SLEEP", "0.1") async def call_redis_pipeline() -> None: redis_pool = Redis(host="0.0.0.0", port=6379) with tracer.start_as_current_span("redis-pipeline-wrapper"): await asyncio.sleep(0.05) p = await redis_pool.pipeline() p.execute_command("DEBUG", "SLEEP", "0.1") await p.execute() exporter = ConsoleSpanExporter() span_processor = BatchSpanProcessor(span_exporter=exporter) tracer_provider.add_span_processor(span_processor) RedisInstrumentor().instrument(tracer_provider=tracer_provider) asyncio.run(call_redis_command()) asyncio.run(call_redis_pipeline()) ``` Below I have used jaeger to visualise the traces for ease of reading **What is the expected behavior?** Expecting to see 100 milliseconds of a redis command, wrapped by the 150 milliseconds parent span ![image](https://github.com/open-telemetry/opentelemetry-python-contrib/assets/66319645/2e822246-350e-4a8a-8731-941fbc83ece4) (This is a screenshot from when I fix the code) **What is the actual behavior?** Actually seeing a really tiny redis command span (< 0.2ms) wrapped by the 150millisecond parent span ![image](https://github.com/open-telemetry/opentelemetry-python-contrib/assets/66319645/a7fe29e6-2fbe-4b1b-ba65-4fb9b1457f73) (This is a screenshot from the results of the example provided script)
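The coroutine-creation-versus-execution distinction described above is easy to demonstrate in isolation. The following is a self-contained sketch (illustrative only, not the instrumentor's actual wrapping code) showing why a synchronous wrapper measures almost nothing while an async wrapper measures the full command:

```python
import asyncio
import time


async def fake_redis_command():
    await asyncio.sleep(0.1)  # stands in for the DEBUG SLEEP call above
    return b"OK"


def sync_wrapper(func, *args):
    start = time.perf_counter()
    coro = func(*args)  # only *creates* the coroutine object
    print(f"sync 'span': {time.perf_counter() - start:.6f}s")  # ~0s
    return coro  # the command runs later, outside the measured window


async def async_wrapper(func, *args):
    start = time.perf_counter()
    result = await func(*args)  # waits for the command to actually finish
    print(f"async 'span': {time.perf_counter() - start:.6f}s")  # ~0.1s
    return result


async def main():
    await sync_wrapper(fake_redis_command)   # measures coroutine creation only
    await async_wrapper(fake_redis_command)  # measures the full execution


asyncio.run(main())
```

This mirrors the fix in the patch above, which adds `_async_traced_execute_command` and `_async_traced_execute_pipeline` so the span stays open across the `await`.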
2023-05-30T21:19:18
open-telemetry/opentelemetry-python-contrib
1,833
open-telemetry__opentelemetry-python-contrib-1833
[ "1832" ]
ffc9334dd73d1247f28db92a143ec7cc91624995
diff --git a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py --- a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py +++ b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py @@ -250,24 +250,30 @@ def _start_span( # * ipv4:127.0.0.1:57284 # * ipv4:10.2.1.1:57284,127.0.0.1:57284 # - try: - ip, port = ( - context.peer().split(",")[0].split(":", 1)[1].rsplit(":", 1) - ) - ip = unquote(ip) - attributes.update( - { - SpanAttributes.NET_PEER_IP: ip, - SpanAttributes.NET_PEER_PORT: port, - } - ) + if context.peer() != "unix:": + try: + ip, port = ( + context.peer() + .split(",")[0] + .split(":", 1)[1] + .rsplit(":", 1) + ) + ip = unquote(ip) + attributes.update( + { + SpanAttributes.NET_PEER_IP: ip, + SpanAttributes.NET_PEER_PORT: port, + } + ) - # other telemetry sources add this, so we will too - if ip in ("[::1]", "127.0.0.1"): - attributes[SpanAttributes.NET_PEER_NAME] = "localhost" + # other telemetry sources add this, so we will too + if ip in ("[::1]", "127.0.0.1"): + attributes[SpanAttributes.NET_PEER_NAME] = "localhost" - except IndexError: - logger.warning("Failed to parse peer address '%s'", context.peer()) + except IndexError: + logger.warning( + "Failed to parse peer address '%s'", context.peer() + ) return self._tracer.start_as_current_span( name=handler_call_details.method,
diff --git a/instrumentation/opentelemetry-instrumentation-grpc/tests/test_server_interceptor.py b/instrumentation/opentelemetry-instrumentation-grpc/tests/test_server_interceptor.py --- a/instrumentation/opentelemetry-instrumentation-grpc/tests/test_server_interceptor.py +++ b/instrumentation/opentelemetry-instrumentation-grpc/tests/test_server_interceptor.py @@ -15,6 +15,8 @@ # pylint:disable=unused-argument # pylint:disable=no-self-use +import contextlib +import tempfile import threading from concurrent import futures @@ -78,23 +80,32 @@ def ServerStreamingMethod(self, request, context): class TestOpenTelemetryServerInterceptor(TestBase): - def test_instrumentor(self): - def handler(request, context): - return b"" - - grpc_server_instrumentor = GrpcInstrumentorServer() - grpc_server_instrumentor.instrument() - with futures.ThreadPoolExecutor(max_workers=1) as executor: + net_peer_span_attributes = { + SpanAttributes.NET_PEER_IP: "[::1]", + SpanAttributes.NET_PEER_NAME: "localhost", + } + + @contextlib.contextmanager + def server(self, max_workers=1, interceptors=None): + with futures.ThreadPoolExecutor(max_workers=max_workers) as executor: server = grpc.server( executor, options=(("grpc.so_reuseport", 0),), + interceptors=interceptors or [], ) - server.add_generic_rpc_handlers((UnaryUnaryRpcHandler(handler),)) - port = server.add_insecure_port("[::]:0") channel = grpc.insecure_channel(f"localhost:{port:d}") + yield server, channel + + def test_instrumentor(self): + def handler(request, context): + return b"" + grpc_server_instrumentor = GrpcInstrumentorServer() + grpc_server_instrumentor.instrument() + with self.server(max_workers=1) as (server, channel): + server.add_generic_rpc_handlers((UnaryUnaryRpcHandler(handler),)) rpc_call = "TestServicer/handler" try: server.start() @@ -117,8 +128,7 @@ def handler(request, context): self.assertSpanHasAttributes( span, { - SpanAttributes.NET_PEER_IP: "[::1]", - SpanAttributes.NET_PEER_NAME: "localhost", + **self.net_peer_span_attributes, SpanAttributes.RPC_METHOD: "handler", SpanAttributes.RPC_SERVICE: "TestServicer", SpanAttributes.RPC_SYSTEM: "grpc", @@ -137,17 +147,8 @@ def handler(request, context): grpc_server_instrumentor = GrpcInstrumentorServer() grpc_server_instrumentor.instrument() grpc_server_instrumentor.uninstrument() - with futures.ThreadPoolExecutor(max_workers=1) as executor: - server = grpc.server( - executor, - options=(("grpc.so_reuseport", 0),), - ) - + with self.server(max_workers=1) as (server, channel): server.add_generic_rpc_handlers((UnaryUnaryRpcHandler(handler),)) - - port = server.add_insecure_port("[::]:0") - channel = grpc.insecure_channel(f"localhost:{port:d}") - rpc_call = "TestServicer/test" try: server.start() @@ -164,15 +165,11 @@ def test_create_span(self): # Intercept gRPC calls... 
interceptor = server_interceptor() - with futures.ThreadPoolExecutor(max_workers=1) as executor: - server = grpc.server( - executor, - options=(("grpc.so_reuseport", 0),), - interceptors=[interceptor], - ) + with self.server( + max_workers=1, + interceptors=[interceptor], + ) as (server, channel): add_GRPCTestServerServicer_to_server(Servicer(), server) - port = server.add_insecure_port("[::]:0") - channel = grpc.insecure_channel(f"localhost:{port:d}") rpc_call = "/GRPCTestServer/SimpleMethod" request = Request(client_id=1, request_data="test") @@ -199,8 +196,7 @@ def test_create_span(self): self.assertSpanHasAttributes( span, { - SpanAttributes.NET_PEER_IP: "[::1]", - SpanAttributes.NET_PEER_NAME: "localhost", + **self.net_peer_span_attributes, SpanAttributes.RPC_METHOD: "SimpleMethod", SpanAttributes.RPC_SERVICE: "GRPCTestServer", SpanAttributes.RPC_SYSTEM: "grpc", @@ -231,15 +227,11 @@ def SimpleMethod(self, request, context): interceptor = server_interceptor() # setup the server - with futures.ThreadPoolExecutor(max_workers=1) as executor: - server = grpc.server( - executor, - options=(("grpc.so_reuseport", 0),), - interceptors=[interceptor], - ) + with self.server( + max_workers=1, + interceptors=[interceptor], + ) as (server, channel): add_GRPCTestServerServicer_to_server(TwoSpanServicer(), server) - port = server.add_insecure_port("[::]:0") - channel = grpc.insecure_channel(f"localhost:{port:d}") # setup the RPC rpc_call = "/GRPCTestServer/SimpleMethod" @@ -268,8 +260,7 @@ def SimpleMethod(self, request, context): self.assertSpanHasAttributes( parent_span, { - SpanAttributes.NET_PEER_IP: "[::1]", - SpanAttributes.NET_PEER_NAME: "localhost", + **self.net_peer_span_attributes, SpanAttributes.RPC_METHOD: "SimpleMethod", SpanAttributes.RPC_SERVICE: "GRPCTestServer", SpanAttributes.RPC_SYSTEM: "grpc", @@ -292,15 +283,11 @@ def test_create_span_streaming(self): # Intercept gRPC calls... interceptor = server_interceptor() - with futures.ThreadPoolExecutor(max_workers=1) as executor: - server = grpc.server( - executor, - options=(("grpc.so_reuseport", 0),), - interceptors=[interceptor], - ) + with self.server( + max_workers=1, + interceptors=[interceptor], + ) as (server, channel): add_GRPCTestServerServicer_to_server(Servicer(), server) - port = server.add_insecure_port("[::]:0") - channel = grpc.insecure_channel(f"localhost:{port:d}") # setup the RPC rpc_call = "/GRPCTestServer/ServerStreamingMethod" @@ -328,8 +315,7 @@ def test_create_span_streaming(self): self.assertSpanHasAttributes( span, { - SpanAttributes.NET_PEER_IP: "[::1]", - SpanAttributes.NET_PEER_NAME: "localhost", + **self.net_peer_span_attributes, SpanAttributes.RPC_METHOD: "ServerStreamingMethod", SpanAttributes.RPC_SERVICE: "GRPCTestServer", SpanAttributes.RPC_SYSTEM: "grpc", @@ -360,15 +346,11 @@ def ServerStreamingMethod(self, request, context): # Intercept gRPC calls... 
interceptor = server_interceptor() - with futures.ThreadPoolExecutor(max_workers=1) as executor: - server = grpc.server( - executor, - options=(("grpc.so_reuseport", 0),), - interceptors=[interceptor], - ) + with self.server( + max_workers=1, + interceptors=[interceptor], + ) as (server, channel): add_GRPCTestServerServicer_to_server(TwoSpanServicer(), server) - port = server.add_insecure_port("[::]:0") - channel = grpc.insecure_channel(f"localhost:{port:d}") # setup the RPC rpc_call = "/GRPCTestServer/ServerStreamingMethod" @@ -397,8 +379,7 @@ def ServerStreamingMethod(self, request, context): self.assertSpanHasAttributes( parent_span, { - SpanAttributes.NET_PEER_IP: "[::1]", - SpanAttributes.NET_PEER_NAME: "localhost", + **self.net_peer_span_attributes, SpanAttributes.RPC_METHOD: "ServerStreamingMethod", SpanAttributes.RPC_SERVICE: "GRPCTestServer", SpanAttributes.RPC_SYSTEM: "grpc", @@ -427,17 +408,12 @@ def handler(request, context): active_span_in_handler = trace.get_current_span() return b"" - with futures.ThreadPoolExecutor(max_workers=1) as executor: - server = grpc.server( - executor, - options=(("grpc.so_reuseport", 0),), - interceptors=[interceptor], - ) + with self.server( + max_workers=1, + interceptors=[interceptor], + ) as (server, channel): server.add_generic_rpc_handlers((UnaryUnaryRpcHandler(handler),)) - port = server.add_insecure_port("[::]:0") - channel = grpc.insecure_channel(f"localhost:{port:d}") - active_span_before_call = trace.get_current_span() try: server.start() @@ -463,17 +439,12 @@ def handler(request, context): active_spans_in_handler.append(trace.get_current_span()) return b"" - with futures.ThreadPoolExecutor(max_workers=1) as executor: - server = grpc.server( - executor, - options=(("grpc.so_reuseport", 0),), - interceptors=[interceptor], - ) + with self.server( + max_workers=1, + interceptors=[interceptor], + ) as (server, channel): server.add_generic_rpc_handlers((UnaryUnaryRpcHandler(handler),)) - port = server.add_insecure_port("[::]:0") - channel = grpc.insecure_channel(f"localhost:{port:d}") - try: server.start() channel.unary_unary("TestServicer/handler")(b"") @@ -496,8 +467,7 @@ def handler(request, context): self.assertSpanHasAttributes( span, { - SpanAttributes.NET_PEER_IP: "[::1]", - SpanAttributes.NET_PEER_NAME: "localhost", + **self.net_peer_span_attributes, SpanAttributes.RPC_METHOD: "handler", SpanAttributes.RPC_SERVICE: "TestServicer", SpanAttributes.RPC_SYSTEM: "grpc", @@ -527,17 +497,12 @@ def handler(request, context): active_spans_in_handler.append(trace.get_current_span()) return b"" - with futures.ThreadPoolExecutor(max_workers=2) as executor: - server = grpc.server( - executor, - options=(("grpc.so_reuseport", 0),), - interceptors=[interceptor], - ) + with self.server( + max_workers=2, + interceptors=[interceptor], + ) as (server, channel): server.add_generic_rpc_handlers((UnaryUnaryRpcHandler(handler),)) - port = server.add_insecure_port("[::]:0") - channel = grpc.insecure_channel(f"localhost:{port:d}") - try: server.start() # Interleave calls so spans are active on each thread at the same @@ -568,8 +533,7 @@ def handler(request, context): self.assertSpanHasAttributes( span, { - SpanAttributes.NET_PEER_IP: "[::1]", - SpanAttributes.NET_PEER_NAME: "localhost", + **self.net_peer_span_attributes, SpanAttributes.RPC_METHOD: "handler", SpanAttributes.RPC_SERVICE: "TestServicer", SpanAttributes.RPC_SYSTEM: "grpc", @@ -592,18 +556,11 @@ def test_abort(self): def handler(request, context): 
context.abort(grpc.StatusCode.FAILED_PRECONDITION, failure_message) - with futures.ThreadPoolExecutor(max_workers=1) as executor: - server = grpc.server( - executor, - options=(("grpc.so_reuseport", 0),), - interceptors=[interceptor], - ) - + with self.server( + max_workers=1, + interceptors=[interceptor], + ) as (server, channel): server.add_generic_rpc_handlers((UnaryUnaryRpcHandler(handler),)) - - port = server.add_insecure_port("[::]:0") - channel = grpc.insecure_channel(f"localhost:{port:d}") - rpc_call = "TestServicer/handler" server.start() @@ -635,8 +592,7 @@ def handler(request, context): self.assertSpanHasAttributes( span, { - SpanAttributes.NET_PEER_IP: "[::1]", - SpanAttributes.NET_PEER_NAME: "localhost", + **self.net_peer_span_attributes, SpanAttributes.RPC_METHOD: "handler", SpanAttributes.RPC_SERVICE: "TestServicer", SpanAttributes.RPC_SYSTEM: "grpc", @@ -647,6 +603,28 @@ def handler(request, context): ) +class TestOpenTelemetryServerInterceptorUnix( + TestOpenTelemetryServerInterceptor, +): + net_peer_span_attributes = {} + + @contextlib.contextmanager + def server(self, max_workers=1, interceptors=None): + with futures.ThreadPoolExecutor( + max_workers=max_workers + ) as executor, tempfile.TemporaryDirectory() as tmp: + server = grpc.server( + executor, + options=(("grpc.so_reuseport", 0),), + interceptors=interceptors or [], + ) + + sock = f"unix://{tmp}/grpc.sock" + server.add_insecure_port(sock) + channel = grpc.insecure_channel(sock) + yield server, channel + + def get_latch(num): """Get a countdown latch function for use in n threads.""" cv = threading.Condition()
gRPC server throws ValueError for Unix sockets **Describe your environment** ``` Ubuntu 20.04 x86_64 Python 3.10.9 ``` **Steps to reproduce** I'll post a PR that addresses the issue with a test that reproduces the problem shortly. This problem can be triggered by running the `grpc` instrumentation and utilizing Unix sockets for connections. ```py server.add_insecure_port("unix:///tmp/grpc.sock") channel = grpc.insecure_channel("unix:///tmp/grpc.sock") ``` **What is the expected behavior?** The `grpc` instrumentation should collect traces when running on a Unix socket. **What is the actual behavior?** A `ValueError` is thrown here: https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py#L254-L256 `context.peer()` is `unix:` which results in `ip, port = [""]`.
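To make the failure mode concrete, the peer-parsing expression can be run against both peer formats. This is a standalone sketch for illustration, using the same split/rsplit chain as the interceptor code linked above:

```python
# Standalone illustration of the peer parsing that fails for Unix sockets.
def parse_peer(peer: str):
    # "ipv4:127.0.0.1:57284" -> ("127.0.0.1", "57284")
    ip, port = peer.split(",")[0].split(":", 1)[1].rsplit(":", 1)
    return ip, port


print(parse_peer("ipv4:127.0.0.1:57284"))  # ('127.0.0.1', '57284')

try:
    parse_peer("unix:")  # no address:port pair to unpack
except ValueError as exc:
    print(exc)  # not enough values to unpack (expected 2, got 1)
```

The patch above avoids this by skipping the `net.peer.*` attributes entirely when `context.peer()` is `unix:`.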
2023-06-01T17:18:48
open-telemetry/opentelemetry-python-contrib
1,854
open-telemetry__opentelemetry-python-contrib-1854
[ "1844" ]
bcf770d079dc7b76aa5260ebf30f1620ffe51408
diff --git a/instrumentation/opentelemetry-instrumentation-httpx/src/opentelemetry/instrumentation/httpx/__init__.py b/instrumentation/opentelemetry-instrumentation-httpx/src/opentelemetry/instrumentation/httpx/__init__.py --- a/instrumentation/opentelemetry-instrumentation-httpx/src/opentelemetry/instrumentation/httpx/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-httpx/src/opentelemetry/instrumentation/httpx/__init__.py @@ -25,7 +25,7 @@ import httpx from opentelemetry.instrumentation.httpx import HTTPXClientInstrumentor - url = "https://httpbin.org/get" + url = "https://some.url/get" HTTPXClientInstrumentor().instrument() with httpx.Client() as client: @@ -46,7 +46,7 @@ import httpx from opentelemetry.instrumentation.httpx import HTTPXClientInstrumentor - url = "https://httpbin.org/get" + url = "https://some.url/get" with httpx.Client(transport=telemetry_transport) as client: HTTPXClientInstrumentor.instrument_client(client) @@ -91,7 +91,7 @@ SyncOpenTelemetryTransport, ) - url = "https://httpbin.org/get" + url = "https://some.url/get" transport = httpx.HTTPTransport() telemetry_transport = SyncOpenTelemetryTransport(transport)
diff --git a/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py b/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py --- a/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py +++ b/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py @@ -705,11 +705,11 @@ def test_response_attributes_invalid_status_code(self): self.assertEqual(self.span.set_status.call_count, 1) def test_credential_removal(self): - self.scope["server"] = ("username:[email protected]", 80) + self.scope["server"] = ("username:password@mock", 80) self.scope["path"] = "/status/200" attrs = otel_asgi.collect_request_attributes(self.scope) self.assertEqual( - attrs[SpanAttributes.HTTP_URL], "http://httpbin.org/status/200" + attrs[SpanAttributes.HTTP_URL], "http://mock/status/200" ) def test_collect_target_attribute_missing(self): diff --git a/instrumentation/opentelemetry-instrumentation-httpx/tests/test_httpx_integration.py b/instrumentation/opentelemetry-instrumentation-httpx/tests/test_httpx_integration.py --- a/instrumentation/opentelemetry-instrumentation-httpx/tests/test_httpx_integration.py +++ b/instrumentation/opentelemetry-instrumentation-httpx/tests/test_httpx_integration.py @@ -97,7 +97,7 @@ class BaseTestCases: class BaseTest(TestBase, metaclass=abc.ABCMeta): # pylint: disable=no-member - URL = "http://httpbin.org/status/200" + URL = "http://mock/status/200" response_hook = staticmethod(_response_hook) request_hook = staticmethod(_request_hook) no_update_request_hook = staticmethod(_no_update_request_hook) @@ -165,7 +165,7 @@ def test_basic_multiple(self): self.assert_span(num_spans=2) def test_not_foundbasic(self): - url_404 = "http://httpbin.org/status/404" + url_404 = "http://mock/status/404" with respx.mock: respx.get(url_404).mock(httpx.Response(404)) diff --git a/instrumentation/opentelemetry-instrumentation-requests/tests/test_requests_integration.py b/instrumentation/opentelemetry-instrumentation-requests/tests/test_requests_integration.py --- a/instrumentation/opentelemetry-instrumentation-requests/tests/test_requests_integration.py +++ b/instrumentation/opentelemetry-instrumentation-requests/tests/test_requests_integration.py @@ -63,7 +63,7 @@ class RequestsIntegrationTestBase(abc.ABC): # pylint: disable=no-member # pylint: disable=too-many-public-methods - URL = "http://httpbin.org/status/200" + URL = "http://mock/status/200" # pylint: disable=invalid-name def setUp(self): @@ -152,7 +152,7 @@ def response_hook(span, request_obj, response): self.assertEqual(span.attributes["response_hook_attr"], "value") def test_excluded_urls_explicit(self): - url_404 = "http://httpbin.org/status/404" + url_404 = "http://mock/status/404" httpretty.register_uri( httpretty.GET, url_404, @@ -194,7 +194,7 @@ def name_callback(method, url): self.assertEqual(span.name, "HTTP GET") def test_not_foundbasic(self): - url_404 = "http://httpbin.org/status/404" + url_404 = "http://mock/status/404" httpretty.register_uri( httpretty.GET, url_404, @@ -460,7 +460,7 @@ def perform_request(url: str, session: requests.Session = None): return session.get(url) def test_credential_removal(self): - new_url = "http://username:[email protected]/status/200" + new_url = "http://username:password@mock/status/200" self.perform_request(new_url) span = self.assert_span() diff --git a/instrumentation/opentelemetry-instrumentation-urllib/tests/test_metrics_instrumentation.py 
b/instrumentation/opentelemetry-instrumentation-urllib/tests/test_metrics_instrumentation.py --- a/instrumentation/opentelemetry-instrumentation-urllib/tests/test_metrics_instrumentation.py +++ b/instrumentation/opentelemetry-instrumentation-urllib/tests/test_metrics_instrumentation.py @@ -27,8 +27,8 @@ class TestUrllibMetricsInstrumentation(TestBase): - URL = "http://httpbin.org/status/200" - URL_POST = "http://httpbin.org/post" + URL = "http://mock/status/200" + URL_POST = "http://mock/post" def setUp(self): super().setUp() diff --git a/instrumentation/opentelemetry-instrumentation-urllib/tests/test_urllib_integration.py b/instrumentation/opentelemetry-instrumentation-urllib/tests/test_urllib_integration.py --- a/instrumentation/opentelemetry-instrumentation-urllib/tests/test_urllib_integration.py +++ b/instrumentation/opentelemetry-instrumentation-urllib/tests/test_urllib_integration.py @@ -46,9 +46,9 @@ class RequestsIntegrationTestBase(abc.ABC): # pylint: disable=no-member - URL = "http://httpbin.org/status/200" - URL_TIMEOUT = "http://httpbin.org/timeout/0" - URL_EXCEPTION = "http://httpbin.org/exception/0" + URL = "http://mock/status/200" + URL_TIMEOUT = "http://mock/timeout/0" + URL_EXCEPTION = "http://mock/exception/0" # pylint: disable=invalid-name def setUp(self): @@ -83,7 +83,7 @@ def setUp(self): ) httpretty.register_uri( httpretty.GET, - "http://httpbin.org/status/500", + "http://mock/status/500", status=500, ) @@ -142,7 +142,7 @@ def test_basic(self): ) def test_excluded_urls_explicit(self): - url_201 = "http://httpbin.org/status/201" + url_201 = "http://mock/status/201" httpretty.register_uri( httpretty.GET, url_201, @@ -172,7 +172,7 @@ def test_excluded_urls_from_env(self): self.assert_span(num_spans=1) def test_not_foundbasic(self): - url_404 = "http://httpbin.org/status/404/" + url_404 = "http://mock/status/404/" httpretty.register_uri( httpretty.GET, url_404, @@ -336,14 +336,14 @@ def test_custom_tracer_provider(self): def test_requests_exception_with_response(self, *_, **__): with self.assertRaises(HTTPError): - self.perform_request("http://httpbin.org/status/500") + self.perform_request("http://mock/status/500") span = self.assert_span() self.assertEqual( dict(span.attributes), { SpanAttributes.HTTP_METHOD: "GET", - SpanAttributes.HTTP_URL: "http://httpbin.org/status/500", + SpanAttributes.HTTP_URL: "http://mock/status/500", SpanAttributes.HTTP_STATUS_CODE: 500, }, ) @@ -365,7 +365,7 @@ def test_requests_timeout_exception(self, *_, **__): self.assertEqual(span.status.status_code, StatusCode.ERROR) def test_credential_removal(self): - url = "http://username:[email protected]/status/200" + url = "http://username:password@mock/status/200" with self.assertRaises(Exception): self.perform_request(url) diff --git a/instrumentation/opentelemetry-instrumentation-urllib3/tests/test_urllib3_integration.py b/instrumentation/opentelemetry-instrumentation-urllib3/tests/test_urllib3_integration.py --- a/instrumentation/opentelemetry-instrumentation-urllib3/tests/test_urllib3_integration.py +++ b/instrumentation/opentelemetry-instrumentation-urllib3/tests/test_urllib3_integration.py @@ -35,8 +35,8 @@ class TestURLLib3Instrumentor(TestBase): - HTTP_URL = "http://httpbin.org/status/200" - HTTPS_URL = "https://httpbin.org/status/200" + HTTP_URL = "http://mock/status/200" + HTTPS_URL = "https://mock/status/200" def setUp(self): super().setUp() @@ -123,7 +123,7 @@ def test_basic_http_success(self): self.assert_success_span(response, self.HTTP_URL) def 
test_basic_http_success_using_connection_pool(self): - pool = urllib3.HTTPConnectionPool("httpbin.org") + pool = urllib3.HTTPConnectionPool("mock") response = pool.request("GET", "/status/200") self.assert_success_span(response, self.HTTP_URL) @@ -133,13 +133,13 @@ def test_basic_https_success(self): self.assert_success_span(response, self.HTTPS_URL) def test_basic_https_success_using_connection_pool(self): - pool = urllib3.HTTPSConnectionPool("httpbin.org") + pool = urllib3.HTTPSConnectionPool("mock") response = pool.request("GET", "/status/200") self.assert_success_span(response, self.HTTPS_URL) def test_basic_not_found(self): - url_404 = "http://httpbin.org/status/404" + url_404 = "http://mock/status/404" httpretty.register_uri(httpretty.GET, url_404, status=404) response = self.perform_request(url_404) @@ -152,30 +152,30 @@ def test_basic_not_found(self): self.assertIs(trace.status.StatusCode.ERROR, span.status.status_code) def test_basic_http_non_default_port(self): - url = "http://httpbin.org:666/status/200" + url = "http://mock:666/status/200" httpretty.register_uri(httpretty.GET, url, body="Hello!") response = self.perform_request(url) self.assert_success_span(response, url) def test_basic_http_absolute_url(self): - url = "http://httpbin.org:666/status/200" + url = "http://mock:666/status/200" httpretty.register_uri(httpretty.GET, url, body="Hello!") - pool = urllib3.HTTPConnectionPool("httpbin.org", port=666) + pool = urllib3.HTTPConnectionPool("mock", port=666) response = pool.request("GET", url) self.assert_success_span(response, url) def test_url_open_explicit_arg_parameters(self): - url = "http://httpbin.org:666/status/200" + url = "http://mock:666/status/200" httpretty.register_uri(httpretty.GET, url, body="Hello!") - pool = urllib3.HTTPConnectionPool("httpbin.org", port=666) + pool = urllib3.HTTPConnectionPool("mock", port=666) response = pool.urlopen(method="GET", url="/status/200") self.assert_success_span(response, url) def test_excluded_urls_explicit(self): - url_201 = "http://httpbin.org/status/201" + url_201 = "http://mock/status/201" httpretty.register_uri( httpretty.GET, url_201, @@ -301,7 +301,7 @@ def url_filter(url): self.assert_success_span(response, self.HTTP_URL) def test_credential_removal(self): - url = "http://username:[email protected]/status/200" + url = "http://username:password@mock/status/200" response = self.perform_request(url) self.assert_success_span(response, self.HTTP_URL) @@ -339,7 +339,7 @@ def request_hook(span, request, headers, body): headers = {"header1": "value1", "header2": "value2"} body = "param1=1&param2=2" - pool = urllib3.HTTPConnectionPool("httpbin.org") + pool = urllib3.HTTPConnectionPool("mock") response = pool.request( "POST", "/status/200", body=body, headers=headers ) @@ -366,7 +366,7 @@ def request_hook(span, request, headers, body): body = "param1=1&param2=2" - pool = urllib3.HTTPConnectionPool("httpbin.org") + pool = urllib3.HTTPConnectionPool("mock") response = pool.urlopen("POST", "/status/200", body) self.assertEqual(b"Hello!", response.data) diff --git a/instrumentation/opentelemetry-instrumentation-urllib3/tests/test_urllib3_metrics.py b/instrumentation/opentelemetry-instrumentation-urllib3/tests/test_urllib3_metrics.py --- a/instrumentation/opentelemetry-instrumentation-urllib3/tests/test_urllib3_metrics.py +++ b/instrumentation/opentelemetry-instrumentation-urllib3/tests/test_urllib3_metrics.py @@ -26,7 +26,7 @@ class TestURLLib3InstrumentorMetric(HttpTestBase, TestBase): - HTTP_URL = "http://httpbin.org/status/200" + 
HTTP_URL = "http://mock/status/200" def setUp(self): super().setUp() @@ -68,11 +68,11 @@ def test_basic_metrics(self): min_data_point=client_duration_estimated, attributes={ "http.flavor": "1.1", - "http.host": "httpbin.org", + "http.host": "mock", "http.method": "GET", "http.scheme": "http", "http.status_code": 200, - "net.peer.name": "httpbin.org", + "net.peer.name": "mock", "net.peer.port": 80, }, ) @@ -91,11 +91,11 @@ def test_basic_metrics(self): min_data_point=0, attributes={ "http.flavor": "1.1", - "http.host": "httpbin.org", + "http.host": "mock", "http.method": "GET", "http.scheme": "http", "http.status_code": 200, - "net.peer.name": "httpbin.org", + "net.peer.name": "mock", "net.peer.port": 80, }, ) @@ -116,11 +116,11 @@ def test_basic_metrics(self): min_data_point=expected_size, attributes={ "http.flavor": "1.1", - "http.host": "httpbin.org", + "http.host": "mock", "http.method": "GET", "http.scheme": "http", "http.status_code": 200, - "net.peer.name": "httpbin.org", + "net.peer.name": "mock", "net.peer.port": 80, }, ) @@ -144,11 +144,11 @@ def test_str_request_body_size_metrics(self): min_data_point=6, attributes={ "http.flavor": "1.1", - "http.host": "httpbin.org", + "http.host": "mock", "http.method": "POST", "http.scheme": "http", "http.status_code": 200, - "net.peer.name": "httpbin.org", + "net.peer.name": "mock", "net.peer.port": 80, }, ) @@ -172,11 +172,11 @@ def test_bytes_request_body_size_metrics(self): min_data_point=6, attributes={ "http.flavor": "1.1", - "http.host": "httpbin.org", + "http.host": "mock", "http.method": "POST", "http.scheme": "http", "http.status_code": 200, - "net.peer.name": "httpbin.org", + "net.peer.name": "mock", "net.peer.port": 80, }, ) @@ -201,11 +201,11 @@ def test_fields_request_body_size_metrics(self): min_data_point=expected_value, attributes={ "http.flavor": "1.1", - "http.host": "httpbin.org", + "http.host": "mock", "http.method": "POST", "http.scheme": "http", "http.status_code": 200, - "net.peer.name": "httpbin.org", + "net.peer.name": "mock", "net.peer.port": 80, }, ) @@ -229,11 +229,11 @@ def test_bytesio_request_body_size_metrics(self): min_data_point=6, attributes={ "http.flavor": "1.1", - "http.host": "httpbin.org", + "http.host": "mock", "http.method": "POST", "http.scheme": "http", "http.status_code": 200, - "net.peer.name": "httpbin.org", + "net.peer.name": "mock", "net.peer.port": 80, }, ) diff --git a/instrumentation/opentelemetry-instrumentation-wsgi/tests/test_wsgi_middleware.py b/instrumentation/opentelemetry-instrumentation-wsgi/tests/test_wsgi_middleware.py --- a/instrumentation/opentelemetry-instrumentation-wsgi/tests/test_wsgi_middleware.py +++ b/instrumentation/opentelemetry-instrumentation-wsgi/tests/test_wsgi_middleware.py @@ -437,10 +437,10 @@ def test_response_attributes(self): self.span.set_attribute.assert_has_calls(expected, any_order=True) def test_credential_removal(self): - self.environ["HTTP_HOST"] = "username:[email protected]" + self.environ["HTTP_HOST"] = "username:password@mock" self.environ["PATH_INFO"] = "/status/200" expected = { - SpanAttributes.HTTP_URL: "http://httpbin.com/status/200", + SpanAttributes.HTTP_URL: "http://mock/status/200", SpanAttributes.NET_HOST_PORT: 80, } self.assertGreaterEqual(
Mock httpbin for requests tests
2023-06-12T18:08:30
open-telemetry/opentelemetry-python-contrib
1,867
open-telemetry__opentelemetry-python-contrib-1867
[ "1865" ]
32ae65ed55150131a6889e6661de5684b133b5da
diff --git a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py --- a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py @@ -511,6 +511,11 @@ def __init__( unit="By", description="measures the size of HTTP response messages (compressed).", ) + self.server_request_size_histogram = self.meter.create_histogram( + name=MetricInstruments.HTTP_SERVER_REQUEST_SIZE, + unit="By", + description="Measures the size of HTTP request messages (compressed).", + ) self.active_requests_counter = self.meter.create_up_down_counter( name=MetricInstruments.HTTP_SERVER_ACTIVE_REQUESTS, unit="requests", @@ -603,6 +608,16 @@ async def __call__(self, scope, receive, send): self.server_response_size_histogram.record( self.content_length_header, duration_attrs ) + request_size = asgi_getter.get(scope, "content-length") + if request_size: + try: + request_size_amount = int(request_size[0]) + except ValueError: + pass + else: + self.server_request_size_histogram.record( + request_size_amount, duration_attrs + ) if token: context.detach(token)
diff --git a/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py b/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py --- a/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py +++ b/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py @@ -47,16 +47,19 @@ "http.server.active_requests", "http.server.duration", "http.server.response.size", + "http.server.request.size", ] _recommended_attrs = { "http.server.active_requests": _active_requests_count_attrs, "http.server.duration": _duration_attrs, "http.server.response.size": _duration_attrs, + "http.server.request.size": _duration_attrs, } async def http_app(scope, receive, send): message = await receive() + scope["headers"] = [(b"content-length", b"128")] assert scope["type"] == "http" if message.get("type") == "http.request": await send( @@ -99,6 +102,7 @@ async def error_asgi(scope, receive, send): assert isinstance(scope, dict) assert scope["type"] == "http" message = await receive() + scope["headers"] = [(b"content-length", b"128")] if message.get("type") == "http.request": try: raise ValueError @@ -592,6 +596,8 @@ def test_basic_metric_success(self): ) elif metric.name == "http.server.response.size": self.assertEqual(1024, point.sum) + elif metric.name == "http.server.request.size": + self.assertEqual(128, point.sum) elif isinstance(point, NumberDataPoint): self.assertDictEqual( expected_requests_count_attributes, @@ -630,7 +636,7 @@ async def target_asgi(scope, receive, send): expected_target, ) assertions += 1 - self.assertEqual(assertions, 2) + self.assertEqual(assertions, 3) def test_no_metric_for_websockets(self): self.scope = { diff --git a/instrumentation/opentelemetry-instrumentation-fastapi/tests/test_fastapi_instrumentation.py b/instrumentation/opentelemetry-instrumentation-fastapi/tests/test_fastapi_instrumentation.py --- a/instrumentation/opentelemetry-instrumentation-fastapi/tests/test_fastapi_instrumentation.py +++ b/instrumentation/opentelemetry-instrumentation-fastapi/tests/test_fastapi_instrumentation.py @@ -45,6 +45,7 @@ "http.server.active_requests", "http.server.duration", "http.server.response.size", + "http.server.request.size", ] _recommended_attrs = { "http.server.active_requests": _active_requests_count_attrs, @@ -53,6 +54,10 @@ *_duration_attrs, SpanAttributes.HTTP_TARGET, }, + "http.server.request.size": { + *_duration_attrs, + SpanAttributes.HTTP_TARGET, + }, } @@ -251,8 +256,13 @@ def test_basic_metric_success(self): def test_basic_post_request_metric_success(self): start = default_timer() - self._client.post("/foobar") + response = self._client.post( + "/foobar", + json={"foo": "bar"}, + ) duration = max(round((default_timer() - start) * 1000), 0) + response_size = int(response.headers.get("content-length")) + request_size = int(response.request.headers.get("content-length")) metrics_list = self.memory_metrics_reader.get_metrics_data() for metric in ( metrics_list.resource_metrics[0].scope_metrics[0].metrics @@ -260,7 +270,12 @@ def test_basic_post_request_metric_success(self): for point in list(metric.data.data_points): if isinstance(point, HistogramDataPoint): self.assertEqual(point.count, 1) - self.assertAlmostEqual(duration, point.sum, delta=30) + if metric.name == "http.server.duration": + self.assertAlmostEqual(duration, point.sum, delta=30) + elif metric.name == "http.server.response.size": + self.assertEqual(response_size, point.sum) + elif metric.name == "http.server.request.size": + 
self.assertEqual(request_size, point.sum) if isinstance(point, NumberDataPoint): self.assertEqual(point.value, 0) diff --git a/instrumentation/opentelemetry-instrumentation-starlette/tests/test_starlette_instrumentation.py b/instrumentation/opentelemetry-instrumentation-starlette/tests/test_starlette_instrumentation.py --- a/instrumentation/opentelemetry-instrumentation-starlette/tests/test_starlette_instrumentation.py +++ b/instrumentation/opentelemetry-instrumentation-starlette/tests/test_starlette_instrumentation.py @@ -50,11 +50,13 @@ "http.server.active_requests", "http.server.duration", "http.server.response.size", + "http.server.request.size", ] _recommended_attrs = { "http.server.active_requests": _active_requests_count_attrs, "http.server.duration": _duration_attrs, "http.server.response.size": _duration_attrs, + "http.server.request.size": _duration_attrs, } @@ -165,8 +167,13 @@ def test_basic_post_request_metric_success(self): "http.scheme": "http", "http.server_name": "testserver", } - self._client.post("/foobar") + response = self._client.post( + "/foobar", + json={"foo": "bar"}, + ) duration = max(round((default_timer() - start) * 1000), 0) + response_size = int(response.headers.get("content-length")) + request_size = int(response.request.headers.get("content-length")) metrics_list = self.memory_metrics_reader.get_metrics_data() for metric in ( metrics_list.resource_metrics[0].scope_metrics[0].metrics @@ -174,10 +181,15 @@ def test_basic_post_request_metric_success(self): for point in list(metric.data.data_points): if isinstance(point, HistogramDataPoint): self.assertEqual(point.count, 1) - self.assertAlmostEqual(duration, point.sum, delta=30) self.assertDictEqual( dict(point.attributes), expected_duration_attributes ) + if metric.name == "http.server.duration": + self.assertAlmostEqual(duration, point.sum, delta=30) + elif metric.name == "http.server.response.size": + self.assertEqual(response_size, point.sum) + elif metric.name == "http.server.request.size": + self.assertEqual(request_size, point.sum) if isinstance(point, NumberDataPoint): self.assertDictEqual( expected_requests_count_attributes,
Add http.server.request.size for ASGI metric implementation As described here: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/http-metrics.md#metric-httpserverrequestsize
@shalevr Thank you for opening this issue. Could you please assign this to me as well?
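The patch above records the request size by reading the `content-length` header out of the ASGI scope. A rough standalone sketch of that lookup (the helper name `get_request_size` is illustrative, not part of the instrumentation API):

```python
from typing import Optional


def get_request_size(scope: dict) -> Optional[int]:
    """Return the Content-Length request header as an int, or None if absent/invalid."""
    for name, value in scope.get("headers", []):
        if name.lower() == b"content-length":
            try:
                return int(value)
            except ValueError:
                return None
    return None


# Mirrors the updated test above, where the scope carries a 128-byte body.
print(get_request_size({"headers": [(b"content-length", b"128")]}))  # prints 128
```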
2023-06-19T22:03:24
open-telemetry/opentelemetry-python-contrib
1,870
open-telemetry__opentelemetry-python-contrib-1870
[ "1868" ]
256d8ce12d28d80586446f94e14fa59a91596230
diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py --- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py +++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import json sanitized_keys = ( "message", @@ -51,6 +52,9 @@ def _unflatten_dict(d): def sanitize_body(body) -> str: + if isinstance(body, str): + body = json.loads(body) + flatten_body = _flatten_dict(body) for key in flatten_body:
diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/test_elasticsearch.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/test_elasticsearch.py --- a/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/test_elasticsearch.py +++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/test_elasticsearch.py @@ -479,3 +479,7 @@ def test_body_sanitization(self, _): sanitize_body(sanitization_queries.filter_query), str(sanitization_queries.filter_query_sanitized), ) + self.assertEqual( + sanitize_body(json.dumps(sanitization_queries.interval_query)), + str(sanitization_queries.interval_query_sanitized), + )
Elasticsearch sanitization does not work for bulk queries **Describe your environment** Discovered in elasticsearch 5.5.3 and elasticsearch-dsl 5.4.0 and caused by moving to the default sanitization in #1758. The issue is illustrated here where `body` comes in as a string, not as a dictionary: <img width="1355" alt="image" src="https://github.com/open-telemetry/opentelemetry-python-contrib/assets/684275/0f49ce7d-9558-4148-a2fd-28dc4bd70983"> This is caused by the bulk flow specifically, as the body gets translated to a string here: <img width="1174" alt="image" src="https://github.com/open-telemetry/opentelemetry-python-contrib/assets/684275/2106e7aa-2bde-4579-b562-a8dc23007a1c"> which looks like this: <img width="1144" alt="image" src="https://github.com/open-telemetry/opentelemetry-python-contrib/assets/684275/ae72d60a-5047-4559-ad05-6fc1ddb2658d"> **Steps to reproduce** I don't have a super straightforward way to reproduce other than to use the bulk API from elasticsearch. **What is the expected behavior?** What did you expect to see? **What is the actual behavior?** The below stacktrace: ``` File "/Users/phillip/Library/Caches/pypoetry/virtualenvs/someenv/lib/python3.11/site-packages/elasticsearch/helpers/__init__.py", line 95, in _process_bulk_chunk resp = client.bulk('\n'.join(bulk_actions) + '\n', **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/phillip/Library/Caches/pypoetry/virtualenvs/someenv/lib/python3.11/site-packages/elasticsearch/client/utils.py", line 73, in _wrapped return func(*args, params=params, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/phillip/Library/Caches/pypoetry/virtualenvs/someenv/lib/python3.11/site-packages/elasticsearch/client/__init__.py", line 1173, in bulk return self.transport.perform_request('POST', _make_path(index, ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/phillip/Library/Caches/pypoetry/virtualenvs/someenv/lib/python3.11/site-packages/opentelemetry/instrumentation/elasticsearch/__init__.py", line 224, in wrapper attributes[SpanAttributes.DB_STATEMENT] = sanitize_body( ^^^^^^^^^^^^^^ File "/Users/phillip/Library/Caches/pypoetry/virtualenvs/someenv/lib/python3.11/site-packages/opentelemetry/instrumentation/elasticsearch/utils.py", line 54, in sanitize_body flatten_body = _flatten_dict(body) ^^^^^^^^^^^^^^^^^^^ File "/Users/phillip/Library/Caches/pypoetry/virtualenvs/someenv/lib/python3.11/site-packages/opentelemetry/instrumentation/elasticsearch/utils.py", line 30, in _flatten_dict for k, v in d.items(): ^^^^^^^ AttributeError: 'str' object has no attribute 'items' ``` **Additional context** Add any other context about the problem here.
Hi @phillipuniverse thanks I'll look it up
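A small sketch of what the fix above covers, mirroring the added test: `sanitize_body` now also accepts the serialized (string) body that the bulk flow produces instead of assuming a dict. The query used here is only illustrative:

```python
import json

from opentelemetry.instrumentation.elasticsearch.utils import sanitize_body

query = {"query": {"filter": {"term": {"user": "kimchy"}}}}

# Dict bodies worked before and still do.
print(sanitize_body(query))

# The bulk flow hands the instrumentation a JSON string; previously this raised
# AttributeError ("'str' object has no attribute 'items'"), now the string is
# parsed with json.loads() before being flattened and sanitized.
print(sanitize_body(json.dumps(query)))
```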
2023-06-21T12:53:34
open-telemetry/opentelemetry-python-contrib
1,889
open-telemetry__opentelemetry-python-contrib-1889
[ "1883" ]
7603a1fc69474398289c2944796249e70bba0c82
diff --git a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py --- a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py @@ -538,6 +538,7 @@ async def __call__(self, scope, receive, send): receive: An awaitable callable yielding dictionaries send: An awaitable callable taking a single dictionary as argument. """ + start = default_timer() if scope["type"] not in ("http", "websocket"): return await self.app(scope, receive, send) @@ -591,7 +592,6 @@ async def __call__(self, scope, receive, send): send, duration_attrs, ) - start = default_timer() await self.app(scope, otel_receive, otel_send) finally:
diff --git a/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py b/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py --- a/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py +++ b/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py @@ -14,6 +14,7 @@ # pylint: disable=too-many-lines +import asyncio import sys import unittest from timeit import default_timer @@ -796,5 +797,38 @@ async def wrapped_app(scope, receive, send): ) +class TestAsgiApplicationRaisingError(AsgiTestBase): + def tearDown(self): + pass + + @mock.patch( + "opentelemetry.instrumentation.asgi.collect_custom_request_headers_attributes", + side_effect=ValueError("whatever"), + ) + def test_asgi_issue_1883( + self, mock_collect_custom_request_headers_attributes + ): + """ + Test that exception UnboundLocalError local variable 'start' referenced before assignment is not raised + See https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1883 + """ + app = otel_asgi.OpenTelemetryMiddleware(simple_asgi) + self.seed_app(app) + self.send_default_request() + try: + asyncio.get_event_loop().run_until_complete( + self.communicator.stop() + ) + except ValueError as exc_info: + self.assertEqual(exc_info.args[0], "whatever") + except Exception as exc_info: # pylint: disable=W0703 + self.fail( + "expecting ValueError('whatever'), received instead: " + + str(exc_info) + ) + else: + self.fail("expecting ValueError('whatever')") + + if __name__ == "__main__": unittest.main()
[ASGI] UnboundLocalError local variable 'start' referenced before assignment This error arises when an exception occurs in [this try block](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/dadcd01524449ddee07a8d8405890a60caeb8c8e/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py#L567) before the `start = default_timer()` line. In such a case the `start` variable is undefined. Unfortunately, [the `finally` block tries to compute the duration using this variable](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/dadcd01524449ddee07a8d8405890a60caeb8c8e/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py#L602). Bug introduced with https://github.com/open-telemetry/opentelemetry-python-contrib/commit/cbf005be6fb17f35b4cbaa4e76ac30bb64b3a258 Related issue: #1478
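A minimal illustration of the failure mode (not the middleware code itself): if the wrapped call raises before the timer is started, the `finally` block references a name that was never bound. The fix in the patch above simply takes the timestamp before entering the `try` block.

```python
from timeit import default_timer


def collect_attributes():
    # Stand-in for any call inside the try block that can raise early.
    raise ValueError("whatever")


def handle_request():
    try:
        attrs = collect_attributes()  # raises, so the next line never runs
        start = default_timer()
    finally:
        # UnboundLocalError: local variable 'start' referenced before assignment
        duration = default_timer() - start


# Calling handle_request() reproduces the masked secondary exception described above.
```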
2023-07-14T16:06:53
open-telemetry/opentelemetry-python-contrib
1,920
open-telemetry__opentelemetry-python-contrib-1920
[ "1201" ]
773e431bf5706a804246cf536bee1b6bee7284f6
diff --git a/instrumentation/opentelemetry-instrumentation-httpx/src/opentelemetry/instrumentation/httpx/__init__.py b/instrumentation/opentelemetry-instrumentation-httpx/src/opentelemetry/instrumentation/httpx/__init__.py --- a/instrumentation/opentelemetry-instrumentation-httpx/src/opentelemetry/instrumentation/httpx/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-httpx/src/opentelemetry/instrumentation/httpx/__init__.py @@ -131,7 +131,21 @@ def response_hook(span, request, response): # status_code, headers, stream, extensions = response pass - HTTPXClientInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook) + async def async_request_hook(span, request): + # method, url, headers, stream, extensions = request + pass + + async def async_response_hook(span, request, response): + # method, url, headers, stream, extensions = request + # status_code, headers, stream, extensions = response + pass + + HTTPXClientInstrumentor().instrument( + request_hook=request_hook, + response_hook=response_hook, + async_request_hook=async_request_hook, + async_response_hook=async_response_hook + ) Or if you are using the transport classes directly: @@ -139,7 +153,7 @@ def response_hook(span, request, response): .. code-block:: python - from opentelemetry.instrumentation.httpx import SyncOpenTelemetryTransport + from opentelemetry.instrumentation.httpx import SyncOpenTelemetryTransport, AsyncOpenTelemetryTransport def request_hook(span, request): # method, url, headers, stream, extensions = request @@ -150,6 +164,15 @@ def response_hook(span, request, response): # status_code, headers, stream, extensions = response pass + async def async_request_hook(span, request): + # method, url, headers, stream, extensions = request + pass + + async def async_response_hook(span, request, response): + # method, url, headers, stream, extensions = request + # status_code, headers, stream, extensions = response + pass + transport = httpx.HTTPTransport() telemetry_transport = SyncOpenTelemetryTransport( transport, @@ -157,6 +180,13 @@ def response_hook(span, request, response): response_hook=response_hook ) + async_transport = httpx.AsyncHTTPTransport() + async_telemetry_transport = AsyncOpenTelemetryTransport( + async_transport, + request_hook=async_request_hook, + response_hook=async_response_hook + ) + API --- """ @@ -377,8 +407,8 @@ def __init__( self, transport: httpx.AsyncBaseTransport, tracer_provider: typing.Optional[TracerProvider] = None, - request_hook: typing.Optional[RequestHook] = None, - response_hook: typing.Optional[ResponseHook] = None, + request_hook: typing.Optional[AsyncRequestHook] = None, + response_hook: typing.Optional[AsyncResponseHook] = None, ): self._transport = transport self._tracer = get_tracer( @@ -511,21 +541,27 @@ def _instrument(self, **kwargs): Args: **kwargs: Optional arguments ``tracer_provider``: a TracerProvider, defaults to global - ``request_hook``: A hook that receives the span and request that is called - right after the span is created - ``response_hook``: A hook that receives the span, request, and response - that is called right before the span ends + ``request_hook``: A ``httpx.Client`` hook that receives the span and request + that is called right after the span is created + ``response_hook``: A ``httpx.Client`` hook that receives the span, request, + and response that is called right before the span ends + ``async_request_hook``: Async ``request_hook`` for ``httpx.AsyncClient`` + ``async_response_hook``: Async``response_hook`` for 
``httpx.AsyncClient`` """ self._original_client = httpx.Client self._original_async_client = httpx.AsyncClient request_hook = kwargs.get("request_hook") response_hook = kwargs.get("response_hook") + async_request_hook = kwargs.get("async_request_hook", request_hook) + async_response_hook = kwargs.get("async_response_hook", response_hook) if callable(request_hook): _InstrumentedClient._request_hook = request_hook - _InstrumentedAsyncClient._request_hook = request_hook + if callable(async_request_hook): + _InstrumentedAsyncClient._request_hook = async_request_hook if callable(response_hook): _InstrumentedClient._response_hook = response_hook - _InstrumentedAsyncClient._response_hook = response_hook + if callable(async_response_hook): + _InstrumentedAsyncClient._response_hook = async_response_hook tracer_provider = kwargs.get("tracer_provider") _InstrumentedClient._tracer_provider = tracer_provider _InstrumentedAsyncClient._tracer_provider = tracer_provider @@ -546,8 +582,12 @@ def _uninstrument(self, **kwargs): def instrument_client( client: typing.Union[httpx.Client, httpx.AsyncClient], tracer_provider: TracerProvider = None, - request_hook: typing.Optional[RequestHook] = None, - response_hook: typing.Optional[ResponseHook] = None, + request_hook: typing.Union[ + typing.Optional[RequestHook], typing.Optional[AsyncRequestHook] + ] = None, + response_hook: typing.Union[ + typing.Optional[ResponseHook], typing.Optional[AsyncResponseHook] + ] = None, ) -> None: """Instrument httpx Client or AsyncClient
diff --git a/instrumentation/opentelemetry-instrumentation-httpx/tests/test_httpx_integration.py b/instrumentation/opentelemetry-instrumentation-httpx/tests/test_httpx_integration.py --- a/instrumentation/opentelemetry-instrumentation-httpx/tests/test_httpx_integration.py +++ b/instrumentation/opentelemetry-instrumentation-httpx/tests/test_httpx_integration.py @@ -421,6 +421,28 @@ def test_response_hook(self): ) HTTPXClientInstrumentor().uninstrument() + def test_response_hook_sync_async_kwargs(self): + HTTPXClientInstrumentor().instrument( + tracer_provider=self.tracer_provider, + response_hook=_response_hook, + async_response_hook=_async_response_hook, + ) + client = self.create_client() + result = self.perform_request(self.URL, client=client) + + self.assertEqual(result.text, "Hello!") + span = self.assert_span() + self.assertEqual( + span.attributes, + { + SpanAttributes.HTTP_METHOD: "GET", + SpanAttributes.HTTP_URL: self.URL, + SpanAttributes.HTTP_STATUS_CODE: 200, + HTTP_RESPONSE_BODY: "Hello!", + }, + ) + HTTPXClientInstrumentor().uninstrument() + def test_request_hook(self): HTTPXClientInstrumentor().instrument( tracer_provider=self.tracer_provider, @@ -434,6 +456,20 @@ def test_request_hook(self): self.assertEqual(span.name, "GET" + self.URL) HTTPXClientInstrumentor().uninstrument() + def test_request_hook_sync_async_kwargs(self): + HTTPXClientInstrumentor().instrument( + tracer_provider=self.tracer_provider, + request_hook=_request_hook, + async_request_hook=_async_request_hook, + ) + client = self.create_client() + result = self.perform_request(self.URL, client=client) + + self.assertEqual(result.text, "Hello!") + span = self.assert_span() + self.assertEqual(span.name, "GET" + self.URL) + HTTPXClientInstrumentor().uninstrument() + def test_request_hook_no_span_update(self): HTTPXClientInstrumentor().instrument( tracer_provider=self.tracer_provider,
HTTPXClientInstrumentor mixes async and non async hooks **Describe your environment** Python 3.10.3 opentelemetry-api==1.12.0rc2 opentelemetry-sdk==1.12.0rc2 opentelemetry-instrumentation-httpx==0.32b0 **Steps to reproduce** Run this example: ``` from opentelemetry.instrumentation.httpx import HTTPXClientInstrumentor import httpx, asyncio def httpx_request_hook(span, request): print("test") HTTPXClientInstrumentor().instrument(request_hook=httpx_request_hook) # This succeeds with httpx.Client() as client: client.get("http://example.com") async def test_func(): async with httpx.AsyncClient() as client: await client.get("http://example.com") # this fails with `TypeError: object NoneType can't be used in 'await' expression` asyncio.run(test_func()) ``` **What is the expected behavior?** After instrumentation it should be possible to successfully use httpx in both sync and async contexts. **What is the actual behavior?** If a non-async hook is provided then an exception is raised when the AsyncClient is used. If an async hook is provided then it never executes when the non-async Client is used. **Additional context** It seems to me this should either allow both async and sync hooks to be provided separately. Or it should require that the hook be non-blocking and only support non-async hooks.
I'm willing to write up a PR with either of the suggestions in "Additional context" or some other option if someone closer to the project wants to weigh in on what the preferred approach would be. +1 to former suggestion. @alec-deason send a PR for the fix :)
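For reference, with the keyword arguments added in the patch above, the reproduction from the issue can register a plain hook for `httpx.Client` and a coroutine hook for `httpx.AsyncClient` separately (sketch based on the updated docstring and tests):

```python
from opentelemetry.instrumentation.httpx import HTTPXClientInstrumentor


def request_hook(span, request):
    # Called for the synchronous httpx.Client.
    pass


async def async_request_hook(span, request):
    # Awaited for the asynchronous httpx.AsyncClient.
    pass


HTTPXClientInstrumentor().instrument(
    request_hook=request_hook,
    async_request_hook=async_request_hook,
)
```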
2023-08-25T00:52:43
open-telemetry/opentelemetry-python-contrib
1,952
open-telemetry__opentelemetry-python-contrib-1952
[ "1684" ]
3b9d6264e2d305f6c049769174ee24ed595bf0bf
diff --git a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py --- a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py @@ -576,7 +576,7 @@ async def __call__(self, scope, receive, send): if scope["type"] == "http": self.active_requests_counter.add(1, active_requests_count_attrs) try: - with trace.use_span(span, end_on_exit=True) as current_span: + with trace.use_span(span, end_on_exit=False) as current_span: if current_span.is_recording(): for key, value in attributes.items(): current_span.set_attribute(key, value) @@ -630,6 +630,8 @@ async def __call__(self, scope, receive, send): ) if token: context.detach(token) + if span.is_recording(): + span.end() # pylint: enable=too-many-branches @@ -653,8 +655,11 @@ async def otel_receive(): def _get_otel_send( self, server_span, server_span_name, scope, send, duration_attrs ): + expecting_trailers = False + @wraps(send) async def otel_send(message): + nonlocal expecting_trailers with self.tracer.start_as_current_span( " ".join((server_span_name, scope["type"], "send")) ) as send_span: @@ -668,6 +673,8 @@ async def otel_send(message): ] = status_code set_status_code(server_span, status_code) set_status_code(send_span, status_code) + + expecting_trailers = message.get("trailers", False) elif message["type"] == "websocket.send": set_status_code(server_span, 200) set_status_code(send_span, 200) @@ -703,5 +710,15 @@ async def otel_send(message): pass await send(message) + if ( + not expecting_trailers + and message["type"] == "http.response.body" + and not message.get("more_body", False) + ) or ( + expecting_trailers + and message["type"] == "http.response.trailers" + and not message.get("more_trailers", False) + ): + server_span.end() return otel_send
diff --git a/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py b/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py --- a/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py +++ b/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py @@ -16,6 +16,7 @@ import asyncio import sys +import time import unittest from timeit import default_timer from unittest import mock @@ -57,6 +58,8 @@ "http.server.request.size": _duration_attrs, } +_SIMULATED_BACKGROUND_TASK_EXECUTION_TIME_S = 0.01 + async def http_app(scope, receive, send): message = await receive() @@ -99,6 +102,108 @@ async def simple_asgi(scope, receive, send): await websocket_app(scope, receive, send) +async def long_response_asgi(scope, receive, send): + assert isinstance(scope, dict) + assert scope["type"] == "http" + message = await receive() + scope["headers"] = [(b"content-length", b"128")] + assert scope["type"] == "http" + if message.get("type") == "http.request": + await send( + { + "type": "http.response.start", + "status": 200, + "headers": [ + [b"Content-Type", b"text/plain"], + [b"content-length", b"1024"], + ], + } + ) + await send( + {"type": "http.response.body", "body": b"*", "more_body": True} + ) + await send( + {"type": "http.response.body", "body": b"*", "more_body": True} + ) + await send( + {"type": "http.response.body", "body": b"*", "more_body": True} + ) + await send( + {"type": "http.response.body", "body": b"*", "more_body": False} + ) + + +async def background_execution_asgi(scope, receive, send): + assert isinstance(scope, dict) + assert scope["type"] == "http" + message = await receive() + scope["headers"] = [(b"content-length", b"128")] + assert scope["type"] == "http" + if message.get("type") == "http.request": + await send( + { + "type": "http.response.start", + "status": 200, + "headers": [ + [b"Content-Type", b"text/plain"], + [b"content-length", b"1024"], + ], + } + ) + await send( + { + "type": "http.response.body", + "body": b"*", + } + ) + time.sleep(_SIMULATED_BACKGROUND_TASK_EXECUTION_TIME_S) + + +async def background_execution_trailers_asgi(scope, receive, send): + assert isinstance(scope, dict) + assert scope["type"] == "http" + message = await receive() + scope["headers"] = [(b"content-length", b"128")] + assert scope["type"] == "http" + if message.get("type") == "http.request": + await send( + { + "type": "http.response.start", + "status": 200, + "headers": [ + [b"Content-Type", b"text/plain"], + [b"content-length", b"1024"], + ], + "trailers": True, + } + ) + await send( + {"type": "http.response.body", "body": b"*", "more_body": True} + ) + await send( + {"type": "http.response.body", "body": b"*", "more_body": False} + ) + await send( + { + "type": "http.response.trailers", + "headers": [ + [b"trailer", b"test-trailer"], + ], + "more_trailers": True, + } + ) + await send( + { + "type": "http.response.trailers", + "headers": [ + [b"trailer", b"second-test-trailer"], + ], + "more_trailers": False, + } + ) + time.sleep(_SIMULATED_BACKGROUND_TASK_EXECUTION_TIME_S) + + async def error_asgi(scope, receive, send): assert isinstance(scope, dict) assert scope["type"] == "http" @@ -127,14 +232,19 @@ def validate_outputs(self, outputs, error=None, modifiers=None): # Ensure modifiers is a list modifiers = modifiers or [] # Check for expected outputs - self.assertEqual(len(outputs), 2) response_start = outputs[0] - response_body = outputs[1] + response_final_body = [ + output + for output 
in outputs + if output["type"] == "http.response.body" + ][-1] + self.assertEqual(response_start["type"], "http.response.start") - self.assertEqual(response_body["type"], "http.response.body") + self.assertEqual(response_final_body["type"], "http.response.body") + self.assertEqual(response_final_body.get("more_body", False), False) # Check http response body - self.assertEqual(response_body["body"], b"*") + self.assertEqual(response_final_body["body"], b"*") # Check http response start self.assertEqual(response_start["status"], 200) @@ -153,7 +263,6 @@ def validate_outputs(self, outputs, error=None, modifiers=None): # Check spans span_list = self.memory_exporter.get_finished_spans() - self.assertEqual(len(span_list), 4) expected = [ { "name": "GET / http receive", @@ -194,6 +303,7 @@ def validate_outputs(self, outputs, error=None, modifiers=None): for modifier in modifiers: expected = modifier(expected) # Check that output matches + self.assertEqual(len(span_list), len(expected)) for span, expected in zip(span_list, expected): self.assertEqual(span.name, expected["name"]) self.assertEqual(span.kind, expected["kind"]) @@ -232,6 +342,80 @@ def test_asgi_exc_info(self): outputs = self.get_all_output() self.validate_outputs(outputs, error=ValueError) + def test_long_response(self): + """Test that the server span is ended on the final response body message. + + If the server span is ended early then this test will fail due + to discrepancies in the expected list of spans and the emitted list of spans. + """ + app = otel_asgi.OpenTelemetryMiddleware(long_response_asgi) + self.seed_app(app) + self.send_default_request() + outputs = self.get_all_output() + + def add_more_body_spans(expected: list): + more_body_span = { + "name": "GET / http send", + "kind": trace_api.SpanKind.INTERNAL, + "attributes": {"type": "http.response.body"}, + } + extra_spans = [more_body_span] * 3 + expected[2:2] = extra_spans + return expected + + self.validate_outputs(outputs, modifiers=[add_more_body_spans]) + + def test_background_execution(self): + """Test that the server span is ended BEFORE the background task is finished.""" + app = otel_asgi.OpenTelemetryMiddleware(background_execution_asgi) + self.seed_app(app) + self.send_default_request() + outputs = self.get_all_output() + self.validate_outputs(outputs) + span_list = self.memory_exporter.get_finished_spans() + server_span = span_list[-1] + assert server_span.kind == SpanKind.SERVER + span_duration_nanos = server_span.end_time - server_span.start_time + self.assertLessEqual( + span_duration_nanos, + _SIMULATED_BACKGROUND_TASK_EXECUTION_TIME_S * 10**9, + ) + + def test_trailers(self): + """Test that trailers are emitted as expected and that the server span is ended + BEFORE the background task is finished.""" + app = otel_asgi.OpenTelemetryMiddleware( + background_execution_trailers_asgi + ) + self.seed_app(app) + self.send_default_request() + outputs = self.get_all_output() + + def add_body_and_trailer_span(expected: list): + body_span = { + "name": "GET / http send", + "kind": trace_api.SpanKind.INTERNAL, + "attributes": {"type": "http.response.body"}, + } + trailer_span = { + "name": "GET / http send", + "kind": trace_api.SpanKind.INTERNAL, + "attributes": {"type": "http.response.trailers"}, + } + expected[2:2] = [body_span] + expected[4:4] = [trailer_span] * 2 + return expected + + self.validate_outputs(outputs, modifiers=[add_body_and_trailer_span]) + span_list = self.memory_exporter.get_finished_spans() + server_span = span_list[-1] + assert 
server_span.kind == SpanKind.SERVER + span_duration_nanos = server_span.end_time - server_span.start_time + self.assertLessEqual( + span_duration_nanos, + _SIMULATED_BACKGROUND_TASK_EXECUTION_TIME_S * 10**9, + ) + def test_override_span_name(self): """Test that default span_names can be overwritten by our callback function.""" span_name = "Dymaxion"
Exclude background tasks from request duration in ASGI middleware **Is your feature request related to a problem?** The duration of a request span in FastAPI includes the duration of evaluating background tasks. There is no span whose duration represents the time between receiving a request and sending the response. An example: ```python import asyncio from typing import TypedDict from fastapi import BackgroundTasks, Depends, FastAPI from opentelemetry import trace from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter app = FastAPI() exporter = ConsoleSpanExporter() provider = TracerProvider() processor = BatchSpanProcessor(exporter) provider.add_span_processor(processor) trace.set_tracer_provider(provider) FastAPIInstrumentor.instrument_app(app) def get_context( background_tasks: BackgroundTasks, ): return Context(background_tasks=background_tasks) async def some_background_task(): await asyncio.sleep(10) class Context(TypedDict): background_tasks: BackgroundTasks @app.get("/") async def main(context=Depends(get_context)): print(context.keys()) context["background_tasks"].add_task(some_background_task) return None ``` **Describe the solution you'd like** Either end the server span after we have sent the response (this is how dd-trace solves it in https://github.com/DataDog/dd-trace-py/pull/3799) *OR* create a new span that stops after the response has been sent. **Additional context** This is important for calculating latency metrics using the FastAPI instrumentation
Thanks for reporting! We'll try to reproduce the issue. I agree that there should be two new spans - producer (because FastAPI is producing the task) and consumer (because FastAPI is consuming the task, [similarly to how celery instrumentation behaves](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py#L148-L150)). Is that what you had in mind? Thanks for the reply! Let me explain in a bit more detail. Let's say we are calling the endpoint `foo`. We currently get spans that look like this: ``` /foo <-----------------> ... other stuff http send <-> background task execution <-> ``` i.e. the `/foo` span does not end until the background task has finished executing, even though the client has the response. There is no span that currently represents the time from receiving the request to the time sending it. The celery implementation might do this if the consumer span is not a parent of the request span? We'd then have a whole different trace for the background task execution?
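The patch above takes the first route: instead of ending the server span when the app call returns (which would include background task execution), the `send` wrapper ends it once the final `http.response.body` (or trailers) message has been sent. A stripped-down sketch of that idea, omitting the trailers handling:

```python
from functools import wraps


def wrap_send(send, server_span):
    @wraps(send)
    async def otel_send(message):
        await send(message)
        # The response is complete once the last body chunk has gone out, so any
        # background work the app does afterwards is not counted in the span.
        if message["type"] == "http.response.body" and not message.get(
            "more_body", False
        ):
            server_span.end()

    return otel_send
```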
2023-09-14T11:42:48
open-telemetry/opentelemetry-python-contrib
1,959
open-telemetry__opentelemetry-python-contrib-1959
[ "1958" ]
f34771618ca92299a26a9a72a2d31ebc6802a174
diff --git a/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py b/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py --- a/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py @@ -307,17 +307,17 @@ def _create_client_histograms(meter) -> Dict[str, Histogram]: MetricInstruments.HTTP_CLIENT_DURATION: meter.create_histogram( name=MetricInstruments.HTTP_CLIENT_DURATION, unit="ms", - description="measures the duration outbound HTTP requests", + description="Measures the duration of outbound HTTP requests.", ), MetricInstruments.HTTP_CLIENT_REQUEST_SIZE: meter.create_histogram( name=MetricInstruments.HTTP_CLIENT_REQUEST_SIZE, unit="By", - description="measures the size of HTTP request messages (compressed)", + description="Measures the size of HTTP request messages.", ), MetricInstruments.HTTP_CLIENT_RESPONSE_SIZE: meter.create_histogram( name=MetricInstruments.HTTP_CLIENT_RESPONSE_SIZE, unit="By", - description="measures the size of HTTP response messages (compressed)", + description="Measures the size of HTTP response messages.", ), } diff --git a/instrumentation/opentelemetry-instrumentation-urllib3/src/opentelemetry/instrumentation/urllib3/__init__.py b/instrumentation/opentelemetry-instrumentation-urllib3/src/opentelemetry/instrumentation/urllib3/__init__.py --- a/instrumentation/opentelemetry-instrumentation-urllib3/src/opentelemetry/instrumentation/urllib3/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-urllib3/src/opentelemetry/instrumentation/urllib3/__init__.py @@ -183,17 +183,17 @@ def _instrument(self, **kwargs): duration_histogram = meter.create_histogram( name=MetricInstruments.HTTP_CLIENT_DURATION, unit="ms", - description="measures the duration outbound HTTP requests", + description="Measures the duration of outbound HTTP requests.", ) request_size_histogram = meter.create_histogram( name=MetricInstruments.HTTP_CLIENT_REQUEST_SIZE, unit="By", - description="measures the size of HTTP request messages (compressed)", + description="Measures the size of HTTP request messages.", ) response_size_histogram = meter.create_histogram( name=MetricInstruments.HTTP_CLIENT_RESPONSE_SIZE, unit="By", - description="measures the size of HTTP response messages (compressed)", + description="Measures the size of HTTP response messages.", ) _instrument(
`http.client.duration` metric description is incorrect **Describe your environment** This is for the OpenTelemetry demo. The loadgenerator service in the demo uses Locust (2.15.1) to generate load for the demo application. It uses python 3.11-slim-buster as the base image for the Python runtime. In code we leverage the `urllib3` instrumentation package with a call to `URLLib3Instrumentor().instrument()` at the top of the locustfile.py. The `http.client.duration` metric has a description that does not conform with the OpenTelemetry semantic convention. This creates an issue when used with OpenTelemetry-instrumented services that emit the same metric from a different language SDK (ie: Javascript). The OpenTelemetry Collector's Prometheus exporter will specifically drop a metric if received that has the same name but a different description from a metric previously received. **Steps to reproduce** Run the demo and look at the OpenTelemetry Collector logs, which show an error on the Prometheus export. The error will be for the `http.client.duration` metric, indicating the Help text (description) does not match what was received prior for the same metric name. **What is the expected behavior?** The description for the `http.client.duration` metric should be: `Measures the duration of outbound HTTP requests.` This is defined in the OpenTelemetry [semantic convention](https://github.com/open-telemetry/semantic-conventions/blob/main/model/metrics/http.yaml#L157) for this metric. **What is the actual behavior?** The description for the `http.client.duration` metric is: `measures the duration outbound HTTP requests` **Additional context** https://github.com/open-telemetry/opentelemetry-python-contrib/blob/e318c947a23152c8ff1700f0aad44261be0588cd/instrumentation/opentelemetry-instrumentation-urllib3/src/opentelemetry/instrumentation/urllib3/__init__.py#L176
2023-09-19T20:32:58
open-telemetry/opentelemetry-python-contrib
1,980
open-telemetry__opentelemetry-python-contrib-1980
[ "1979" ]
e318c947a23152c8ff1700f0aad44261be0588cd
diff --git a/instrumentation/opentelemetry-instrumentation-sklearn/src/opentelemetry/instrumentation/sklearn/__init__.py b/instrumentation/opentelemetry-instrumentation-sklearn/src/opentelemetry/instrumentation/sklearn/__init__.py --- a/instrumentation/opentelemetry-instrumentation-sklearn/src/opentelemetry/instrumentation/sklearn/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-sklearn/src/opentelemetry/instrumentation/sklearn/__init__.py @@ -35,7 +35,7 @@ ).instrument() -Model intrumentation example: +Model instrumentation example: .. code-block:: python @@ -291,7 +291,7 @@ class descendent) is being instrumented with opentelemetry. Within a SklearnInstrumentor(packages=packages).instrument() - Model intrumentation example: + Model instrumentation example: .. code-block:: python diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py @@ -84,6 +84,10 @@ "library": "flask >= 1.0, < 3.0", "instrumentation": "opentelemetry-instrumentation-flask==0.42b0.dev", }, + "werkzeug": { + "library": "werkzeug < 3.0.0", + "instrumentation": "opentelemetry-instrumentation-flask==0.42b0.dev", + }, "grpcio": { "library": "grpcio ~= 1.27", "instrumentation": "opentelemetry-instrumentation-grpc==0.42b0.dev", diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/propagators.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/propagators.py --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/propagators.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/propagators.py @@ -59,12 +59,13 @@ def set(self, carrier, key, value): # pylint: disable=no-self-use class FuncSetter(Setter): - """FuncSetter coverts a function into a valid Setter. Any function that can - set values in a carrier can be converted into a Setter by using FuncSetter. - This is useful when injecting trace context into non-dict objects such - HTTP Response objects for different framework. + """FuncSetter converts a function into a valid Setter. Any function that + can set values in a carrier can be converted into a Setter by using + FuncSetter. This is useful when injecting trace context into non-dict + objects such HTTP Response objects for different framework. - For example, it can be used to create a setter for Falcon response object as: + For example, it can be used to create a setter for Falcon response object + as: setter = FuncSetter(falcon.api.Response.append_header)
Flask tests failing See [here](https://github.com/open-telemetry/opentelemetry-python-contrib/actions/runs/6207312216/job/17291927392?pr=1920#step:6:4210). This happens because we are using `flask<3.0.0` and `werkzeug>=3.0.0`.
2023-10-03T15:13:26
open-telemetry/opentelemetry-python-contrib
2,013
open-telemetry__opentelemetry-python-contrib-2013
[ "1975" ]
42faa1a34a711c35576833e56d0e28ea0cae4993
diff --git a/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py b/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py --- a/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py @@ -251,6 +251,16 @@ def response_hook(span: Span, status: str, response_headers: List): from opentelemetry import context, trace from opentelemetry.instrumentation.flask.package import _instruments from opentelemetry.instrumentation.flask.version import __version__ + +try: + flask_version = flask.__version__ +except AttributeError: + try: + from importlib import metadata + except ImportError: + import importlib_metadata as metadata + flask_version = metadata.version("flask") + from opentelemetry.instrumentation.instrumentor import BaseInstrumentor from opentelemetry.instrumentation.propagators import ( get_global_response_propagator, @@ -271,7 +281,7 @@ def response_hook(span: Span, status: str, response_headers: List): _excluded_urls_from_env = get_excluded_urls("FLASK") -if package_version.parse(flask.__version__) >= package_version.parse("2.2.0"): +if package_version.parse(flask_version) >= package_version.parse("2.2.0"): def _request_ctx_ref() -> weakref.ReferenceType: return weakref.ref(flask.globals.request_ctx._get_current_object()) @@ -420,7 +430,7 @@ def _before_request(): # https://flask.palletsprojects.com/en/1.1.x/api/#flask.has_request_context if flask and flask.request: if commenter_options.get("framework", True): - flask_info["framework"] = f"flask:{flask.__version__}" + flask_info["framework"] = f"flask:{flask_version}" if ( commenter_options.get("controller", True) and flask.request.endpoint diff --git a/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/package.py b/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/package.py --- a/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/package.py +++ b/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/package.py @@ -13,6 +13,6 @@ # limitations under the License. -_instruments = ("flask >= 1.0, < 3.0",) +_instruments = ("flask >= 1.0",) _supports_metrics = True diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py @@ -85,11 +85,11 @@ "instrumentation": "opentelemetry-instrumentation-fastapi==0.44b0.dev", }, { - "library": "flask >= 1.0, < 3.0", + "library": "werkzeug < 3.0.0", "instrumentation": "opentelemetry-instrumentation-flask==0.44b0.dev", }, { - "library": "werkzeug < 3.0.0", + "library": "flask >= 1.0", "instrumentation": "opentelemetry-instrumentation-flask==0.44b0.dev", }, {
Add support for Flask 3.0.0 **Describe your environment** * Python 3.11 * Otel Python API/SDK 1.20.0 * opentelemetry-instrument 0.41b0 * Flask 3.0.0, released 2 days ago: https://github.com/pallets/flask/releases/tag/3.0.0 * Running on local in docker container `FROM python:3.11` Attempting to do `opentelemetry-bootstrap` after installing Flask 3 results in a quiet message: `instrumentation for package flask<3.0,>=1.0 is available but version flask==3.0.0 is installed. Skipping.` Attempting to manually install `opentelemetry-instrumentation-flask` gives a different message: `DependencyConflict: requested: "flask >= 1.0, < 3.0" but found: "flask 3.0.0"` The Flask app successfully loads and Otel API can be used to manually create spans (`tracer.start_as_current_span`). But `SERVER` spans are not created for requests accepted by the Flask app that should be intercepted by instrumentation. Here is an example 1-span trace exported to console with only the `INTERNAL` type span: ``` 172.26.0.1 - - [02/Oct/2023 22:19:02] "GET /test/ HTTP/1.1" 200 - { "name": "my_test_trace", "context": { "trace_id": "0x3c881a21a3f4f695ebf19c55be7d36c5", "span_id": "0xd58cce9986a06144", "trace_state": "[]" }, "kind": "SpanKind.INTERNAL", "parent_id": null, "start_time": "2023-10-02T22:19:02.966587Z", "end_time": "2023-10-02T22:19:02.966610Z", "status": { "status_code": "UNSET" }, "attributes": {}, "events": [], "links": [], "resource": { "attributes": { "telemetry.sdk.language": "python", "telemetry.sdk.name": "opentelemetry", "telemetry.sdk.version": "1.20.0", "service.name": "unknown_service" }, "schema_url": "" } } ``` **Steps to reproduce** I've posted a repo here with two ways to reproduce: https://github.com/tammy-baylis-swi/otel-flask-three **What is the expected behavior?** There should be no errors at bootstrap or manual install/instrument of Flask, and traces should include `SERVER` type spans when the Flask app receives a request. I think instrumentation should support Flask 3 at some point. **What is the actual behavior?** There are error messages (see above) and traces do not include `SERVER` type spans when the Flask app receives a request. **Additional context** I think it's from this version requirement in [opentelemetry-instrumentation-flask](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/e318c947a23152c8ff1700f0aad44261be0588cd/instrumentation/opentelemetry-instrumentation-flask/pyproject.toml#L37-L39). Upgrading to [Flask 3](https://github.com/pallets/flask/releases/tag/3.0.0) from the previous 2.3.3 might be breaking so some testing is needed. ``` [project.optional-dependencies] instruments = [ "flask >= 1.0, < 3.0", ] ```
Yes, we currently don't support Flask >= 3.0.0 Thanks @ocelotl for Label and Title fix!
2023-10-21T04:44:47
open-telemetry/opentelemetry-python-contrib
2019
open-telemetry__opentelemetry-python-contrib-2019
[ "1756" ]
46a8c59e03b70dc258c0bb1d66d597b276e136a2
diff --git a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py --- a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py +++ b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py @@ -17,9 +17,9 @@ import wrapt from opentelemetry.semconv.trace import SpanAttributes -from opentelemetry.trace.status import Status, StatusCode from ._server import OpenTelemetryServerInterceptor, _wrap_rpc_behavior +from ._utilities import _server_status # pylint:disable=abstract-method @@ -36,12 +36,8 @@ async def abort(self, code, details="", trailing_metadata=tuple()): self._self_active_span.set_attribute( SpanAttributes.RPC_GRPC_STATUS_CODE, code.value[0] ) - self._self_active_span.set_status( - Status( - status_code=StatusCode.ERROR, - description=f"{code}:{details}", - ) - ) + status = _server_status(code, details) + self._self_active_span.set_status(status) return await self.__wrapped__.abort(code, details, trailing_metadata) def set_code(self, code): @@ -51,23 +47,15 @@ def set_code(self, code): SpanAttributes.RPC_GRPC_STATUS_CODE, code.value[0] ) if code != grpc.StatusCode.OK: - self._self_active_span.set_status( - Status( - status_code=StatusCode.ERROR, - description=f"{code}:{details}", - ) - ) + status = _server_status(code, details) + self._self_active_span.set_status(status) return self.__wrapped__.set_code(code) def set_details(self, details): self._self_details = details if self._self_code != grpc.StatusCode.OK: - self._self_active_span.set_status( - Status( - status_code=StatusCode.ERROR, - description=f"{self._self_code}:{details}", - ) - ) + status = _server_status(self._self_code, details) + self._self_active_span.set_status(status) return self.__wrapped__.set_details(details) diff --git a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py --- a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py +++ b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py @@ -31,7 +31,8 @@ from opentelemetry.context import attach, detach from opentelemetry.propagate import extract from opentelemetry.semconv.trace import SpanAttributes -from opentelemetry.trace.status import Status, StatusCode + +from ._utilities import _server_status logger = logging.getLogger(__name__) @@ -124,12 +125,8 @@ def abort(self, code, details): self._active_span.set_attribute( SpanAttributes.RPC_GRPC_STATUS_CODE, code.value[0] ) - self._active_span.set_status( - Status( - status_code=StatusCode.ERROR, - description=f"{code}:{details}", - ) - ) + status = _server_status(code, details) + self._active_span.set_status(status) return self._servicer_context.abort(code, details) def abort_with_status(self, status): @@ -158,23 +155,15 @@ def set_code(self, code): SpanAttributes.RPC_GRPC_STATUS_CODE, code.value[0] ) if code != grpc.StatusCode.OK: - self._active_span.set_status( - Status( - status_code=StatusCode.ERROR, - description=f"{code}:{details}", - ) - ) + status = _server_status(code, details) + self._active_span.set_status(status) return self._servicer_context.set_code(code) def set_details(self, details): self._details = 
details if self._code != grpc.StatusCode.OK: - self._active_span.set_status( - Status( - status_code=StatusCode.ERROR, - description=f"{self._code}:{details}", - ) - ) + status = _server_status(self._code, details) + self._active_span.set_status(status) return self._servicer_context.set_details(details) diff --git a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_utilities.py b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_utilities.py --- a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_utilities.py +++ b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_utilities.py @@ -14,6 +14,10 @@ """Internal utilities.""" +import grpc + +from opentelemetry.trace.status import Status, StatusCode + class RpcInfo: def __init__( @@ -31,3 +35,21 @@ def __init__( self.request = request self.response = response self.error = error + + +def _server_status(code, details): + error_status = Status( + status_code=StatusCode.ERROR, description=f"{code}:{details}" + ) + status_codes = { + grpc.StatusCode.UNKNOWN: error_status, + grpc.StatusCode.DEADLINE_EXCEEDED: error_status, + grpc.StatusCode.UNIMPLEMENTED: error_status, + grpc.StatusCode.INTERNAL: error_status, + grpc.StatusCode.UNAVAILABLE: error_status, + grpc.StatusCode.DATA_LOSS: error_status, + } + + return status_codes.get( + code, Status(status_code=StatusCode.UNSET, description="") + )
diff --git a/instrumentation/opentelemetry-instrumentation-grpc/tests/test_aio_server_interceptor.py b/instrumentation/opentelemetry-instrumentation-grpc/tests/test_aio_server_interceptor.py --- a/instrumentation/opentelemetry-instrumentation-grpc/tests/test_aio_server_interceptor.py +++ b/instrumentation/opentelemetry-instrumentation-grpc/tests/test_aio_server_interceptor.py @@ -507,9 +507,7 @@ async def test_abort(self): class AbortServicer(GRPCTestServerServicer): # pylint:disable=C0103 async def SimpleMethod(self, request, context): - await context.abort( - grpc.StatusCode.FAILED_PRECONDITION, failure_message - ) + await context.abort(grpc.StatusCode.INTERNAL, failure_message) testcase = self @@ -520,9 +518,7 @@ async def request(channel): with testcase.assertRaises(grpc.RpcError) as cm: await channel.unary_unary(rpc_call)(msg) - self.assertEqual( - cm.exception.code(), grpc.StatusCode.FAILED_PRECONDITION - ) + self.assertEqual(cm.exception.code(), grpc.StatusCode.INTERNAL) self.assertEqual(cm.exception.details(), failure_message) await run_with_test_server(request, servicer=AbortServicer()) @@ -543,7 +539,7 @@ async def request(channel): self.assertEqual(span.status.status_code, StatusCode.ERROR) self.assertEqual( span.status.description, - f"{grpc.StatusCode.FAILED_PRECONDITION}:{failure_message}", + f"{grpc.StatusCode.INTERNAL}:{failure_message}", ) # Check attributes @@ -555,7 +551,7 @@ async def request(channel): SpanAttributes.RPC_METHOD: "SimpleMethod", SpanAttributes.RPC_SERVICE: "GRPCTestServer", SpanAttributes.RPC_SYSTEM: "grpc", - SpanAttributes.RPC_GRPC_STATUS_CODE: grpc.StatusCode.FAILED_PRECONDITION.value[ + SpanAttributes.RPC_GRPC_STATUS_CODE: grpc.StatusCode.INTERNAL.value[ 0 ], }, @@ -605,11 +601,8 @@ async def request(channel): ) # make sure this span errored, with the right status and detail - self.assertEqual(span.status.status_code, StatusCode.ERROR) - self.assertEqual( - span.status.description, - f"{grpc.StatusCode.FAILED_PRECONDITION}:{failure_message}", - ) + self.assertEqual(span.status.status_code, StatusCode.UNSET) + self.assertEqual(span.status.description, None) # Check attributes self.assertSpanHasAttributes( diff --git a/instrumentation/opentelemetry-instrumentation-grpc/tests/test_server_interceptor.py b/instrumentation/opentelemetry-instrumentation-grpc/tests/test_server_interceptor.py --- a/instrumentation/opentelemetry-instrumentation-grpc/tests/test_server_interceptor.py +++ b/instrumentation/opentelemetry-instrumentation-grpc/tests/test_server_interceptor.py @@ -552,28 +552,45 @@ def test_abort(self): # our detailed failure message failure_message = "This is a test failure" - # aborting RPC handler - def handler(request, context): + # aborting RPC handlers + def error_status_handler(request, context): + context.abort(grpc.StatusCode.INTERNAL, failure_message) + + def unset_status_handler(request, context): context.abort(grpc.StatusCode.FAILED_PRECONDITION, failure_message) - with self.server( - max_workers=1, - interceptors=[interceptor], - ) as (server, channel): - server.add_generic_rpc_handlers((UnaryUnaryRpcHandler(handler),)) - rpc_call = "TestServicer/handler" + rpc_call_error = "TestServicer/error_status_handler" + rpc_call_unset = "TestServicer/unset_status_handler" + + rpc_calls = { + rpc_call_error: error_status_handler, + rpc_call_unset: unset_status_handler, + } + + for rpc_call, handler in rpc_calls.items(): + with self.server( + max_workers=1, + interceptors=[interceptor], + ) as (server, channel): + server.add_generic_rpc_handlers( + 
(UnaryUnaryRpcHandler(handler),) + ) - server.start() - # unfortunately, these are just bare exceptions in grpc... - with self.assertRaises(Exception): - channel.unary_unary(rpc_call)(b"") - server.stop(None) + server.start() + + with self.assertRaises(Exception): + channel.unary_unary(rpc_call)(b"") + + # unfortunately, these are just bare exceptions in grpc... + server.stop(None) spans_list = self.memory_exporter.get_finished_spans() - self.assertEqual(len(spans_list), 1) + self.assertEqual(len(spans_list), 2) + + # check error span span = spans_list[0] - self.assertEqual(span.name, rpc_call) + self.assertEqual(span.name, rpc_call_error) self.assertIs(span.kind, trace.SpanKind.SERVER) # Check version and name in span's instrumentation info @@ -585,15 +602,43 @@ def handler(request, context): self.assertEqual(span.status.status_code, StatusCode.ERROR) self.assertEqual( span.status.description, - f"{grpc.StatusCode.FAILED_PRECONDITION}:{failure_message}", + f"{grpc.StatusCode.INTERNAL}:{failure_message}", + ) + + # Check attributes + self.assertSpanHasAttributes( + span, + { + **self.net_peer_span_attributes, + SpanAttributes.RPC_METHOD: "error_status_handler", + SpanAttributes.RPC_SERVICE: "TestServicer", + SpanAttributes.RPC_SYSTEM: "grpc", + SpanAttributes.RPC_GRPC_STATUS_CODE: grpc.StatusCode.INTERNAL.value[ + 0 + ], + }, ) + # check unset status span + span = spans_list[1] + + self.assertEqual(span.name, rpc_call_unset) + self.assertIs(span.kind, trace.SpanKind.SERVER) + + # Check version and name in span's instrumentation info + self.assertEqualSpanInstrumentationInfo( + span, opentelemetry.instrumentation.grpc + ) + + self.assertEqual(span.status.description, None) + self.assertEqual(span.status.status_code, StatusCode.UNSET) + # Check attributes self.assertSpanHasAttributes( span, { **self.net_peer_span_attributes, - SpanAttributes.RPC_METHOD: "handler", + SpanAttributes.RPC_METHOD: "unset_status_handler", SpanAttributes.RPC_SERVICE: "TestServicer", SpanAttributes.RPC_SYSTEM: "grpc", SpanAttributes.RPC_GRPC_STATUS_CODE: grpc.StatusCode.FAILED_PRECONDITION.value[
Update gRPC server status code to span status code mapping Spec ref: https://github.com/open-telemetry/opentelemetry-specification/pull/3333
Hello @srikanthccv I would like to contribute to this project This is already taken. You can find other good-first-issues not assigned to anybody here https://github.com/open-telemetry/opentelemetry-python-contrib/issues?q=is%3Aopen+label%3A%22good+first+issue%22+no%3Aassignee thanks Any movement on this issue? Would be happy to help here. For anyone picking this up (as I've been reviewing the same issue in the Java implementation) - The problem in Python land is in the handling of grpc.RpcError when it gets thrown: https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_client.py#L154 https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_client.py#L212 As per the upstream docs, anything that doesn't return OK will throw RpcError(): https://grpc.github.io/grpc/python/grpc.html#grpc-exceptions However, rather than just treating that as an error, as the code currently does, we need to be checking the status first, and then decide if that status is actually one we should mark the span as error or not (e.g. NOT_FOUND should not mark the span as an error). @srikanthccv I'd be happy to pick this up.
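To make the discussion above concrete, here is a hedged sketch of the status mapping that the patch for this change introduces as `_server_status`: only a subset of gRPC codes should mark a SERVER span as an error, while codes such as `NOT_FOUND` or `FAILED_PRECONDITION` leave the span status unset. The function name and code set below mirror the patch; treat it as an illustration, not the shipped module.

```python
# Sketch: map a gRPC server status code to an OpenTelemetry span status.
# Only the codes listed as server errors produce StatusCode.ERROR.
import grpc

from opentelemetry.trace.status import Status, StatusCode

_SERVER_ERROR_CODES = {
    grpc.StatusCode.UNKNOWN,
    grpc.StatusCode.DEADLINE_EXCEEDED,
    grpc.StatusCode.UNIMPLEMENTED,
    grpc.StatusCode.INTERNAL,
    grpc.StatusCode.UNAVAILABLE,
    grpc.StatusCode.DATA_LOSS,
}


def _server_status(code, details):
    """Return the span status for a finished server-side RPC."""
    if code in _SERVER_ERROR_CODES:
        return Status(
            status_code=StatusCode.ERROR, description=f"{code}:{details}"
        )
    # Everything else (e.g. NOT_FOUND, FAILED_PRECONDITION) stays UNSET.
    return Status(status_code=StatusCode.UNSET, description="")
```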
2023-10-25T13:02:58
open-telemetry/opentelemetry-python-contrib
2026
open-telemetry__opentelemetry-python-contrib-2026
[ "2009" ]
a93bd74dc3969c13db1d6f603689ee8d9650ce66
diff --git a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py --- a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py @@ -189,11 +189,13 @@ def client_response_hook(span: Span, message: dict): --- """ +from __future__ import annotations + import typing import urllib from functools import wraps from timeit import default_timer -from typing import Tuple +from typing import Any, Awaitable, Callable, Tuple, cast from asgiref.compatibility import guarantee_single_callable @@ -332,55 +334,28 @@ def collect_request_attributes(scope): return result -def collect_custom_request_headers_attributes(scope): - """returns custom HTTP request headers to be added into SERVER span as span attributes - Refer specification https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#http-request-and-response-headers +def collect_custom_headers_attributes( + scope_or_response_message: dict[str, Any], + sanitize: SanitizeValue, + header_regexes: list[str], + normalize_names: Callable[[str], str], +) -> dict[str, str]: """ + Returns custom HTTP request or response headers to be added into SERVER span as span attributes. - sanitize = SanitizeValue( - get_custom_headers( - OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS - ) - ) - - # Decode headers before processing. - headers = { - _key.decode("utf8"): _value.decode("utf8") - for (_key, _value) in scope.get("headers") - } - - return sanitize.sanitize_header_values( - headers, - get_custom_headers( - OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST - ), - normalise_request_header_name, - ) - - -def collect_custom_response_headers_attributes(message): - """returns custom HTTP response headers to be added into SERVER span as span attributes - Refer specification https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#http-request-and-response-headers + Refer specifications: + - https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#http-request-and-response-headers """ - - sanitize = SanitizeValue( - get_custom_headers( - OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS - ) - ) - # Decode headers before processing. 
- headers = { + headers: dict[str, str] = { _key.decode("utf8"): _value.decode("utf8") - for (_key, _value) in message.get("headers") + for (_key, _value) in scope_or_response_message.get("headers") + or cast("list[tuple[bytes, bytes]]", []) } - return sanitize.sanitize_header_values( headers, - get_custom_headers( - OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE - ), - normalise_response_header_name, + header_regexes, + normalize_names, ) @@ -493,6 +468,9 @@ def __init__( tracer_provider=None, meter_provider=None, meter=None, + http_capture_headers_server_request: list[str] | None = None, + http_capture_headers_server_response: list[str] | None = None, + http_capture_headers_sanitize_fields: list[str] | None = None, ): self.app = guarantee_single_callable(app) self.tracer = trace.get_tracer( @@ -540,7 +518,41 @@ def __init__( self.client_response_hook = client_response_hook self.content_length_header = None - async def __call__(self, scope, receive, send): + # Environment variables as constructor parameters + self.http_capture_headers_server_request = ( + http_capture_headers_server_request + or ( + get_custom_headers( + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST + ) + ) + or None + ) + self.http_capture_headers_server_response = ( + http_capture_headers_server_response + or ( + get_custom_headers( + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE + ) + ) + or None + ) + self.http_capture_headers_sanitize_fields = SanitizeValue( + http_capture_headers_sanitize_fields + or ( + get_custom_headers( + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS + ) + ) + or [] + ) + + async def __call__( + self, + scope: dict[str, Any], + receive: Callable[[], Awaitable[dict[str, Any]]], + send: Callable[[dict[str, Any]], Awaitable[None]], + ) -> None: """The ASGI application Args: @@ -583,7 +595,14 @@ async def __call__(self, scope, receive, send): if current_span.kind == trace.SpanKind.SERVER: custom_attributes = ( - collect_custom_request_headers_attributes(scope) + collect_custom_headers_attributes( + scope, + self.http_capture_headers_sanitize_fields, + self.http_capture_headers_server_request, + normalise_request_header_name, + ) + if self.http_capture_headers_server_request + else {} ) if len(custom_attributes) > 0: current_span.set_attributes(custom_attributes) @@ -658,7 +677,7 @@ def _get_otel_send( expecting_trailers = False @wraps(send) - async def otel_send(message): + async def otel_send(message: dict[str, Any]): nonlocal expecting_trailers with self.tracer.start_as_current_span( " ".join((server_span_name, scope["type"], "send")) @@ -685,7 +704,14 @@ async def otel_send(message): and "headers" in message ): custom_response_attributes = ( - collect_custom_response_headers_attributes(message) + collect_custom_headers_attributes( + message, + self.http_capture_headers_sanitize_fields, + self.http_capture_headers_server_response, + normalise_response_header_name, + ) + if self.http_capture_headers_server_response + else {} ) if len(custom_response_attributes) > 0: server_span.set_attributes( diff --git a/instrumentation/opentelemetry-instrumentation-django/src/opentelemetry/instrumentation/django/middleware/otel_middleware.py b/instrumentation/opentelemetry-instrumentation-django/src/opentelemetry/instrumentation/django/middleware/otel_middleware.py --- a/instrumentation/opentelemetry-instrumentation-django/src/opentelemetry/instrumentation/django/middleware/otel_middleware.py +++ 
b/instrumentation/opentelemetry-instrumentation-django/src/opentelemetry/instrumentation/django/middleware/otel_middleware.py @@ -43,10 +43,17 @@ from opentelemetry.semconv.trace import SpanAttributes from opentelemetry.trace import Span, SpanKind, use_span from opentelemetry.util.http import ( + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS, + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST, + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE, + SanitizeValue, _parse_active_request_count_attrs, _parse_duration_attrs, + get_custom_headers, get_excluded_urls, get_traced_request_attrs, + normalise_request_header_name, + normalise_response_header_name, ) try: @@ -91,10 +98,7 @@ def __call__(self, request): try: from opentelemetry.instrumentation.asgi import asgi_getter, asgi_setter from opentelemetry.instrumentation.asgi import ( - collect_custom_request_headers_attributes as asgi_collect_custom_request_attributes, - ) - from opentelemetry.instrumentation.asgi import ( - collect_custom_response_headers_attributes as asgi_collect_custom_response_attributes, + collect_custom_headers_attributes as asgi_collect_custom_headers_attributes, ) from opentelemetry.instrumentation.asgi import ( collect_request_attributes as asgi_collect_request_attributes, @@ -108,7 +112,6 @@ def __call__(self, request): set_status_code = None _is_asgi_supported = False - _logger = getLogger(__name__) _attributes_by_preference = [ [ @@ -249,7 +252,18 @@ def process_request(self, request): ) if span.is_recording() and span.kind == SpanKind.SERVER: attributes.update( - asgi_collect_custom_request_attributes(carrier) + asgi_collect_custom_headers_attributes( + carrier, + SanitizeValue( + get_custom_headers( + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS + ) + ), + get_custom_headers( + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST + ), + normalise_request_header_name, + ) ) else: if span.is_recording() and span.kind == SpanKind.SERVER: @@ -336,8 +350,17 @@ def process_response(self, request, response): for key, value in response.items(): asgi_setter.set(custom_headers, key, value) - custom_res_attributes = ( - asgi_collect_custom_response_attributes(custom_headers) + custom_res_attributes = asgi_collect_custom_headers_attributes( + custom_headers, + SanitizeValue( + get_custom_headers( + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS + ) + ), + get_custom_headers( + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE + ), + normalise_response_header_name, ) for key, value in custom_res_attributes.items(): span.set_attribute(key, value) diff --git a/util/opentelemetry-util-http/src/opentelemetry/util/http/__init__.py b/util/opentelemetry-util-http/src/opentelemetry/util/http/__init__.py --- a/util/opentelemetry-util-http/src/opentelemetry/util/http/__init__.py +++ b/util/opentelemetry-util-http/src/opentelemetry/util/http/__init__.py @@ -12,11 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations + from os import environ from re import IGNORECASE as RE_IGNORECASE from re import compile as re_compile from re import search -from typing import Iterable, List, Optional +from typing import Callable, Iterable, Optional from urllib.parse import urlparse, urlunparse from opentelemetry.semconv.trace import SpanAttributes @@ -84,9 +86,12 @@ def sanitize_header_value(self, header: str, value: str) -> str: ) def sanitize_header_values( - self, headers: dict, header_regexes: list, normalize_function: callable - ) -> dict: - values = {} + self, + headers: dict[str, str], + header_regexes: list[str], + normalize_function: Callable[[str], str], + ) -> dict[str, str]: + values: dict[str, str] = {} if header_regexes: header_regexes_compiled = re_compile( @@ -216,14 +221,14 @@ def sanitize_method(method: Optional[str]) -> Optional[str]: return "UNKNOWN" -def get_custom_headers(env_var: str) -> List[str]: - custom_headers = environ.get(env_var, []) +def get_custom_headers(env_var: str) -> list[str]: + custom_headers = environ.get(env_var, None) if custom_headers: - custom_headers = [ + return [ custom_headers.strip() for custom_headers in custom_headers.split(",") ] - return custom_headers + return [] def _parse_active_request_count_attrs(req_attrs):
diff --git a/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_custom_headers.py b/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_custom_headers.py --- a/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_custom_headers.py +++ b/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_custom_headers.py @@ -1,4 +1,4 @@ -from unittest import mock +import os import opentelemetry.instrumentation.asgi as otel_asgi from opentelemetry.test.asgitestutil import AsgiTestBase @@ -72,21 +72,22 @@ async def websocket_app_with_custom_headers(scope, receive, send): break [email protected]( - "os.environ", - { - OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS: ".*my-secret.*", - OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST: "Custom-Test-Header-1,Custom-Test-Header-2,Custom-Test-Header-3,Regex-Test-Header-.*,Regex-Invalid-Test-Header-.*,.*my-secret.*", - OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE: "Custom-Test-Header-1,Custom-Test-Header-2,Custom-Test-Header-3,my-custom-regex-header-.*,invalid-regex-header-.*,.*my-secret.*", - }, -) class TestCustomHeaders(AsgiTestBase, TestBase): + constructor_params = {} + __test__ = False + + def __init_subclass__(cls) -> None: + if cls is not TestCustomHeaders: + cls.__test__ = True + def setUp(self): super().setUp() self.tracer_provider, self.exporter = TestBase.create_tracer_provider() self.tracer = self.tracer_provider.get_tracer(__name__) self.app = otel_asgi.OpenTelemetryMiddleware( - simple_asgi, tracer_provider=self.tracer_provider + simple_asgi, + tracer_provider=self.tracer_provider, + **self.constructor_params, ) def test_http_custom_request_headers_in_span_attributes(self): @@ -148,7 +149,9 @@ def test_http_custom_request_headers_not_in_span_attributes(self): def test_http_custom_response_headers_in_span_attributes(self): self.app = otel_asgi.OpenTelemetryMiddleware( - http_app_with_custom_headers, tracer_provider=self.tracer_provider + http_app_with_custom_headers, + tracer_provider=self.tracer_provider, + **self.constructor_params, ) self.seed_app(self.app) self.send_default_request() @@ -175,7 +178,9 @@ def test_http_custom_response_headers_in_span_attributes(self): def test_http_custom_response_headers_not_in_span_attributes(self): self.app = otel_asgi.OpenTelemetryMiddleware( - http_app_with_custom_headers, tracer_provider=self.tracer_provider + http_app_with_custom_headers, + tracer_provider=self.tracer_provider, + **self.constructor_params, ) self.seed_app(self.app) self.send_default_request() @@ -277,6 +282,7 @@ def test_websocket_custom_response_headers_in_span_attributes(self): self.app = otel_asgi.OpenTelemetryMiddleware( websocket_app_with_custom_headers, tracer_provider=self.tracer_provider, + **self.constructor_params, ) self.seed_app(self.app) self.send_input({"type": "websocket.connect"}) @@ -317,6 +323,7 @@ def test_websocket_custom_response_headers_not_in_span_attributes(self): self.app = otel_asgi.OpenTelemetryMiddleware( websocket_app_with_custom_headers, tracer_provider=self.tracer_provider, + **self.constructor_params, ) self.seed_app(self.app) self.send_input({"type": "websocket.connect"}) @@ -333,3 +340,46 @@ def test_websocket_custom_response_headers_not_in_span_attributes(self): if span.kind == SpanKind.SERVER: for key, _ in not_expected.items(): self.assertNotIn(key, span.attributes) + + +SANITIZE_FIELDS_TEST_VALUE = ".*my-secret.*" +SERVER_REQUEST_TEST_VALUE = 
"Custom-Test-Header-1,Custom-Test-Header-2,Custom-Test-Header-3,Regex-Test-Header-.*,Regex-Invalid-Test-Header-.*,.*my-secret.*" +SERVER_RESPONSE_TEST_VALUE = "Custom-Test-Header-1,Custom-Test-Header-2,Custom-Test-Header-3,my-custom-regex-header-.*,invalid-regex-header-.*,.*my-secret.*" + + +class TestCustomHeadersEnv(TestCustomHeaders): + def setUp(self): + os.environ.update( + { + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS: SANITIZE_FIELDS_TEST_VALUE, + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST: SERVER_REQUEST_TEST_VALUE, + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE: SERVER_RESPONSE_TEST_VALUE, + } + ) + super().setUp() + + def tearDown(self): + os.environ.pop( + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS, None + ) + os.environ.pop( + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST, None + ) + os.environ.pop( + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE, None + ) + super().tearDown() + + +class TestCustomHeadersConstructor(TestCustomHeaders): + constructor_params = { + "http_capture_headers_sanitize_fields": SANITIZE_FIELDS_TEST_VALUE.split( + "," + ), + "http_capture_headers_server_request": SERVER_REQUEST_TEST_VALUE.split( + "," + ), + "http_capture_headers_server_response": SERVER_RESPONSE_TEST_VALUE.split( + "," + ), + } diff --git a/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py b/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py --- a/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py +++ b/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py @@ -983,18 +983,16 @@ class TestAsgiApplicationRaisingError(AsgiTestBase): def tearDown(self): pass - @mock.patch( - "opentelemetry.instrumentation.asgi.collect_custom_request_headers_attributes", - side_effect=ValueError("whatever"), - ) - def test_asgi_issue_1883( - self, mock_collect_custom_request_headers_attributes - ): + def test_asgi_issue_1883(self): """ Test that exception UnboundLocalError local variable 'start' referenced before assignment is not raised See https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1883 """ - app = otel_asgi.OpenTelemetryMiddleware(simple_asgi) + + async def bad_app(_scope, _receive, _send): + raise ValueError("whatever") + + app = otel_asgi.OpenTelemetryMiddleware(bad_app) self.seed_app(app) self.send_default_request() try: diff --git a/instrumentation/opentelemetry-instrumentation-starlette/tests/test_starlette_instrumentation.py b/instrumentation/opentelemetry-instrumentation-starlette/tests/test_starlette_instrumentation.py --- a/instrumentation/opentelemetry-instrumentation-starlette/tests/test_starlette_instrumentation.py +++ b/instrumentation/opentelemetry-instrumentation-starlette/tests/test_starlette_instrumentation.py @@ -467,15 +467,18 @@ async def _(websocket: WebSocket) -> None: return app [email protected]( - "os.environ", - { - OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS: ".*my-secret.*", - OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST: "Custom-Test-Header-1,Custom-Test-Header-2,Custom-Test-Header-3,Regex-Test-Header-.*,Regex-Invalid-Test-Header-.*,.*my-secret.*", - OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE: "Custom-Test-Header-1,Custom-Test-Header-2,Custom-Test-Header-3,my-custom-regex-header-.*,invalid-regex-header-.*,.*my-secret.*", - }, -) class TestHTTPAppWithCustomHeaders(TestBaseWithCustomHeaders): + @patch.dict( + 
"os.environ", + { + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS: ".*my-secret.*", + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST: "Custom-Test-Header-1,Custom-Test-Header-2,Custom-Test-Header-3,Regex-Test-Header-.*,Regex-Invalid-Test-Header-.*,.*my-secret.*", + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE: "Custom-Test-Header-1,Custom-Test-Header-2,Custom-Test-Header-3,my-custom-regex-header-.*,invalid-regex-header-.*,.*my-secret.*", + }, + ) + def setUp(self) -> None: + super().setUp() + def test_custom_request_headers_in_span_attributes(self): expected = { "http.request.header.custom_test_header_1": ( @@ -590,15 +593,18 @@ def test_custom_response_headers_not_in_span_attributes(self): self.assertNotIn(key, server_span.attributes) [email protected]( - "os.environ", - { - OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS: ".*my-secret.*", - OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST: "Custom-Test-Header-1,Custom-Test-Header-2,Custom-Test-Header-3,Regex-Test-Header-.*,Regex-Invalid-Test-Header-.*,.*my-secret.*", - OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE: "Custom-Test-Header-1,Custom-Test-Header-2,Custom-Test-Header-3,my-custom-regex-header-.*,invalid-regex-header-.*,.*my-secret.*", - }, -) class TestWebSocketAppWithCustomHeaders(TestBaseWithCustomHeaders): + @patch.dict( + "os.environ", + { + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS: ".*my-secret.*", + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST: "Custom-Test-Header-1,Custom-Test-Header-2,Custom-Test-Header-3,Regex-Test-Header-.*,Regex-Invalid-Test-Header-.*,.*my-secret.*", + OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE: "Custom-Test-Header-1,Custom-Test-Header-2,Custom-Test-Header-3,my-custom-regex-header-.*,invalid-regex-header-.*,.*my-secret.*", + }, + ) + def setUp(self) -> None: + super().setUp() + def test_custom_request_headers_in_span_attributes(self): expected = { "http.request.header.custom_test_header_1": (
Allow configuring header extraction via runtime parameters Currently, web framework instrumentations only allow configuring header extraction via environment variables. I want to add the option to configure them via constructor parameters. I think the code should be quite straightforward but I want to check if this would be an acceptable change before making a PR. Some background on why it wasn't implemented in the first place might also be helpful if it's something other than 'no one asked for it yet'.
@lzchen @srikanthccv any thoughts? I don't see any reason why it couldn't be added as a runtime parameter as well. I think we followed what Java did at the time and used the same environment variables for consistency. We already support configuring some capabilities such as excluding URLs with construction params. This change is fine. Please send a pull request.
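A hedged usage sketch of the resulting API: after the patch above, the header-capture and sanitize lists can be passed directly to the ASGI middleware constructor instead of relying on the `OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_*` environment variables. The header names and the tiny placeholder `app` below are made up for illustration.

```python
# Sketch: configure custom header capture via constructor parameters.
from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware


async def app(scope, receive, send):
    # Placeholder ASGI app so the example is self-contained.
    await send({"type": "http.response.start", "status": 200, "headers": []})
    await send({"type": "http.response.body", "body": b"ok"})


instrumented_app = OpenTelemetryMiddleware(
    app,
    http_capture_headers_server_request=["Custom-Test-Header-1", "Regex-Test-Header-.*"],
    http_capture_headers_server_response=["Custom-Test-Header-1"],
    http_capture_headers_sanitize_fields=[".*my-secret.*"],
)
```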
2023-10-30T17:04:51
open-telemetry/opentelemetry-python-contrib
2066
open-telemetry__opentelemetry-python-contrib-2066
[ "2065" ]
b29682b5616f465394679c065c6d0d2519620bc1
diff --git a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py --- a/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py +++ b/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py @@ -12,13 +12,63 @@ # See the License for the specific language governing permissions and # limitations under the License. +import grpc import grpc.aio - -from ._server import ( - OpenTelemetryServerInterceptor, - _OpenTelemetryServicerContext, - _wrap_rpc_behavior, -) +import wrapt + +from opentelemetry.semconv.trace import SpanAttributes +from opentelemetry.trace.status import Status, StatusCode + +from ._server import OpenTelemetryServerInterceptor, _wrap_rpc_behavior + + +# pylint:disable=abstract-method +class _OpenTelemetryAioServicerContext(wrapt.ObjectProxy): + def __init__(self, servicer_context, active_span): + super().__init__(servicer_context) + self._self_active_span = active_span + self._self_code = grpc.StatusCode.OK + self._self_details = None + + async def abort(self, code, details="", trailing_metadata=tuple()): + self._self_code = code + self._self_details = details + self._self_active_span.set_attribute( + SpanAttributes.RPC_GRPC_STATUS_CODE, code.value[0] + ) + self._self_active_span.set_status( + Status( + status_code=StatusCode.ERROR, + description=f"{code}:{details}", + ) + ) + return await self.__wrapped__.abort(code, details, trailing_metadata) + + def set_code(self, code): + self._self_code = code + details = self._self_details or code.value[1] + self._self_active_span.set_attribute( + SpanAttributes.RPC_GRPC_STATUS_CODE, code.value[0] + ) + if code != grpc.StatusCode.OK: + self._self_active_span.set_status( + Status( + status_code=StatusCode.ERROR, + description=f"{code}:{details}", + ) + ) + return self.__wrapped__.set_code(code) + + def set_details(self, details): + self._self_details = details + if self._self_code != grpc.StatusCode.OK: + self._self_active_span.set_status( + Status( + status_code=StatusCode.ERROR, + description=f"{self._self_code}:{details}", + ) + ) + return self.__wrapped__.set_details(details) class OpenTelemetryAioServerInterceptor( @@ -66,7 +116,7 @@ async def _unary_interceptor(request_or_iterator, context): set_status_on_exception=False, ) as span: # wrap the context - context = _OpenTelemetryServicerContext(context, span) + context = _OpenTelemetryAioServicerContext(context, span) # And now we run the actual RPC. try: @@ -91,7 +141,7 @@ async def _stream_interceptor(request_or_iterator, context): context, set_status_on_exception=False, ) as span: - context = _OpenTelemetryServicerContext(context, span) + context = _OpenTelemetryAioServicerContext(context, span) try: async for response in behavior(
diff --git a/instrumentation/opentelemetry-instrumentation-grpc/tests/test_aio_server_interceptor.py b/instrumentation/opentelemetry-instrumentation-grpc/tests/test_aio_server_interceptor.py --- a/instrumentation/opentelemetry-instrumentation-grpc/tests/test_aio_server_interceptor.py +++ b/instrumentation/opentelemetry-instrumentation-grpc/tests/test_aio_server_interceptor.py @@ -88,8 +88,11 @@ async def run_with_test_server( channel = grpc.aio.insecure_channel(f"localhost:{port:d}") await server.start() - resp = await runnable(channel) - await server.stop(1000) + + try: + resp = await runnable(channel) + finally: + await server.stop(1000) return resp @@ -514,9 +517,79 @@ async def request(channel): request = Request(client_id=1, request_data=failure_message) msg = request.SerializeToString() - with testcase.assertRaises(Exception): + with testcase.assertRaises(grpc.RpcError) as cm: + await channel.unary_unary(rpc_call)(msg) + + self.assertEqual( + cm.exception.code(), grpc.StatusCode.FAILED_PRECONDITION + ) + self.assertEqual(cm.exception.details(), failure_message) + + await run_with_test_server(request, servicer=AbortServicer()) + + spans_list = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans_list), 1) + span = spans_list[0] + + self.assertEqual(span.name, rpc_call) + self.assertIs(span.kind, trace.SpanKind.SERVER) + + # Check version and name in span's instrumentation info + self.assertEqualSpanInstrumentationInfo( + span, opentelemetry.instrumentation.grpc + ) + + # make sure this span errored, with the right status and detail + self.assertEqual(span.status.status_code, StatusCode.ERROR) + self.assertEqual( + span.status.description, + f"{grpc.StatusCode.FAILED_PRECONDITION}:{failure_message}", + ) + + # Check attributes + self.assertSpanHasAttributes( + span, + { + SpanAttributes.NET_PEER_IP: "[::1]", + SpanAttributes.NET_PEER_NAME: "localhost", + SpanAttributes.RPC_METHOD: "SimpleMethod", + SpanAttributes.RPC_SERVICE: "GRPCTestServer", + SpanAttributes.RPC_SYSTEM: "grpc", + SpanAttributes.RPC_GRPC_STATUS_CODE: grpc.StatusCode.FAILED_PRECONDITION.value[ + 0 + ], + }, + ) + + async def test_abort_with_trailing_metadata(self): + """Check that we can catch an abort properly when trailing_metadata provided""" + rpc_call = "/GRPCTestServer/SimpleMethod" + failure_message = "failure message" + + class AbortServicer(GRPCTestServerServicer): + # pylint:disable=C0103 + async def SimpleMethod(self, request, context): + metadata = (("meta", "data"),) + await context.abort( + grpc.StatusCode.FAILED_PRECONDITION, + failure_message, + trailing_metadata=metadata, + ) + + testcase = self + + async def request(channel): + request = Request(client_id=1, request_data=failure_message) + msg = request.SerializeToString() + + with testcase.assertRaises(grpc.RpcError) as cm: await channel.unary_unary(rpc_call)(msg) + self.assertEqual( + cm.exception.code(), grpc.StatusCode.FAILED_PRECONDITION + ) + self.assertEqual(cm.exception.details(), failure_message) + await run_with_test_server(request, servicer=AbortServicer()) spans_list = self.memory_exporter.get_finished_spans()
grpc: trailing metadata can't be passed when aborting an AIO server RPC **Describe your environment** Python: 3.12.0 `opentelemetry-distro[otlp]==0.42b0` `opentelemetry-instrumentation-grpc==0.42b0` `grpcio==1.59.2` **Steps to reproduce** Full replication can be found [here](https://github.com/cookiefission/testbench/tree/main/otel/python-grpc-aio-abort-issue). `server.py`: ```python import asyncio import logging import grpc import helloworld_pb2 import helloworld_pb2_grpc class Greeter(helloworld_pb2_grpc.GreeterServicer): async def SayHello( self, request: helloworld_pb2.HelloRequest, context: grpc.aio.ServicerContext, ) -> helloworld_pb2.HelloReply: metadata = ( ("this", "should"), ("work", "nicely") ) await context.abort(code=grpc.StatusCode.ABORTED, details="This is the actual error message", trailing_metadata=metadata) async def serve() -> None: server = grpc.aio.server() helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server) listen_addr = "[::]:50051" server.add_insecure_port(listen_addr) logging.info("Starting server on %s", listen_addr) await server.start() await server.wait_for_termination() if __name__ == "__main__": logging.basicConfig(level=logging.INFO) asyncio.run(serve()) ``` This server is just adopted from [the grpc examples](https://github.com/grpc/grpc/blob/master/examples/python/helloworld/async_greeter_server.py). The helloworld protobuf artefacts can be found [there](https://github.com/grpc/grpc/blob/master/examples/python/helloworld/helloworld_pb2_grpc.py). For a client, the [`async_greeter_client.py`](https://github.com/grpc/grpc/blob/master/examples/python/helloworld/async_greeter_client.py) from grpc examples can be used. This only happens when `trailing_metadata` is passed as an argument to `context.abort`. **What is the expected behavior?** Running the server without opentelemetry, the (expected) error from running the client is: ``` grpc.aio._call.AioRpcError: <AioRpcError of RPC that terminated with: status = StatusCode.ABORTED details = "This is the actual error message" debug_error_string = "UNKNOWN:Error received from peer {grpc_message:"This is the actual error message", grpc_status:10, created_time:"2023-11-16T15:09:03.552303+00:00"}" > ``` When running the server under `opentelemetry-instrument`, I expect the same output as when it is run without OTEL. **What is the actual behavior?** Instead, an exception is still raised but the underlying code, details, and trailing_metadata are missing. This makes any nuanced error handling on the client side impossible. ``` grpc.aio._call.AioRpcError: <AioRpcError of RPC that terminated with: status = StatusCode.UNKNOWN details = "Unexpected <class 'TypeError'>: _OpenTelemetryServicerContext.abort() got an unexpected keyword argument 'trailing_metadata'" debug_error_string = "UNKNOWN:Error received from peer {created_time:"2023-11-16T15:30:02.017999+00:00", grpc_status:2, grpc_message:"Unexpected <class \'TypeError\'>: _OpenTelemetryServicerContext.abort() got an unexpected keyword argument \'trailing_metadata\'"}" > ``` **Additional context** The `abort` method for [`grpc.ServicerContext`](https://grpc.github.io/grpc/python/grpc.html#grpc.ServicerContext.abort) and [`grpc.aio.ServicerContext`](https://grpc.github.io/grpc/python/grpc_asyncio.html#grpc.aio.ServicerContext.abort) is different. It's unclear why. 
The AIO instrumentation [re-uses](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_aio_server.py#L19) the [`_OpenTelemetryServicerContext`](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/instrumentation/opentelemetry-instrumentation-grpc/src/opentelemetry/instrumentation/grpc/_server.py#L68) from the non-async server instrumentation which is how this bug snuck in, despite there being a test in place for the abort.
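The root cause noted above is that `grpc.aio.ServicerContext.abort` is a coroutine that accepts `trailing_metadata`, unlike the synchronous context the shared proxy was written for. Below is an illustrative sketch, mirroring the patch, of an asyncio-specific context proxy whose `abort` preserves that argument while still recording the status on the active span; the real class also handles `set_code`/`set_details`, which are omitted here for brevity.

```python
# Sketch: an aio-aware servicer-context proxy whose abort() keeps the
# trailing_metadata argument intact while recording the span status.
import wrapt

from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.trace.status import Status, StatusCode


class AioServicerContextProxy(wrapt.ObjectProxy):
    def __init__(self, servicer_context, active_span):
        super().__init__(servicer_context)
        self._self_active_span = active_span

    async def abort(self, code, details="", trailing_metadata=tuple()):
        # Record the gRPC status code and an error status on the span, then
        # delegate to the wrapped aio context with all arguments preserved.
        self._self_active_span.set_attribute(
            SpanAttributes.RPC_GRPC_STATUS_CODE, code.value[0]
        )
        self._self_active_span.set_status(
            Status(
                status_code=StatusCode.ERROR,
                description=f"{code}:{details}",
            )
        )
        return await self.__wrapped__.abort(code, details, trailing_metadata)
```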
2023-11-16T17:26:24
open-telemetry/opentelemetry-python-contrib
2090
open-telemetry__opentelemetry-python-contrib-2090
[ "2089" ]
4bf3577fb76480dacca12eecd8ed93cb9fccc274
diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py @@ -77,7 +77,7 @@ "instrumentation": "opentelemetry-instrumentation-elasticsearch==0.43b0.dev", }, { - "library": "falcon >= 1.4.1, < 4.0.0", + "library": "falcon >= 1.4.1, < 3.1.2", "instrumentation": "opentelemetry-instrumentation-falcon==0.43b0.dev", }, {
Fix Falcon dependency Falcon test cases are [failing](https://github.com/open-telemetry/opentelemetry-python/actions/runs/7202220508/job/19619933938?pr=3581) with its latest release.
2023-12-14T00:13:48
open-telemetry/opentelemetry-python-contrib
2101
open-telemetry__opentelemetry-python-contrib-2101
[ "2053" ]
2a174b25437ac7bbe763d790d231ecd04bbe2168
diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py @@ -24,10 +24,6 @@ "library": "aiohttp ~= 3.0", "instrumentation": "opentelemetry-instrumentation-aiohttp-client==0.46b0.dev", }, - { - "library": "aiohttp ~= 3.0", - "instrumentation": "opentelemetry-instrumentation-aiohttp-server==0.46b0.dev", - }, { "library": "aiopg >= 0.13.0, < 2.0.0", "instrumentation": "opentelemetry-instrumentation-aiopg==0.46b0.dev", @@ -191,7 +187,6 @@ "opentelemetry-instrumentation-dbapi==0.46b0.dev", "opentelemetry-instrumentation-logging==0.46b0.dev", "opentelemetry-instrumentation-sqlite3==0.46b0.dev", - "opentelemetry-instrumentation-threading==0.46b0.dev", "opentelemetry-instrumentation-urllib==0.46b0.dev", "opentelemetry-instrumentation-wsgi==0.46b0.dev", ] diff --git a/scripts/otel_packaging.py b/scripts/otel_packaging.py --- a/scripts/otel_packaging.py +++ b/scripts/otel_packaging.py @@ -12,43 +12,55 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os -import subprocess -from subprocess import CalledProcessError +from tomli import load +from os import path, listdir +from subprocess import check_output, CalledProcessError +from requests import get -import tomli - -scripts_path = os.path.dirname(os.path.abspath(__file__)) -root_path = os.path.dirname(scripts_path) -instrumentations_path = os.path.join(root_path, "instrumentation") +scripts_path = path.dirname(path.abspath(__file__)) +root_path = path.dirname(scripts_path) +instrumentations_path = path.join(root_path, "instrumentation") def get_instrumentation_packages(): - for pkg in sorted(os.listdir(instrumentations_path)): - pkg_path = os.path.join(instrumentations_path, pkg) - if not os.path.isdir(pkg_path): + for pkg in sorted(listdir(instrumentations_path)): + pkg_path = path.join(instrumentations_path, pkg) + if not path.isdir(pkg_path): continue + error = f"Could not get version for package {pkg}" + try: - version = subprocess.check_output( + hatch_version = check_output( "hatch version", shell=True, cwd=pkg_path, - universal_newlines=True, + universal_newlines=True ) + except CalledProcessError as exc: print(f"Could not get hatch version from path {pkg_path}") print(exc.output) - raise exc - pyproject_toml_path = os.path.join(pkg_path, "pyproject.toml") + try: + response = get(f"https://pypi.org/pypi/{pkg}/json", timeout=10) + + except Exception: + print(error) + continue + + if response.status_code != 200: + print(error) + continue + + pyproject_toml_path = path.join(pkg_path, "pyproject.toml") with open(pyproject_toml_path, "rb") as file: - pyproject_toml = tomli.load(file) + pyproject_toml = load(file) instrumentation = { "name": pyproject_toml["project"]["name"], - "version": version.strip(), + "version": hatch_version.strip(), "instruments": pyproject_toml["project"]["optional-dependencies"][ "instruments" ],
ERROR: No matching distribution found for opentelemetry-instrumentation-aiohttp-server==0.42b0 Python Version - 3.8 **Steps to reproduce** Run the 'opentelemetry-bootstrap -a install' command to install the dependencies for auto instrumentation. **What is the expected behavior?** The package opentelemetry-instrumentation-aiohttp-server==0.42b0 should be downloaded from PyPI. **What is the actual behavior?** ERROR: No matching distribution found for opentelemetry-instrumentation-aiohttp-server==0.42b0
There was an issue releasing the new version of `opentelemetry-instrumentation-aiohttp-server==0.42b0`, we are working on it to get it resolved. This is also breaking for users (like me) that bring in all instrumentations transitively by depending on `opentelemetry-contrib-instrumentations`. For example, this is broken (poetry example): ```toml opentelemetry-contrib-instrumentations = { version = "0.42b0" } ``` In poetry it fails with: ``` Because opentelemetry-contrib-instrumentations (0.42b0) depends on opentelemetry-instrumentation-aiohttp-server (0.42b0) which doesn't match any versions, opentelemetry-contrib-instrumentations is forbidden. ``` We are also affected by this. We've removed opentelemetry instrumentation temporarily as a workaround, but this is not an option for everybody of course. Any updates on this issue? Receiving the same behaviour with 3.10.3 ``` $ opentelemetry-bootstrap -a install ERROR: Could not find a version that satisfies the requirement opentelemetry-instrumentation-aiohttp-server==0.42b0 (from versions: none) ERROR: No matching distribution found for opentelemetry-instrumentation-aiohttp-server==0.42b0 ``` > There was an issue releasing the new version of `opentelemetry-instrumentation-aiohttp-server==0.42b0`, we are working on it to get it resolved. Any ETA for this issue's resolution? We are forced to disable the instrumentation. This issue was posted last month and is still not resolved; it would help to at least have a workaround before the new version release. > There was an issue releasing the new version of `opentelemetry-instrumentation-aiohttp-server==0.42b0`, we are working on it to get it resolved. Hi team, kindly help here with a resolution. We need to disable instrumentation for all affected services. We fixed it by using the previous version: ``` pip install opentelemetry-distro==0.41b0 opentelemetry-exporter-otlp==1.20.0 ``` After this `opentelemetry-bootstrap --action install` works fine. The above fix works, but it would be great to have a complete fix and get back to a reliable Python install. In 0.43b0 I'm seeing this same error but with the `opentelemetry-instrumentation-aws-lambda==0.43b0` package and possibly others. Confirmed still broken in 0.43b0 for `opentelemetry-instrumentation-aiohttp-server`. We have this issue because we can't release this package anymore to PyPI, we have reported it [here](https://github.com/pypi/support/issues/3353) already. I'm investigating a workaround for the time being, will report back here...
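On the tooling side, the patch above also changes the bootstrap package-list generator to skip any instrumentation that is not actually published on PyPI instead of emitting an uninstallable pin. A standalone sketch of that availability check is below; the package name is just the one from this issue, and the check assumes the `requests` library is installed.

```python
# Sketch: only emit a bootstrap entry for packages that PyPI can serve.
from requests import get


def is_published_on_pypi(package_name: str) -> bool:
    """Return True if PyPI has a release for the package, False otherwise."""
    try:
        response = get(f"https://pypi.org/pypi/{package_name}/json", timeout=10)
    except Exception:
        return False
    return response.status_code == 200


if __name__ == "__main__":
    # The package that could not be released, causing the failed 0.42b0 pin.
    print(is_published_on_pypi("opentelemetry-instrumentation-aiohttp-server"))
```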
2023-12-21T03:51:20
open-telemetry/opentelemetry-python-contrib
2132
open-telemetry__opentelemetry-python-contrib-2132
[ "2095" ]
3400ecea5d9e6c96c950cbf2183feeb3a84b3e91
diff --git a/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/package.py b/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/package.py --- a/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/package.py +++ b/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/package.py @@ -13,4 +13,4 @@ # limitations under the License. -_instruments = ("confluent-kafka >= 1.8.2, <= 2.2.0",) +_instruments = ("confluent-kafka >= 1.8.2, <= 2.3.0",) diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py @@ -65,7 +65,7 @@ "instrumentation": "opentelemetry-instrumentation-celery==0.44b0.dev", }, { - "library": "confluent-kafka >= 1.8.2, <= 2.2.0", + "library": "confluent-kafka >= 1.8.2, <= 2.3.0", "instrumentation": "opentelemetry-instrumentation-confluent-kafka==0.44b0.dev", }, {
Add support for confluent-kafka v2.3 **Is your feature request related to a problem?** We've recently upgraded our confluent-kafka python version to v2.3. But this version is not yet supported by the instrumentor. **Describe the solution you'd like** Confluent kafka version 2.3.x is supported by the instrumentor
2024-01-18T14:05:17
open-telemetry/opentelemetry-python-contrib
2257
open-telemetry__opentelemetry-python-contrib-2257
[ "2256" ]
efb327d4d7b9ba1bd283534e5afbfd4424acf3f9
diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py @@ -84,10 +84,6 @@ "library": "fastapi ~= 0.58", "instrumentation": "opentelemetry-instrumentation-fastapi==0.44b0.dev", }, - { - "library": "werkzeug < 3.0.0", - "instrumentation": "opentelemetry-instrumentation-flask==0.44b0.dev", - }, { "library": "flask >= 1.0", "instrumentation": "opentelemetry-instrumentation-flask==0.44b0.dev",
Remove werkzeug from Flask instrumentation dependencies
2024-02-22T23:24:20
open-telemetry/opentelemetry-python-contrib
2300
open-telemetry__opentelemetry-python-contrib-2300
[ "2055" ]
955b483f7a35edd8df20d3751676a1da6e151ed3
diff --git a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py --- a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py @@ -672,7 +672,9 @@ async def otel_receive(): if receive_span.is_recording(): if message["type"] == "websocket.receive": set_status_code(receive_span, 200) - receive_span.set_attribute("type", message["type"]) + receive_span.set_attribute( + "asgi.event.type", message["type"] + ) return message return otel_receive @@ -703,7 +705,7 @@ async def otel_send(message: dict[str, Any]): elif message["type"] == "websocket.send": set_status_code(server_span, 200) set_status_code(send_span, 200) - send_span.set_attribute("type", message["type"]) + send_span.set_attribute("asgi.event.type", message["type"]) if ( server_span.is_recording() and server_span.kind == trace.SpanKind.SERVER
diff --git a/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py b/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py --- a/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py +++ b/instrumentation/opentelemetry-instrumentation-asgi/tests/test_asgi_middleware.py @@ -268,20 +268,20 @@ def validate_outputs(self, outputs, error=None, modifiers=None): { "name": "GET / http receive", "kind": trace_api.SpanKind.INTERNAL, - "attributes": {"type": "http.request"}, + "attributes": {"asgi.event.type": "http.request"}, }, { "name": "GET / http send", "kind": trace_api.SpanKind.INTERNAL, "attributes": { SpanAttributes.HTTP_STATUS_CODE: 200, - "type": "http.response.start", + "asgi.event.type": "http.response.start", }, }, { "name": "GET / http send", "kind": trace_api.SpanKind.INTERNAL, - "attributes": {"type": "http.response.body"}, + "attributes": {"asgi.event.type": "http.response.body"}, }, { "name": "GET /", @@ -358,7 +358,7 @@ def add_more_body_spans(expected: list): more_body_span = { "name": "GET / http send", "kind": trace_api.SpanKind.INTERNAL, - "attributes": {"type": "http.response.body"}, + "attributes": {"asgi.event.type": "http.response.body"}, } extra_spans = [more_body_span] * 3 expected[2:2] = extra_spans @@ -396,12 +396,12 @@ def add_body_and_trailer_span(expected: list): body_span = { "name": "GET / http send", "kind": trace_api.SpanKind.INTERNAL, - "attributes": {"type": "http.response.body"}, + "attributes": {"asgi.event.type": "http.response.body"}, } trailer_span = { "name": "GET / http send", "kind": trace_api.SpanKind.INTERNAL, - "attributes": {"type": "http.response.trailers"}, + "attributes": {"asgi.event.type": "http.response.trailers"}, } expected[2:2] = [body_span] expected[4:4] = [trailer_span] * 2 @@ -582,18 +582,18 @@ def test_websocket(self): { "name": "/ websocket receive", "kind": trace_api.SpanKind.INTERNAL, - "attributes": {"type": "websocket.connect"}, + "attributes": {"asgi.event.type": "websocket.connect"}, }, { "name": "/ websocket send", "kind": trace_api.SpanKind.INTERNAL, - "attributes": {"type": "websocket.accept"}, + "attributes": {"asgi.event.type": "websocket.accept"}, }, { "name": "/ websocket receive", "kind": trace_api.SpanKind.INTERNAL, "attributes": { - "type": "websocket.receive", + "asgi.event.type": "websocket.receive", SpanAttributes.HTTP_STATUS_CODE: 200, }, }, @@ -601,14 +601,14 @@ def test_websocket(self): "name": "/ websocket send", "kind": trace_api.SpanKind.INTERNAL, "attributes": { - "type": "websocket.send", + "asgi.event.type": "websocket.send", SpanAttributes.HTTP_STATUS_CODE: 200, }, }, { "name": "/ websocket receive", "kind": trace_api.SpanKind.INTERNAL, - "attributes": {"type": "websocket.disconnect"}, + "attributes": {"asgi.event.type": "websocket.disconnect"}, }, { "name": "/",
ASGI instrumentation creates a 'type' attribute See https://github.com/open-telemetry/opentelemetry-python-contrib/blob/7166de673fa9dd7abc178bec3103d4adebb05e0d/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py#L681 This results in an attribute like `{'type': 'http.request.body'}`, which seems wrong. `type` is a _super_ generic key and I don't think leaking the details of the web framework spec (`http.request.body` is an ASGI specific string) is very helpful to users. Can we get rid of this?
Any answer on this? I still see this apparently undocumented `type` attribute everywhere and would love to understand why it's being included. @open-telemetry/opentelemetry-python-contrib-approvers I think we should not use any generic word like "type" in our instrumentations. Thanks @bogdandrutu, PR submitted #2145.
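The fix referenced above amounts to namespacing the ASGI event type instead of using the bare `type` key. A minimal sketch of the renamed attribute, assuming the standard ASGI message dict shape:

```python
from opentelemetry.trace import Span


def record_event_type(span: Span, message: dict) -> None:
    # message["type"] is an ASGI event name such as "http.response.body"
    # or "websocket.receive"; it is now stored under a namespaced key.
    if span.is_recording():
        span.set_attribute("asgi.event.type", message["type"])
```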
2024-02-27T19:16:12
open-telemetry/opentelemetry-python-contrib
2,302
open-telemetry__opentelemetry-python-contrib-2302
[ "2297" ]
3273d8c39f289e2c9350ae781406dfb99692bb76
diff --git a/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py b/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py --- a/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py @@ -245,22 +245,13 @@ def response_hook(span: Span, status: str, response_headers: List): from typing import Collection import flask +import importlib_metadata as metadata from packaging import version as package_version import opentelemetry.instrumentation.wsgi as otel_wsgi from opentelemetry import context, trace from opentelemetry.instrumentation.flask.package import _instruments from opentelemetry.instrumentation.flask.version import __version__ - -try: - flask_version = flask.__version__ -except AttributeError: - try: - from importlib import metadata - except ImportError: - import importlib_metadata as metadata - flask_version = metadata.version("flask") - from opentelemetry.instrumentation.instrumentor import BaseInstrumentor from opentelemetry.instrumentation.propagators import ( get_global_response_propagator, @@ -281,6 +272,8 @@ def response_hook(span: Span, status: str, response_headers: List): _excluded_urls_from_env = get_excluded_urls("FLASK") +flask_version = metadata.version("flask") + if package_version.parse(flask_version) >= package_version.parse("2.2.0"): def _request_ctx_ref() -> weakref.ReferenceType:
Flask instrumentation DeprecationWarning - The '__version__' attribute is deprecated and will be removed in Flask 3.1 **Describe your environment** With the just-released opentelemetry-instrumentation-flask 0.44b0 I get this from the flask instrumentation: ``` DeprecationWarning: The '__version__' attribute is deprecated and will be removed in Flask 3.1. Use feature detection or 'importlib.metadata.version("flask")' instead. ``` Problematic code is at https://github.com/open-telemetry/opentelemetry-python-contrib/blame/8daa8ad48108775d8e799a3abc3ed06f84b4c00e/instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py#L254-L263 and caused by #2013. Flask version is 3.0.2. **Steps to reproduce** Instrument Flask > 3 **What is the expected behavior?** No DeprecationWarning **What is the actual behavior?** DeprecationWarning **Additional context** N/A
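For reference, a minimal sketch of the lookup the warning recommends, assuming Python 3.8+ where `importlib.metadata` is in the standard library (the merged patch uses the `importlib_metadata` backport, which behaves the same here):

```python
from importlib.metadata import version

# Resolve the installed Flask version without touching the deprecated
# flask.__version__ attribute, so no DeprecationWarning is emitted.
flask_version = version("flask")  # e.g. "3.0.2"
```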
2024-02-27T22:09:03
open-telemetry/opentelemetry-python-contrib
2,342
open-telemetry__opentelemetry-python-contrib-2342
[ "2029", "2029" ]
1e0b11f07a2a87849d7ffa1f4f7db9f26b815c7e
diff --git a/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py b/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py --- a/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py @@ -113,10 +113,8 @@ def keys(self, carrier): class CeleryInstrumentor(BaseInstrumentor): - def __init__(self): - super().__init__() - self.metrics = None - self.task_id_to_start_time = {} + metrics = None + task_id_to_start_time = {} def instrumentation_dependencies(self) -> Collection[str]: return _instruments
diff --git a/instrumentation/opentelemetry-instrumentation-celery/tests/test_duplicate.py b/instrumentation/opentelemetry-instrumentation-celery/tests/test_duplicate.py new file mode 100644 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-celery/tests/test_duplicate.py @@ -0,0 +1,30 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +from opentelemetry.instrumentation.celery import CeleryInstrumentor + + +class TestUtils(unittest.TestCase): + def test_duplicate_instrumentaion(self): + first = CeleryInstrumentor() + first.instrument() + second = CeleryInstrumentor() + second.instrument() + CeleryInstrumentor().uninstrument() + self.assertIsNotNone(first.metrics) + self.assertIsNotNone(second.metrics) + self.assertEqual(first.task_id_to_start_time, {}) + self.assertEqual(second.task_id_to_start_time, {})
Celery Instrumentation TypeError **Describe your environment** ``` celery = "5.3.4" opentelemetry-sdk = "1.20" opentelemetry-propagator-b3 = "1.20" opentelemetry-exporter-jaeger = "1.20" opentelemetry-instrumentation-django = "0.41b0" opentelemetry-instrumentation-asgi = "0.41b0" opentelemetry-instrumentation-celery = "0.41b0" ``` **Steps to reproduce** ```py $ cat web2/__init__.py import logging import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mycoolproject.settings") # NOQA from kombu import Queue from celery import Celery, signals from django.conf import settings from opentelemetry.instrumentation.celery import CeleryInstrumentor from opentelemetry.exporter.jaeger.thrift import JaegerExporter from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter from opentelemetry import trace from opentelemetry.sdk.trace import TracerProvider logger = logging.getLogger(__name__) DISABLE_TRACING = os.getenv('DISABLE_TRACING', False) == 'true' TRACING_COLLECTOR_ENDPOINT = os.getenv('TRACING_COLLECTOR_ENDPOINT', 'jaeger-collector.monitoring') TRACING_COLLECTOR_PORT = os.getenv('TRACING_COLLECTOR_PORT', '14268') @signals.worker_process_init.connect(weak=False) def worker_process_init(*args, **kwargs): init_celery_tracing() def init_celery_tracing(): CeleryInstrumentor().instrument() trace.set_tracer_provider(TracerProvider()) if DISABLE_TRACING: span_processor = BatchSpanProcessor(ConsoleSpanExporter()) else: jaeger_exporter = JaegerExporter( collector_endpoint=f'http://{TRACING_COLLECTOR_ENDPOINT}:{TRACING_COLLECTOR_PORT}/api/traces?format=jaeger.thrift', ) span_processor = BatchSpanProcessor(jaeger_exporter) trace.get_tracer_provider().add_span_processor(span_processor) # this stops celery from hijacking the django logging config @signals.setup_logging.connect def setup_celery_logging(**kwargs): pass tasks_app = Celery('web', broker=settings.CELERY_BROKER_URL) tasks_app.conf.task_queues = ( Queue('default2', routing_key='task.#'), ) tasks_app.conf.task_default_queue = 'default2' tasks_app.conf.task_default_exchange_type = 'topic' tasks_app.conf.task_default_routing_key = 'task.default2' tasks_app.conf.result_backend = 'redis://redis:6379/0' tasks_app.conf.update( task_routes=({ 'web2.tasks.itsatest': { 'queue': 'default2', 'routing_key': 'task.default2.itsatest', }, },), imports=( 'web2.tasks', ), worker_prefetch_multiplier=5, task_soft_time_limit=300, task_time_limit=360, worker_max_tasks_per_child=100, broker_transport_options={ 'confirm_publish': True } ) ``` ```py $ cat web2/tasks.py from web2 import tasks_app @tasks_app.task(ignore_result=True) def itsatest(): print('itsatest') return 'itsatest' ``` ```sh poetry run celery --app web2.tasks worker ``` ``` from web2.tasks import * itsatest.delay() ``` **What is the expected behavior?** No errors when executing tasks **What is the actual behavior?** ``` [2023-11-01 20:14:58 +0000] [416] [celery.worker.strategy] [INFO] Task web2.tasks.itsatest[50b55ccd-be08-4f5c-82ee-538b363498ec] received [2023-11-01 20:14:58 +0000] [416] [celery.pool] [DEBUG] TaskPool: Apply <function fast_trace_task at 0xffff8c758a60> (args:('web2.tasks.itsatest', '50b55ccd-be08-4f5c-82ee-538b363498ec', {'lang': 'py', 'task': 'web2.tasks.itsatest', 'id': '50b55ccd-be08-4f5c-82ee-538b363498ec', 'shadow': None, 'eta': None, 'expires': None, 'group': None, 'group_index': None, 'retries': 0, 'timelimit': [None, None], 'root_id': '50b55ccd-be08-4f5c-82ee-538b363498ec', 'parent_id': None, 'argsrepr': '()', 'kwargsrepr': '{}', 'origin': 
'gen1251@4676bacd79b5', 'ignore_result': True, 'stamped_headers': None, 'stamps': {}, 'properties': {'content_type': 'application/json', 'content_encoding': 'utf-8', 'application_headers': {'lang': 'py', 'task': 'web2.tasks.itsatest', 'id': '50b55ccd-be08-4f5c-82ee-538b363498ec', 'shadow': None, 'eta': None, 'expires': None, 'group': None, 'group_index': None, 'retries': 0, 'timelimit': [None, None], 'root_id': '50b55ccd-be08-4f5c-82ee-538b363498ec', 'parent_id': None, 'argsrepr': '()', 'kwargsrepr': '{}', 'origin': 'gen1251@4676bacd79b5', 'ignore_result': True, 'stamped_headers': None, 'stamps': {}}, 'delivery_mode':... kwargs:{}) [2023-11-01 20:14:58 +0000] [518] [opentelemetry.instrumentation.celery] [DEBUG] prerun signal start task_id=50b55ccd-be08-4f5c-82ee-538b363498ec itsatest [2023-11-01 20:14:58 +0000] [518] [celery.app.trace] [INFO] Task web2.tasks.itsatest[50b55ccd-be08-4f5c-82ee-538b363498ec] succeeded in 0.013544000015826896s: 'itsatest' [2023-11-01 20:14:58 +0000] [518] [opentelemetry.instrumentation.celery] [DEBUG] postrun signal task_id=50b55ccd-be08-4f5c-82ee-538b363498ec [2023-11-01 20:14:58 +0000] [518] [celery.utils.dispatch.signal] [ERROR] Signal handler <bound method CeleryInstrumentor._trace_postrun of <opentelemetry.instrumentation.celery.CeleryInstrumentor object at 0xffff44692110>> raised: TypeError("'NoneType' object is not subscriptable") Traceback (most recent call last): File "/opt/.venv/web-tq7C0_9c-py3.10/lib/python3.10/site-packages/celery/utils/dispatch/signal.py", line 276, in send response = receiver(signal=self, sender=sender, **named) File "/opt/.venv/web-tq7C0_9c-py3.10/lib/python3.10/site-packages/opentelemetry/instrumentation/celery/__init__.py", line 195, in _trace_postrun self._record_histograms(task_id, labels) File "/opt/.venv/web-tq7C0_9c-py3.10/lib/python3.10/site-packages/opentelemetry/instrumentation/celery/__init__.py", line 309, in _record_histograms self.metrics["flower.task.runtime.seconds"].record( TypeError: 'NoneType' object is not subscriptable ``` **Additional context** It looks like this functionality was introduced fairly recently in #1679 by @shalevr and @Akochavi
Validated reproduction steps: I've been able to reproduce this behavior roughly following the steps above. Take note that python 3.10 is expected. @dacox @josephmcasey ~can you try using `worker_ready` signal instead?~ UPD: `worker_ready` is not the right signal to instrument Celery, unfortunately. I think this is a combination of 2 issues: * double initialization with the default instrumentation setup * the fact that instance level vars are reset on `CeleryInstrumentor.__init__()` See below. It looks like with `worker_process_init` `CeleryInstrumentor.__init__()` is called again and that effectively resets `self.metrics` to `None`. Do you think it is fine as a possible quick workaround for now? A proper one would be on the CeleryInstrumentor, most likely. Note: I even have to check specifically for `otel.instrumentation.celery.CeleryInstrumentor().is_instrumented_by_opentelemetry` not to see warnings on instrumenting twice. Any idea how to overcome this related issue? FYI: A workaround with adding a unique enough `dispatch_uid` to the `worker_process_init.connect()` call worked for my setup. @mikek , apologies for the delay in response, events in my personal life required me to be away from the computer. I'm unable to provide my full implementation, but the error provided above was resolvable by instantiating and controlling the meter provider passed into the instrumentor. ```python # Sets the global default meter provider metrics.set_meter_provider(provider) # Creates a meter from the global meter provider meter = metrics.get_meter("meter.celery") CeleryInstrumentor().instrument(tracer_provider=tracer_provider, meter_provider=meter) ``` I would concur with you assessment that this instrumentor has been done incorrectly and needs to be fixed, but it is good to know there are adequate workarounds. https://github.com/open-telemetry/opentelemetry-python-contrib/blob/7c12ad9844ac179e3f6a493491707a9bafd06f6b/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py#L118 https://github.com/open-telemetry/opentelemetry-python-contrib/blob/7c12ad9844ac179e3f6a493491707a9bafd06f6b/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py#L143 @mikek @josephmcasey sorry for the delay in response! I'm a little unclear on what the workaround you're describing is 😅 might this be a problem with a similar cause to https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1790? @mikek @josephmcasey If either of you have a moment I'd really appreciate some more information on a workaround for this. My knowledge of OTEL isn't great and I'm struggling to follow the suggestions mentioned above. @dacox @paddyobrien I've tried, but was not able to reproduce this with the original setup in this issue. However, all I had to do in my setup (I am not able to provide it now) was this: ```diff - @worker_process_init.connect(weak=False) + @worker_process_init.connect(weak=False, dispatch_uid=f"init_celery_tracing p:{os.getpid()} t:{threading.get_native_id()}") def init_celery_tracing(*args, **kwargs): ... ``` There might be a better way, of course. Thank you @mikek unfortunately this didn't help prevent the error for me. None of the above solutions worked for me. 
I was able to get it working by doing: ```python meter_provider = MeterProvider(resource=resource) celery_instrumentor = CeleryInstrumentor() celery_instrumentor.create_celery_metrics(meter_provider.get_meter('meter.celery')) celery_instrumentor.instrument() ``` Not sure if it's a safe approach, but the error is gone. I am on: ``` opentelemetry-api==1.23.0 opentelemetry-sdk==1.23.0 opentelemetry-instrumentation-celery==0.44b0 ``` Let me know if that works for you, @paddyobrien.
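A minimal guard against the double initialization described above, using the `is_instrumented_by_opentelemetry` flag mentioned in the comments; this is a hypothetical workaround sketch, not the upstream fix (which moves `metrics` and `task_id_to_start_time` to class attributes):

```python
from celery.signals import worker_process_init

from opentelemetry.instrumentation.celery import CeleryInstrumentor


@worker_process_init.connect(weak=False)
def init_celery_tracing(*args, **kwargs):
    instrumentor = CeleryInstrumentor()
    # BaseInstrumentor behaves as a singleton, so the flag survives
    # repeated worker_process_init signals within the same process.
    if not instrumentor.is_instrumented_by_opentelemetry:
        instrumentor.instrument()
```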
2024-03-11T19:38:04
open-telemetry/opentelemetry-python-contrib
2,355
open-telemetry__opentelemetry-python-contrib-2355
[ "2150" ]
ada27842bd4596fb3adb7f5e952aafd114b6b78a
diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py --- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py @@ -245,9 +245,11 @@ def wrapper(wrapped, _, args, kwargs): if method: attributes["elasticsearch.method"] = method if body: - attributes[SpanAttributes.DB_STATEMENT] = sanitize_body( - body - ) + # Don't set db.statement for bulk requests, as it can be very large + if isinstance(body, dict): + attributes[ + SpanAttributes.DB_STATEMENT + ] = sanitize_body(body) if params: attributes["elasticsearch.params"] = str(params) if doc_id:
diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/test_elasticsearch.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/test_elasticsearch.py --- a/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/test_elasticsearch.py +++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/test_elasticsearch.py @@ -51,6 +51,8 @@ Article = helpers.Article +# pylint: disable=too-many-public-methods + @mock.patch( "elasticsearch.connection.http_urllib3.Urllib3HttpConnection.perform_request" @@ -486,3 +488,35 @@ def test_body_sanitization(self, _): sanitize_body(json.dumps(sanitization_queries.interval_query)), str(sanitization_queries.interval_query_sanitized), ) + + def test_bulk(self, request_mock): + request_mock.return_value = (1, {}, "") + + es = Elasticsearch() + es.bulk( + [ + { + "_op_type": "index", + "_index": "sw", + "_doc_type": "_doc", + "_id": 1, + "doc": {"name": "adam"}, + }, + { + "_op_type": "index", + "_index": "sw", + "_doc_type": "_doc", + "_id": 1, + "doc": {"name": "adam"}, + }, + ] + ) + + spans_list = self.get_finished_spans() + self.assertEqual(len(spans_list), 1) + span = spans_list[0] + + # Check version and name in span's instrumentation info + self.assertEqualSpanInstrumentationInfo( + span, opentelemetry.instrumentation.elasticsearch + )
fix: elasticsearch `bulk` API produces lists # Description Fixes an issue seen in production where the elasticsearch bulk API would end up sending a list through this code path and not a dictionary. For example: ``` File \"/code/platform_be/core/logic/ingest.py\", line 378, in update_source_doc_version_history bulk(es_client, bulk_es_updates) File \"/opt/python/lib/python3.10/site-packages/elasticsearch/helpers/actions.py\", line 521, in bulk for ok, item in streaming_bulk( File \"/opt/python/lib/python3.10/site-packages/elasticsearch/helpers/actions.py\", line 436, in streaming_bulk for data, (ok, info) in zip( File \"/opt/python/lib/python3.10/site-packages/elasticsearch/helpers/actions.py\", line 339, in _process_bulk_chunk resp = client.bulk(*args, operations=bulk_actions, **kwargs) # type: ignore[arg-type] File \"/opt/python/lib/python3.10/site-packages/elasticsearch/_sync/client/utils.py\", line 414, in wrapped return api(*args, **kwargs) File \"/opt/python/lib/python3.10/site-packages/elasticsearch/_sync/client/__init__.py\", line 704, in bulk return self.perform_request( # type: ignore[return-value] File \"/opt/python/lib/python3.10/site-packages/elasticsearch/_sync/client/_base.py\", line 285, in perform_request meta, resp_body = self.transport.perform_request( File \"/opt/python/lib/python3.10/site-packages/opentelemetry/instrumentation/elasticsearch/__init__.py\", line 242, in wrapper attributes[SpanAttributes.DB_STATEMENT] = sanitize_body( File \"/opt/python/lib/python3.10/site-packages/opentelemetry/instrumentation/elasticsearch/utils.py\", line 58, in sanitize_body flatten_body = _flatten_dict(body) File \"/opt/python/lib/python3.10/site-packages/opentelemetry/instrumentation/elasticsearch/utils.py\", line 31, in _flatten_dict for k, v in d.items(): AttributeError: 'list' object has no attribute 'items'" ``` ## Type of change Please delete options that are not relevant. - [x] Bug fix (non-breaking change which fixes an issue) # How Has This Been Tested? Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration - [x] Unit tests # Does This PR Require a Core Repo Change? - [x] No. # Checklist: See [contributing.md](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/CONTRIBUTING.md) for styleguide, changelog guidelines, and more. - [ ] Followed the style guidelines of this project - [ ] Changelogs have been updated - [ ] Unit tests have been added - [ ] Documentation has been updated
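A minimal reproduction sketch of the code path in the traceback above; the index name and documents are illustrative and a local cluster is assumed. `helpers.bulk` serializes the actions and calls `client.bulk()`, so the instrumented `perform_request` receives a list-typed body rather than a dict:

```python
from elasticsearch import Elasticsearch, helpers

es = Elasticsearch("http://localhost:9200")  # assumed local cluster

actions = [
    {"_op_type": "index", "_index": "sw", "_id": 1, "name": "adam"},
    {"_op_type": "index", "_index": "sw", "_id": 2, "name": "eve"},
]

# Each chunk of actions reaches the transport as a list, which is what
# tripped sanitize_body()/_flatten_dict() in the instrumentation.
helpers.bulk(es, actions)
```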
Hi @lzchen / @ocelotl / @shalevr -- Is there anything I can do to help move this forward? This is preventing us from using ES instrumentation in our service. Thanks! Hello! I maintain the elasticsearch and elastic-transport packages at Elastic (and am actually working on native instrumentation - see https://github.com/elastic/elasticsearch-py/issues/2435). That said, I'm new to OpenTelemetry and I've never contributed to opentelemtry-python-contrib. While most Elasticsearch APIs accept dictionaries, indeed the [bulk API](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html) accepts a list of dictionaries, as you found out. However, the [Semantic Conventions for Elasticsearch](https://opentelemetry.io/docs/specs/semconv/database/elasticsearch/) define `db.statement` as follows: > The request body for a [search-type query](https://www.elastic.co/guide/en/elasticsearch/reference/current/search.html), as a json string. This makes sense. The bulk API can be used to send arbitrarily large amount of data, and you would not want that to be part of your span attributes. I believe the correct fix here is to only set `db.statement` when one of those APIs is used: Search, Async search, Multi search, EQL search, ES|QL query, Terms enum, Search template, Multi search template or Render search template. This isn't very easy to do in opentelemetry-python-contrib because the client does not pass the name of the endpoint to the transport (yet, I'm working on it). Which means it has to be guessed from the URL. I guess an easy way out here could also be to *not* set `db.statement` for bulk requests, which would solve your immediate issue, but would still not be compliant. > I guess an easy way out here could also be to _not_ set `db.statement` for bulk requests, which would solve your immediate issue, but would still not be compliant. Thanks for the context @pquentin -- I assume that the path forward for us would be to either wait for your native instrumentation or to do a stopgap fix here. From what you described it sounds like the stopgap is to just not generate any data if `db.statement` happens to be a list. This feels a bit like a heuristic, but if you think it's OK stopgap I'm happy to implement this. Native instrumentation will only be available with a future version of elasticsearch-py, so I think you want to keep the existing instrumentation for older versions? (Though it would be nice to automatically disable for newer versions maybe? But it would be a breaking change since the native implementation follows the semantic convention which probably did not exist when this repo was started.) Regarding the fix, the proper fix is definitely inspecting the URL to figure out if the request is a search request, but I also offered the stopgap as way to fix the most pressing issue with less effort. It's really up to you to decide what you implement! (But if you do ping me in the pull request, I'll be happy to take a look.) Made this change much simpler and ran tests with `CORE_REPO_SHA=a1253585f66d63e7c05a19f070f3bfe0ab6460c1 tox3 -e py311-test-instrumentation-elasticsearch-6` -- @pquentin or @xrmx if you have ideas on how to test this I'm all ears, or is this good enough? Ok, I'll look into adding a simple test
2024-03-19T13:40:51
open-telemetry/opentelemetry-python-contrib
2,372
open-telemetry__opentelemetry-python-contrib-2372
[ "2371" ]
99678ccd3a6ab5afe2661d00a27a6cdf3391c145
diff --git a/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py b/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py --- a/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py @@ -340,17 +340,17 @@ def _instrumented_lambda_handler_call( # noqa pylint: disable=too-many-branches if span.is_recording(): lambda_context = args[1] # NOTE: The specs mention an exception here, allowing the - # `ResourceAttributes.FAAS_ID` attribute to be set as a span + # `SpanAttributes.CLOUD_RESOURCE_ID` attribute to be set as a span # attribute instead of a resource attribute. # # See more: - # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/faas.md#example + # https://github.com/open-telemetry/semantic-conventions/blob/main/docs/faas/aws-lambda.md#resource-detector span.set_attribute( - ResourceAttributes.FAAS_ID, + SpanAttributes.CLOUD_RESOURCE_ID, lambda_context.invoked_function_arn, ) span.set_attribute( - SpanAttributes.FAAS_EXECUTION, + SpanAttributes.FAAS_INVOCATION_ID, lambda_context.aws_request_id, )
diff --git a/instrumentation/opentelemetry-instrumentation-aws-lambda/tests/test_aws_lambda_instrumentation_manual.py b/instrumentation/opentelemetry-instrumentation-aws-lambda/tests/test_aws_lambda_instrumentation_manual.py --- a/instrumentation/opentelemetry-instrumentation-aws-lambda/tests/test_aws_lambda_instrumentation_manual.py +++ b/instrumentation/opentelemetry-instrumentation-aws-lambda/tests/test_aws_lambda_instrumentation_manual.py @@ -145,8 +145,8 @@ def test_active_tracing(self): self.assertSpanHasAttributes( span, { - ResourceAttributes.FAAS_ID: MOCK_LAMBDA_CONTEXT.invoked_function_arn, - SpanAttributes.FAAS_EXECUTION: MOCK_LAMBDA_CONTEXT.aws_request_id, + SpanAttributes.CLOUD_RESOURCE_ID: MOCK_LAMBDA_CONTEXT.invoked_function_arn, + SpanAttributes.FAAS_INVOCATION_ID: MOCK_LAMBDA_CONTEXT.aws_request_id, ResourceAttributes.CLOUD_ACCOUNT_ID: MOCK_LAMBDA_CONTEXT.invoked_function_arn.split( ":" )[
Change AwsLambdaInstrumentor span attribute settings to match faas semconv **Is your feature request related to a problem?** No. **Describe the solution you'd like** Python AwsLambdaInstrumentor attribute settings are not consistent with the [latest faas semconv](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/faas/aws-lambda.md#all-triggers). Specifically, I think these instrumentor changes are needed: 1. `ResourceAttributes.FAAS_ID` --> `ResourceAttributes.CLOUD_RESOURCE_ID` 2. `SpanAttributes.FAAS_EXECUTION` --> `SpanAttributes.FAAS_INVOCATION_ID` For this solution, change [these set_attribute calls](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/37aba928d45713842941c7efc992726a79ea7d8a/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py#L342-L355). Make special note in the release notes that this is breaking. This is not additive like #2368. **Describe alternatives you've considered** I've thought about and brought up 2 other ideas on Slack: Idea (A) The Python AwsLambdaInstrumentor upgrades to a newer semconv version. Would it need to be an opt-in for users like for http instrumentation? (i.e. [this recent issue](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/2351)) Would this need to be presented to an Otel Spec project group? (i.e. [this issue](https://github.com/open-telemetry/opentelemetry-specification/issues/3042)) Idea (B) The Python AwsLambdaInstrumentor gets request/response hooks implemented like in NodeJS. This would allow users to add any attributes they like. I see this was [requested](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1140), [discussed in Otel Spec](https://github.com/open-telemetry/opentelemetry-specification/issues/3042#issuecomment-1384964399), [PR'd](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1476) (rejected), discussed at [faas project sig](https://docs.google.com/document/d/187XYoQcXQ9JxS_5v2wvZ0NEysaJ02xoOYNXj08pT0zc/edit#heading=h.w0bc5a8eac74), alternatively [PR'd as a SpanProcessor requirement](https://github.com/open-telemetry/opentelemetry-specification/pull/3189) (rejected), and brought up again in [Slack](https://cloud-native.slack.com/archives/C04HVBETC9Z/p1684317968674529). I haven't found any other updates since May 2023. In [this otel-faas thread](https://cloud-native.slack.com/archives/C04HVBETC9Z/p1711387797268909), it was mentioned that the faas attributes are not stable so a breaking change would be ok. Therefore we could implement the quick, in-place change. **Additional context** 1. https://cloud-native.slack.com/archives/C01PD4HUVBL/p1711140906040599 2. https://cloud-native.slack.com/archives/C04HVBETC9Z/p1711387797268909
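A short sketch of the in-place change the issue asks for, mirroring the span attributes set in the diff above; the `SpanAttributes` constants are assumed to be available in the semantic-conventions release the instrumentation depends on:

```python
from opentelemetry.semconv.trace import SpanAttributes


def _set_faas_span_attributes(span, lambda_context):
    # cloud.resource_id replaces the old faas.id resource attribute
    span.set_attribute(
        SpanAttributes.CLOUD_RESOURCE_ID, lambda_context.invoked_function_arn
    )
    # faas.invocation_id replaces the old faas.execution span attribute
    span.set_attribute(
        SpanAttributes.FAAS_INVOCATION_ID, lambda_context.aws_request_id
    )
```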
2024-03-26T18:00:51
open-telemetry/opentelemetry-python-contrib
2,397
open-telemetry__opentelemetry-python-contrib-2397
[ "1432" ]
2317adcc3478fe0d886d22b05e51d71e4daabfbd
diff --git a/instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/__init__.py b/instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/__init__.py --- a/instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/__init__.py @@ -77,6 +77,15 @@ def consume_hook(span: Span, body: bytes, properties: BasicProperties): PikaInstrumentor.instrument_channel(channel, publish_hook=publish_hook, consume_hook=consume_hook) +Consumer Instrumentation +------------------------ +For consumer instrumentation, pika supports two consuming modes: + +* Consumers using the `basic_consume` method which accepts a callback. This is supported for global instrumentation + (`PikaInstrumentor().instrument()`) as well channel specific instrumentation (`PikaInstrumentor().instrument_channel(channel)`) +* Consumers using the `consume` method which returns a generator over messages. This is supported for global + instrumentations only (`PikaInstrumentor().instrument()`) + API --- """ diff --git a/instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/pika_instrumentor.py b/instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/pika_instrumentor.py --- a/instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/pika_instrumentor.py +++ b/instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/pika_instrumentor.py @@ -20,7 +20,10 @@ import wrapt from packaging import version from pika.adapters import BlockingConnection -from pika.adapters.blocking_connection import BlockingChannel +from pika.adapters.blocking_connection import ( + BlockingChannel, + _QueueConsumerGeneratorInfo, +) from opentelemetry import trace from opentelemetry.instrumentation.instrumentor import BaseInstrumentor @@ -191,6 +194,24 @@ def wrapper(wrapped, instance, args, kwargs): wrapt.wrap_function_wrapper(channel, "basic_consume", wrapper) + @staticmethod + def _decorate_queue_consumer_generator( + tracer_provider: Optional[TracerProvider], + consume_hook: utils.HookT = utils.dummy_callback, + ) -> None: + tracer = trace.get_tracer(__name__, __version__, tracer_provider) + + def wrapper(wrapped, instance, args, kwargs): + res = wrapped(*args, **kwargs) + instance.pending_events = utils.ReadyMessagesDequeProxy( + instance.pending_events, instance, tracer, consume_hook + ) + return res + + wrapt.wrap_function_wrapper( + _QueueConsumerGeneratorInfo, "__init__", wrapper + ) + def _instrument(self, **kwargs: Dict[str, Any]) -> None: tracer_provider: TracerProvider = kwargs.get("tracer_provider", None) publish_hook: utils.HookT = kwargs.get( @@ -207,10 +228,15 @@ def _instrument(self, **kwargs: Dict[str, Any]) -> None: consume_hook=consume_hook, ) + self._decorate_queue_consumer_generator( + tracer_provider, consume_hook=consume_hook + ) + def _uninstrument(self, **kwargs: Dict[str, Any]) -> None: if hasattr(self, "__opentelemetry_tracer_provider"): delattr(self, "__opentelemetry_tracer_provider") unwrap(BlockingConnection, "channel") + unwrap(_QueueConsumerGeneratorInfo, "__init__") def instrumentation_dependencies(self) -> Collection[str]: return _instruments diff --git a/instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/utils.py 
b/instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/utils.py --- a/instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/utils.py +++ b/instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/utils.py @@ -1,8 +1,13 @@ from logging import getLogger from typing import Any, Callable, List, Optional +from pika.adapters.blocking_connection import ( + _ConsumerDeliveryEvt, + _QueueConsumerGeneratorInfo, +) from pika.channel import Channel from pika.spec import Basic, BasicProperties +from wrapt import ObjectProxy from opentelemetry import context, propagate, trace from opentelemetry.instrumentation.utils import is_instrumentation_enabled @@ -128,7 +133,7 @@ def decorated_function( def _get_span( tracer: Tracer, - channel: Channel, + channel: Optional[Channel], properties: BasicProperties, task_name: str, destination: str, @@ -157,7 +162,7 @@ def _generate_span_name( def _enrich_span( span: Span, - channel: Channel, + channel: Optional[Channel], properties: BasicProperties, task_destination: str, operation: Optional[MessagingOperationValues] = None, @@ -176,6 +181,8 @@ def _enrich_span( span.set_attribute( SpanAttributes.MESSAGING_CONVERSATION_ID, properties.correlation_id ) + if not channel: + return if not hasattr(channel.connection, "params"): span.set_attribute( SpanAttributes.NET_PEER_NAME, channel.connection._impl.params.host @@ -190,3 +197,75 @@ def _enrich_span( span.set_attribute( SpanAttributes.NET_PEER_PORT, channel.connection.params.port ) + + +# pylint:disable=abstract-method +class ReadyMessagesDequeProxy(ObjectProxy): + def __init__( + self, + wrapped, + queue_consumer_generator: _QueueConsumerGeneratorInfo, + tracer: Optional[Tracer], + consume_hook: HookT = dummy_callback, + ): + super().__init__(wrapped) + self._self_active_token = None + self._self_tracer = tracer + self._self_consume_hook = consume_hook + self._self_queue_consumer_generator = queue_consumer_generator + + def popleft(self, *args, **kwargs): + try: + # end active context if exists + if self._self_active_token: + context.detach(self._self_active_token) + except Exception as inst_exception: # pylint: disable=W0703 + _LOG.exception(inst_exception) + + evt = self.__wrapped__.popleft(*args, **kwargs) + + try: + # If a new message was received, create a span and set as active context + if isinstance(evt, _ConsumerDeliveryEvt): + method = evt.method + properties = evt.properties + if not properties: + properties = BasicProperties(headers={}) + if properties.headers is None: + properties.headers = {} + ctx = propagate.extract( + properties.headers, getter=_pika_getter + ) + if not ctx: + ctx = context.get_current() + message_ctx_token = context.attach(ctx) + span = _get_span( + self._self_tracer, + None, + properties, + destination=method.exchange + if method.exchange + else method.routing_key, + span_kind=SpanKind.CONSUMER, + task_name=self._self_queue_consumer_generator.consumer_tag, + operation=MessagingOperationValues.RECEIVE, + ) + try: + context.detach(message_ctx_token) + self._self_active_token = context.attach( + trace.set_span_in_context(span) + ) + self._self_consume_hook(span, evt.body, properties) + except Exception as hook_exception: # pylint: disable=W0703 + _LOG.exception(hook_exception) + finally: + # We must end the span here, because the next place we can hook + # is not the end of the user code, but only when the next message + # arrives. 
we still set this span's context as the active context + # so spans created by user code that handles this message will be + # children of this one. + span.end() + except Exception as inst_exception: # pylint: disable=W0703 + _LOG.exception(inst_exception) + + return evt
diff --git a/instrumentation/opentelemetry-instrumentation-pika/tests/test_pika_instrumentation.py b/instrumentation/opentelemetry-instrumentation-pika/tests/test_pika_instrumentation.py --- a/instrumentation/opentelemetry-instrumentation-pika/tests/test_pika_instrumentation.py +++ b/instrumentation/opentelemetry-instrumentation-pika/tests/test_pika_instrumentation.py @@ -14,6 +14,7 @@ from unittest import TestCase, mock from pika.adapters import BlockingConnection +from pika.adapters.blocking_connection import _QueueConsumerGeneratorInfo from pika.channel import Channel from wrapt import BoundFunctionWrapper @@ -21,7 +22,10 @@ from opentelemetry.instrumentation.pika.pika_instrumentor import ( _consumer_callback_attribute_name, ) -from opentelemetry.instrumentation.pika.utils import dummy_callback +from opentelemetry.instrumentation.pika.utils import ( + ReadyMessagesDequeProxy, + dummy_callback, +) from opentelemetry.trace import Tracer @@ -40,13 +44,23 @@ def test_instrument_api(self) -> None: self.assertTrue( isinstance(BlockingConnection.channel, BoundFunctionWrapper) ) + self.assertTrue( + isinstance( + _QueueConsumerGeneratorInfo.__init__, BoundFunctionWrapper + ) + ) assert hasattr( instrumentation, "__opentelemetry_tracer_provider" ), "Tracer not stored for the object!" - instrumentation.uninstrument(channel=self.channel) + instrumentation.uninstrument() self.assertFalse( isinstance(BlockingConnection.channel, BoundFunctionWrapper) ) + self.assertFalse( + isinstance( + _QueueConsumerGeneratorInfo.__init__, BoundFunctionWrapper + ) + ) @mock.patch( "opentelemetry.instrumentation.pika.PikaInstrumentor._instrument_channel_functions" @@ -57,7 +71,7 @@ def test_instrument_api(self) -> None: @mock.patch( "opentelemetry.instrumentation.pika.PikaInstrumentor._instrument_blocking_channel_consumers" ) - def test_instrument( + def test_instrument_channel( self, instrument_blocking_channel_consumers: mock.MagicMock, instrument_basic_consume: mock.MagicMock, @@ -110,6 +124,23 @@ def test_instrument_basic_publish( self.channel.basic_publish, decorate_basic_publish.return_value ) + def test_instrument_queue_consumer_generator(self) -> None: + instrumentation = PikaInstrumentor() + instrumentation.instrument() + generator_info = _QueueConsumerGeneratorInfo( + params=("queue", False, False), consumer_tag="tag" + ) + self.assertTrue( + isinstance(generator_info.pending_events, ReadyMessagesDequeProxy) + ) + instrumentation.uninstrument() + generator_info = _QueueConsumerGeneratorInfo( + params=("queue", False, False), consumer_tag="tag" + ) + self.assertFalse( + isinstance(generator_info.pending_events, ReadyMessagesDequeProxy) + ) + def test_uninstrument_channel_functions(self) -> None: original_function = self.channel.basic_publish self.channel.basic_publish = mock.MagicMock() diff --git a/instrumentation/opentelemetry-instrumentation-pika/tests/test_utils.py b/instrumentation/opentelemetry-instrumentation-pika/tests/test_utils.py --- a/instrumentation/opentelemetry-instrumentation-pika/tests/test_utils.py +++ b/instrumentation/opentelemetry-instrumentation-pika/tests/test_utils.py @@ -11,8 +11,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import collections from unittest import TestCase, mock +from pika.adapters.blocking_connection import ( + _ConsumerCancellationEvt, + _ConsumerDeliveryEvt, + _QueueConsumerGeneratorInfo, +) from pika.channel import Channel from pika.spec import Basic, BasicProperties @@ -448,3 +454,113 @@ def test_decorate_basic_publish_when_span_is_not_recording( exchange_name, routing_key, mock_body, properties, False ) self.assertEqual(retval, callback.return_value) + + # pylint: disable=too-many-statements + @mock.patch("opentelemetry.instrumentation.pika.utils._get_span") + @mock.patch("opentelemetry.propagate.extract") + @mock.patch("opentelemetry.context.detach") + @mock.patch("opentelemetry.context.attach") + @mock.patch("opentelemetry.context.get_current") + def test_decorate_deque_proxy( + self, + context_get_current: mock.MagicMock, + context_attach: mock.MagicMock, + context_detach: mock.MagicMock, + extract: mock.MagicMock, + get_span: mock.MagicMock, + ) -> None: + returned_span = mock.MagicMock() + get_span.return_value = returned_span + consume_hook = mock.MagicMock() + tracer = mock.MagicMock() + generator_info = mock.MagicMock( + spec=_QueueConsumerGeneratorInfo, + pending_events=mock.MagicMock(spec=collections.deque), + consumer_tag="mock_task_name", + ) + method = mock.MagicMock(spec=Basic.Deliver) + method.exchange = "test_exchange" + properties = mock.MagicMock() + evt = _ConsumerDeliveryEvt(method, properties, b"mock_body") + generator_info.pending_events.popleft.return_value = evt + proxy = utils.ReadyMessagesDequeProxy( + generator_info.pending_events, generator_info, tracer, consume_hook + ) + + # First call (no detach cleanup) + res = proxy.popleft() + self.assertEqual(res, evt) + generator_info.pending_events.popleft.assert_called_once() + extract.assert_called_once_with( + properties.headers, getter=utils._pika_getter + ) + context_get_current.assert_called_once() + self.assertEqual(context_attach.call_count, 2) + self.assertEqual(context_detach.call_count, 1) + get_span.assert_called_once_with( + tracer, + None, + properties, + destination=method.exchange, + span_kind=SpanKind.CONSUMER, + task_name=generator_info.consumer_tag, + operation=MessagingOperationValues.RECEIVE, + ) + consume_hook.assert_called_once() + returned_span.end.assert_called_once() + + generator_info.pending_events.reset_mock() + extract.reset_mock() + context_get_current.reset_mock() + get_span.reset_mock() + context_attach.reset_mock() + context_detach.reset_mock() + returned_span.end.reset_mock() + consume_hook.reset_mock() + + # Second call (has detach cleanup) + res = proxy.popleft() + self.assertEqual(res, evt) + generator_info.pending_events.popleft.assert_called_once() + extract.assert_called_once_with( + properties.headers, getter=utils._pika_getter + ) + context_get_current.assert_called_once() + self.assertEqual(context_attach.call_count, 2) + self.assertEqual(context_detach.call_count, 2) + get_span.assert_called_once_with( + tracer, + None, + properties, + destination=method.exchange, + span_kind=SpanKind.CONSUMER, + task_name=generator_info.consumer_tag, + operation=MessagingOperationValues.RECEIVE, + ) + consume_hook.assert_called_once() + returned_span.end.assert_called_once() + generator_info.pending_events.reset_mock() + + extract.reset_mock() + context_get_current.reset_mock() + get_span.reset_mock() + context_attach.reset_mock() + context_detach.reset_mock() + returned_span.end.reset_mock() + consume_hook.reset_mock() + + # Third call (cancellation event) + evt = 
_ConsumerCancellationEvt("") + generator_info.pending_events.popleft.return_value = evt + + res = proxy.popleft() + + self.assertEqual(res, evt) + generator_info.pending_events.popleft.assert_called_once() + extract.assert_not_called() + context_get_current.not_called() + context_detach.assert_called_once() + context_attach.assert_not_called() + get_span.assert_not_called() + consume_hook.assert_not_called() + returned_span.end.assert_not_called()
trace_id and span_id are 0 with channel.consume() generator function **Describe your environment** ``` localhost:~ # python3 --version Python 3.6.15 localhost:~ # pip3 freeze | grep opentelemetry opentelemetry-api==1.12.0 opentelemetry-instrumentation==0.33b0 opentelemetry-instrumentation-logging==0.33b0 opentelemetry-instrumentation-pika==0.33b0 opentelemetry-sdk==1.12.0 opentelemetry-semantic-conventions==0.33b0 ``` **Steps to reproduce** ``` """Sample RabbiMQ consumer application demonstrating tracing capabilities of using opentelemetry instrumentation libraries.""" import argparse import logging import pika import subprocess import sys from opentelemetry import trace from opentelemetry.instrumentation.logging import LoggingInstrumentor from opentelemetry.instrumentation.pika import PikaInstrumentor from opentelemetry.sdk.trace import TracerProvider LoggingInstrumentor().instrument(set_logging_format=False) logging.basicConfig(filename='consumer.log', filemode='w', format='%(asctime)s - %(levelname)s - %(threadName)s - %(filename)s:%(lineno)s - %(funcName)s - [trace_id=%(otelTraceID)s span_id=%(otelSpanID)s resource.service.name=%(otelServiceName)s] - %(message)s', datefmt='%H:%M:%S', level=logging.INFO) trace.set_tracer_provider(TracerProvider()) logger = logging.getLogger("Consumer_App") # Callback function which is called on incoming messages def callback(ch, method, properties, body): logger.info(" [x] Received Message: " + str(body.decode('utf-8'))) logger.info(" [x] Header " + str(properties.headers)) logger.info(" Exiting the process") sys.exit(0) def main(): logger.info("[START]: Consumer Application") parser = argparse.ArgumentParser() parser.add_argument("queue", help="queue name", type=str, default="testqueue") args = parser.parse_args() # Connect to rabbitmq. url = 'amqp://admin:password@localhost:31302' params = pika.URLParameters(url) connection = pika.BlockingConnection(params) mychannel = connection.channel() # start a channel pika_instrumentation = PikaInstrumentor() pika_instrumentation.instrument_channel(channel=mychannel) mychannel.queue_declare(queue=args.queue, ) # Declare a queue mychannel.basic_publish("", args.queue, "This is a test message published to RabbitMQ") # Below code with basic_consume and callback function - it does have trace id and span ids for log statements in callback function. # set up subscription on the queue # mychannel.basic_consume(args.queue, callback, auto_ack=True) # # start consuming (blocks) # mychannel.start_consuming() # I'm using channel.consume method to get the messages from the queue. And expecting the logs should have trace ids and span ids. for method, properties, body in mychannel.consume(queue='testqueue', auto_ack=True, inactivity_timeout=10): logger.info(" [x] Received Message: " + str(body.decode('utf-8'))) logger.info(" [x] Header " + str(properties.headers)) logger.info(" Exiting the process") sys.exit(0) connection.close() if __name__ == '__main__': main() ``` **What is the expected behavior?** I'm using channel.consume method to get the messages from the queue. And expecting the logs should have trace ids and span ids. If you see below, trace_id and span_id is available if I use basic_consume with callback. 
``` 02:49:21 - INFO - MainThread - TracingConsumer.py:27 - callback - [trace_id=0f0e9af13cd7618bddf6ecaf8bc8663a span_id=32921be241a75b82 resource.service.name=] - [x] Received Message: Hi_from_publisher 02:49:21 - INFO - MainThread - TracingConsumer.py:28 - callback - [trace_id=0f0e9af13cd7618bddf6ecaf8bc8663a span_id=32921be241a75b82 resource.service.name=] - [x] Header {'traceparent': '00-0f0e9af13cd7618bddf6ecaf8bc8663a-27c540a6ee6fe32f-01'} 02:49:21 - INFO - MainThread - TracingConsumer.py:29 - callback - [trace_id=0f0e9af13cd7618bddf6ecaf8bc8663a span_id=32921be241a75b82 resource.service.name=] - Exiting the process ``` **What is the actual behavior?** Actual behavior with channel.consume(...) where trace_id and span_id are 0. Although we can clearly see that the message header contains "traceparent" id. ``` 15:58:43 - INFO - MainThread - TracingConsumerTest.py:60 - main - [trace_id=0 span_id=0 resource.service.name=] - [x] Received Message: This is a test message published to RabbitMQ 15:58:43 - INFO - MainThread - TracingConsumerTest.py:61 - main - [trace_id=0 span_id=0 resource.service.name=] - [x] Header {'traceparent': '00-9f016203a363df635b9f3d3c76775c92-d5afde2a271cfe41-01'} 15:58:43 - INFO - MainThread - TracingConsumerTest.py:62 - main - [trace_id=0 span_id=0 resource.service.name=] - Exiting the process ``` **Additional context** None
Hi all, i am facing the same issue and would like to take a stab at fixing it. From my quick look it seems like a consumer group is only attached when `next` is called on the generator and not when the actual `consume` function is called. That's why I think i want to add a hook on the first call to the `next` method to instrument the consumer. let me know if that makes sense
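The approach described in this comment can be pictured as a thin wrapper around the `consume()` generator that extracts the propagated context from the message headers (the `traceparent` visible in the logs above) and opens a consumer span per yielded message. This is only an illustrative sketch under those assumptions, not the actual pika instrumentation code; the queue name and span name are placeholders.

```python
from opentelemetry import trace
from opentelemetry.propagate import extract


def traced_consume(channel, queue, **consume_kwargs):
    """Wrap BlockingChannel.consume() so each yielded message has an active span."""
    tracer = trace.get_tracer(__name__)
    for method, properties, body in channel.consume(queue=queue, **consume_kwargs):
        if method is None:
            # inactivity_timeout elapsed without a message being delivered
            yield method, properties, body
            continue
        # restore the remote context from the AMQP headers (e.g. "traceparent")
        ctx = extract(getattr(properties, "headers", None) or {})
        with tracer.start_as_current_span(
            f"{queue} process", context=ctx, kind=trace.SpanKind.CONSUMER
        ):
            yield method, properties, body
```

With something like this in place, log records emitted while processing each message would pick up the restored trace and span ids from the active span.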
2024-04-08T11:47:52
open-telemetry/opentelemetry-python-contrib
2,418
open-telemetry__opentelemetry-python-contrib-2418
[ "2399" ]
5375acf534f70f827d108ce2f2e7b8d728b2fc8e
diff --git a/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py b/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py --- a/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py @@ -365,6 +365,7 @@ def _instrumented_lambda_handler_call( # noqa pylint: disable=too-many-branches ) exception = None + result = None try: result = call_wrapped(*args, **kwargs) except Exception as exc: # pylint: disable=W0703
diff --git a/instrumentation/opentelemetry-instrumentation-aws-lambda/tests/test_aws_lambda_instrumentation_manual.py b/instrumentation/opentelemetry-instrumentation-aws-lambda/tests/test_aws_lambda_instrumentation_manual.py --- a/instrumentation/opentelemetry-instrumentation-aws-lambda/tests/test_aws_lambda_instrumentation_manual.py +++ b/instrumentation/opentelemetry-instrumentation-aws-lambda/tests/test_aws_lambda_instrumentation_manual.py @@ -436,6 +436,31 @@ def test_lambda_handles_handler_exception(self): exc_env_patch.stop() + def test_lambda_handles_handler_exception_with_api_gateway_proxy_event( + self, + ): + exc_env_patch = mock.patch.dict( + "os.environ", + {_HANDLER: "tests.mocks.lambda_function.handler_exc"}, + ) + exc_env_patch.start() + AwsLambdaInstrumentor().instrument() + # instrumentor re-raises the exception + with self.assertRaises(Exception): + mock_execute_lambda( + {"requestContext": {"http": {"method": "GET"}}} + ) + + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.status.status_code, StatusCode.ERROR) + self.assertEqual(len(span.events), 1) + event = span.events[0] + self.assertEqual(event.name, "exception") + + exc_env_patch.stop() + def test_uninstrument(self): AwsLambdaInstrumentor().instrument()
AWS Lambda/APIGW unhandled exception causes UnboundLocalError **Describe your environment** AWS Lambda, python3.12, using [opentelemetry-lambda layer-python 0.5](https://github.com/open-telemetry/opentelemetry-lambda/releases/tag/layer-python%2F0.5.0) which includes opentelemetry-python 1.23.0 and opentelemetry-python-contrib 0.44b0 **Steps to reproduce** A lambda function invoked with APIGW, with auto-instrumentation, after an unhandled exception causes: ``` [ERROR] UnboundLocalError: cannot access local variable 'result' where it is not associated with a value Traceback (most recent call last): File "/opt/python/wrapt/wrappers.py", line 598, in __call__ return self._self_wrapper(self.__wrapped__, self._self_instance, File "/opt/python/opentelemetry/instrumentation/aws_lambda/__init__.py", line 378, in _instrumented_lambda_handler_call if isinstance(result, dict) and result.get("statusCode"): ^^^^^^ ``` This can be reproduced with this code: ```python import json import random import logging from opentelemetry import trace tracer = trace.get_tracer_provider().get_tracer(__name__) logger = logging.getLogger() def lambda_handler(event, context): if random.random() < 0.1: # 10% chance to raise an error raise Exception('Injected error') return { "statusCode": 200, "body": json.dumps({ "message": "hello world", }), } ``` **What is the expected behavior?** The original exception is propagated (and not the `UnboundLocalError`) **What is the actual behavior?** The original exception is not propagated but this exception is raised instead: ``` [ERROR] UnboundLocalError: cannot access local variable 'result' where it is not associated with a value Traceback (most recent call last): File "/opt/python/wrapt/wrappers.py", line 598, in __call__ return self._self_wrapper(self.__wrapped__, self._self_instance, File "/opt/python/opentelemetry/instrumentation/aws_lambda/__init__.py", line 378, in _instrumented_lambda_handler_call if isinstance(result, dict) and result.get("statusCode"): ^^^^^^ ``` **Additional context** It seems that it would be enough to declare `result` at the beginning of this [block of code](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/fdcbbddb6c753e5e9d494ba399a5b4bcab4afc3f/instrumentation/opentelemetry-instrumentation-aws-lambda/src/opentelemetry/instrumentation/aws_lambda/__init__.py#L367-L392): ```python result = None # Assign a default value to result exception = None try: result = call_wrapped(*args, **kwargs) except Exception as exc: # pylint: disable=W0703 exception = exc span.set_status(Status(StatusCode.ERROR)) span.record_exception(exception) if isinstance(lambda_event, dict) and lambda_event.get( "requestContext" ): span.set_attribute(SpanAttributes.FAAS_TRIGGER, "http") if lambda_event.get("version") == "2.0": _set_api_gateway_v2_proxy_attributes(lambda_event, span) else: _set_api_gateway_v1_proxy_attributes(lambda_event, span) if isinstance(result, dict) and result.get("statusCode"): span.set_attribute( SpanAttributes.HTTP_STATUS_CODE, result.get("statusCode"), ) ```
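The crash is the usual Python pitfall of reading a name that was only assigned inside a `try` block after an exception skipped the assignment; the patch above fixes it by defaulting `result` before the `try`. A stripped-down, self-contained sketch of that pattern (not the instrumentation source itself):

```python
def call_and_report_status(call_wrapped):
    result = None  # default, so the check below is safe even if the call raises
    exception = None
    try:
        result = call_wrapped()
    except Exception as exc:  # pylint: disable=broad-except
        exception = exc

    if isinstance(result, dict) and result.get("statusCode"):
        print("statusCode:", result["statusCode"])

    if exception is not None:
        raise exception  # propagate the original error, not an UnboundLocalError
    return result
```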
2024-04-14T01:01:58
open-telemetry/opentelemetry-python-contrib
2,436
open-telemetry__opentelemetry-python-contrib-2436
[ "2428" ]
a0c3211c4fd3326279740480c0c1938a17a125c8
diff --git a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py new file mode 100644 --- /dev/null +++ b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py @@ -0,0 +1,20 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# pylint: disable=import-error + +from .processor import BaggageSpanProcessor +from .version import __version__ + +__all__ = ["BaggageSpanProcessor", "__version__"] diff --git a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py new file mode 100644 --- /dev/null +++ b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py @@ -0,0 +1,55 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional + +from opentelemetry.baggage import get_all as get_all_baggage +from opentelemetry.context import Context +from opentelemetry.sdk.trace.export import SpanProcessor +from opentelemetry.trace import Span + + +class BaggageSpanProcessor(SpanProcessor): + """ + The BaggageSpanProcessor reads entries stored in Baggage + from the parent context and adds the baggage entries' keys and + values to the span as attributes on span start. + + Add this span processor to a tracer provider. + + Keys and values added to Baggage will appear on subsequent child + spans for a trace within this service *and* be propagated to external + services in accordance with any configured propagation formats + configured. If the external services also have a Baggage span + processor, the keys and values will appear in those child spans as + well. + + ⚠ Warning ⚠️ + + Do not put sensitive information in Baggage. + + To repeat: a consequence of adding data to Baggage is that the keys and + values will appear in all outgoing HTTP headers from the application. 
+ + """ + + def __init__(self) -> None: + pass + + def on_start( + self, span: "Span", parent_context: Optional[Context] = None + ) -> None: + baggage = get_all_baggage(parent_context) + for key, value in baggage.items(): + span.set_attribute(key, value) diff --git a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/version.py b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/version.py new file mode 100644 --- /dev/null +++ b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/version.py @@ -0,0 +1,15 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__version__ = "0.46b0.dev"
diff --git a/processor/opentelemetry-processor-baggage/tests/__init__.py b/processor/opentelemetry-processor-baggage/tests/__init__.py new file mode 100644 --- /dev/null +++ b/processor/opentelemetry-processor-baggage/tests/__init__.py @@ -0,0 +1,13 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/processor/opentelemetry-processor-baggage/tests/test_baggage_processor.py b/processor/opentelemetry-processor-baggage/tests/test_baggage_processor.py new file mode 100644 --- /dev/null +++ b/processor/opentelemetry-processor-baggage/tests/test_baggage_processor.py @@ -0,0 +1,89 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +from opentelemetry.baggage import get_all as get_all_baggage +from opentelemetry.baggage import set_baggage +from opentelemetry.context import attach, detach +from opentelemetry.processor.baggage import BaggageSpanProcessor +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import SpanProcessor +from opentelemetry.trace import Span, Tracer + + +class BaggageSpanProcessorTest(unittest.TestCase): + def test_check_the_baggage(self): + self.assertIsInstance(BaggageSpanProcessor(), SpanProcessor) + + def test_set_baggage_attaches_to_child_spans_and_detaches_properly_with_context( + self, + ): + tracer_provider = TracerProvider() + tracer_provider.add_span_processor(BaggageSpanProcessor()) + + # tracer has no baggage to start + tracer = tracer_provider.get_tracer("my-tracer") + self.assertIsInstance(tracer, Tracer) + self.assertEqual(get_all_baggage(), {}) + # set baggage in context + ctx = set_baggage("queen", "bee") + with tracer.start_as_current_span( + name="bumble", context=ctx + ) as bumble_span: + # span should have baggage key-value pair in context + self.assertEqual(get_all_baggage(ctx), {"queen": "bee"}) + # span should have baggage key-value pair in attribute + self.assertEqual(bumble_span._attributes["queen"], "bee") + with tracer.start_as_current_span( + name="child_span", context=ctx + ) as child_span: + self.assertIsInstance(child_span, Span) + # child span should have baggage key-value pair in context + self.assertEqual(get_all_baggage(ctx), {"queen": "bee"}) + # child span should have baggage key-value pair in attribute + self.assertEqual(child_span._attributes["queen"], "bee") + + def test_set_baggage_attaches_to_child_spans_and_detaches_properly_with_token( + self, + ): + tracer_provider = TracerProvider() + 
tracer_provider.add_span_processor(BaggageSpanProcessor()) + + # tracer has no baggage to start + tracer = tracer_provider.get_tracer("my-tracer") + self.assertIsInstance(tracer, Tracer) + self.assertEqual(get_all_baggage(), {}) + # create a context token and set baggage + honey_token = attach(set_baggage("bumble", "bee")) + self.assertEqual(get_all_baggage(), {"bumble": "bee"}) + # in a new span, ensure the baggage is there + with tracer.start_as_current_span("parent") as span: + self.assertEqual(get_all_baggage(), {"bumble": "bee"}) + self.assertEqual(span._attributes["bumble"], "bee") + # create a second context token and set more baggage + moar_token = attach(set_baggage("moar", "bee")) + self.assertEqual( + get_all_baggage(), {"bumble": "bee", "moar": "bee"} + ) + # in a child span, ensure all baggage is there as attributes + with tracer.start_as_current_span("child") as child_span: + self.assertEqual( + get_all_baggage(), {"bumble": "bee", "moar": "bee"} + ) + self.assertEqual(child_span._attributes["bumble"], "bee") + self.assertEqual(child_span._attributes["moar"], "bee") + detach(moar_token) + detach(honey_token) + self.assertEqual(get_all_baggage(), {})
Request for new component: Baggage Span Processor **Is your feature request related to a problem?** The Honeycomb distro currently provides a Span processor that takes items from the baggage and adds those items as attributes onto spans. **Describe the solution you'd like** We'd like to donate this code upstream if there's interest from the community; the code is pretty minimal: https://github.com/honeycombio/honeycomb-opentelemetry-python/blob/main/src/honeycomb/opentelemetry/baggage.py
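For reference, wiring up such a processor looks roughly like the tests accompanying the patch above; this sketch assumes the package layout introduced there (`opentelemetry.processor.baggage`):

```python
from opentelemetry import baggage, trace
from opentelemetry.processor.baggage import BaggageSpanProcessor
from opentelemetry.sdk.trace import TracerProvider

provider = TracerProvider()
provider.add_span_processor(BaggageSpanProcessor())
trace.set_tracer_provider(provider)

tracer = trace.get_tracer("example")
ctx = baggage.set_baggage("queen", "bee")
with tracer.start_as_current_span("bumble", context=ctx) as span:
    # the processor copies the "queen" baggage entry onto the span as an attribute
    pass
```

As the processor's docstring warns, anything placed in baggage also ends up in outgoing headers via propagation, so only non-sensitive keys should be used.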
2024-04-17T20:02:28
open-telemetry/opentelemetry-python-contrib
2,461
open-telemetry__opentelemetry-python-contrib-2461
[ "2460" ]
0a231e57f9722e6101194c6b38695addf23ab950
diff --git a/instrumentation/opentelemetry-instrumentation-asyncio/src/opentelemetry/instrumentation/asyncio/__init__.py b/instrumentation/opentelemetry-instrumentation-asyncio/src/opentelemetry/instrumentation/asyncio/__init__.py --- a/instrumentation/opentelemetry-instrumentation-asyncio/src/opentelemetry/instrumentation/asyncio/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-asyncio/src/opentelemetry/instrumentation/asyncio/__init__.py @@ -116,21 +116,11 @@ class AsyncioInstrumentor(BaseInstrumentor): "run_coroutine_threadsafe", ] - def __init__(self): - super().__init__() - self.process_duration_histogram = None - self.process_created_counter = None - - self._tracer = None - self._meter = None - self._coros_name_to_trace: set = set() - self._to_thread_name_to_trace: set = set() - self._future_active_enabled: bool = False - def instrumentation_dependencies(self) -> Collection[str]: return _instruments def _instrument(self, **kwargs): + # pylint: disable=attribute-defined-outside-init self._tracer = get_tracer( __name__, __version__, kwargs.get("tracer_provider") ) @@ -307,13 +297,17 @@ def trace_future(self, future): ) def callback(f): - exception = f.exception() attr = { "type": "future", + "state": ( + "cancelled" + if f.cancelled() + else determine_state(f.exception()) + ), } - state = determine_state(exception) - attr["state"] = state - self.record_process(start, attr, span, exception) + self.record_process( + start, attr, span, None if f.cancelled() else f.exception() + ) future.add_done_callback(callback) return future
diff --git a/instrumentation/opentelemetry-instrumentation-asyncio/tests/test_asyncio_future_cancellation.py b/instrumentation/opentelemetry-instrumentation-asyncio/tests/test_asyncio_future_cancellation.py new file mode 100644 --- /dev/null +++ b/instrumentation/opentelemetry-instrumentation-asyncio/tests/test_asyncio_future_cancellation.py @@ -0,0 +1,60 @@ +import asyncio +from unittest.mock import patch + +from opentelemetry.instrumentation.asyncio import AsyncioInstrumentor +from opentelemetry.instrumentation.asyncio.environment_variables import ( + OTEL_PYTHON_ASYNCIO_FUTURE_TRACE_ENABLED, +) +from opentelemetry.test.test_base import TestBase +from opentelemetry.trace import get_tracer + + +class TestTraceFuture(TestBase): + @patch.dict( + "os.environ", {OTEL_PYTHON_ASYNCIO_FUTURE_TRACE_ENABLED: "true"} + ) + def setUp(self): + super().setUp() + self._tracer = get_tracer( + __name__, + ) + self.instrumentor = AsyncioInstrumentor() + self.instrumentor.instrument() + + def tearDown(self): + super().tearDown() + self.instrumentor.uninstrument() + + def test_trace_future_cancelled(self): + async def future_cancelled(): + with self._tracer.start_as_current_span("root"): + future = asyncio.Future() + future = self.instrumentor.trace_future(future) + future.cancel() + + try: + asyncio.run(future_cancelled()) + except asyncio.CancelledError as exc: + self.assertEqual(isinstance(exc, asyncio.CancelledError), True) + spans = self.memory_exporter.get_finished_spans() + self.assertEqual(len(spans), 2) + self.assertEqual(spans[0].name, "root") + self.assertEqual(spans[1].name, "asyncio future") + + metrics = ( + self.memory_metrics_reader.get_metrics_data() + .resource_metrics[0] + .scope_metrics[0] + .metrics + ) + self.assertEqual(len(metrics), 2) + + self.assertEqual(metrics[0].name, "asyncio.process.duration") + self.assertEqual( + metrics[0].data.data_points[0].attributes["state"], "cancelled" + ) + + self.assertEqual(metrics[1].name, "asyncio.process.created") + self.assertEqual( + metrics[1].data.data_points[0].attributes["state"], "cancelled" + )
Asyncio Instrumentation future cancelledError **Describe your environment** Describe any aspect of your environment relevant to the problem, including your Python version, [platform](https://docs.python.org/3/library/platform.html), version numbers of installed dependencies, information about your cloud hosting provider, etc. If you're reporting a problem with a specific version of a library in this repo, please check whether the problem has been fixed on main. **Steps to reproduce** Describe exactly how to reproduce the error. Include a code sample if applicable. Using python auto instrumentation version 0.44b0 the real trace source is being deprecated and we see only this: `{"line":44,"module":"runners","timestamp":"2024-04-18 15:02:21.526124+00:00","message":"[asyncio] Exception in callback <function AsyncioInstrumentor.trace_future.<locals>.callback at 0x7fa8d2f47c70>\nhandle: <Handle AsyncioInstrumentor.trace_future.<locals>.callback>","commit":"-","service_name":"******","hostname":"***********","level":"ERROR","exception_type":"CancelledError","traceback":"File \"uvloop/cbhandles.pyx\", line 63, in uvloop.loop.Handle._run\n\nFile \"/otel-auto-instrumentation-python/opentelemetry/instrumentation/asyncio/__init__.py\", line 310, in callback\n 309 def callback(f):\n--> 310 exception = f.exception()\n 311 attr = {\n ..................................................\n f = <Future cancelled>\n ..................................................\n\nCancelledError"}` **What is the expected behavior?** What did you expect to see? SPAN collected with no errors **What is the actual behavior?** What did you see instead? trace source does not run with an error log **Additional context** Add any other context about the problem here. Calling the exception() method when future is in the cancelled state is causing a CancelledError, so I think we should check the cancelled state first and call f.exception() only if it's not cancelled.
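The guard suggested in the last paragraph, reading `Future.exception()` only when the future was not cancelled, matches what the patch above does and can be demonstrated in isolation:

```python
import asyncio


def on_done(future: asyncio.Future) -> None:
    if future.cancelled():
        state, exception = "cancelled", None
    else:
        exception = future.exception()  # safe: only read when not cancelled
        state = "exception" if exception else "finished"
    print("future state:", state)


async def main():
    fut = asyncio.get_running_loop().create_future()
    fut.add_done_callback(on_done)
    fut.cancel()
    await asyncio.sleep(0)  # give the loop a chance to run the done callback


asyncio.run(main())
```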
2024-04-26T01:36:34
open-telemetry/opentelemetry-python-contrib
2,517
open-telemetry__opentelemetry-python-contrib-2517
[ "2516" ]
6a40ffd90512e3e4636bddb20728f8f680b69f8a
diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap.py --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap.py @@ -14,8 +14,14 @@ import argparse import logging -import subprocess import sys +from subprocess import ( + PIPE, + CalledProcessError, + Popen, + SubprocessError, + check_call, +) import pkg_resources @@ -34,7 +40,7 @@ def wrapper(package=None): if package: return func(package) return func() - except subprocess.SubprocessError as exp: + except SubprocessError as exp: cmd = getattr(exp, "cmd", None) if cmd: msg = f'Error calling system command "{" ".join(cmd)}"' @@ -48,18 +54,21 @@ def wrapper(package=None): @_syscall def _sys_pip_install(package): # explicit upgrade strategy to override potential pip config - subprocess.check_call( - [ - sys.executable, - "-m", - "pip", - "install", - "-U", - "--upgrade-strategy", - "only-if-needed", - package, - ] - ) + try: + check_call( + [ + sys.executable, + "-m", + "pip", + "install", + "-U", + "--upgrade-strategy", + "only-if-needed", + package, + ] + ) + except CalledProcessError as error: + print(error) def _pip_check(): @@ -70,8 +79,8 @@ def _pip_check(): 'opentelemetry-instrumentation-flask 1.0.1 has requirement opentelemetry-sdk<2.0,>=1.0, but you have opentelemetry-sdk 0.5.' To not be too restrictive, we'll only check for relevant packages. """ - with subprocess.Popen( - [sys.executable, "-m", "pip", "check"], stdout=subprocess.PIPE + with Popen( + [sys.executable, "-m", "pip", "check"], stdout=PIPE ) as check_pipe: pip_check = check_pipe.communicate()[0].decode() pip_check_lower = pip_check.lower() diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py @@ -24,6 +24,10 @@ "library": "aiohttp ~= 3.0", "instrumentation": "opentelemetry-instrumentation-aiohttp-client==0.46b0.dev", }, + { + "library": "aiohttp ~= 3.0", + "instrumentation": "opentelemetry-instrumentation-aiohttp-server==0.46b0.dev", + }, { "library": "aiopg >= 0.13.0, < 2.0.0", "instrumentation": "opentelemetry-instrumentation-aiopg==0.46b0.dev", @@ -187,6 +191,7 @@ "opentelemetry-instrumentation-dbapi==0.46b0.dev", "opentelemetry-instrumentation-logging==0.46b0.dev", "opentelemetry-instrumentation-sqlite3==0.46b0.dev", + "opentelemetry-instrumentation-threading==0.46b0.dev", "opentelemetry-instrumentation-urllib==0.46b0.dev", "opentelemetry-instrumentation-wsgi==0.46b0.dev", ] diff --git a/scripts/otel_packaging.py b/scripts/otel_packaging.py --- a/scripts/otel_packaging.py +++ b/scripts/otel_packaging.py @@ -12,55 +12,43 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from tomli import load -from os import path, listdir -from subprocess import check_output, CalledProcessError -from requests import get +import os +import subprocess +from subprocess import CalledProcessError -scripts_path = path.dirname(path.abspath(__file__)) -root_path = path.dirname(scripts_path) -instrumentations_path = path.join(root_path, "instrumentation") +import tomli + +scripts_path = os.path.dirname(os.path.abspath(__file__)) +root_path = os.path.dirname(scripts_path) +instrumentations_path = os.path.join(root_path, "instrumentation") def get_instrumentation_packages(): - for pkg in sorted(listdir(instrumentations_path)): - pkg_path = path.join(instrumentations_path, pkg) - if not path.isdir(pkg_path): + for pkg in sorted(os.listdir(instrumentations_path)): + pkg_path = os.path.join(instrumentations_path, pkg) + if not os.path.isdir(pkg_path): continue - error = f"Could not get version for package {pkg}" - try: - hatch_version = check_output( + version = subprocess.check_output( "hatch version", shell=True, cwd=pkg_path, - universal_newlines=True + universal_newlines=True, ) - except CalledProcessError as exc: print(f"Could not get hatch version from path {pkg_path}") print(exc.output) + raise exc - try: - response = get(f"https://pypi.org/pypi/{pkg}/json", timeout=10) - - except Exception: - print(error) - continue - - if response.status_code != 200: - print(error) - continue - - pyproject_toml_path = path.join(pkg_path, "pyproject.toml") + pyproject_toml_path = os.path.join(pkg_path, "pyproject.toml") with open(pyproject_toml_path, "rb") as file: - pyproject_toml = load(file) + pyproject_toml = tomli.load(file) instrumentation = { "name": pyproject_toml["project"]["name"], - "version": hatch_version.strip(), + "version": version.strip(), "instruments": pyproject_toml["project"]["optional-dependencies"][ "instruments" ],
Add error handling for uninstallable packages to `opentelemetry-bootstrap -a install` Right now we are doing this error handling [here](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/46d2ce6acea9a1a6cb1a4d4c863077002f5f7d21/scripts/otel_packaging.py#L46). It should be [here](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/46d2ce6acea9a1a6cb1a4d4c863077002f5f7d21/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap.py#L51). The situation we are in right now is that if we add a new instrumentation that does not have a third-party package as a dependency (like `threading`), then it won't be added to the [`default_instrumentations`](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/46d2ce6acea9a1a6cb1a4d4c863077002f5f7d21/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py#L184) as it should. This happens because the [script that gets all instrumentations](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/46d2ce6acea9a1a6cb1a4d4c863077002f5f7d21/scripts/otel_packaging.py#L46) won't pick up this instrumentation, because it first checks whether the package is on PyPI (which it obviously is not, since we are just adding it to the repo).
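On the `opentelemetry-bootstrap -a install` side, the behaviour the patch above ends up with is simply catching a failed `pip install` for one package and carrying on; the essential shape is:

```python
import sys
from subprocess import CalledProcessError, check_call


def install_package(package: str) -> None:
    try:
        check_call([sys.executable, "-m", "pip", "install", "-U", package])
    except CalledProcessError as error:
        # report the failed package but keep installing the remaining instrumentations
        print(error)
```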
2024-05-10T23:34:07
open-telemetry/opentelemetry-python-contrib
2,524
open-telemetry__opentelemetry-python-contrib-2524
[ "2393", "2393" ]
65b4f850a03135bff18a95e62465da881c25f0ec
diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py --- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py @@ -16,6 +16,15 @@ This library allows tracing HTTP elasticsearch made by the `elasticsearch <https://elasticsearch-py.readthedocs.io/en/master/>`_ library. +.. warning:: + The elasticsearch package got native OpenTelemetry support since version + `8.13 <https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/release-notes.html#rn-8-13-0>`_. + To avoid duplicated tracing this instrumentation disables itself if it finds an elasticsearch client + that has OpenTelemetry support enabled. + + Please be aware that the two libraries may use a different semantic convention, see + `elasticsearch documentation <https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/opentelemetry.html>`_. + Usage ----- @@ -54,7 +63,7 @@ def response_hook(span: Span, response: dict) for example: -.. code: python +.. code-block: python from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor import elasticsearch @@ -81,6 +90,7 @@ def response_hook(span, response): """ import re +import warnings from logging import getLogger from os import environ from typing import Collection @@ -197,6 +207,16 @@ def _wrap_perform_request( ): # pylint: disable=R0912,R0914 def wrapper(wrapped, _, args, kwargs): + # if wrapped elasticsearch has native OTel instrumentation just call the wrapped function + otel_span = kwargs.get("otel_span") + if otel_span and otel_span.otel_span: + warnings.warn( + "Instrumentation disabled, relying on elasticsearch native OTel support, see " + "https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/elasticsearch/elasticsearch.html", + Warning, + ) + return wrapped(*args, **kwargs) + method = url = None try: method, url, *_ = args @@ -249,6 +269,11 @@ def normalize_kwargs(k, v): v = str(v) elif isinstance(v, elastic_transport.HttpHeaders): v = dict(v) + elif isinstance( + v, elastic_transport.OpenTelemetrySpan + ): + # the transport Span is always a dummy one + v = None return (k, v) hook_kwargs = dict(
diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/test_elasticsearch.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/test_elasticsearch.py --- a/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/test_elasticsearch.py +++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/tests/test_elasticsearch.py @@ -23,6 +23,7 @@ import elasticsearch.exceptions from elasticsearch import Elasticsearch from elasticsearch_dsl import Search +from pytest import mark import opentelemetry.instrumentation.elasticsearch from opentelemetry import trace @@ -36,7 +37,7 @@ from . import sanitization_queries # pylint: disable=no-name-in-module -major_version = elasticsearch.VERSION[0] +major_version, minor_version = elasticsearch.VERSION[:2] if major_version == 8: from . import helpers_es8 as helpers # pylint: disable=no-name-in-module @@ -70,6 +71,9 @@ def get_elasticsearch_client(*args, **kwargs): @mock.patch(helpers.perform_request_mock_path) [email protected]( + os.environ, {"OTEL_PYTHON_INSTRUMENTATION_ELASTICSEARCH_ENABLED": "false"} +) class TestElasticsearchIntegration(TestBase): search_attributes = { SpanAttributes.DB_SYSTEM: "elasticsearch", @@ -110,7 +114,6 @@ def test_instrumentor(self, request_mock): span = spans_list[0] # Check version and name in span's instrumentation info - # self.assertEqualSpanInstrumentationInfo(span, opentelemetry.instrumentation.elasticsearch) self.assertEqualSpanInstrumentationInfo( span, opentelemetry.instrumentation.elasticsearch ) @@ -475,6 +478,7 @@ def request_hook(span, method, url, kwargs): "headers": { "accept": "application/vnd.elasticsearch+json; compatible-with=8" }, + "otel_span": None, } elif major_version == 7: expected_kwargs = { @@ -607,3 +611,30 @@ def test_bulk(self, request_mock): self.assertEqualSpanInstrumentationInfo( span, opentelemetry.instrumentation.elasticsearch ) + + @mark.skipif( + (major_version, minor_version) < (8, 13), + reason="Native OTel since elasticsearch 8.13", + ) + @mock.patch.dict( + os.environ, + {"OTEL_PYTHON_INSTRUMENTATION_ELASTICSEARCH_ENABLED": "true"}, + ) + def test_instrumentation_is_disabled_if_native_support_enabled( + self, request_mock + ): + request_mock.return_value = helpers.mock_response("{}") + + es = get_elasticsearch_client(hosts=["http://localhost:9200"]) + es.index( + index="sw", + id=1, + **normalize_arguments(body={"name": "adam"}, doc_type="_doc"), + ) + + spans_list = self.get_finished_spans() + self.assertEqual(len(spans_list), 1) + span = spans_list[0] + + # Check that name in span's instrumentation info is not from this instrumentation + self.assertEqual(span.instrumentation_info.name, "elasticsearch-api")
Handle elasticsearch client native instrumentation **Describe your environment** Elasticsearch client added native opentelemetry instrumentation in [8.13.0](https://www.elastic.co/guide/en/elasticsearch/client/python-api/master/release-notes.html#rn-8-13-0) and so currently without manual intervention it is possible to have our instrumentation and the client one create spans concurrently. **Steps to reproduce** Add opentelemetry instrumentation using elasticsearch client 8.13.0 **What is the expected behavior?** Only the elasticsearch client creates spans. **What is the actual behavior?** Both client and opentelemetry instrumentation creates their own spans. **Additional context** Java opentelemetry instrumentation disables its instrumentation if a new enough elasticsearch client is found https://github.com/open-telemetry/opentelemetry-java-instrumentation/pull/9337/
Since we are hooking into transport `perform_request` we can check if one of its kwargs is `otel_span` (https://github.com/elastic/elasticsearch-py/blob/main/elasticsearch/_sync/client/_base.py#L271) and if its `enabled` attribute is True (https://github.com/elastic/elasticsearch-py/blob/main/elasticsearch/_otel.py#L46). Before fixing this we need to make tests pass with elasticsearch 7 and 8
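A sketch of that guard inside the wrapped `perform_request`, in the spirit of the patch above (the attribute is read defensively because the `otel_span` kwarg only exists on elasticsearch >= 8.13):

```python
def wrapper(wrapped, _instance, args, kwargs):
    # elasticsearch >= 8.13 passes an OpenTelemetrySpan; if it wraps a real span,
    # the client is already tracing this request and we should not add our own span
    otel_span = kwargs.get("otel_span")
    if otel_span is not None and getattr(otel_span, "otel_span", None) is not None:
        return wrapped(*args, **kwargs)

    # ... otherwise fall through to the instrumentation's own span handling ...
    return wrapped(*args, **kwargs)
```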
2024-05-15T15:03:39
open-telemetry/opentelemetry-python-contrib
2,527
open-telemetry__opentelemetry-python-contrib-2527
[ "2525" ]
7bddbb54195be4004275c83ef2592b362e443dc8
diff --git a/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py b/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py --- a/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py @@ -151,10 +151,10 @@ def __init__(self, producer: Producer, tracer: Tracer): self._tracer = tracer def flush(self, timeout=-1): - self._producer.flush(timeout) + return self._producer.flush(timeout) def poll(self, timeout=-1): - self._producer.poll(timeout) + return self._producer.poll(timeout) def produce( self, topic, value=None, *args, **kwargs
diff --git a/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/test_instrumentation.py b/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/test_instrumentation.py --- a/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/test_instrumentation.py +++ b/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/test_instrumentation.py @@ -31,7 +31,7 @@ ) from opentelemetry.test.test_base import TestBase -from .utils import MockConsumer, MockedMessage +from .utils import MockConsumer, MockedMessage, MockedProducer class TestConfluentKafka(TestBase): @@ -246,3 +246,35 @@ def _compare_spans(self, spans, expected_spans): self.assertEqual( expected_attribute_value, span.attributes[attribute_key] ) + + def test_producer_poll(self) -> None: + instrumentation = ConfluentKafkaInstrumentor() + message_queue = [] + + producer = MockedProducer( + message_queue, + { + "bootstrap.servers": "localhost:29092", + }, + ) + + producer = instrumentation.instrument_producer(producer) + producer.produce(topic="topic-1", key="key-1", value="value-1") + msg = producer.poll() + self.assertIsNotNone(msg) + + def test_producer_flush(self) -> None: + instrumentation = ConfluentKafkaInstrumentor() + message_queue = [] + + producer = MockedProducer( + message_queue, + { + "bootstrap.servers": "localhost:29092", + }, + ) + + producer = instrumentation.instrument_producer(producer) + producer.produce(topic="topic-1", key="key-1", value="value-1") + msg = producer.flush() + self.assertIsNotNone(msg) diff --git a/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/utils.py b/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/utils.py --- a/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/utils.py +++ b/instrumentation/opentelemetry-instrumentation-confluent-kafka/tests/utils.py @@ -1,4 +1,6 @@ -from confluent_kafka import Consumer +from typing import Optional + +from confluent_kafka import Consumer, Producer class MockConsumer(Consumer): @@ -20,11 +22,21 @@ def poll(self, timeout=None): class MockedMessage: - def __init__(self, topic: str, partition: int, offset: int, headers): + def __init__( + self, + topic: str, + partition: int, + offset: int, + headers, + key: Optional[str] = None, + value: Optional[str] = None, + ): self._topic = topic self._partition = partition self._offset = offset self._headers = headers + self._key = key + self._value = value def topic(self): return self._topic @@ -37,3 +49,35 @@ def offset(self): def headers(self): return self._headers + + def key(self): + return self._key + + def value(self): + return self._value + + +class MockedProducer(Producer): + def __init__(self, queue, config): + self._queue = queue + super().__init__(config) + + def produce( + self, *args, **kwargs + ): # pylint: disable=keyword-arg-before-vararg + self._queue.append( + MockedMessage( + topic=kwargs.get("topic"), + partition=0, + offset=0, + headers=[], + key=kwargs.get("key"), + value=kwargs.get("value"), + ) + ) + + def poll(self, *args, **kwargs): + return len(self._queue) + + def flush(self, *args, **kwargs): + return len(self._queue)
Confluent kafka Producer doesn't return expected values from poll() and flush() methods when instrumented by ConfluentKafkaInstrumentor().instrument_producer() **Describe your environment** ``` python 3.9 confluent-kafka==2.3.0 opentelemetry-instrumentation-confluent-kafka==0.45b0 opentelemetry-semantic-conventions==0.45b0 ``` **Steps to reproduce** Spin up a kafka broker: ``` docker run -d --name kafka-server --hostname kafka-server \ -e KAFKA_CFG_NODE_ID=0 \ -e KAFKA_CFG_PROCESS_ROLES=controller,broker \ -e KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093 \ -e KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT \ -e KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka-server:9093 \ -e KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER \ bitnami/kafka:latest ``` Run this code: ``` from opentelemetry.instrumentation.confluent_kafka import ConfluentKafkaInstrumentor from confluent_kafka import Producer inst = ConfluentKafkaInstrumentor() p = Producer({'bootstrap.servers': 'localhost:9092'}) p = inst.instrument_producer(p) p.produce('my-topic', b'raw_bytes') msg = p.poll(0) print(msg) msg_pending = p.flush(5) print(msg_pending) ``` **What is the expected behavior?** Should be printed: ``` 0 1 ``` Producer poll() and flush() methods should return integers. **What is the actual behavior?** ``` None None ``` Producer poll() and flush() methods are returning `None`. **Additional context** The flush and poll methods don't have a return statement: https://github.com/open-telemetry/opentelemetry-python-contrib/blob/460fc335836c395db8472ecf464e7ecd94c08925/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py#L154 https://github.com/open-telemetry/opentelemetry-python-contrib/blob/460fc335836c395db8472ecf464e7ecd94c08925/instrumentation/opentelemetry-instrumentation-confluent-kafka/src/opentelemetry/instrumentation/confluent_kafka/__init__.py#L157
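The root cause is simply that the instrumented proxy drops the return values of the wrapped calls; a delegation sketch that preserves the confluent-kafka semantics (the patch above is the actual one-line fix):

```python
class ProducerProxy:
    def __init__(self, producer):
        self._producer = producer

    def poll(self, timeout=-1):
        # confluent_kafka.Producer.poll returns the number of events processed
        return self._producer.poll(timeout)

    def flush(self, timeout=-1):
        # confluent_kafka.Producer.flush returns the number of messages still queued
        return self._producer.flush(timeout)
```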
2024-05-16T14:27:38
open-telemetry/opentelemetry-python-contrib
2,535
open-telemetry__opentelemetry-python-contrib-2535
[ "2472" ]
88111d0a8381bdc440c660b7e802dc951607392a
diff --git a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py --- a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py +++ b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py @@ -14,7 +14,7 @@ # pylint: disable=import-error -from .processor import BaggageSpanProcessor +from .processor import ALLOW_ALL_BAGGAGE_KEYS, BaggageSpanProcessor from .version import __version__ -__all__ = ["BaggageSpanProcessor", "__version__"] +__all__ = ["ALLOW_ALL_BAGGAGE_KEYS", "BaggageSpanProcessor", "__version__"] diff --git a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py --- a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py +++ b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py @@ -12,13 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Optional +from typing import Callable, Optional from opentelemetry.baggage import get_all as get_all_baggage from opentelemetry.context import Context from opentelemetry.sdk.trace.export import SpanProcessor from opentelemetry.trace import Span +# A BaggageKeyPredicate is a function that takes a baggage key and returns a boolean +BaggageKeyPredicateT = Callable[[str], bool] + +# A BaggageKeyPredicate that always returns True, allowing all baggage keys to be added to spans +ALLOW_ALL_BAGGAGE_KEYS: BaggageKeyPredicateT = lambda _: True + class BaggageSpanProcessor(SpanProcessor): """ @@ -44,12 +50,13 @@ class BaggageSpanProcessor(SpanProcessor): """ - def __init__(self) -> None: - pass + def __init__(self, baggage_key_predicate: BaggageKeyPredicateT) -> None: + self._baggage_key_predicate = baggage_key_predicate def on_start( self, span: "Span", parent_context: Optional[Context] = None ) -> None: baggage = get_all_baggage(parent_context) for key, value in baggage.items(): - span.set_attribute(key, value) + if self._baggage_key_predicate(key): + span.set_attribute(key, value)
diff --git a/processor/opentelemetry-processor-baggage/tests/test_baggage_processor.py b/processor/opentelemetry-processor-baggage/tests/test_baggage_processor.py --- a/processor/opentelemetry-processor-baggage/tests/test_baggage_processor.py +++ b/processor/opentelemetry-processor-baggage/tests/test_baggage_processor.py @@ -12,12 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +import re import unittest from opentelemetry.baggage import get_all as get_all_baggage from opentelemetry.baggage import set_baggage from opentelemetry.context import attach, detach -from opentelemetry.processor.baggage import BaggageSpanProcessor +from opentelemetry.processor.baggage import ( + ALLOW_ALL_BAGGAGE_KEYS, + BaggageSpanProcessor, +) from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import SpanProcessor from opentelemetry.trace import Span, Tracer @@ -25,13 +29,77 @@ class BaggageSpanProcessorTest(unittest.TestCase): def test_check_the_baggage(self): - self.assertIsInstance(BaggageSpanProcessor(), SpanProcessor) + self.assertIsInstance( + BaggageSpanProcessor(ALLOW_ALL_BAGGAGE_KEYS), SpanProcessor + ) def test_set_baggage_attaches_to_child_spans_and_detaches_properly_with_context( self, ): tracer_provider = TracerProvider() - tracer_provider.add_span_processor(BaggageSpanProcessor()) + tracer_provider.add_span_processor( + BaggageSpanProcessor(ALLOW_ALL_BAGGAGE_KEYS) + ) + + # tracer has no baggage to start + tracer = tracer_provider.get_tracer("my-tracer") + self.assertIsInstance(tracer, Tracer) + self.assertEqual(get_all_baggage(), {}) + # set baggage in context + ctx = set_baggage("queen", "bee") + with tracer.start_as_current_span( + name="bumble", context=ctx + ) as bumble_span: + # span should have baggage key-value pair in context + self.assertEqual(get_all_baggage(ctx), {"queen": "bee"}) + # span should have baggage key-value pair in attribute + self.assertEqual(bumble_span._attributes["queen"], "bee") + with tracer.start_as_current_span( + name="child_span", context=ctx + ) as child_span: + self.assertIsInstance(child_span, Span) + # child span should have baggage key-value pair in context + self.assertEqual(get_all_baggage(ctx), {"queen": "bee"}) + # child span should have baggage key-value pair in attribute + self.assertEqual(child_span._attributes["queen"], "bee") + + def test_baggage_span_processor_with_string_prefix( + self, + ): + tracer_provider = TracerProvider() + tracer_provider.add_span_processor( + BaggageSpanProcessor(self.has_prefix) + ) + + # tracer has no baggage to start + tracer = tracer_provider.get_tracer("my-tracer") + self.assertIsInstance(tracer, Tracer) + self.assertEqual(get_all_baggage(), {}) + # set baggage in context + ctx = set_baggage("queen", "bee") + with tracer.start_as_current_span( + name="bumble", context=ctx + ) as bumble_span: + # span should have baggage key-value pair in context + self.assertEqual(get_all_baggage(ctx), {"queen": "bee"}) + # span should have baggage key-value pair in attribute + self.assertEqual(bumble_span._attributes["queen"], "bee") + with tracer.start_as_current_span( + name="child_span", context=ctx + ) as child_span: + self.assertIsInstance(child_span, Span) + # child span should have baggage key-value pair in context + self.assertEqual(get_all_baggage(ctx), {"queen": "bee"}) + # child span should have baggage key-value pair in attribute + self.assertEqual(child_span._attributes["queen"], "bee") + + def 
test_baggage_span_processor_with_regex( + self, + ): + tracer_provider = TracerProvider() + tracer_provider.add_span_processor( + BaggageSpanProcessor(self.matches_regex) + ) # tracer has no baggage to start tracer = tracer_provider.get_tracer("my-tracer") @@ -59,7 +127,9 @@ def test_set_baggage_attaches_to_child_spans_and_detaches_properly_with_token( self, ): tracer_provider = TracerProvider() - tracer_provider.add_span_processor(BaggageSpanProcessor()) + tracer_provider.add_span_processor( + BaggageSpanProcessor(ALLOW_ALL_BAGGAGE_KEYS) + ) # tracer has no baggage to start tracer = tracer_provider.get_tracer("my-tracer") @@ -87,3 +157,11 @@ def test_set_baggage_attaches_to_child_spans_and_detaches_properly_with_token( detach(moar_token) detach(honey_token) self.assertEqual(get_all_baggage(), {}) + + @staticmethod + def has_prefix(baggage_key: str) -> bool: + return baggage_key.startswith("que") + + @staticmethod + def matches_regex(baggage_key: str) -> bool: + return re.match(r"que.*", baggage_key) is not None
Baggage span processor - key predicate This issue is to track adding a method of selecting which baggage key entries should be copied. Feedback in the JS contrib PR was to allow a user-provided predicate function. This puts the responsibility on the user to ensure sensitive baggage keys are not copied, while also not prescribing how that is determined. - https://github.com/open-telemetry/opentelemetry-js-contrib/issues/2166 We had similar feedback in the .NET contrib project but thought it was more complicated than just using a set of prefixes, so we created an issue to continue the discussion. The plain processor that copies all baggage entries (like using `*` in your example) is likely to be accepted first. - https://github.com/open-telemetry/opentelemetry-dotnet-contrib/issues/1695
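With the predicate-based constructor from the patch above, the caller decides which baggage keys are copied; for example (only one processor would normally be registered, both are shown for illustration):

```python
from opentelemetry.processor.baggage import ALLOW_ALL_BAGGAGE_KEYS, BaggageSpanProcessor
from opentelemetry.sdk.trace import TracerProvider

provider = TracerProvider()

# copy every baggage entry onto spans (the previous default behaviour)
provider.add_span_processor(BaggageSpanProcessor(ALLOW_ALL_BAGGAGE_KEYS))

# or only copy keys with a known, non-sensitive prefix
provider.add_span_processor(BaggageSpanProcessor(lambda key: key.startswith("myapp.")))
```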
2024-05-21T12:00:37
open-telemetry/opentelemetry-python-contrib
2,538
open-telemetry__opentelemetry-python-contrib-2538
[ "1742" ]
728976fb10e595a445e0a87be26717d5c069c9a8
diff --git a/instrumentation/opentelemetry-instrumentation-httpx/src/opentelemetry/instrumentation/httpx/__init__.py b/instrumentation/opentelemetry-instrumentation-httpx/src/opentelemetry/instrumentation/httpx/__init__.py --- a/instrumentation/opentelemetry-instrumentation-httpx/src/opentelemetry/instrumentation/httpx/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-httpx/src/opentelemetry/instrumentation/httpx/__init__.py @@ -564,11 +564,13 @@ def _instrument(self, **kwargs): tracer_provider = kwargs.get("tracer_provider") _InstrumentedClient._tracer_provider = tracer_provider _InstrumentedAsyncClient._tracer_provider = tracer_provider - httpx.Client = _InstrumentedClient + # Intentionally using a private attribute here, see: + # https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2538#discussion_r1610603719 + httpx.Client = httpx._api.Client = _InstrumentedClient httpx.AsyncClient = _InstrumentedAsyncClient def _uninstrument(self, **kwargs): - httpx.Client = self._original_client + httpx.Client = httpx._api.Client = self._original_client httpx.AsyncClient = self._original_async_client _InstrumentedClient._tracer_provider = None _InstrumentedClient._request_hook = None
diff --git a/instrumentation/opentelemetry-instrumentation-httpx/tests/test_httpx_integration.py b/instrumentation/opentelemetry-instrumentation-httpx/tests/test_httpx_integration.py --- a/instrumentation/opentelemetry-instrumentation-httpx/tests/test_httpx_integration.py +++ b/instrumentation/opentelemetry-instrumentation-httpx/tests/test_httpx_integration.py @@ -532,12 +532,36 @@ def test_instrument_client(self): self.assertEqual(result.text, "Hello!") self.assert_span(num_spans=1) + def test_instrumentation_without_client(self): + + HTTPXClientInstrumentor().instrument() + results = [ + httpx.get(self.URL), + httpx.request("GET", self.URL), + ] + with httpx.stream("GET", self.URL) as stream: + stream.read() + results.append(stream) + + spans = self.assert_span(num_spans=len(results)) + for idx, res in enumerate(results): + with self.subTest(idx=idx, res=res): + self.assertEqual(res.text, "Hello!") + self.assertEqual( + spans[idx].attributes[SpanAttributes.HTTP_URL], + self.URL, + ) + + HTTPXClientInstrumentor().uninstrument() + def test_uninstrument(self): HTTPXClientInstrumentor().instrument() HTTPXClientInstrumentor().uninstrument() client = self.create_client() result = self.perform_request(self.URL, client=client) + result_no_client = httpx.get(self.URL) self.assertEqual(result.text, "Hello!") + self.assertEqual(result_no_client.text, "Hello!") self.assert_span(num_spans=0) def test_uninstrument_client(self):
httpx instrumentation doesn't work for `httpx.get`, `httpx.post`, etc. **Describe your environment** Describe any aspect of your environment relevant to the problem, including your Python version, [platform](https://docs.python.org/3/library/platform.html), version numbers of installed dependencies, information about your cloud hosting provider, etc. If you're reporting a problem with a specific version of a library in this repo, please check whether the problem has been fixed on main. ``` #Python Python 3.8.16 # OTEL opentelemetry-api==1.15.0 ; python_version >= '3.7' opentelemetry-instrumentation==0.36b0 ; python_version >= '3.7' opentelemetry-instrumentation-httpx==0.36b0 opentelemetry-instrumentation-requests==0.36b0 opentelemetry-sdk==1.15.0 ; python_version >= '3.7' # 3rd Party httpx==0.23.3 requests==2.28.2 ; python_version >= '3.6' ``` Azure Function run locally. **Steps to reproduce** Describe exactly how to reproduce the error. Include a code sample if applicable. 1. Install `requests` and `httpx` libs and instrumentations 2. Initialize instrumentors 3. Make a `get/post/etc` request to some api without initializing a client. 4. Make a `get/post/etc` request to some api with initializing a client. ```python import httpx import requests from opentelemetry.instrumentation.httpx import HTTPXClientInstrumentor from opentelemetry.instrumentation.requests import RequestsInstrumentor HTTPXClientInstrumentor().instrument() RequestsInstrumentor().instrument() requests.get("www.google.com") httpx.get("www.google.com") with httpx.Client() as client: response = client.get("https://www.google.com") with requests.Session() as session: response = session.get("https://www.google.com") ``` **What is the expected behavior?** What did you expect to see? Whenever I use `httpx.get` (or similar) a new dependency/span should be created and exported - in my case to Azure Application Insights. **What is the actual behavior?** What did you see instead? I don't see expected dependencies/spans/traces exported. <img width="628" alt="Zrzut ekranu 2023-04-3 o 12 53 01" src="https://user-images.githubusercontent.com/82878433/229490157-edb23b78-a14e-4b21-80b8-f82171414449.png"> **Additional context** Full example of the function. 
```python import httpx import logging import requests import azure.functions as func from opentelemetry.instrumentation.httpx import HTTPXClientInstrumentor from opentelemetry.instrumentation.requests import RequestsInstrumentor from tracing import OpenTelemetryExtension, TracedClass OpenTelemetryExtension.configure(instrumentors=[ HTTPXClientInstrumentor, RequestsInstrumentor ]) def main(req: func.HttpRequest, context: func.Context): with context.span(): return _main(req) class Greeter(TracedClass): trace_private_methods = True trace_exclude_methods = ["_private"] def __init__(self) -> None: super().__init__() def _private(self): pass def greet(self): self._private() response = httpx.get("https://api.namefake.com/") # doesn't work with httpx response = requests.get("https://api.namefake.com/") # works with requests body = response.json() return f"Hello {body['name']}" def _main(req: func.HttpRequest) -> None: logger = logging.getLogger(__name__) logger.info("Hello world!") g = Greeter() greeting = g.greet() logger.info("%s", greeting) with httpx.Client() as client: response = client.get("https://www.google.com") # works correctly with requests.Session() as session: response = session.get("https://www.google.com") # works correctly return func.HttpResponse(response.content, headers={"Content-Type": "text/html"}) ``` It uses [OpenTelemetryExtension POC described here](https://github.com/Azure/azure-sdk-for-python/issues/29672) and `TracedClass` implemented as follows: ```python class MetaTracer(type): def __new__(cls, name, bases, attrs): for key, value in attrs.items(): if any([ type(value) is not FunctionType, key in attrs.get("trace_exclude_methods", []) ]): continue method_type = cls._get_method_type(key) trace_setting = f"trace_{method_type.value}_methods" should_be_traced = attrs.get(trace_setting, getattr(bases[0], trace_setting, False)) if not should_be_traced: continue setattr(value, "__trace_name__", ( f"{value.__module__}::{name}::{value.__name__}" )) attrs[key] = trace()(value) return super().__new__(cls, name, bases, attrs) class _MethodType(str, Enum): PUBLIC = "public" PRIVATE = "private" MAGIC = "magic" @staticmethod def _get_method_type(name: str) -> _MethodType: if name.startswith("__") and name.endswith("__"): return MetaTracer._MethodType.MAGIC if name.startswith("_"): return MetaTracer._MethodType.PRIVATE return MetaTracer._MethodType.PUBLIC class TracedClass(metaclass=MetaTracer): trace_magic_methods = False trace_private_methods = False trace_public_methods = True trace_exclude_methods = [] ``` While I understand why we should use `httpx.Client` over `httpx.get` there are reasons why people are using latter more often than former, but current implementation doesn't allow to trace simple requests. In the OpenCensus httpx extensions there was an opposite issue: httpx.Client was not traced instead - https://github.com/census-instrumentation/opencensus-python/pull/1186. I'm up for working on this issue and would love to hear your comments and tips.
@lzchen @jeremydvoss @macieyng funny that you opened this, I just hit the same thing where no httpx spans showed up. If you turn on debug logging in `opentelemetry`, my guess is you have a dependency conflict. I noticed you've got httpx 0.23: ``` [2023-04-06T15:58:29.753796-05:00] | DEBUG | [service=tempus env=localdev version=no-rel-identifier-found trace_id=0 span_id=0] [opentelemetry.instrumentation.auto_instrumentation.sitecustomize] [sitecustomize.py:_load_instrumentors:76] - Skipping instrumentation httpx: DependencyConflict: requested: "httpx<=0.23.0,>=0.18.0" but found: "httpx 0.23.3" ``` This was caused by the changes at https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1460 @phillipuniverse I haven't checked that, but it seems like you're right about it. Thank you for linking the issues and making the PR! There were similar issues with OpenCensus in the past, where the version compatibility constraints were too strict.
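For reference, a minimal sketch of how such a conflict can be surfaced locally. The `logging` setup and the `importlib.metadata` lookup are standard library; the assumption that a failed dependency check is logged and skipped (rather than raised) is based on the log line quoted above and may differ between auto-instrumentation and manual `instrument()` calls.

```python
# Hedged sketch: print the installed httpx version and enable debug logging so a
# skipped instrumentation ("DependencyConflict") becomes visible in the output.
import logging
from importlib.metadata import version

logging.basicConfig(level=logging.DEBUG)
print("httpx", version("httpx"))  # e.g. 0.23.3, outside the pinned <=0.23.0 range

from opentelemetry.instrumentation.httpx import HTTPXClientInstrumentor

# Assumption: when the dependency check fails, the instrumentor logs and skips
# instead of raising, matching the "Skipping instrumentation httpx" line above.
HTTPXClientInstrumentor().instrument()
```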
2024-05-21T17:32:56
open-telemetry/opentelemetry-python-contrib
2,552
open-telemetry__opentelemetry-python-contrib-2552
[ "2551" ]
e6409568c11f5ec1341e85770f2f01dded676d7a
diff --git a/util/opentelemetry-util-http/src/opentelemetry/util/http/__init__.py b/util/opentelemetry-util-http/src/opentelemetry/util/http/__init__.py --- a/util/opentelemetry-util-http/src/opentelemetry/util/http/__init__.py +++ b/util/opentelemetry-util-http/src/opentelemetry/util/http/__init__.py @@ -166,11 +166,7 @@ def remove_url_credentials(url: str) -> str: parsed = urlparse(url) if all([parsed.scheme, parsed.netloc]): # checks for valid url parsed_url = urlparse(url) - netloc = ( - (":".join(((parsed_url.hostname or ""), str(parsed_url.port)))) - if parsed_url.port - else (parsed_url.hostname or "") - ) + _, _, netloc = parsed.netloc.rpartition("@") return urlunparse( ( parsed_url.scheme,
diff --git a/util/opentelemetry-util-http/tests/test_remove_credentials.py b/util/opentelemetry-util-http/tests/test_remove_credentials.py new file mode 100644 --- /dev/null +++ b/util/opentelemetry-util-http/tests/test_remove_credentials.py @@ -0,0 +1,27 @@ +import unittest + +from opentelemetry.util.http import remove_url_credentials + + +class TestRemoveUrlCredentials(unittest.TestCase): + def test_remove_no_credentials(self): + url = "http://opentelemetry.io:8080/test/path?query=value" + cleaned_url = remove_url_credentials(url) + self.assertEqual(cleaned_url, url) + + def test_remove_credentials(self): + url = "http://someuser:[email protected]:8080/test/path?query=value" + cleaned_url = remove_url_credentials(url) + self.assertEqual( + cleaned_url, "http://opentelemetry.io:8080/test/path?query=value" + ) + + def test_remove_credentials_ipv4_literal(self): + url = "http://someuser:[email protected]:8080/test/path?query=value" + cleaned_url = remove_url_credentials(url) + self.assertEqual(cleaned_url, "http://127.0.0.1:8080/test/path?query=value") + + def test_remove_credentials_ipv6_literal(self): + url = "http://someuser:somepass@[::1]:8080/test/path?query=value" + cleaned_url = remove_url_credentials(url) + self.assertEqual(cleaned_url, "http://[::1]:8080/test/path?query=value")
`remove_url_credentials` drops brackets from IPv6 hostnames **Describe your environment** Python 3.11.9, `opentelemetry-instrumentation` is auto-injected via the OpenTelemetry operator, `opentelemetry_util_http` is `0.44b0` **Steps to reproduce** ```python from opentelemetry.util.http import remove_url_credentials literal_ipv6_url = "https://[::1]/somepath?query=foo" remove_url_credentials(literal_ipv6_url) # 'https://::1/somepath?query=foo' -- should be 'https://[::1]/somepath?query=foo' literal_ipv6_url_with_port = "https://[::1]:12345/somepath?query=foo" remove_url_credentials(literal_ipv6_url_with_port) # 'https://::1:12345/somepath?query=foo -- should be 'https://[::1]:12345/somepath?query=foo' literal_ipv6_url_with_auth = "https://someuser:somepass@[::1]:12345/somepath?query=foo" remove_url_credentials(literal_ipv6_url_with_auth) # 'https://::1:12345/somepath?query=foo' -- should be https://[::1]:12345/somepath?query=foo ``` **What is the expected behavior?** The ipv6 host should remain inside `[]` **What is the actual behavior?** `[]` are stripped from the host **Additional context** https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/util/opentelemetry-util-http/src/opentelemetry/util/http/__init__.py#L169 is the causing line. The `hostname` result on `urlparse` does not contain the brackets ```python from urllib.parse import urlparse parsed = urlparse(literal_ipv6_url_with_auth) parsed # ParseResult(scheme='https', netloc='someuser:somepass@[::1]:12345', path='/somepath', params='', query='query=foo', fragment='') parsed.hostname # '::1' ```
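A standalone sketch of the approach taken in the patch above: split the userinfo off `netloc` with `rpartition("@")` instead of rebuilding the host from `hostname`/`port`, so IPv6 bracket literals survive. The function name is reused from `opentelemetry.util.http` only for illustration.

```python
# Strip only the "user:pass@" portion of the netloc; brackets around IPv6 hosts
# and any explicit port are left untouched.
from urllib.parse import urlparse, urlunparse

def remove_url_credentials(url: str) -> str:
    parsed = urlparse(url)
    if all([parsed.scheme, parsed.netloc]):  # only touch well-formed absolute URLs
        _, _, netloc = parsed.netloc.rpartition("@")
        return urlunparse(
            (parsed.scheme, netloc, parsed.path, parsed.params, parsed.query, parsed.fragment)
        )
    return url

print(remove_url_credentials("https://someuser:somepass@[::1]:12345/somepath?query=foo"))
# -> https://[::1]:12345/somepath?query=foo
```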
2024-05-24T03:30:04
open-telemetry/opentelemetry-python-contrib
2,557
open-telemetry__opentelemetry-python-contrib-2557
[ "2475" ]
bd9156fff84e4d30592d118aa8ee9e3d5d5499f9
diff --git a/instrumentation/opentelemetry-instrumentation-system-metrics/src/opentelemetry/instrumentation/system_metrics/__init__.py b/instrumentation/opentelemetry-instrumentation-system-metrics/src/opentelemetry/instrumentation/system_metrics/__init__.py --- a/instrumentation/opentelemetry-instrumentation-system-metrics/src/opentelemetry/instrumentation/system_metrics/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-system-metrics/src/opentelemetry/instrumentation/system_metrics/__init__.py @@ -91,7 +91,6 @@ from opentelemetry.instrumentation.system_metrics.package import _instruments from opentelemetry.instrumentation.system_metrics.version import __version__ from opentelemetry.metrics import CallbackOptions, Observation, get_meter -from opentelemetry.sdk.util import get_dict_as_key _logger = logging.getLogger(__name__) @@ -638,8 +637,8 @@ def _get_system_network_connections( net_connection, metric ) - connection_counters_key = get_dict_as_key( - self._system_network_connections_labels + connection_counters_key = tuple( + sorted(self._system_network_connections_labels.items()) ) if connection_counters_key in connection_counters:
opentelemetry-instrumentation-system-metrics depends on the SDK https://github.com/open-telemetry/opentelemetry-python-contrib/blob/2493258af11df7f211567103635588f2813de7a3/instrumentation/opentelemetry-instrumentation-system-metrics/pyproject.toml#L29 **What is the expected behavior?** Instrumentations should never depend directly on the SDK. **What is the actual behavior?** There is a hard dependency. **Additional context** The only usage of the SDK in real code is for this util function https://github.com/open-telemetry/opentelemetry-python-contrib/blob/2493258af11df7f211567103635588f2813de7a3/instrumentation/opentelemetry-instrumentation-system-metrics/src/opentelemetry/instrumentation/system_metrics/__init__.py#L94 This should be inlined or removed, and the SDK dependency dropped.
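The fix merged for this (see the patch above) simply inlines that helper: the replacement is a plain tuple of sorted dict items, sketched here with illustrative label values.

```python
# The SDK helper being removed only produced a hashable, order-independent key
# from a labels dict; a tuple of sorted items does the same thing.
labels = {"device": "eth0", "state": "ESTABLISHED", "protocol": "tcp"}  # illustrative labels

connection_counters_key = tuple(sorted(labels.items()))
print(connection_counters_key)
# (('device', 'eth0'), ('protocol', 'tcp'), ('state', 'ESTABLISHED'))
```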
Can work on this one Thanks @emdneto assigned to you and appreciate your help
2024-05-27T14:43:42
open-telemetry/opentelemetry-python-contrib
2,573
open-telemetry__opentelemetry-python-contrib-2573
[ "2457" ]
361da3e45e99cc42e571c6e3f9913d37e51da89d
diff --git a/propagator/opentelemetry-propagator-aws-xray/src/opentelemetry/propagators/aws/aws_xray_propagator.py b/propagator/opentelemetry-propagator-aws-xray/src/opentelemetry/propagators/aws/aws_xray_propagator.py --- a/propagator/opentelemetry-propagator-aws-xray/src/opentelemetry/propagators/aws/aws_xray_propagator.py +++ b/propagator/opentelemetry-propagator-aws-xray/src/opentelemetry/propagators/aws/aws_xray_propagator.py @@ -58,6 +58,7 @@ import logging import typing +from os import environ from opentelemetry import trace from opentelemetry.context import Context @@ -71,6 +72,7 @@ ) TRACE_HEADER_KEY = "X-Amzn-Trace-Id" +AWS_TRACE_HEADER_ENV_KEY = "_X_AMZN_TRACE_ID" KV_PAIR_DELIMITER = ";" KEY_AND_VALUE_DELIMITER = "=" @@ -324,3 +326,33 @@ def fields(self): """Returns a set with the fields set in `inject`.""" return {TRACE_HEADER_KEY} + + +class AwsXrayLambdaPropagator(AwsXRayPropagator): + """Implementation of the AWS X-Ray Trace Header propagation protocol but + with special handling for Lambda's ``_X_AMZN_TRACE_ID` environment + variable. + """ + + def extract( + self, + carrier: CarrierT, + context: typing.Optional[Context] = None, + getter: Getter[CarrierT] = default_getter, + ) -> Context: + + xray_context = super().extract(carrier, context=context, getter=getter) + + if trace.get_current_span(context=context).get_span_context().is_valid: + return xray_context + + trace_header = environ.get(AWS_TRACE_HEADER_ENV_KEY) + + if trace_header is None: + return xray_context + + return super().extract( + {TRACE_HEADER_KEY: trace_header}, + context=xray_context, + getter=getter, + )
diff --git a/propagator/opentelemetry-propagator-aws-xray/tests/test_aws_xray_lambda_propagator.py b/propagator/opentelemetry-propagator-aws-xray/tests/test_aws_xray_lambda_propagator.py new file mode 100644 --- /dev/null +++ b/propagator/opentelemetry-propagator-aws-xray/tests/test_aws_xray_lambda_propagator.py @@ -0,0 +1,164 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from os import environ +from unittest import TestCase +from unittest.mock import patch + +from requests.structures import CaseInsensitiveDict + +from opentelemetry.context import get_current +from opentelemetry.propagators.aws.aws_xray_propagator import ( + TRACE_HEADER_KEY, + AwsXrayLambdaPropagator, +) +from opentelemetry.propagators.textmap import DefaultGetter +from opentelemetry.sdk.trace import ReadableSpan +from opentelemetry.trace import ( + Link, + NonRecordingSpan, + SpanContext, + TraceState, + get_current_span, + use_span, +) + + +class AwsXRayLambdaPropagatorTest(TestCase): + + def test_extract_no_environment_variable(self): + + actual_context = get_current_span( + AwsXrayLambdaPropagator().extract( + {}, context=get_current(), getter=DefaultGetter() + ) + ).get_span_context() + + self.assertEqual(hex(actual_context.trace_id), "0x0") + self.assertEqual(hex(actual_context.span_id), "0x0") + self.assertFalse( + actual_context.trace_flags.sampled, + ) + self.assertEqual(actual_context.trace_state, TraceState.get_default()) + + def test_extract_no_environment_variable_valid_context(self): + + with use_span(NonRecordingSpan(SpanContext(1, 2, False))): + + actual_context = get_current_span( + AwsXrayLambdaPropagator().extract( + {}, context=get_current(), getter=DefaultGetter() + ) + ).get_span_context() + + self.assertEqual(hex(actual_context.trace_id), "0x1") + self.assertEqual(hex(actual_context.span_id), "0x2") + self.assertFalse( + actual_context.trace_flags.sampled, + ) + self.assertEqual( + actual_context.trace_state, TraceState.get_default() + ) + + @patch.dict( + environ, + { + "_X_AMZN_TRACE_ID": ( + "Root=1-00000001-d188f8fa79d48a391a778fa6;" + "Parent=53995c3f42cd8ad8;Sampled=1;Foo=Bar" + ) + }, + ) + def test_extract_from_environment_variable(self): + + actual_context = get_current_span( + AwsXrayLambdaPropagator().extract( + {}, context=get_current(), getter=DefaultGetter() + ) + ).get_span_context() + + self.assertEqual( + hex(actual_context.trace_id), "0x1d188f8fa79d48a391a778fa6" + ) + self.assertEqual(hex(actual_context.span_id), "0x53995c3f42cd8ad8") + self.assertTrue( + actual_context.trace_flags.sampled, + ) + self.assertEqual(actual_context.trace_state, TraceState.get_default()) + + @patch.dict( + environ, + { + "_X_AMZN_TRACE_ID": ( + "Root=1-00000002-240000000000000000000002;" + "Parent=1600000000000002;Sampled=1;Foo=Bar" + ) + }, + ) + def test_add_link_from_environment_variable(self): + + propagator = AwsXrayLambdaPropagator() + + default_getter = DefaultGetter() + + carrier = CaseInsensitiveDict( + { + TRACE_HEADER_KEY: ( + 
"Root=1-00000001-240000000000000000000001;" + "Parent=1600000000000001;Sampled=1" + ) + } + ) + + extracted_context = propagator.extract( + carrier, context=get_current(), getter=default_getter + ) + + link_context = propagator.extract( + carrier, context=extracted_context, getter=default_getter + ) + + span = ReadableSpan( + "test", parent=extracted_context, links=[Link(link_context)] + ) + + span_parent_context = get_current_span(span.parent).get_span_context() + + self.assertEqual( + hex(span_parent_context.trace_id), "0x2240000000000000000000002" + ) + self.assertEqual( + hex(span_parent_context.span_id), "0x1600000000000002" + ) + self.assertTrue( + span_parent_context.trace_flags.sampled, + ) + self.assertEqual( + span_parent_context.trace_state, TraceState.get_default() + ) + + span_link_context = get_current_span( + span.links[0].context + ).get_span_context() + + self.assertEqual( + hex(span_link_context.trace_id), "0x1240000000000000000000001" + ) + self.assertEqual(hex(span_link_context.span_id), "0x1600000000000001") + self.assertTrue( + span_link_context.trace_flags.sampled, + ) + self.assertEqual( + span_link_context.trace_state, TraceState.get_default() + )
Add an X-Ray propagator that prioritizes the X-Ray environment variable. Similar to [this](https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1032).
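A hedged usage sketch of the propagator this change adds; registering it globally via `set_global_textmap` is an assumption about how a Lambda handler would wire it up, not something prescribed by the PR itself.

```python
# Register the Lambda-aware X-Ray propagator (added in the patch above) globally,
# so extract() can fall back to the _X_AMZN_TRACE_ID environment variable when the
# incoming carrier holds no valid span context.
from opentelemetry.propagate import set_global_textmap
from opentelemetry.propagators.aws.aws_xray_propagator import AwsXrayLambdaPropagator

set_global_textmap(AwsXrayLambdaPropagator())
```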
2024-06-03T16:36:06
JoaquinAmatRodrigo/skforecast
702
JoaquinAmatRodrigo__skforecast-702
[ "701" ]
40b1f50d5861afab7fb0bffa3ce7dac171678e79
diff --git a/skforecast/datasets/datasets.py b/skforecast/datasets/datasets.py --- a/skforecast/datasets/datasets.py +++ b/skforecast/datasets/datasets.py @@ -130,7 +130,7 @@ def fetch_dataset( 'date_format': '%Y-%m-%d %H:%M:%S', 'freq': 'H', 'description': ( - 'Hourly measures of several air quimical pollutant (pm2.5, co, no, ' + 'Hourly measures of several air chemical pollutant (pm2.5, co, no, ' 'no2, pm10, nox, o3, so2) at Valencia city.' ), 'source': ( @@ -375,4 +375,4 @@ def load_demo_dataset(version: str = 'latest') -> pd.Series: df = df['y'] df = df.sort_index() - return df \ No newline at end of file + return df diff --git a/skforecast/plot/plot.py b/skforecast/plot/plot.py --- a/skforecast/plot/plot.py +++ b/skforecast/plot/plot.py @@ -56,7 +56,7 @@ def plot_residuals( ) if residuals is None: - residuals = y_pred - y_true + residuals = y_true - y_pred if fig is None: fig = plt.figure(constrained_layout=True, **fig_kw)
Miscalculated residuals in plot_residuals() The source code in `plot_residuals()` calculates the residuals as follows: https://github.com/JoaquinAmatRodrigo/skforecast/blob/310915dc5b07b82acd731f43ab0f6b776ba5c6fd/skforecast/plot/plot.py#L59 But according to [Rob Hyndman's book (FPP3)](https://otexts.com/fpp3/accuracy.html#forecast-errors), the correct calculation should be: $$e_{T+h} = y_{T+h} - \hat{y}_{T+h|T}$$ where $y_{T+h}$ are the "*Ground truth (correct) values*" and $\hat{y}_{T+h|T}$ are the "*Values of predictions*". So, the above code should change to: ``` residuals = y_true - y_pred ```
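A quick numeric check of the corrected sign convention; the values here are made up purely for illustration.

```python
# error = actual - predicted, per FPP3; the previous code computed the negation.
import numpy as np

y_true = np.array([10.0, 12.0, 15.0])
y_pred = np.array([11.0, 11.5, 14.0])

residuals = y_true - y_pred
print(residuals)  # [-1.   0.5  1. ]
```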
2024-06-07T15:04:29
JoaquinAmatRodrigo/skforecast
727
JoaquinAmatRodrigo__skforecast-727
[ "722" ]
b33909cf6d6ffbb2d397aefefb848ddb9c52a400
diff --git a/skforecast/ForecasterRnn/ForecasterRnn.py b/skforecast/ForecasterRnn/ForecasterRnn.py --- a/skforecast/ForecasterRnn/ForecasterRnn.py +++ b/skforecast/ForecasterRnn/ForecasterRnn.py @@ -14,6 +14,7 @@ import matplotlib.pyplot as plt import numpy as np import pandas as pd +import keras from sklearn.pipeline import Pipeline from sklearn.base import clone from sklearn.preprocessing import MinMaxScaler @@ -41,14 +42,15 @@ # TODO. Test Grid search class ForecasterRnn(ForecasterBase): """ - This class turns any regressor compatible with the TensorFlow API into a - TensorFlow RNN multi-serie multi-step forecaster. A unique model is created - to forecast all time steps and series. See documentation for more details. + This class turns any regressor compatible with the Keras API into a + Keras RNN multi-serie multi-step forecaster. A unique model is created + to forecast all time steps and series. Keras enables workflows on top of + either JAX, TensorFlow, or PyTorch. See documentation for more details. Parameters ---------- - regressor : regressor or pipeline compatible with the TensorFlow API - An instance of a regressor or pipeline compatible with the TensorFlow API. + regressor : regressor or pipeline compatible with the Keras API + An instance of a regressor or pipeline compatible with the Keras API. levels : str, list Name of one or more time series to be predicted. This determine the series the forecaster will be handling. If `None`, all series used during training @@ -83,8 +85,8 @@ class ForecasterRnn(ForecasterBase): Attributes ---------- - regressor : regressor or pipeline compatible with the TensorFlow API - An instance of a regressor or pipeline compatible with the TensorFlow API. + regressor : regressor or pipeline compatible with the Keras API + An instance of a regressor or pipeline compatible with the Keras API. An instance of this regressor is trained for each step. All of them are stored in `self.regressors_`. levels : str, list @@ -218,7 +220,11 @@ def __init__( layer_init = self.regressor.layers[0] if lags == "auto": - self.lags = np.arange(layer_init.input_shape[0][1]) + 1 + if keras.__version__ < "3.0": + self.lags = np.arange(layer_init.input_shape[0][1]) + 1 + else: + self.lags = np.arange(layer_init.output.shape[1]) + 1 + warnings.warn( "Setting `lags` = 'auto'. `lags` are inferred from the regressor " "architecture. Avoid the warning with lags=lags." @@ -239,7 +245,10 @@ def __init__( layer_end = self.regressor.layers[-1] try: - self.series = layer_end.output_shape[-1] + if keras.__version__ < "3.0": + self.series = layer_end.output_shape[-1] + else: + self.series = layer_end.output.shape[-1] # if does not work, break the and raise an error the input shape should # be shape=(lags, n_series)) except: @@ -248,7 +257,10 @@ def __init__( ) if steps == "auto": - self.steps = np.arange(layer_end.output_shape[1]) + 1 + if keras.__version__ < "3.0": + self.steps = np.arange(layer_end.output_shape[1]) + 1 + else: + self.steps = np.arange(layer_end.output.shape[1]) + 1 warnings.warn( "`steps` default value = 'auto'. `steps` inferred from regressor " "architecture. Avoid the warning with steps=steps." 
@@ -263,7 +275,10 @@ def __init__( ) self.max_step = np.max(self.steps) - self.outputs = layer_end.output_shape[-1] + if keras.__version__ < "3.0": + self.outputs = layer_end.output_shape[-1] + else: + self.outputs = layer_end.output.shape[-1] if not isinstance(levels, (list, str, type(None))): raise TypeError( diff --git a/skforecast/ForecasterRnn/utils.py b/skforecast/ForecasterRnn/utils.py --- a/skforecast/ForecasterRnn/utils.py +++ b/skforecast/ForecasterRnn/utils.py @@ -8,11 +8,11 @@ from typing import Union, Any, Optional, Tuple, Callable import pandas as pd import re +import os from ..utils import check_optional_dependency try: - import tensorflow as tf - from tensorflow import keras + import keras from keras.models import Model from keras.layers import Dense, Input, Reshape, LSTM, SimpleRNN from keras.optimizers import Adam @@ -34,7 +34,7 @@ def create_and_compile_model( optimizer: object=Adam(learning_rate=0.01), loss: object=MeanSquaredError(), compile_kwars: dict={}, -) -> tf.keras.models.Model: +) -> keras.models.Model: """ Creates a neural network model for time series prediction with flexible recurrent layers. @@ -64,14 +64,31 @@ def create_and_compile_model( Optimization algorithm and learning rate. loss : object, default `MeanSquaredError()` Loss function for model training. + compile_kwargs : dict, default `{}` + Additional arguments for model compilation. Returns ------- - model : tf.keras.models.Model + model : keras.models.Model Compiled neural network model. """ - + + if keras.__version__ > "3": + print(f"keras version: {keras.__version__}") + print(f"Using backend: {keras.backend.backend()}") + if keras.backend.backend() == "tensorflow": + import tensorflow + print(f"tensorflow version: {tensorflow.__version__}") + elif keras.backend.backend() == "torch": + import torch + print(f"torch version: {torch.__version__}") + elif keras.backend.backend() == "jax": + import jax + print(f"jax version: {jax.__version__}") + else: + print("Backend not recognized") + err_msg = f"`series` must be a pandas DataFrame. Got {type(series)}." if not isinstance(series, pd.DataFrame): @@ -158,7 +175,7 @@ def create_and_compile_model( # Output layer x = Dense(levels * steps, activation="linear")(x) # model = Model(inputs=input_layer, outputs=x) - output_layer = tf.keras.layers.Reshape((steps, levels))(x) + output_layer = keras.layers.Reshape((steps, levels))(x) model = Model(inputs=input_layer, outputs=output_layer) # Compile the model if optimizer, loss or compile_kwargs are passed diff --git a/skforecast/utils/utils.py b/skforecast/utils/utils.py --- a/skforecast/utils/utils.py +++ b/skforecast/utils/utils.py @@ -34,7 +34,7 @@ ], 'deeplearning': [ 'matplotlib>=3.3, <3.9', - 'tensorflow>=2.13, <2.16', + 'keras>=2.6, <4.0', ], 'plotting': [ 'matplotlib>=3.3, <3.9',
diff --git a/skforecast/ForecasterRnn/tests/test_create_lags.py b/skforecast/ForecasterRnn/tests/test_create_lags.py --- a/skforecast/ForecasterRnn/tests/test_create_lags.py +++ b/skforecast/ForecasterRnn/tests/test_create_lags.py @@ -6,15 +6,15 @@ import pandas as pd from skforecast.ForecasterRnn import ForecasterRnn from skforecast.ForecasterRnn.utils import create_and_compile_model -import tensorflow as tf +import keras lags = 6 steps = 3 levels = "l1" activation = "relu" -optimizer = tf.keras.optimizers.Adam(learning_rate=0.01) -loss = tf.keras.losses.MeanSquaredError() +optimizer = keras.optimizers.Adam(learning_rate=0.01) +loss = keras.losses.MeanSquaredError() recurrent_units = 100 dense_units = [128, 64] diff --git a/skforecast/ForecasterRnn/tests/test_create_train_X_y.py b/skforecast/ForecasterRnn/tests/test_create_train_X_y.py --- a/skforecast/ForecasterRnn/tests/test_create_train_X_y.py +++ b/skforecast/ForecasterRnn/tests/test_create_train_X_y.py @@ -6,7 +6,7 @@ import pandas as pd from skforecast.ForecasterRnn import ForecasterRnn from skforecast.ForecasterRnn.utils import create_and_compile_model -import tensorflow as tf +import keras from sklearn.compose import ColumnTransformer from sklearn.preprocessing import StandardScaler @@ -18,8 +18,8 @@ steps = 1 levels = "1" activation = "relu" -optimizer = tf.keras.optimizers.Adam(learning_rate=0.01) -loss = tf.keras.losses.MeanSquaredError() +optimizer = keras.optimizers.Adam(learning_rate=0.01) +loss = keras.losses.MeanSquaredError() recurrent_units = 100 dense_units = [128, 64] diff --git a/skforecast/ForecasterRnn/tests/test_fit.py b/skforecast/ForecasterRnn/tests/test_fit.py --- a/skforecast/ForecasterRnn/tests/test_fit.py +++ b/skforecast/ForecasterRnn/tests/test_fit.py @@ -4,7 +4,7 @@ import numpy as np import pytest from skforecast.ForecasterRnn import ForecasterRnn -import tensorflow as tf +import keras from skforecast.ForecasterRnn.utils import create_and_compile_model @@ -13,8 +13,8 @@ steps = 1 levels = "1" activation = "relu" -optimizer = tf.keras.optimizers.Adam(learning_rate=0.01) -loss = tf.keras.losses.MeanSquaredError() +optimizer = keras.optimizers.Adam(learning_rate=0.01) +loss = keras.losses.MeanSquaredError() recurrent_units = 100 dense_units = [128, 64] diff --git a/skforecast/ForecasterRnn/tests/test_init.py b/skforecast/ForecasterRnn/tests/test_init.py --- a/skforecast/ForecasterRnn/tests/test_init.py +++ b/skforecast/ForecasterRnn/tests/test_init.py @@ -2,8 +2,7 @@ from skforecast.ForecasterRnn.utils import create_and_compile_model import pandas as pd import numpy as np -import tensorflow as tf -from tensorflow import keras +import keras from keras.losses import MeanSquaredError from keras.optimizers import Adam import pytest diff --git a/skforecast/ForecasterRnn/tests/test_plot_history.py b/skforecast/ForecasterRnn/tests/test_plot_history.py --- a/skforecast/ForecasterRnn/tests/test_plot_history.py +++ b/skforecast/ForecasterRnn/tests/test_plot_history.py @@ -4,7 +4,7 @@ import numpy as np import pytest from skforecast.ForecasterRnn import ForecasterRnn -import tensorflow as tf +import keras from skforecast.ForecasterRnn.utils import create_and_compile_model import matplotlib.pyplot as plt @@ -19,8 +19,8 @@ steps = 4 levels = ["1", "2"] activation = "relu" -optimizer = tf.keras.optimizers.Adam(learning_rate=0.01) -loss = tf.keras.losses.MeanSquaredError() +optimizer = keras.optimizers.Adam(learning_rate=0.01) +loss = keras.losses.MeanSquaredError() recurrent_units = 100 dense_units = [128, 64] diff --git 
a/skforecast/ForecasterRnn/tests/test_predict.py b/skforecast/ForecasterRnn/tests/test_predict.py --- a/skforecast/ForecasterRnn/tests/test_predict.py +++ b/skforecast/ForecasterRnn/tests/test_predict.py @@ -4,8 +4,8 @@ import numpy as np import pytest from skforecast.ForecasterRnn import ForecasterRnn -import tensorflow as tf from skforecast.ForecasterRnn.utils import create_and_compile_model +import keras series = pd.DataFrame( { @@ -18,8 +18,8 @@ steps = 4 levels = ["1", "2"] activation = "relu" -optimizer = tf.keras.optimizers.Adam(learning_rate=0.01) -loss = tf.keras.losses.MeanSquaredError() +optimizer = keras.optimizers.Adam(learning_rate=0.01) +loss = keras.losses.MeanSquaredError() recurrent_units = 100 dense_units = [128, 64] diff --git a/skforecast/ForecasterRnn/tests/test_utils.py b/skforecast/ForecasterRnn/tests/test_utils.py --- a/skforecast/ForecasterRnn/tests/test_utils.py +++ b/skforecast/ForecasterRnn/tests/test_utils.py @@ -1,7 +1,7 @@ import pytest import pandas as pd import numpy as np -import tensorflow as tf +import keras from skforecast.ForecasterRnn.utils import create_and_compile_model # test with several parameters for dense_units and recurrent_units @@ -34,8 +34,8 @@ def test_units(dense_units, recurrent_layer, recurrent_units): steps = 5 levels = 1 activation = "relu" - optimizer = tf.keras.optimizers.Adam(learning_rate=0.01) - loss = tf.keras.losses.MeanSquaredError() + optimizer = keras.optimizers.Adam(learning_rate=0.01) + loss = keras.losses.MeanSquaredError() # Call the function to create and compile the model model = create_and_compile_model( @@ -56,7 +56,7 @@ def test_units(dense_units, recurrent_layer, recurrent_units): recurrent_units = [recurrent_units] # Assert that the model is an instance of tf.keras.models.Model - assert isinstance(model, tf.keras.models.Model) + assert isinstance(model, keras.models.Model) # Assert that the model has the correct number of layers if dense_units is None: @@ -79,7 +79,7 @@ def test_units(dense_units, recurrent_layer, recurrent_units): def test_correct_input_type(): # Test if the function works with the correct input type model = create_and_compile_model(series_data, lags_data, steps_data, levels_data) - assert isinstance(model, tf.keras.models.Model) + assert isinstance(model, keras.models.Model) def test_incorrect_series_type():
Error when running the Deep Learning example code for time series forecasting in skforecast Dear skforecast developers, I am writing to you in the hope of getting your help with an error I found when trying to run the example code from "Deep Learning para la predicción de series temporales: Redes Neuronales Recurrentes (RNN) y Long Short-Term Memory (LSTM)", available at the following URL: https://cienciadedatos.net/documentos/py54-forecasting-con-deep-learning I carefully followed the steps described in the example, using the most recent version of the skforecast library (0.5.4) and TensorFlow (2.16.1). However, when running the code in my working environment (laptop with an AMD processor, Windows 11), I get the following error: ``` --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) Cell In[11], line 3 1 # Creación del forecaster 2 # ============================================================================== ----> 3 forecaster = ForecasterRnn( 4 regressor=model, 5 levels=levels, 6 transformer_series=MinMaxScaler(), 7 fit_kwargs={ 8 "epochs": 10, # Número de épocas para entrenar el modelo. 9 "batch_size": 32, # Tamaño del batch para entrenar el modelo. 10 "callbacks": [ 11 EarlyStopping(monitor="val_loss", patience=5) 12 ], # Callback para detener el entrenamiento cuando ya no esté aprendiendo más. 13 "series_val": data_val, # Datos de validación para el entrenamiento del modelo. 14 }, 15 ) 16 forecaster File c:\Users\luisc\AppData\Local\Programs\Python\Python311\Lib\site-packages\skforecast\ForecasterRnn\ForecasterRnn.py:226, in ForecasterRnn.__init__(self, regressor, levels, lags, steps, transformer_series, weight_func, fit_kwargs, forecaster_id, n_jobs, transformer_exog) 223 layer_init = self.regressor.layers[0] 225 if lags == "auto": --> 226 self.lags = np.arange(layer_init.input_shape[0][1]) + 1 227 warnings.warn( 228 "Setting `lags` = 'auto'. `lags` are inferred from the regressor " 229 "architecture. Avoid the warning with lags=lags." 230 ) 231 elif isinstance(lags, int): AttributeError: 'InputLayer' object has no attribute 'input_shape' ``` My current skforecast version is 0.12.1 and my tensorflow version is 2.16.1. I tried downgrading to 0.12.0 and 2.15.1 respectively, and I still get the same error. I would like to know if you can help me fix this error so I can run your code. Thank you very much in advance.
Hi @Chuello I think the link is not the correct one. @FernandoCarazoMelo > Hi @Chuello I think the link is not the correct one. > > @FernandoCarazoMelo Good afternoon Fernando, you're right, I've already edited the link, thanks. Hi @Chuello , I don't get any error when running it with tensorflow 2.15. Which keras version are you using? Can you print the session info? > Hi @Chuello , > > I don't get any error when running it with tensorflow 2.15. Which keras version are you using? Can you print the session info? Good afternoon Fernando, sure, here is the session_info output: ----- keras 3.3.3 matplotlib 3.9.0 numpy 1.26.4 pandas 2.2.2 plotly 5.22.0 session_info 1.0.0 skforecast 0.12.0 sklearn 1.4.2 tensorflow 2.16.1 ----- IPython 8.25.0 jupyter_client 8.6.2 jupyter_core 5.7.2 ----- Python 3.11.6 (tags/v3.11.6:8b6ee5b, Oct 2 2023, 14:57:12) [MSC v.1935 64 bit (AMD64)] Windows-10-10.0.22631-SP0 ----- Session information updated at 2024-06-19 16:55 skforecast version: 0.12.0 tensorflow version: 2.16.1
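The fix that was merged (see the patch above) gates the shape lookup on the installed Keras version; a condensed sketch of that check, mirroring the patch rather than adding anything new:

```python
# Keras < 3 exposes `input_shape` on the first layer; with Keras 3 that attribute
# is gone here, so the fix reads the symbolic tensor shape via `.output.shape`.
import keras
import numpy as np

def infer_lags(regressor):
    layer_init = regressor.layers[0]
    if keras.__version__ < "3.0":
        return np.arange(layer_init.input_shape[0][1]) + 1
    return np.arange(layer_init.output.shape[1]) + 1
```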
2024-06-19T23:44:59
ccnmtl/django-pagetree
49
ccnmtl__django-pagetree-49
[ "48" ]
abf98b58c2bdddf052f2e38c4a0d44a718c1dfde
diff --git a/pagetree/models.py b/pagetree/models.py --- a/pagetree/models.py +++ b/pagetree/models.py @@ -1,3 +1,4 @@ +import random from django.contrib.auth.models import User from django.core.cache import cache from django.db import models @@ -382,10 +383,22 @@ def available_pageblocks(self): return self.hierarchy.available_pageblocks() def add_pageblock_form(self): - class EditForm(forms.Form): - label = forms.CharField() - css_extra = forms.CharField(label="extra CSS classes") - return EditForm() + # This unique id should instead be derived from the block type's + # ID, but that requires a new templatetag. This works for now. + unique_id = random.randint(0, 10000) + + class AddPageBlockForm(forms.Form): + label = forms.CharField( + widget=forms.TextInput( + attrs={'id': 'id_label_%d' % unique_id} + )) + css_extra = forms.CharField( + label='extra CSS classes', + widget=forms.TextInput( + attrs={'id': 'id_css_extra_%d' % unique_id} + )) + + return AddPageBlockForm() def get_first_leaf(self): if self.is_leaf():
Multiple elements with the same ID are rendered on the "Add Pageblock" form In the "Add Pageblock" form, the modal forms render inputs with the `id` set to `id_label` and `id_css_extra`. Because there are multiple pageblock-adding modals in the DOM, multiple elements with the same `id` end up being rendered, which violates the assumption that an HTML ID is unique. This causes input selection to behave strangely: I'm unable to select the inputs with a left-click in Firefox and have to work around it by right-clicking. The solution is to make these IDs unique so they don't conflict with each other.
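The fix that was merged (shown in the patch above) derives a per-form suffix for the input ids; extracted here as a standalone sketch. The random suffix is the stopgap the patch itself uses, with the note that deriving it from the block type's ID would be the cleaner long-term option.

```python
# Build the "Add Pageblock" form with per-instance input ids so several modals can
# coexist on one page without colliding on id_label / id_css_extra.
import random
from django import forms

def add_pageblock_form():
    unique_id = random.randint(0, 10000)

    class AddPageBlockForm(forms.Form):
        label = forms.CharField(
            widget=forms.TextInput(attrs={"id": "id_label_%d" % unique_id})
        )
        css_extra = forms.CharField(
            label="extra CSS classes",
            widget=forms.TextInput(attrs={"id": "id_css_extra_%d" % unique_id}),
        )

    return AddPageBlockForm()
```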
2015-03-04T18:49:47
ccnmtl/django-pagetree
89
ccnmtl__django-pagetree-89
[ "66" ]
2e177e2ca721258227da7a471b9458037094e33f
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -27,7 +27,7 @@ setup( name="django-pagetree", - version="1.1.4", + version="1.1.5", author="Anders Pearson", author_email="[email protected]", url="https://github.com/ccnmtl/django-pagetree",
Custom pageblocks in Hierarchy menu Jess has a feature request for pagetree: WORTH has a giant hierarchy menu: https://worth2.ccnmtl.columbia.edu/pages/edit/ and it would be nice to see which sections have which pageblocks on them. She says it shouldn't list text blocks, html blocks, quizzes etc., since many of those will be present on every page.
2015-05-15T14:30:53
ccnmtl/django-pagetree
156
ccnmtl__django-pagetree-156
[ "1" ]
1bed764ad04ae0cde28434c357d08fddd32aef2c
diff --git a/pagetree/models.py b/pagetree/models.py --- a/pagetree/models.py +++ b/pagetree/models.py @@ -360,11 +360,13 @@ class EditSectionForm(forms.Form): slug = forms.CharField(initial=self.slug) show_toc = forms.BooleanField( initial=self.show_toc, + required=False, label="Show Table of Contents", help_text=("list table of contents of " "immediate child sections (if applicable)")) deep_toc = forms.BooleanField( initial=self.deep_toc, + required=False, label="Show Deep Table of Contents", help_text=( "include children of children (etc) in TOC. " diff --git a/pagetree/views.py b/pagetree/views.py --- a/pagetree/views.py +++ b/pagetree/views.py @@ -102,8 +102,13 @@ def edit_section(request, section_id, success_url=None): section.save_version(request.user, activity="edit section") section.label = request.POST.get('label', '') section.slug = slugify(request.POST.get('slug', section.label))[:50] - section.show_toc = request.POST.get('show_toc', False) - section.deep_toc = request.POST.get('deep_toc', False) + + value = request.POST.get('show_toc', False) + section.show_toc = value in ('True', True, 'on') + + value = request.POST.get('deep_toc', False) + section.deep_toc = value in ('True', True, 'on') + section.save() section.enforce_slug() section.save()
admin templates require django-smartif but it isn't listed in setup.py We should make it an explicit requirement so the missing dependency is found at install time rather than as a 500 error, or just remove it.
Pagetree doesn't seem to have admin templates anymore, so I think this is no longer an issue...
2017-03-17T18:09:06
getredash/redash
73
getredash__redash-73
[ "2" ]
5eddddb7b5ae012ea92822aceaf1c6d2ac8c0806
diff --git a/rd_service/data/models.py b/rd_service/data/models.py --- a/rd_service/data/models.py +++ b/rd_service/data/models.py @@ -51,7 +51,8 @@ class Meta: app_label = 'redash' db_table = 'queries' - def to_dict(self, with_result=True, with_stats=False): + def to_dict(self, with_result=True, with_stats=False, + with_visualizations=False): d = { 'id': self.id, 'latest_query_data_id': self.latest_query_data_id, @@ -75,6 +76,10 @@ def to_dict(self, with_result=True, with_stats=False): if with_result and self.latest_query_data_id: d['latest_query_data'] = self.latest_query_data.to_dict() + if with_visualizations: + d['visualizations'] = [vis.to_dict(with_query=False) + for vis in self.visualizations.all()] + return d @classmethod @@ -148,10 +153,41 @@ def __unicode__(self): return u"%s=%s" % (self.id, self.name) +class Visualization(models.Model): + id = models.AutoField(primary_key=True) + type = models.CharField(max_length=100) + query = models.ForeignKey(Query, related_name='visualizations') + name = models.CharField(max_length=255) + description = models.CharField(max_length=4096) + options = models.TextField() + + class Meta: + app_label = 'redash' + db_table = 'visualizations' + + def to_dict(self, with_query=True): + d = { + 'id': self.id, + 'type': self.type, + 'name': self.name, + 'description': self.description, + 'options': json.loads(self.options), + } + + if with_query: + d['query'] = self.query.to_dict() + + return d + + def __unicode__(self): + return u"%s=>%s" % (self.id, self.query_id) + + class Widget(models.Model): id = models.AutoField(primary_key=True) - query = models.ForeignKey(Query) type = models.CharField(max_length=100) + query = models.ForeignKey(Query, related_name='widgets') + visualization = models.ForeignKey(Visualization, related_name='widgets') width = models.IntegerField() options = models.TextField() dashboard = models.ForeignKey(Dashboard, related_name='widgets') @@ -163,10 +199,10 @@ class Meta: def to_dict(self): return { 'id': self.id, - 'query': self.query.to_dict(), 'type': self.type, 'width': self.width, 'options': json.loads(self.options), + 'visualization': self.visualization.to_dict(), 'dashboard_id': self.dashboard_id } diff --git a/rd_service/migrate.py b/rd_service/migrate.py new file mode 100644 --- /dev/null +++ b/rd_service/migrate.py @@ -0,0 +1,50 @@ +import json +import settings +from data.models import * + +# first run: + +# CREATE TABLE "visualizations" ( +# "id" serial NOT NULL PRIMARY KEY, +# "type" varchar(100) NOT NULL, +# "query_id" integer NOT NULL REFERENCES "queries" ("id") DEFERRABLE INITIALLY DEFERRED, +# "name" varchar(255) NOT NULL, +# "description" varchar(4096), +# "options" text NOT NULL +# ) +# ; + +# ALTER TABLE widgets ADD COLUMN "visualization_id" integer REFERENCES "visualizations" ("id") DEFERRABLE INITIALLY DEFERRED; + +if __name__ == '__main__': + print 'migrating Widgets -> Visualizations ...' 
+ + for query in Query.objects.filter(name__icontains="cohort"): + vis = Visualization(query=query, name=query.name, + description=query.description, + type="COHORT", options="{}") + vis.save() + + + for widget in Widget.objects.all(): + print 'processing widget %d' % widget.id + query = widget.query + vis_type = widget.type.upper() + + vis = query.visualizations.filter(type=vis_type) + if vis: + print 'found' + widget.visualization = vis[0] + widget.save() + + else: + options = json.loads(widget.options) + vis_options = {"series": options} if options else {} + vis_options = json.dumps(vis_options) + + vis = Visualization(query=query, name=query.name, + description=query.description, + type=vis_type, options=vis_options) + vis.save() + widget.visualization = vis + widget.save() \ No newline at end of file diff --git a/rd_service/server.py b/rd_service/server.py --- a/rd_service/server.py +++ b/rd_service/server.py @@ -167,7 +167,7 @@ def delete(self, widget_id): class DashboardHandler(BaseAuthenticatedHandler): def get(self, dashboard_slug=None): if dashboard_slug: - dashboard = data.models.Dashboard.objects.prefetch_related('widgets__query__latest_query_data').get(slug=dashboard_slug) + dashboard = data.models.Dashboard.objects.prefetch_related('widgets__visualization__query__latest_query_data').get(slug=dashboard_slug) self.write_json(dashboard.to_dict(with_widgets=True)) else: dashboards = [d.to_dict() for d in @@ -204,6 +204,7 @@ def post(self, id=None): query_def['created_at'] = dateutil.parser.parse(query_def['created_at']) query_def.pop('latest_query_data', None) + query_def.pop('visualizations', None) if id: query = data.models.Query(**query_def) @@ -221,7 +222,7 @@ def get(self, id=None): if id: q = data.models.Query.objects.get(pk=id) if q: - self.write_json(q.to_dict()) + self.write_json(q.to_dict(with_visualizations=True)) else: self.send_error(404) else: @@ -251,6 +252,30 @@ def post(self, _): self.write({'job': job.to_dict()}) +class VisualizationHandler(BaseAuthenticatedHandler): + def get(self, id): + pass + + def post(self, id=None): + kwargs = json.loads(self.request.body) + kwargs['options'] = json.dumps(kwargs['options']) + + if id: + vis = data.models.Visualization(**kwargs) + fields = kwargs.keys() + fields.remove('id') + vis.save(update_fields=fields) + else: + vis = data.models.Visualization(**kwargs) + vis.save() + + self.write_json(vis.to_dict(with_query=False)) + + def delete(self, id): + vis = data.models.Visualization.objects.get(pk=id) + vis.delete() + + class CsvQueryResultsHandler(BaseAuthenticatedHandler): def get_current_user(self): user = super(CsvQueryResultsHandler, self).get_current_user() @@ -312,6 +337,7 @@ def get_application(static_path, is_debug, redis_connection, data_manager): (r"/api/queries(?:/([0-9]*))?", QueriesHandler), (r"/api/query_results(?:/([0-9]*))?", QueryResultsHandler), (r"/api/jobs/(.*)", JobsHandler), + (r"/api/visualizations(?:/([0-9]*))?", VisualizationHandler), (r"/api/widgets(?:/([0-9]*))?", WidgetsHandler), (r"/api/dashboards(?:/(.*))?", DashboardHandler), (r"/admin/(.*)", MainHandler),
Visualizations workflow & object Visualizations (widget?) should have an object of their own containing the following data: - query - type (chart, cohort, grid, ...) - options Tasks: - [x] Visualization object - [x] UI to create new visualizations instead of the hardcoded option we have today - [x] Change the dashboard widgets to use visualizations rather than queries - [ ] Friendlier selector when adding new widgets to dashboard
There was a case (EvMe's query 607), where the data was unsorted and had too many series, which resulted in HighCharts dying. When we revisit visualizations we should take into account limits for different visualizers, and prevent the user from killing his browser. cc: @shayel. http://app.raw.densitydesign.org/#/ @amirnissim this requires some changes on the backend too, but let's try to move forward without them and on Sunday we will discuss the needed changes. Below is a "brain dump" about this feature, ask any questions you feel necessary: Basically the idea is to have different types of visualizations, and the ability to create a new visualization from any dataset (query). Eventually, this will replace the "Chart" and "Cohort" static tabs. Each visualization will define: 1. Name 2. Description 3. Properties (mandatory & optional + default values) 4. Rendering logic When creating new visualization it will have: 1. query_id - reference to queries table 2. visualization_type - string 3. options - JSON 4. ? Until we add the visualization object, let's start by creating the "infrastructure" for this in the frontend code. In terms of UI, I think we will represent each visualization as a tab where we currently have the "Chart" and "Cohort" tabs and also have a "+" tab, where you define a new visualization.
2014-02-04T14:42:35
getredash/redash
126
getredash__redash-126
[ "121", "121", "121" ]
6ee4e6cd8e064e701928937cfa2822b8a2851b28
diff --git a/redash/controllers.py b/redash/controllers.py --- a/redash/controllers.py +++ b/redash/controllers.py @@ -32,10 +32,11 @@ def ping(): @app.route('/admin/<anything>') @app.route('/dashboard/<anything>') @app.route('/queries') [email protected]('/queries/<anything>') [email protected]('/queries/<query_id>') [email protected]('/queries/<query_id>/<anything>') @app.route('/') @auth.required -def index(anything=None): +def index(**kwargs): email_md5 = hashlib.md5(current_user.email.lower()).hexdigest() gravatar_url = "https://www.gravatar.com/avatar/%s?s=40" % email_md5 @@ -243,7 +244,7 @@ def post(self, query_id): query_def['latest_query_data'] = query_def.pop('latest_query_data_id') models.Query.update_instance(query_id, **query_def) - + query = models.Query.get_by_id(query_id) return query.to_dict(with_result=False, with_visualizations=True)
Redesign queryfiddle page Attached are 2 modes of the queryfiddle page: dataset and query. Thanks @yosefw ![dataset - table](https://f.cloud.github.com/assets/1098126/2319734/e1700534-a382-11e3-95e2-8947d0d391f0.png) ![dataset - edit query](https://f.cloud.github.com/assets/1098126/2319735/e38cbdbc-a382-11e3-848b-053d0a7097f2.png)
2014-03-06T08:30:18
getredash/redash
172
getredash__redash-172
[ "106" ]
185b1c9df07bd49906af7dd9ac970491592ebeef
diff --git a/bin/latest_release.py b/bin/latest_release.py new file mode 100755 --- /dev/null +++ b/bin/latest_release.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +import sys +import requests + +if __name__ == '__main__': + response = requests.get('https://api.github.com/repos/EverythingMe/redash/releases') + + if response.status_code != 200: + exit("Failed getting releases (status code: %s)." % response.status_code) + + sorted_releases = sorted(response.json(), key=lambda release: release['id'], reverse=True) + + latest_release = sorted_releases[0] + asset_url = latest_release['assets'][0]['url'] + + if '--url-only' in sys.argv: + print asset_url + else: + print "Latest release: %s" % latest_release['tag_name'] + print latest_release['body'] + + print "\nTarball URL: %s" % asset_url + print 'wget: wget --header="Accept: application/octet-stream" %s' % asset_url + + diff --git a/bin/upload_version.py b/bin/upload_version.py --- a/bin/upload_version.py +++ b/bin/upload_version.py @@ -3,30 +3,44 @@ import sys import json import requests +import subprocess + + +def capture_output(command): + proc = subprocess.Popen(command, stdout=subprocess.PIPE) + return proc.stdout.read() + if __name__ == '__main__': - version = sys.argv[1] - filepath = sys.argv[2] - filename = filepath.split('/')[-1] - github_token = os.environ['GITHUB_TOKEN'] - auth = (github_token, 'x-oauth-basic') - commit_sha = os.environ['CIRCLE_SHA1'] - - params = json.dumps({ - 'tag_name': 'v{0}'.format(version), - 'name': 're:dash v{0}'.format(version), - 'target_commitish': commit_sha, - 'prerelease': True - }) - - response = requests.post('https://api.github.com/repos/everythingme/redash/releases', - data=params, - auth=auth) - - upload_url = response.json()['upload_url'] - upload_url = upload_url.replace('{?name}', '') - - with open(filepath) as file_content: - headers = {'Content-Type': 'application/gzip'} - response = requests.post(upload_url, file_content, params={'name': filename}, auth=auth, headers=headers, verify=False) + version = sys.argv[1] + filepath = sys.argv[2] + filename = filepath.split('/')[-1] + github_token = os.environ['GITHUB_TOKEN'] + auth = (github_token, 'x-oauth-basic') + commit_sha = os.environ['CIRCLE_SHA1'] + + commit_body = capture_output(["git", "log", "--format=%b", "-n", "1", commit_sha]) + file_md5_checksum = capture_output(["md5sum", filename]).split()[0] + file_sha256_checksum = capture_output(["sha256sum", filename]).split()[0] + version_body = "%s\n\nMD5: %s\nSHA256: %s" % (commit_body, file_md5_checksum, file_sha256_checksum) + + params = json.dumps({ + 'tag_name': 'v{0}'.format(version), + 'name': 're:dash v{0}'.format(version), + 'body': version_body, + 'target_commitish': commit_sha, + 'prerelease': True + }) + + response = requests.post('https://api.github.com/repos/everythingme/redash/releases', + data=params, + auth=auth) + + upload_url = response.json()['upload_url'] + upload_url = upload_url.replace('{?name}', '') + + with open(filepath) as file_content: + headers = {'Content-Type': 'application/gzip'} + response = requests.post(upload_url, file_content, params={'name': filename}, auth=auth, + headers=headers, verify=False) diff --git a/redash/data/worker.py b/redash/data/worker.py --- a/redash/data/worker.py +++ b/redash/data/worker.py @@ -157,9 +157,13 @@ def cancel(self): return if self.status == self.PROCESSING: - os.kill(self.process_id, signal.SIGINT) - else: - self.done(None, "Interrupted/Cancelled while running.") + try: + os.kill(self.process_id, signal.SIGINT) + except 
OSError as e: + logging.warning("[%s] Tried to cancel job but os.kill failed (pid=%d, error=%s)", + self.id, self.process_id, e) + + self.done(None, "Interrupted/Cancelled while running.") def save(self, pipe=None): if not pipe: @@ -173,6 +177,9 @@ def save(self, pipe=None): super(Job, self).save(pipe) + def expire(self, expire_time): + self.redis_connection.expire(self._redis_key(self.id), expire_time) + def processing(self, process_id): self.update(status=self.PROCESSING, process_id=process_id, @@ -279,6 +286,8 @@ def _fork_and_process(self, job_id): self.name, job_id) job.done(None, "Interrupted/Cancelled while running.") + job.expire(24 * 3600) + logging.info("[%s] Finished Processing %s (pid: %d status: %d)", self.name, job_id, self.child_pid, status)
diff --git a/tests/test_job.py b/tests/test_job.py --- a/tests/test_job.py +++ b/tests/test_job.py @@ -98,3 +98,14 @@ def test_unicode_serialization(self): loaded_job = Job.load(redis_connection, job.id) self.assertEquals(loaded_job.query, unicode_query) + def test_cancel_job_with_no_process(self): + job = Job(redis_connection, query=self.query, priority=self.priority) + job.status = Job.PROCESSING + job.process_id = 699999 + job.save() + + job.cancel() + + job = Job.load(redis_connection, job.id) + + self.assertEquals(job.status, Job.FAILED)
Set TTL on job keys in Redis To make sure the memory footprint stays constant and manageable.
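The change above attaches an expiry to the job key once processing finishes. A minimal illustration of the underlying Redis call follows; the key name and client setup are illustrative only, not the worker's actual naming scheme.

```python
# After a job is done, EXPIRE keeps its result readable for a while and then lets
# Redis drop the key, so finished jobs no longer accumulate forever.
import redis

r = redis.StrictRedis(host="localhost", port=6379, db=0)

job_key = "job:example"               # illustrative key, not the real naming scheme
r.hset(job_key, "status", 3)          # pretend the job has completed
r.expire(job_key, 24 * 3600)          # retain for one day, then evict
print(r.ttl(job_key))                 # roughly 86400 seconds
```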
2014-04-13T13:37:41
getredash/redash
192
getredash__redash-192
[ "177" ]
7d0324be91367633058d8eab09afec870d85257a
diff --git a/migrations/add_text_to_widgets.py b/migrations/add_text_to_widgets.py new file mode 100644 --- /dev/null +++ b/migrations/add_text_to_widgets.py @@ -0,0 +1,13 @@ +from playhouse.migrate import Migrator +from redash import db +from redash import models + + +if __name__ == '__main__': + db.connect_db() + migrator = Migrator(db.database) + with db.database.transaction(): + migrator.add_column(models.Widget, models.Widget.text, 'text') + migrator.set_nullable(models.Widget, models.Widget.visualization, True) + + db.close_db(None) \ No newline at end of file diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -255,11 +255,11 @@ def to_dict(self, with_widgets=False): if with_widgets: widgets = Widget.select(Widget, Visualization, Query, QueryResult, User)\ .where(Widget.dashboard == self.id)\ - .join(Visualization)\ - .join(Query)\ - .join(User)\ + .join(Visualization, join_type=peewee.JOIN_LEFT_OUTER)\ + .join(Query, join_type=peewee.JOIN_LEFT_OUTER)\ + .join(User, join_type=peewee.JOIN_LEFT_OUTER)\ .switch(Query)\ - .join(QueryResult) + .join(QueryResult, join_type=peewee.JOIN_LEFT_OUTER) widgets = {w.id: w.to_dict() for w in widgets} widgets_layout = map(lambda row: map(lambda widget_id: widgets.get(widget_id, None), row), layout) else: @@ -324,8 +324,8 @@ def __unicode__(self): class Widget(BaseModel): id = peewee.PrimaryKeyField() - visualization = peewee.ForeignKeyField(Visualization, related_name='widgets') - + visualization = peewee.ForeignKeyField(Visualization, related_name='widgets', null=True) + text = peewee.TextField(null=True) width = peewee.IntegerField() options = peewee.TextField() dashboard = peewee.ForeignKeyField(Dashboard, related_name='widgets', index=True) @@ -339,14 +339,19 @@ class Meta: db_table = 'widgets' def to_dict(self): - return { + d = { 'id': self.id, 'width': self.width, 'options': json.loads(self.options), - 'visualization': self.visualization.to_dict(), - 'dashboard_id': self._data['dashboard'] + 'dashboard_id': self._data['dashboard'], + 'text': self.text } + if self.visualization and self.visualization.id: + d['visualization'] = self.visualization.to_dict() + + return d + def __unicode__(self): return u"%s" % self.id
diff --git a/tests/test_controllers.py b/tests/test_controllers.py --- a/tests/test_controllers.py +++ b/tests/test_controllers.py @@ -182,6 +182,23 @@ def test_create_widget(self): [rv4.json['widget']['id']]]) self.assertEquals(rv4.json['new_row'], True) + def test_create_text_widget(self): + dashboard = dashboard_factory.create() + + data = { + 'visualization_id': None, + 'text': 'Sample text.', + 'dashboard_id': dashboard.id, + 'options': {}, + 'width': 2 + } + + with app.test_client() as c, authenticated_user(c): + rv = json_request(c.post, '/api/widgets', data=data) + + self.assertEquals(rv.status_code, 200) + self.assertEquals(rv.json['widget']['text'], 'Sample text.') + def test_delete_widget(self): widget = widget_factory.create()
Suggestion: Dashboard-level formatting and description 1. Would be useful to have a general description for an entire dashboard (not for each individual chart). That way you can explain the purpose of this dashboard. 2. Ability to add sections in a dashboard. Each section will be defined by a Heading, A short description for THAT section (see above), and the graphs. This could be edited either as another "graph" in the drag & drop layout editor that currently exists, or via a textual formatter (see #3 markdown). 3. Even better, allow power-users to manually edit a dashboard's layout using a markdown or similar editor. This will allow creating rich dashboards which are self-explanatory and possibly include images, "static" tables etc - all in one place.
2014-04-29T08:23:54
getredash/redash
194
getredash__redash-194
[ "174" ]
f23b434972dc71aaa6088b6e23e3e16e566ed14e
diff --git a/redash/data/worker.py b/redash/data/worker.py --- a/redash/data/worker.py +++ b/redash/data/worker.py @@ -314,7 +314,7 @@ def _fork_and_process(self, job_id): self.name, job_id) job.done(None, "Interrupted/Cancelled while running.") - job.expire(24 * 3600) + job.expire(settings.JOB_EXPIRY_TIME) logging.info("[%s] Finished Processing %s (pid: %d status: %d)", self.name, job_id, self.child_pid, status) diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -133,8 +133,13 @@ def to_dict(self): def get_latest(cls, data_source, query, ttl=0): query_hash = utils.gen_query_hash(query) - query = cls.select().where(cls.query_hash == query_hash, cls.data_source == data_source, - peewee.SQL("retrieved_at + interval '%s second' >= now() at time zone 'utc'", ttl)).order_by(cls.retrieved_at.desc()) + if ttl == -1: + query = cls.select().where(cls.query_hash == query_hash, + cls.data_source == data_source).order_by(cls.retrieved_at.desc()) + else: + query = cls.select().where(cls.query_hash == query_hash, cls.data_source == data_source, + peewee.SQL("retrieved_at + interval '%s second' >= now() at time zone 'utc'", + ttl)).order_by(cls.retrieved_at.desc()) return query.first() @@ -261,7 +266,23 @@ def to_dict(self, with_widgets=False): .switch(Query)\ .join(QueryResult, join_type=peewee.JOIN_LEFT_OUTER) widgets = {w.id: w.to_dict() for w in widgets} - widgets_layout = map(lambda row: map(lambda widget_id: widgets.get(widget_id, None), row), layout) + + # The following is a workaround for cases when the widget object gets deleted without the dashboard layout + # updated. This happens for users with old databases that didn't have a foreign key relationship between + # visualizations and widgets. + # It's temporary until better solution is implemented (we probably should move the position information + # to the widget). + widgets_layout = [] + for row in layout: + new_row = [] + for widget_id in row: + widget = widgets.get(widget_id, None) + if widget: + new_row.append(widget) + + widgets_layout.append(new_row) + + # widgets_layout = map(lambda row: map(lambda widget_id: widgets.get(widget_id, None), row), layout) else: widgets_layout = None diff --git a/redash/settings.py b/redash/settings.py --- a/redash/settings.py +++ b/redash/settings.py @@ -63,6 +63,7 @@ def parse_boolean(str): ALLOWED_EXTERNAL_USERS = array_from_string(os.environ.get("REDASH_ALLOWED_EXTERNAL_USERS", '')) STATIC_ASSETS_PATH = fix_assets_path(os.environ.get("REDASH_STATIC_ASSETS_PATH", "../rd_ui/app/")) WORKERS_COUNT = int(os.environ.get("REDASH_WORKERS_COUNT", "2")) +JOB_EXPIRY_TIME = int(os.environ.get("REDASH_JOB_EXPIRY_TIME", 3600*24)) COOKIE_SECRET = os.environ.get("REDASH_COOKIE_SECRET", "c292a0a3aa32397cdb050e233733900f") LOG_LEVEL = os.environ.get("REDASH_LOG_LEVEL", "INFO") EVENTS_LOG_PATH = os.environ.get("REDASH_EVENTS_LOG_PATH", "")
diff --git a/tests/test_models.py b/tests/test_models.py --- a/tests/test_models.py +++ b/tests/test_models.py @@ -80,4 +80,14 @@ def test_get_latest_returns_the_most_recent_result(self): found_query_result = models.QueryResult.get_latest(qr.data_source, qr.query, 60) + self.assertEqual(found_query_result.id, qr.id) + + def test_get_latest_returns_the_last_cached_result_for_negative_ttl(self): + yesterday = datetime.datetime.now() + datetime.timedelta(days=-100) + very_old = query_result_factory.create(retrieved_at=yesterday) + + yesterday = datetime.datetime.now() + datetime.timedelta(days=-1) + qr = query_result_factory.create(retrieved_at=yesterday) + found_query_result = models.QueryResult.get_latest(qr.data_source, qr.query, -1) + self.assertEqual(found_query_result.id, qr.id) \ No newline at end of file
Forked queries should reuse existing query-results
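A hedged sketch of the cache-lookup rule this change introduces (based on the `get_latest` change in the patch above): a TTL of -1 accepts any cached result, while a positive TTL only accepts sufficiently fresh ones. The helper name and result objects below are illustrative, not redash's actual model API.

```python
import datetime

def pick_cached_result(results, ttl_seconds):
    """results: objects with a `retrieved_at` datetime (newest wins).
    ttl_seconds == -1 accepts any cached result; otherwise only results
    retrieved within the last `ttl_seconds` seconds qualify."""
    if not results:
        return None
    newest = max(results, key=lambda r: r.retrieved_at)
    if ttl_seconds == -1:
        return newest
    age = (datetime.datetime.utcnow() - newest.retrieved_at).total_seconds()
    return newest if age <= ttl_seconds else None
```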
2014-04-29T13:06:32
getredash/redash
196
getredash__redash-196
[ "195" ]
855aecd85ff1dcac40e2b1d2a15da5b2256abb10
diff --git a/redash/data/query_runner_pg.py b/redash/data/query_runner_pg.py --- a/redash/data/query_runner_pg.py +++ b/redash/data/query_runner_pg.py @@ -43,12 +43,26 @@ def query_runner(query): cursor.execute(query) wait(connection) - column_names = [col.name for col in cursor.description] + column_names = set() + columns = [] + duplicates_counter = 1 + + for column in cursor.description: + # TODO: this deduplication needs to be generalized and reused in all query runners. + column_name = column.name + if column_name in column_names: + column_name = column_name + str(duplicates_counter) + duplicates_counter += 1 + + column_names.add(column_name) + + columns.append({ + 'name': column_name, + 'friendly_name': column_friendly_name(column_name), + 'type': None + }) rows = [dict(zip(column_names, row)) for row in cursor] - columns = [{'name': col.name, - 'friendly_name': column_friendly_name(col.name), - 'type': None} for col in cursor.description] data = {'columns': columns, 'rows': rows} json_data = json.dumps(data, cls=JSONEncoder)
When two columns have the same name their values get overridden

```sql
SELECT 1, 2
```

will result in both columns having the same value.
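The failure is easy to reproduce in plain Python, independent of redash: building row dictionaries keyed by column name silently keeps only the last value when two columns share a name. A small illustration (not redash code):

```python
column_names = ["?column?", "?column?"]   # what SELECT 1, 2 produces in PostgreSQL
row = (1, 2)

# dict() keeps only the last value for a repeated key, so one column "wins":
print(dict(zip(column_names, row)))       # {'?column?': 2}
```

Renaming duplicates before zipping, as the patch above does, preserves both values.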
2014-05-01T14:54:00
getredash/redash
221
getredash__redash-221
[ "209" ]
b0cc646b5e50594d23156eec3e2fa116335736b1
diff --git a/redash/tasks.py b/redash/tasks.py --- a/redash/tasks.py +++ b/redash/tasks.py @@ -58,8 +58,8 @@ def add_task(cls, query, data_source, scheduled=False): pipe = redis_connection.pipeline() try: - pipe.watch('query_hash_job:%s' % query_hash) - job_id = pipe.get('query_hash_job:%s' % query_hash) + pipe.watch(cls._job_lock_id(query_hash, data_source.id)) + job_id = pipe.get(cls._job_lock_id(query_hash, data_source.id)) if job_id: logging.info("[Manager][%s] Found existing job: %s", query_hash, job_id) @@ -75,7 +75,7 @@ def add_task(cls, query, data_source, scheduled=False): result = execute_query.apply_async(args=(query, data_source.id), queue=queue_name) job = cls(async_result=result) logging.info("[Manager][%s] Created new job: %s", query_hash, job.id) - pipe.set('query_hash_job:%s' % query_hash, job.id) + pipe.set(cls._job_lock_id(query_hash, data_source.id), job.id) pipe.execute() break @@ -116,6 +116,9 @@ def to_dict(self): def cancel(self): return self._async_result.revoke(terminate=True) + @staticmethod + def _job_lock_id(query_hash, data_source_id): + return "query_hash_job:%s:%s" % (data_source_id, query_hash) @celery.task(base=BaseTask) def refresh_queries(): @@ -178,11 +181,11 @@ def execute_query(self, query, data_source_id): self.update_state(state='STARTED', meta={'start_time': start_time, 'error': error, 'custom_message': ''}) - # TODO: it is possible that storing the data will fail, and we will need to retry - # while we already marked the job as done # Delete query_hash - redis_connection.delete('query_hash_job:%s' % query_hash) + redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id)) + # TODO: it is possible that storing the data will fail, and we will need to retry + # while we already marked the job as done if not error: query_result = models.QueryResult.store_result(data_source.id, query_hash, query, data, run_time, datetime.datetime.utcnow()) else:
User can't add another dashboard after creating one without refreshing
2014-05-18T14:01:06
getredash/redash
248
getredash__redash-248
[ "243" ]
5310498d0f15d3ef2129cdb41167b69bf52fb0e1
diff --git a/redash/controllers.py b/redash/controllers.py --- a/redash/controllers.py +++ b/redash/controllers.py @@ -294,11 +294,11 @@ def post(self): query.create_default_visualizations() - return query.to_dict(with_result=False) + return query.to_dict() @require_permission('view_query') def get(self): - return [q.to_dict(with_result=False, with_stats=True) for q in models.Query.all_queries()] + return [q.to_dict(with_stats=True) for q in models.Query.all_queries()] class QueryAPI(BaseResource): @@ -318,7 +318,7 @@ def post(self, query_id): query = models.Query.get_by_id(query_id) - return query.to_dict(with_result=False, with_visualizations=True) + return query.to_dict(with_visualizations=True) @require_permission('view_query') def get(self, query_id): @@ -392,7 +392,7 @@ def post(self): 'error': 'Access denied for table(s): %s' % (metadata.used_tables) } } - + models.ActivityLog( user=self.current_user, type=models.ActivityLog.QUERY_EXECUTION, diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -78,7 +78,7 @@ def permissions(self): class Group(BaseModel): DEFAULT_PERMISSIONS = ['create_dashboard', 'create_query', 'edit_dashboard', 'edit_query', 'view_query', 'view_source', 'execute_query'] - + id = peewee.PrimaryKeyField() name = peewee.CharField(max_length=100) permissions = ArrayField(peewee.CharField, default=DEFAULT_PERMISSIONS) @@ -151,7 +151,7 @@ def verify_password(self, password): class ActivityLog(BaseModel): QUERY_EXECUTION = 1 - + id = peewee.PrimaryKeyField() user = peewee.ForeignKeyField(User) type = peewee.IntegerField() @@ -277,7 +277,7 @@ def create_default_visualizations(self): type="TABLE", options="{}") table_visualization.save() - def to_dict(self, with_result=True, with_stats=False, with_visualizations=False, with_user=True): + def to_dict(self, with_stats=False, with_visualizations=False, with_user=True): d = { 'id': self.id, 'latest_query_data_id': self._data.get('latest_query_data', None), @@ -307,9 +307,6 @@ def to_dict(self, with_result=True, with_stats=False, with_visualizations=False, d['visualizations'] = [vis.to_dict(with_query=False) for vis in self.visualizations] - if with_result and self.latest_query_data: - d['latest_query_data'] = self.latest_query_data.to_dict() - return d @classmethod @@ -383,13 +380,11 @@ def to_dict(self, with_widgets=False): layout = json.loads(self.layout) if with_widgets: - widgets = Widget.select(Widget, Visualization, Query, QueryResult, User)\ + widgets = Widget.select(Widget, Visualization, Query, User)\ .where(Widget.dashboard == self.id)\ .join(Visualization, join_type=peewee.JOIN_LEFT_OUTER)\ .join(Query, join_type=peewee.JOIN_LEFT_OUTER)\ - .join(User, join_type=peewee.JOIN_LEFT_OUTER)\ - .switch(Query)\ - .join(QueryResult, join_type=peewee.JOIN_LEFT_OUTER) + .join(User, join_type=peewee.JOIN_LEFT_OUTER) widgets = {w.id: w.to_dict() for w in widgets} # The following is a workaround for cases when the widget object gets deleted without the dashboard layout @@ -539,4 +534,4 @@ def create_db(create_tables, drop_tables): if create_tables and not model.table_exists(): model.create_table() - db.close_db(None) \ No newline at end of file + db.close_db(None)
Load query results in dashboard with separate requests

Currently when requesting dashboard data from the server it returns it with all the visualizations, queries and query results. While at first this seemed like a good idea (to have a single HTTP request), it has some downsides:

1. In case of large data sets, getting them all in one request puts too much stress on the server.
2. Inability to use HTTP caching in case the same query is used for several visualizations or if it was loaded already (and the cache isn't stale).
3. It prevents "progressive" load, as data for all the queries needs to be downloaded before a single visualization can be seen.

Therefore let's change the API to return everything aside from query results. It might even be just a server change, as in theory, in the past this case was handled properly by the application (although a lot has changed since).
2014-07-28T14:29:14
getredash/redash
252
getredash__redash-252
[ "228" ]
c2d621ae0fcff82ca34cfcc3d0ee20a3ffa03a79
diff --git a/redash/cache.py b/redash/cache.py new file mode 100644 --- /dev/null +++ b/redash/cache.py @@ -0,0 +1,8 @@ +from flask import make_response +from functools import update_wrapper + +ONE_YEAR = 60 * 60 * 24 * 365.25 + +headers = { + 'Cache-Control': 'max-age=%d' % ONE_YEAR +} diff --git a/redash/controllers.py b/redash/controllers.py --- a/redash/controllers.py +++ b/redash/controllers.py @@ -26,6 +26,7 @@ import logging from tasks import QueryTask +from cache import headers as cache_headers @app.route('/ping', methods=['GET']) def ping(): @@ -417,7 +418,8 @@ class QueryResultAPI(BaseResource): def get(self, query_result_id): query_result = models.QueryResult.get_by_id(query_result_id) if query_result: - return {'query_result': query_result.to_dict()} + data = json.dumps({'query_result': query_result.to_dict()}, cls=utils.JSONEncoder) + return make_response(data, 200, cache_headers) else: abort(404)
HTTP caching headers for /api/query_results (take into account TTL and other factors) Related: #145.
Related: #243. QueryResult calls by id can be cached forever. This can be a huge win for performance, now that we load it separately.
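A minimal Flask sketch of the far-future caching idea described here, mirroring the Cache-Control header added in the patch above; the route and payload are placeholders, not redash's exact handler.

```python
import json
from flask import Flask, make_response

app = Flask(__name__)
ONE_YEAR = int(60 * 60 * 24 * 365.25)

@app.route("/api/query_results/<int:query_result_id>")
def query_result(query_result_id):
    # A result addressed by its id never changes, so it can be cached aggressively.
    payload = json.dumps({"query_result": {"id": query_result_id}})
    response = make_response(payload, 200)
    response.headers["Content-Type"] = "application/json"
    response.headers["Cache-Control"] = "max-age=%d" % ONE_YEAR
    return response
```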
2014-08-04T13:22:56
getredash/redash
279
getredash__redash-279
[ "278" ]
5e970b73d5224c621c031d692daeb3fa1dce32f9
diff --git a/redash/controllers.py b/redash/controllers.py --- a/redash/controllers.py +++ b/redash/controllers.py @@ -414,48 +414,52 @@ def post(self): class QueryResultAPI(BaseResource): - @require_permission('view_query') - def get(self, query_result_id): - query_result = models.QueryResult.get_by_id(query_result_id) - if query_result: - data = json.dumps({'query_result': query_result.to_dict()}, cls=utils.JSONEncoder) - return make_response(data, 200, cache_headers) - else: - abort(404) + @staticmethod + def csv_response(query_result): + s = cStringIO.StringIO() + + query_data = json.loads(query_result.data) + writer = csv.DictWriter(s, fieldnames=[col['name'] for col in query_data['columns']]) + writer.writer = utils.UnicodeWriter(s) + writer.writeheader() + for row in query_data['rows']: + for k, v in row.iteritems(): + if isinstance(v, numbers.Number) and (v > 1000 * 1000 * 1000 * 100): + row[k] = datetime.datetime.fromtimestamp(v/1000.0) + + writer.writerow(row) + headers = {'Content-Type': "text/csv; charset=UTF-8"} + headers.update(cache_headers) + return make_response(s.getvalue(), 200, headers) -class CsvQueryResultsAPI(BaseResource): @require_permission('view_query') - def get(self, query_id, query_result_id=None): - if not query_result_id: + def get(self, query_id=None, query_result_id=None, filetype='json'): + if query_result_id is None and query_id is not None: query = models.Query.get(models.Query.id == query_id) if query: query_result_id = query._data['latest_query_data'] - query_result = query_result_id and models.QueryResult.get_by_id(query_result_id) - if query_result: - s = cStringIO.StringIO() - - query_data = json.loads(query_result.data) - writer = csv.DictWriter(s, fieldnames=[col['name'] for col in query_data['columns']]) - writer.writer = utils.UnicodeWriter(s) - writer.writeheader() - for row in query_data['rows']: - for k, v in row.iteritems(): - if isinstance(v, numbers.Number) and (v > 1000 * 1000 * 1000 * 100): - row[k] = datetime.datetime.fromtimestamp(v/1000.0) + if query_result_id: + query_result = models.QueryResult.get_by_id(query_result_id) - writer.writerow(row) + if query_result: + if filetype == 'json': + data = json.dumps({'query_result': query_result.to_dict()}, cls=utils.JSONEncoder) + return make_response(data, 200, cache_headers) + else: + return self.csv_response(query_result) - return make_response(s.getvalue(), 200, {'Content-Type': "text/csv; charset=UTF-8"}) else: abort(404) -api.add_resource(CsvQueryResultsAPI, '/api/queries/<query_id>/results/<query_result_id>.csv', - '/api/queries/<query_id>/results.csv', - endpoint='csv_query_results') + api.add_resource(QueryResultListAPI, '/api/query_results', endpoint='query_results') -api.add_resource(QueryResultAPI, '/api/query_results/<query_result_id>', endpoint='query_result') +api.add_resource(QueryResultAPI, + '/api/query_results/<query_result_id>', + '/api/queries/<query_id>/results.<filetype>', + '/api/queries/<query_id>/results/<query_result_id>.<filetype>', + endpoint='query_result') class JobAPI(BaseResource):
API - result data in JSON from query id

Today, in order to get query results in JSON form, the URL references the query_data_id, which is retrieved in a separate query data request by query_id. So, in order to avoid 2 requests, please allow getting the results by query_id (as is done with the CSV format):

https://redash.host.com/api/queries/1234/results.json
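A client-side sketch of what the requested endpoint allows, using `requests`; the host, query id, and API key are placeholders taken from the issue, and the exact payload shape depends on the redash version.

```python
import requests

# One request: the latest result for a query, addressed by query id.
url = "https://redash.host.com/api/queries/1234/results.json"
response = requests.get(url, params={"api_key": "XXXX"})
response.raise_for_status()
print(response.json())  # payload shape depends on the redash version
```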
2014-09-02T14:52:38
getredash/redash
280
getredash__redash-280
[ "261" ]
365b8a8c93bff4db4d1f1b822bb3d9bef648e3d7
diff --git a/redash/controllers.py b/redash/controllers.py --- a/redash/controllers.py +++ b/redash/controllers.py @@ -111,7 +111,6 @@ def status_api(): manager_status = redis_connection.hgetall('redash:status') status['manager'] = manager_status - status['manager']['queue_size'] = redis_connection.llen('queries') + redis_connection.llen('scheduled_queries') status['manager']['outdated_queries_count'] = models.Query.outdated_queries().count() queues = {} diff --git a/redash/tasks.py b/redash/tasks.py --- a/redash/tasks.py +++ b/redash/tasks.py @@ -1,6 +1,7 @@ import time import datetime import logging +import itertools import redis from celery import Task from celery.result import AsyncResult @@ -64,7 +65,12 @@ def add_task(cls, query, data_source, scheduled=False): logging.info("[Manager][%s] Found existing job: %s", query_hash, job_id) job = cls(job_id=job_id) - else: + if job.is_cancelled: + logging.info("[%s] job found cancelled already, removing lock", query_hash) + redis_connection.delete(QueryTask._job_lock_id(query_hash, data_source.id)) + job = None + + if not job: pipe.multi() if scheduled: @@ -113,6 +119,14 @@ def to_dict(self): 'query_result_id': query_result_id, } + @property + def is_cancelled(self): + return self._async_result.status == 'REVOKED' + + @property + def celery_status(self): + return self._async_result.status + def cancel(self): return self._async_result.revoke(terminate=True)
Cancelling queries sends them into "limbo"

If you cancel a query, the lock is not deleted, and the leftover lock prevents the query from running again.

1. If Celery raises an exception when cancelling a task, we need to handle it and cancel the task at this point.
2. Expire the lock keys after a predefined time.
3. When locking, if it's possible to check whether a job is still running, verify that it is still running/exists; if not, ignore the lock.
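A hedged sketch of item 2 in the list above (expiring the lock key) using redis-py; the key layout mirrors the per-data-source lock key seen in the patches above, but the timeout and function name are illustrative.

```python
import redis

redis_connection = redis.StrictRedis()

def acquire_job_lock(query_hash, data_source_id, job_id, timeout=3600):
    """Record which job owns a query, but let the lock expire on its own so a
    cancelled or crashed job cannot keep the query locked forever."""
    key = "query_hash_job:%s:%s" % (data_source_id, query_hash)
    # nx=True only sets the key if it does not already exist; ex= sets a TTL in seconds.
    return redis_connection.set(key, job_id, nx=True, ex=timeout)
```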
2014-09-06T15:09:55
getredash/redash
378
getredash__redash-378
[ "263" ]
b1f97e8c8d2dc4f644f88c44e71e82b32bc46152
diff --git a/migrations/0002_fix_timestamp_fields.py b/migrations/0002_fix_timestamp_fields.py new file mode 100644 --- /dev/null +++ b/migrations/0002_fix_timestamp_fields.py @@ -0,0 +1,21 @@ +from redash.models import db + +if __name__ == '__main__': + db.connect_db() + columns = ( + ('activity_log', 'created_at'), + ('dashboards', 'created_at'), + ('data_sources', 'created_at'), + ('events', 'created_at'), + ('groups', 'created_at'), + ('queries', 'created_at'), + ('widgets', 'created_at'), + ('query_results', 'retrieved_at') + ) + + with db.database.transaction(): + for column in columns: + db.database.execute_sql("ALTER TABLE {} ALTER COLUMN {} TYPE timestamp with time zone;".format(*column)) + + db.close_db(None) + diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -9,7 +9,7 @@ import peewee from passlib.apps import custom_app_context as pwd_context -from playhouse.postgres_ext import ArrayField +from playhouse.postgres_ext import ArrayField, DateTimeTZField, PostgresqlExtDatabase from flask.ext.login import UserMixin, AnonymousUserMixin from redash import utils, settings @@ -18,8 +18,9 @@ class Database(object): def __init__(self): self.database_config = dict(settings.DATABASE_CONFIG) + self.database_config['register_hstore'] = False self.database_name = self.database_config.pop('name') - self.database = peewee.PostgresqlDatabase(self.database_name, **self.database_config) + self.database = PostgresqlExtDatabase(self.database_name, **self.database_config) self.app = None self.pid = os.getpid() @@ -96,7 +97,7 @@ class Group(BaseModel): name = peewee.CharField(max_length=100) permissions = ArrayField(peewee.CharField, default=DEFAULT_PERMISSIONS) tables = ArrayField(peewee.CharField) - created_at = peewee.DateTimeField(default=datetime.datetime.now) + created_at = DateTimeTZField(default=datetime.datetime.now) class Meta: db_table = 'groups' @@ -173,7 +174,7 @@ class ActivityLog(BaseModel): user = peewee.ForeignKeyField(User) type = peewee.IntegerField() activity = peewee.TextField() - created_at = peewee.DateTimeField(default=datetime.datetime.now) + created_at = DateTimeTZField(default=datetime.datetime.now) class Meta: db_table = 'activity_log' @@ -198,7 +199,7 @@ class DataSource(BaseModel): options = peewee.TextField() queue_name = peewee.CharField(default="queries") scheduled_queue_name = peewee.CharField(default="queries") - created_at = peewee.DateTimeField(default=datetime.datetime.now) + created_at = DateTimeTZField(default=datetime.datetime.now) class Meta: db_table = 'data_sources' @@ -222,7 +223,7 @@ class QueryResult(BaseModel): query = peewee.TextField() data = peewee.TextField() runtime = peewee.FloatField() - retrieved_at = peewee.DateTimeField() + retrieved_at = DateTimeTZField() class Meta: db_table = 'query_results' @@ -297,7 +298,7 @@ class Query(BaseModel): user_email = peewee.CharField(max_length=360, null=True) user = peewee.ForeignKeyField(User) is_archived = peewee.BooleanField(default=False, index=True) - created_at = peewee.DateTimeField(default=datetime.datetime.now) + created_at = DateTimeTZField(default=datetime.datetime.now) class Meta: db_table = 'queries' @@ -441,7 +442,7 @@ class Dashboard(BaseModel): layout = peewee.TextField() dashboard_filters_enabled = peewee.BooleanField(default=False) is_archived = peewee.BooleanField(default=False, index=True) - created_at = peewee.DateTimeField(default=datetime.datetime.now) + created_at = DateTimeTZField(default=datetime.datetime.now) class Meta: db_table = 
'dashboards' @@ -552,7 +553,7 @@ class Widget(BaseModel): width = peewee.IntegerField() options = peewee.TextField() dashboard = peewee.ForeignKeyField(Dashboard, related_name='widgets', index=True) - created_at = peewee.DateTimeField(default=datetime.datetime.now) + created_at = DateTimeTZField(default=datetime.datetime.now) # unused; kept for backward compatability: type = peewee.CharField(max_length=100, null=True) @@ -586,13 +587,14 @@ def delete_instance(self, *args, **kwargs): self.dashboard.save() super(Widget, self).delete_instance(*args, **kwargs) + class Event(BaseModel): user = peewee.ForeignKeyField(User, related_name="events") action = peewee.CharField() object_type = peewee.CharField() object_id = peewee.CharField(null=True) additional_properties = peewee.TextField(null=True) - created_at = peewee.DateTimeField(default=datetime.datetime.now) + created_at = DateTimeTZField(default=datetime.datetime.now) class Meta: db_table = 'events'
Timestamp fields created as "TIMESTAMP without time zone", resulting in wrong timestamps in the UI

We need to either convert them to "TIMESTAMP with time zone" or make sure we send UTC times to the database (probably better the latter?).
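A sketch of the conversion route (which is what the migration in the patch above does); the table and column names are examples and the connection string is a placeholder, not redash's real configuration.

```python
import psycopg2

columns = (
    ("queries", "created_at"),
    ("query_results", "retrieved_at"),
)

connection = psycopg2.connect("dbname=redash")
with connection:                      # commits the transaction on success
    with connection.cursor() as cursor:
        for table, column in columns:
            cursor.execute(
                "ALTER TABLE {} ALTER COLUMN {} TYPE timestamp with time zone;".format(table, column)
            )
connection.close()
```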
2015-02-23T07:03:29
getredash/redash
422
getredash__redash-422
[ "288" ]
bf5fe7d2c7a09bd0433fba5446f03e3e140e5e35
diff --git a/redash/query_runner/big_query.py b/redash/query_runner/big_query.py --- a/redash/query_runner/big_query.py +++ b/redash/query_runner/big_query.py @@ -5,6 +5,8 @@ import sys import time +import requests + from redash.query_runner import * from redash.utils import JSONEncoder @@ -15,6 +17,7 @@ from apiclient.discovery import build from apiclient.errors import HttpError from oauth2client.client import SignedJwtAssertionCredentials + from oauth2client import gce enabled = True except ImportError: @@ -66,18 +69,6 @@ def _load_key(filename): f.close() -def _get_bigquery_service(service_account, private_key): - scope = [ - "https://www.googleapis.com/auth/bigquery", - ] - - credentials = SignedJwtAssertionCredentials(service_account, private_key, scope=scope) - http = httplib2.Http() - http = credentials.authorize(http) - - return build("bigquery", "v2", http=http) - - def _get_query_results(jobs, project_id, job_id, start_index): query_reply = jobs.getQueryResults(projectId=project_id, jobId=job_id, startIndex=start_index).execute() logging.debug('query_reply %s', query_reply) @@ -117,11 +108,23 @@ def configuration_schema(cls): def __init__(self, configuration_json): super(BigQuery, self).__init__(configuration_json) - self.private_key = _load_key(self.configuration["privateKey"]) + def _get_bigquery_service(self): + scope = [ + "https://www.googleapis.com/auth/bigquery", + ] + + private_key = _load_key(self.configuration["privateKey"]) + credentials = SignedJwtAssertionCredentials(self.configuration['serviceAccount'], private_key, scope=scope) + http = httplib2.Http() + http = credentials.authorize(http) + + return build("bigquery", "v2", http=http) + + def _get_project_id(self): + return self.configuration["projectId"] def run_query(self, query): - bigquery_service = _get_bigquery_service(self.configuration["serviceAccount"], - self.private_key) + bigquery_service = self._get_bigquery_service() jobs = bigquery_service.jobs() job_data = { @@ -134,13 +137,13 @@ def run_query(self, query): logger.debug("BigQuery got query: %s", query) - project_id = self.configuration["projectId"] + project_id = self._get_project_id() try: insert_response = jobs.insert(projectId=project_id, body=job_data).execute() current_row = 0 query_reply = _get_query_results(jobs, project_id=project_id, - job_id=insert_response['jobReference']['jobId'], start_index=current_row) + job_id=insert_response['jobReference']['jobId'], start_index=current_row) logger.debug("bigquery replied: %s", query_reply) @@ -176,4 +179,26 @@ def run_query(self, query): return json_data, error -register(BigQuery) \ No newline at end of file + +class BigQueryGCE(BigQuery): + @classmethod + def type(cls): + return "bigquery_gce" + + @classmethod + def configuration_schema(cls): + return {} + + def _get_project_id(self): + return requests.get('http://metadata/computeMetadata/v1/project/project-id', headers={'Metadata-Flavor': 'Google'}).content + + def _get_bigquery_service(self): + credentials = gce.AppAssertionCredentials(scope='https://www.googleapis.com/auth/bigquery') + http = httplib2.Http() + http = credentials.authorize(http) + + return build("bigquery", "v2", http=http) + + +register(BigQuery) +register(BigQueryGCE) \ No newline at end of file
Add support for BigQuery instance-based authorization on GCE More details here: https://stackoverflow.com/questions/26031492/how-to-auth-into-bigquery-on-google-compute-engine/26031493#26031493
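A sketch of what instance-based authorization looks like, mirroring the calls used in the patch above (the older `apiclient`/`oauth2client` libraries): credentials come from the instance's service account, and the project id from the GCE metadata server.

```python
import httplib2
import requests
from apiclient.discovery import build
from oauth2client import gce

def bigquery_service_on_gce():
    # Uses the GCE instance's service account instead of a private key file.
    credentials = gce.AppAssertionCredentials(scope="https://www.googleapis.com/auth/bigquery")
    http = credentials.authorize(httplib2.Http())
    return build("bigquery", "v2", http=http)

def project_id_from_metadata():
    # The metadata server is only reachable from inside the GCE instance.
    return requests.get(
        "http://metadata/computeMetadata/v1/project/project-id",
        headers={"Metadata-Flavor": "Google"},
    ).content
```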
2015-05-10T05:47:20
getredash/redash
445
getredash__redash-445
[ "444" ]
b56e87ceb26e5812a94d8f8c0faec4bbb3f279e6
diff --git a/redash/tasks.py b/redash/tasks.py --- a/redash/tasks.py +++ b/redash/tasks.py @@ -252,9 +252,9 @@ def execute_query(self, query, data_source_id, metadata): metadata['Query Hash'] = query_hash metadata['Queue'] = self.request.delivery_info['routing_key'] - annotation = ", ".join(["{}: {}".format(k, v) for k, v in metadata.iteritems()]) + annotation = u", ".join([u"{}: {}".format(k, v) for k, v in metadata.iteritems()]) - logging.debug("Annotation: %s", annotation) + logging.debug(u"Annotation: %s", annotation) annotated_query = u"/* {} */ {}".format(annotation, query) else:
Query execution fails with Google Apps accounts having non-ASCII characters in the name

If the account used for Google Apps authentication has non-ASCII characters (ä in this case), execution of queries fails against an Amazon Redshift data source.

[2015-06-05 11:36:13,520][PID:7502][INFO][root] [Manager] Metadata: [{'Username': u'Raimo J\xe4rvenp\xe4\xe4', 'Query ID': 25}]
[2015-06-05 11:36:13,520][PID:7502][INFO][root] [Manager][a2a8974a74cfcb958afc2f843f9f896b] Found existing job: ed40d887-8dae-4e2d-a3a4-439bfb4f01a6
[2015-06-05 11:36:13,521][PID:7502][INFO][root] [a2a8974a74cfcb958afc2f843f9f896b] job found is ready (FAILURE), removing lock
I think I fixed this in the last build. I will make sure and let you know.
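The eventual fix keeps the whole annotation pipeline unicode. The underlying failure class in Python 2 is an implicit ASCII conversion when byte strings and unicode mix; the lines below are illustrative reproductions of that class, not the exact call that failed in this issue.

```python
# -*- coding: utf-8 -*-
# Python 2 sketch of implicit ASCII conversions around non-ASCII names.
name = u'Raimo J\xe4rvenp\xe4\xe4'

print(u"Username: {}".format(name))   # safe: template and value are both unicode

try:
    str(name)                          # implicit ASCII encode of non-ASCII text
except UnicodeEncodeError as exc:
    print(exc)

try:
    'J\xc3\xa4rvenp\xc3\xa4\xc3\xa4' + name   # non-ASCII bytes meet unicode
except UnicodeDecodeError as exc:
    print(exc)
```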
2015-06-05T13:49:38
getredash/redash
464
getredash__redash-464
[ "463" ]
6c6c0256ba6e97f3d4d1695a79e5eff0acc046d9
diff --git a/redash/utils.py b/redash/utils.py --- a/redash/utils.py +++ b/redash/utils.py @@ -95,7 +95,7 @@ def default(self, o): if isinstance(o, decimal.Decimal): return float(o) - if isinstance(o, datetime.date): + if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)): return o.isoformat() super(JSONEncoder, self).default(o)
Error running query: datetime.time(13, 52, 27) is not JSON serializable

My table schema:

```sql
CREATE TABLE F_entrances (
  id SERIAL PRIMARY KEY,
  timeOfEntrance time,
  customerId int REFERENCES D_customers
);
```

(and yes, I committed the horrible sin of camel_case vs underScore. I'll be fixing that soonish)

The query

```sql
SELECT timeofentrance FROM F_entrances
```

gives me the error `Error running query: datetime.time(13, 52, 27) is not JSON serializable`. I worked around it with `to_char` but this seems to be a problem at the [Python layer](http://stackoverflow.com/a/11875813/1216976).
It definitely is. What datasource type are you using?

Ok, I believe I know where the problem is. Expect a fix tomorrow.

I'm using postgres here. Thanks for the quick response!
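A sketch of the fix direction (the patch above takes the same approach inside redash's own `JSONEncoder`): teach the encoder to serialize time-like values. The class name is illustrative; note that `timedelta` has no `isoformat()`, so it is stringified here.

```python
import datetime
import json

class TimeAwareJSONEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, (datetime.date, datetime.time)):
            return o.isoformat()
        if isinstance(o, datetime.timedelta):
            return str(o)
        return super(TimeAwareJSONEncoder, self).default(o)

print(json.dumps({"timeofentrance": datetime.time(13, 52, 27)}, cls=TimeAwareJSONEncoder))
# {"timeofentrance": "13:52:27"}
```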
2015-06-29T15:02:04
getredash/redash
480
getredash__redash-480
[ "275" ]
a692e3f664b8074efd9363621d11d5d23fc5f9d9
diff --git a/redash/controllers.py b/redash/controllers.py --- a/redash/controllers.py +++ b/redash/controllers.py @@ -12,7 +12,7 @@ import logging from flask import render_template, send_from_directory, make_response, request, jsonify, redirect, \ - session, url_for, current_app + session, url_for, current_app, flash from flask.ext.restful import Resource, abort from flask_login import current_user, login_user, logout_user, login_required import sqlparse @@ -80,7 +80,7 @@ def login(): login_user(user, remember=remember) return redirect(request.args.get('next') or '/') except models.User.DoesNotExist: - pass + flash("Wrong username or password.") return render_template("login.html", name=settings.NAME, diff --git a/redash/google_oauth.py b/redash/google_oauth.py --- a/redash/google_oauth.py +++ b/redash/google_oauth.py @@ -1,25 +1,25 @@ import logging from flask.ext.login import login_user import requests -from flask import redirect, url_for, Blueprint +from flask import redirect, url_for, Blueprint, flash from flask_oauth import OAuth from redash import models, settings logger = logging.getLogger('google_oauth') oauth = OAuth() -request_token_params = {'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile', 'response_type': 'code'} -if settings.GOOGLE_APPS_DOMAIN: - request_token_params['hd'] = settings.GOOGLE_APPS_DOMAIN -else: +if not settings.GOOGLE_APPS_DOMAIN: logger.warning("No Google Apps domain defined, all Google accounts allowed.") google = oauth.remote_app('google', base_url='https://www.google.com/accounts/', authorize_url='https://accounts.google.com/o/oauth2/auth', request_token_url=None, - request_token_params=request_token_params, + request_token_params={ + 'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile', + 'response_type': 'code' + }, access_token_url='https://accounts.google.com/o/oauth2/token', access_token_method='POST', access_token_params={'grant_type': 'authorization_code'}, @@ -31,7 +31,7 @@ def get_user_profile(access_token): - headers = {'Authorization': 'OAuth '+access_token} + headers = {'Authorization': 'OAuth {}'.format(access_token)} response = requests.get('https://www.googleapis.com/oauth2/v1/userinfo', headers=headers) if response.status_code == 401: @@ -41,9 +41,17 @@ def get_user_profile(access_token): return response.json() +def verify_profile(profile): + if not settings.GOOGLE_APPS_DOMAIN: + return True + + domain = profile['email'].split('@')[-1] + return domain in settings.GOOGLE_APPS_DOMAIN + + def create_and_login_user(name, email): try: - user_object = models.User.get(models.User.email == email) + user_object = models.User.get_by_email(email) if user_object.name != name: logger.debug("Updating user name (%r -> %r)", user_object.name, name) user_object.name = name @@ -70,10 +78,17 @@ def authorized(resp): if access_token is None: logger.warning("Access token missing in call back request.") + flash("Validation error. Please retry.") return redirect(url_for('login')) profile = get_user_profile(access_token) if profile is None: + flash("Validation error. 
Please retry.") + return redirect(url_for('login')) + + if not verify_profile(profile): + logger.warning("User tried to login with unauthorized domain name: %s", profile['email']) + flash("Your Google Apps domain name isn't allowed.") return redirect(url_for('login')) create_and_login_user(profile['name'], profile['email']) diff --git a/redash/settings.py b/redash/settings.py --- a/redash/settings.py +++ b/redash/settings.py @@ -32,6 +32,10 @@ def array_from_string(str): return array +def set_from_string(str): + return set(array_from_string(str)) + + def parse_boolean(str): return json.loads(str.lower()) @@ -60,7 +64,7 @@ def parse_boolean(str): # Google Apps domain to allow access from; any user with email in this Google Apps will be allowed # access -GOOGLE_APPS_DOMAIN = os.environ.get("REDASH_GOOGLE_APPS_DOMAIN", "") +GOOGLE_APPS_DOMAIN = set_from_string(os.environ.get("REDASH_GOOGLE_APPS_DOMAIN", "")) GOOGLE_CLIENT_ID = os.environ.get("REDASH_GOOGLE_CLIENT_ID", "") GOOGLE_CLIENT_SECRET = os.environ.get("REDASH_GOOGLE_CLIENT_SECRET", "")
Show login failure message?

How about displaying a message when login fails? In the current version the page only reloads when login fails.
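A minimal Flask sketch of the flash-message approach the patch above uses; the login logic and the `valid_credentials` helper are stand-ins, not redash's real authentication code.

```python
from flask import Flask, flash, redirect, render_template, request, url_for

app = Flask(__name__)
app.secret_key = "change-me"  # flash() stores messages in the session

def valid_credentials(email, password):
    return False  # stand-in for the real check against the user database

@app.route("/login", methods=["GET", "POST"])
def login():
    if request.method == "POST":
        if not valid_credentials(request.form["email"], request.form["password"]):
            flash("Wrong username or password.")
            return redirect(url_for("login"))
        return redirect("/")
    # login.html can render the messages via get_flashed_messages()
    return render_template("login.html")
```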
2015-07-12T09:52:49
getredash/redash
495
getredash__redash-495
[ "491" ]
4f11f28efa045be1d9161ccd1163ca8f6ba6d7ac
diff --git a/redash/query_runner/big_query.py b/redash/query_runner/big_query.py --- a/redash/query_runner/big_query.py +++ b/redash/query_runner/big_query.py @@ -91,7 +91,7 @@ def configuration_schema(cls): 'properties': { 'serviceAccount': { 'type': 'string', - 'title': 'Service Account' + 'title': 'Service Account Email address' }, 'projectId': { 'type': 'string', @@ -201,4 +201,4 @@ def _get_bigquery_service(self): register(BigQuery) -register(BigQueryGCE) \ No newline at end of file +register(BigQueryGCE)
CLI add new BigQuery datasource - "Service Account" actually means the service account's email address

Adding a new BigQuery datasource, the CLI asks me about the service account:

![redash_cli_capture](https://cloud.githubusercontent.com/assets/755254/8698815/7603be02-2ad7-11e5-967e-610d5f32b766.png)

I typed the "Client ID", when actually we need to provide the "Email address".
2015-07-15T16:46:09
getredash/redash
543
getredash__redash-543
[ "542" ]
700054741908c8585db9817f2c2539f0e1413dc4
diff --git a/redash/query_runner/mongodb.py b/redash/query_runner/mongodb.py --- a/redash/query_runner/mongodb.py +++ b/redash/query_runner/mongodb.py @@ -151,7 +151,11 @@ def _fix_dates(self, data): for k in data: if isinstance(data[k], list): for i in range(0, len(data[k])): - self._fix_dates(data[k][i]) + if isinstance(data[k][i], (str, unicode)): + self._convert_date(data[k], i) + elif not isinstance(data[k][i], (int)): + self._fix_dates(data[k][i]) + elif isinstance(data[k], dict): self._fix_dates(data[k]) else:
Failed to run MongoDB query

When I try to run this MongoDB query on both a local install and the redash demo, an error is raised: "Error running query: string indices must be integers". Below is the query; I think this is an error in the Python code, since I can run other simple queries just fine.

```json
{
  "collection": "Email",
  "aggregate": [
    {
      "$group": {
        "_id": { "$dateToString": { "format": "%Y-%m-%d", "date": "$sendTime" } },
        "sent": { "$sum": { "$cond": { "if": { "$gte": [ "$sent", 1 ] }, "then": 1, "else": 0 } } },
        "opened": { "$sum": { "$cond": { "if": { "$gte": [ "$opened", 1 ] }, "then": 1, "else": 0 } } },
        "clicked": { "$sum": { "$cond": { "if": { "$gte": [ "$clicked", 1 ] }, "then": 1, "else": 0 } } }
      }
    },
    { "$limit": 10 }
  ]
}
```
I also tried to make a script with pymongo and this query condition works fine: http://pastebin.com/GSCmdfJB

Trace:

```shell
02:16:19 worker.1 | Traceback (most recent call last):
02:16:19 worker.1 | File "/usr/local/lib/python2.7/site-packages/celery/app/trace.py", line 240, in trace_task
02:16:19 worker.1 | R = retval = fun(*args, **kwargs)
02:16:19 worker.1 | File "/Users/quanmt/PycharmProjects/redash/redash/tasks.py", line 24, in __call__
02:16:19 worker.1 | return super(BaseTask, self).__call__(*args, **kwargs)
02:16:19 worker.1 | File "/usr/local/lib/python2.7/site-packages/celery/app/trace.py", line 437, in __protected_call__
02:16:19 worker.1 | return self.run(*args, **kwargs)
02:16:19 worker.1 | File "/Users/quanmt/PycharmProjects/redash/redash/tasks.py", line 297, in execute_query
02:16:19 worker.1 | data, error = query_runner.run_query(annotated_query)
02:16:19 worker.1 | File "/Users/quanmt/PycharmProjects/redash/redash/query_runner/mongodb.py", line 188, in run_query
02:16:19 worker.1 | self._fix_dates(query_data)
02:16:19 worker.1 | File "/Users/quanmt/PycharmProjects/redash/redash/query_runner/mongodb.py", line 160, in _fix_dates
02:16:19 worker.1 | self._fix_dates(data[k][i])
02:16:19 worker.1 | File "/Users/quanmt/PycharmProjects/redash/redash/query_runner/mongodb.py", line 162, in _fix_dates
02:16:19 worker.1 | self._fix_dates(data[k])
02:16:19 worker.1 | File "/Users/quanmt/PycharmProjects/redash/redash/query_runner/mongodb.py", line 162, in _fix_dates
02:16:19 worker.1 | self._fix_dates(data[k])
02:16:19 worker.1 | File "/Users/quanmt/PycharmProjects/redash/redash/query_runner/mongodb.py", line 162, in _fix_dates
02:16:19 worker.1 | self._fix_dates(data[k])
02:16:19 worker.1 | File "/Users/quanmt/PycharmProjects/redash/redash/query_runner/mongodb.py", line 162, in _fix_dates
02:16:19 worker.1 | self._fix_dates(data[k])
02:16:19 worker.1 | File "/Users/quanmt/PycharmProjects/redash/redash/query_runner/mongodb.py", line 162, in _fix_dates
02:16:19 worker.1 | self._fix_dates(data[k])
02:16:19 worker.1 | File "/Users/quanmt/PycharmProjects/redash/redash/query_runner/mongodb.py", line 160, in _fix_dates
02:16:19 worker.1 | self._fix_dates(data[k][i])
02:16:19 worker.1 | File "/Users/quanmt/PycharmProjects/redash/redash/query_runner/mongodb.py", line 158, in _fix_dates
02:16:19 worker.1 | if isinstance(data[k], list):
02:16:19 worker.1 | TypeError: string indices must be integers
```

The stack trace clearly indicates a bug in re:dash code, which also explains why running this query w/ PyMongo worked. I need to make sure when I get the chance to properly debug this, but I think I can see the bug: when `_fix_dates` is called for the `$gte` node, it calls `_fix_dates` on a string, which tries to iterate it with `for k in data`, and then fails with the error you get.

I can fix this when I'm back from my vacation (September 1st). If you have a chance to fix this before, pull requests are always appreciated :-)

cc: @erans
2015-08-28T03:11:31
getredash/redash
544
getredash__redash-544
[ "541" ]
700054741908c8585db9817f2c2539f0e1413dc4
diff --git a/redash/controllers.py b/redash/controllers.py --- a/redash/controllers.py +++ b/redash/controllers.py @@ -307,6 +307,8 @@ def delete(self, dashboard_slug): dashboard.is_archived = True dashboard.save() + return dashboard.to_dict(with_widgets=True) + api.add_resource(DashboardListAPI, '/api/dashboards', endpoint='dashboards') api.add_resource(DashboardRecentAPI, '/api/dashboards/recent', endpoint='recent_dashboards') api.add_resource(DashboardAPI, '/api/dashboards/<dashboard_slug>', endpoint='dashboard') diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -700,6 +700,7 @@ def to_dict(self, with_widgets=False): 'layout': layout, 'dashboard_filters_enabled': self.dashboard_filters_enabled, 'widgets': widgets_layout, + 'is_archived': self.is_archived, 'updated_at': self.updated_at, 'created_at': self.created_at } @@ -715,6 +716,7 @@ def recent(cls, user_id=None, limit=20): where(Event.action << ('edit', 'view')).\ where(~(Event.object_id >> None)). \ where(Event.object_type == 'dashboard'). \ + where(Dashboard.is_archived == False). \ group_by(Event.object_id, Dashboard.id). \ order_by(peewee.SQL("count(0) desc"))
Cannot delete dashboard from home page On the home page, clicking the "X" next to a dashboard brings up the "are you sure" alert, but clicking OK results in the dashboard not being deleted. The title disappears, but if you reload the page the dashboard is still there. Version 0.7.1+b1015
It's a bug in the new code that generates the recent dashboards list, it doesn't filter out the archived dashboards. I got aware of it shortly before going on vacation, and I'll fix it when I'm back. Sorry about the confusion. Once this is fixed, all the dashboards you archived (deleted) will be removed from the list on the homepage.
2015-09-03T05:43:43
getredash/redash
576
getredash__redash-576
[ "571" ]
445dbb5ade7bd3d81b61b1f3b32a03a50f5918ba
diff --git a/redash/query_runner/__init__.py b/redash/query_runner/__init__.py --- a/redash/query_runner/__init__.py +++ b/redash/query_runner/__init__.py @@ -67,6 +67,24 @@ def configuration_schema(cls): def run_query(self, query): raise NotImplementedError() + def fetch_columns(self, columns): + column_names = [] + duplicates_counter = 1 + new_columns = [] + + for col in columns: + column_name = col[0] + if column_name in column_names: + column_name = "{}{}".format(column_name, duplicates_counter) + duplicates_counter += 1 + + column_names.append(column_name) + new_columns.append({'name': column_name, + 'friendly_name': column_name, + 'type': col[1]}) + + return new_columns + def get_schema(self): return [] diff --git a/redash/query_runner/mysql.py b/redash/query_runner/mysql.py --- a/redash/query_runner/mysql.py +++ b/redash/query_runner/mysql.py @@ -119,13 +119,8 @@ def run_query(self, query): # TODO - very similar to pg.py if cursor.description is not None: - columns_data = [(i[0], i[1]) for i in cursor.description] - - rows = [dict(zip((c[0] for c in columns_data), row)) for row in data] - - columns = [{'name': col[0], - 'friendly_name': col[0], - 'type': types_map.get(col[1], None)} for col in columns_data] + columns = self.fetch_columns([(i[0], types_map.get(i[1], None)) for i in cursor.description]) + rows = [dict(zip((c['name'] for c in columns), row)) for row in data] data = {'columns': columns, 'rows': rows} json_data = json.dumps(data, cls=JSONEncoder) @@ -149,4 +144,4 @@ def run_query(self, query): return json_data, error -register(Mysql) \ No newline at end of file +register(Mysql) diff --git a/redash/query_runner/pg.py b/redash/query_runner/pg.py --- a/redash/query_runner/pg.py +++ b/redash/query_runner/pg.py @@ -121,29 +121,9 @@ def run_query(self, query): cursor.execute(query) _wait(connection) - # While set would be more efficient here, it sorts the data which is not what we want, but due to the small - # size of the data we can assume it's ok. - column_names = [] - columns = [] - duplicates_counter = 1 - if cursor.description is not None: - for column in cursor.description: - # TODO: this deduplication needs to be generalized and reused in all query runners. - column_name = column.name - if column_name in column_names: - column_name += str(duplicates_counter) - duplicates_counter += 1 - - column_names.append(column_name) - - columns.append({ - 'name': column_name, - 'friendly_name': column_name, - 'type': types_map.get(column.type_code, None) - }) - - rows = [dict(zip(column_names, row)) for row in cursor] + columns = self.fetch_columns([(i[0], types_map.get(i[1], None)) for i in cursor.description]) + rows = [dict(zip((c['name'] for c in columns), row)) for row in cursor] data = {'columns': columns, 'rows': rows} error = None @@ -170,4 +150,4 @@ def run_query(self, query): return json_data, error -register(PostgreSQL) \ No newline at end of file +register(PostgreSQL)
Columns with the same name (on different tables) are messing up the results

If we have two columns with the same name in the same query, the result is overridden. In this query I also found another bug: the `COUNT` and the `GROUP_CONCAT` do not show results if they don't have an alias.

```sql
SELECT s.name, t.name, count(tg.id),
       group_concat(tg.taggable_id ORDER BY tg.taggable_id ASC separator ', ')
FROM taggings tg, tags t, questions q, subgroups s
WHERE t.id = tg.tag_id
  AND tg.taggable_id = q.id
  AND tg.taggable_type = 'Question'
  AND q.year >= 2009
  AND s.id = q.subgroup_id
GROUP BY s.name, t.id
```

Result:

![screen shot 2015-09-22 at 2 07 53 pm](https://cloud.githubusercontent.com/assets/501867/10025613/803d60b6-6133-11e5-8dc4-2c5f202b9f6c.png)

Should be:

![screen shot 2015-09-22 at 2 08 52 pm](https://cloud.githubusercontent.com/assets/501867/10025622/829eaf5e-6133-11e5-8daa-751fc53162a6.png)

Adding an alias to all columns fixes it.
I've implemented a [solution](https://github.com/EverythingMe/redash/blob/master/redash/query_runner/pg.py#L134-L136) for this in the PostgreSQL query runner, but never generalized it for the other query runners. Which one are you using? @arikfr I'm using mysql. Should I implement the solution for mysql or try to generalize it?
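A sketch of the generalized handling that the patch above adds to the base query runner: rename duplicate column names once, before building the row dictionaries. The function and field names mirror the patch; the sample call is illustrative.

```python
def fetch_columns(columns):
    """columns: list of (name, type) tuples, e.g. from a DB-API cursor description."""
    seen = []
    deduped = []
    duplicates_counter = 1
    for name, col_type in columns:
        if name in seen:
            name = "{}{}".format(name, duplicates_counter)
            duplicates_counter += 1
        seen.append(name)
        deduped.append({"name": name, "friendly_name": name, "type": col_type})
    return deduped

print([c["name"] for c in fetch_columns([("name", None), ("name", None), ("count", None)])])
# ['name', 'name1', 'count']
```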
2015-09-29T04:33:08
getredash/redash
602
getredash__redash-602
[ "564" ]
9886f5b13bba7762dd24c4eefbaf3c54f455263b
diff --git a/redash/authentication.py b/redash/authentication.py --- a/redash/authentication.py +++ b/redash/authentication.py @@ -52,6 +52,7 @@ def hmac_load_user_from_request(request): return None + def get_user_from_api_key(api_key, query_id): if not api_key: return None @@ -67,8 +68,19 @@ def get_user_from_api_key(api_key, query_id): return user -def api_key_load_user_from_request(request): + +def get_api_key_from_request(request): api_key = request.args.get('api_key', None) + + if api_key is None and request.headers.get('Authorization'): + auth_header = request.headers.get('Authorization') + api_key = auth_header.replace('Key ', '', 1) + + return api_key + + +def api_key_load_user_from_request(request): + api_key = get_api_key_from_request(request) query_id = request.view_args.get('query_id', None) user = get_user_from_api_key(api_key, query_id)
diff --git a/tests/test_authentication.py b/tests/test_authentication.py --- a/tests/test_authentication.py +++ b/tests/test_authentication.py @@ -44,6 +44,17 @@ def test_user_api_key(self): rv = c.get('/api/queries/', query_string={'api_key': user.api_key}) self.assertEqual(user.id, api_key_load_user_from_request(request).id) + def test_api_key_header(self): + with app.test_client() as c: + rv = c.get('/api/queries/{}'.format(self.query.id), headers={'Authorization': "Key {}".format(self.api_key)}) + self.assertIsNotNone(api_key_load_user_from_request(request)) + + def test_api_key_header_with_wrong_key(self): + with app.test_client() as c: + rv = c.get('/api/queries/{}'.format(self.query.id), headers={'Authorization': "Key oops"}) + self.assertIsNone(api_key_load_user_from_request(request)) + + class TestHMACAuthentication(BaseTestCase): # # This is a bad way to write these tests, but the way Flask works doesn't make it easy to write them properly...
API keys should be supported in the HTTP headers Currently it seems that all API calls must include the `api_key` in the query string. Ideally the HTTP headers could also be used (e.g. `Authorization: Key XXXX` or `X-Api-Key`) so that Web server logs don't log the API key in the clear.
Good idea, it's just a matter of updating this function: https://github.com/EverythingMe/redash/blob/master/redash/authentication.py#L71 to read the value from the query string or the headers.

+1
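A sketch of that update, mirroring the extraction helper in the patch above, plus the header a client would send; the `Key <value>` scheme is the one redash adopts here.

```python
def get_api_key_from_request(request):
    # Accept the key from the query string or an "Authorization: Key <value>" header.
    api_key = request.args.get('api_key', None)
    if api_key is None and request.headers.get('Authorization'):
        api_key = request.headers['Authorization'].replace('Key ', '', 1)
    return api_key

# Client side, the same key would be sent as a header instead of a query parameter:
#   requests.get(url, headers={'Authorization': 'Key YOUR_API_KEY'})
```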
2015-10-11T09:09:34
getredash/redash
604
getredash__redash-604
[ "597" ]
21de1d90e31f8e68ea1ba1a89ddd2084729ac990
diff --git a/redash/query_runner/mongodb.py b/redash/query_runner/mongodb.py --- a/redash/query_runner/mongodb.py +++ b/redash/query_runner/mongodb.py @@ -29,7 +29,6 @@ datetime.datetime: TYPE_DATETIME, } -date_regex = re.compile("ISODate\(\"(.*)\"\)", re.IGNORECASE) class MongoDBJSONEncoder(JSONEncoder): def default(self, o): @@ -38,66 +37,25 @@ def default(self, o): return super(MongoDBJSONEncoder, self).default(o) -# Simple query example: -# -# { -# "collection" : "my_collection", -# "query" : { -# "date" : { -# "$gt" : "ISODate(\"2015-01-15 11:41\")", -# }, -# "type" : 1 -# }, -# "fields" : { -# "_id" : 1, -# "name" : 2 -# }, -# "sort" : [ -# { -# "name" : "date", -# "direction" : -1 -# } -# ] -# -# } -# -# -# Aggregation -# =========== -# Uses a syntax similar to the one used in PyMongo, however to support the -# correct order of sorting, it uses a regular list for the "$sort" operation -# that converts into a SON (sorted dictionary) object before execution. -# -# Aggregation query example: -# -# { -# "collection" : "things", -# "aggregate" : [ -# { -# "$unwind" : "$tags" -# }, -# { -# "$group" : { -# "_id" : "$tags", -# "count" : { "$sum" : 1 } -# } -# }, -# { -# "$sort" : [ -# { -# "name" : "count", -# "direction" : -1 -# }, -# { -# "name" : "_id", -# "direction" : -1 -# } -# ] -# } -# ] -# } -# -# + +date_regex = re.compile("ISODate\(\"(.*)\"\)", re.IGNORECASE) + + +def datetime_parser(dct): + for k, v in dct.iteritems(): + if isinstance(v, basestring): + m = date_regex.findall(v) + if len(m) > 0: + dct[k] = parse(m[0], yearfirst=True) + + return dct + + +def parse_query_json(query): + query_data = json.loads(query, object_hook=datetime_parser) + return query_data + + class MongoDB(BaseQueryRunner): @classmethod def configuration_schema(cls): @@ -144,25 +102,6 @@ def _get_column_by_name(self, columns, column_name): return None - def _fix_dates(self, data): - for k in data: - if isinstance(data[k], list): - for i in range(0, len(data[k])): - if isinstance(data[k][i], (str, unicode)): - self._convert_date(data[k], i) - elif not isinstance(data[k][i], (int)): - self._fix_dates(data[k][i]) - - elif isinstance(data[k], dict): - self._fix_dates(data[k]) - else: - if isinstance(data[k], (str, unicode)): - self._convert_date(data, k) - - def _convert_date(self, q, field_name): - m = date_regex.findall(q[field_name]) - if len(m) > 0: - q[field_name] = parse(m[0], yearfirst=True) def run_query(self, query): if self.is_replica_set: @@ -176,8 +115,7 @@ def run_query(self, query): logger.debug("mongodb got query: %s", query) try: - query_data = json.loads(query) - self._fix_dates(query_data) + query_data = parse_query_json(query) except ValueError: return None, "Invalid query format. The query is not a valid JSON."
diff --git a/tests/query_runner/__init__.py b/tests/query_runner/__init__.py new file mode 100644 --- /dev/null +++ b/tests/query_runner/__init__.py @@ -0,0 +1 @@ + diff --git a/tests/query_runner/test_mongodb.py b/tests/query_runner/test_mongodb.py new file mode 100644 --- /dev/null +++ b/tests/query_runner/test_mongodb.py @@ -0,0 +1,73 @@ +import datetime +import json +from unittest import TestCase +from redash.query_runner.mongodb import parse_query_json + + +class TestParseQueryJson(TestCase): + def test_ignores_non_isodate_fields(self): + query = { + 'test': 1, + 'test_list': ['a', 'b', 'c'], + 'test_dict': { + 'a': 1, + 'b': 2 + } + } + + query_data = parse_query_json(json.dumps(query)) + self.assertDictEqual(query_data, query) + + def test_parses_isodate_fields(self): + query = { + 'test': 1, + 'test_list': ['a', 'b', 'c'], + 'test_dict': { + 'a': 1, + 'b': 2 + }, + 'testIsoDate': "ISODate(\"2014-10-03T00:00\")" + } + + query_data = parse_query_json(json.dumps(query)) + + self.assertEqual(query_data['testIsoDate'], datetime.datetime(2014, 10, 3, 0, 0)) + + def test_parses_isodate_in_nested_fields(self): + query = { + 'test': 1, + 'test_list': ['a', 'b', 'c'], + 'test_dict': { + 'a': 1, + 'b': { + 'date': "ISODate(\"2014-10-04T00:00\")" + } + }, + 'testIsoDate': "ISODate(\"2014-10-03T00:00\")" + } + + query_data = parse_query_json(json.dumps(query)) + + self.assertEqual(query_data['testIsoDate'], datetime.datetime(2014, 10, 3, 0, 0)) + self.assertEqual(query_data['test_dict']['b']['date'], datetime.datetime(2014, 10, 4, 0, 0)) + + def test_handles_nested_fields(self): + # https://github.com/EverythingMe/redash/issues/597 + query = { + "collection": "bus", + "aggregate": [ + { + "$geoNear": { + "near": {"type": "Point", "coordinates": [-22.910079, -43.205161]}, + "maxDistance": 100000000, + "distanceField": "dist.calculated", + "includeLocs": "dist.location", + "spherical": True + } + } + ] + } + + query_data = parse_query_json(json.dumps(query)) + + self.assertDictEqual(query, query_data)
"'float' object is not iterable" when using coordinates for MongoDB query I'm trying to run a query using **MongoDB** and the **[$geoNear](http://docs.mongodb.org/manual/reference/operator/aggregation/geoNear/)** function, but every time I type the origin coordinate using floats (to create a [GeoJSON Point](http://docs.mongodb.org/manual/reference/geojson/)), I get an error: [_from Python?_] `Error running query: 'float' object is not iterable` I'm trying to run the query below. The problem here is the `[ -22.910079, -43.205161 ]` part. ``` json { "collection": "bus", "aggregate": [ { "$geoNear": { "near": { "type": "Point", "coordinates": [ -22.910079, -43.205161 ] }, "maxDistance": 100000000, "distanceField": "dist.calculated", "includeLocs": "dist.location", "spherical": true } } ] } ``` However, if I use the coordinates with integers, such as `[ -22, -43 ]`, the query runs fine, but this coordinate is now meaningless, obviously. Here is an example that doesn't error: ``` json { "collection": "bus", "aggregate": [ { "$geoNear": { "near": { "type": "Point", "coordinates": [ -22, -43 ] }, "maxDistance": 100000000, "distanceField": "dist.calculated", "includeLocs": "dist.location", "spherical": true } } ] } ```
I think the issue is this line: https://github.com/EverythingMe/redash/blob/master/redash/query_runner/mongodb.py#L153

We should probably replace the `_fix_dates` method with usage of the `object_hook` option of `json.loads`, as shown here:

![image](https://cloud.githubusercontent.com/assets/71468/10386214/d27e1204-6e5c-11e5-9aba-8829cccdefad.png)

(from: http://www.slideshare.net/nicolaiarocci/developing-restful-web-apis-with-python-flask-and-mongodb)

Just need to make sure that object_hook is invoked on inner dicts as well, and not only on the top level.
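A sketch of the object_hook approach suggested here (and adopted by the patch above): convert ISODate("...") strings while the query JSON is being parsed, so nested documents are handled for free because the hook runs on every object, innermost first. Written with Python 3 spellings; the redash code of that era checks `basestring` instead of `str`.

```python
import json
import re
from dateutil.parser import parse

date_regex = re.compile(r'ISODate\("(.*)"\)', re.IGNORECASE)

def datetime_parser(dct):
    for key, value in dct.items():
        if isinstance(value, str):
            match = date_regex.findall(value)
            if match:
                dct[key] = parse(match[0], yearfirst=True)
    return dct

query = '{"query": {"date": {"$gt": "ISODate(\\"2015-01-15 11:41\\")"}}}'
print(json.loads(query, object_hook=datetime_parser))
# {'query': {'date': {'$gt': datetime.datetime(2015, 1, 15, 11, 41)}}}
```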
2015-10-11T11:45:00
getredash/redash
605
getredash__redash-605
[ "572", "572" ]
13184519c396822b8d31525c4cf5815ae9a07a83
diff --git a/redash/handlers/static.py b/redash/handlers/static.py --- a/redash/handlers/static.py +++ b/redash/handlers/static.py @@ -38,7 +38,8 @@ def index(**kwargs): features = { 'clientSideMetrics': settings.CLIENT_SIDE_METRICS, - 'allowScriptsInUserInput': settings.ALLOW_SCRIPTS_IN_USER_INPUT + 'allowScriptsInUserInput': settings.ALLOW_SCRIPTS_IN_USER_INPUT, + 'highChartsTurboThreshold': settings.HIGHCHARTS_TURBO_THRESHOLD } return render_template("index.html", user=json.dumps(user), name=settings.NAME, diff --git a/redash/settings.py b/redash/settings.py --- a/redash/settings.py +++ b/redash/settings.py @@ -142,6 +142,8 @@ def all_settings(): # Client side toggles: ALLOW_SCRIPTS_IN_USER_INPUT = parse_boolean(os.environ.get("REDASH_ALLOW_SCRIPTS_IN_USER_INPUT", "false")) CLIENT_SIDE_METRICS = parse_boolean(os.environ.get("REDASH_CLIENT_SIDE_METRICS", "false")) +# http://api.highcharts.com/highcharts#plotOptions.series.turboThreshold +HIGHCHARTS_TURBO_THRESHOLD = int(os.environ.get("REDASH_HIGHCHARTS_TURBO_THRESHOLD", "1000")) # Features: FEATURE_ALLOW_ALL_TO_EDIT_QUERIES = parse_boolean(os.environ.get("REDASH_FEATURE_ALLOW_ALL_TO_EDIT", "true"))
Row count limitation when creating chart visualization

Graphing query with series data works fine when there are 1150 rows returned (total) but not when I go back further in time and get 1543 rows. The chart shows just one of the two data points used as series. The error in the console shows: "Highcharts error #12: www.highcharts.com/errors/12", and the link refers to the turboThreshold. I did not see any references to this when searching through the code.
It's a HighCharts limitation. They have two suggested solutions (mentioned on the linked error page), but I never had the chance to explore this in detail. You can try to disable the `turboThreshold` value to disable the limitation or just change it to a higher value (2000? 5000?). There is an "advanced" undocumented option of editing the JSON parameters passed to HighCharts. If you're interested, I can explain how to enable it.

Thanks! Would you be able to give some guidance where I would change the turboThreshold value? Or is the only way to do this through the undocumented option you mention above?

Currently it's only possible through the undocumented option. It will require shell access to the re:dash server, to add the required permission to your user. As an alternative, I can add tomorrow the `turboThreshold` as a configuration parameter to re:dash. It will require you to upgrade once I do that. http://docs.redash.io/en/latest/upgrade.html
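A sketch of the configuration knob the patch above introduces: read HighCharts' `turboThreshold` from an environment variable and hand it to the client; per the HighCharts docs, setting it to 0 disables the limit entirely. The dictionary below only shows the relevant key.

```python
import os

# http://api.highcharts.com/highcharts#plotOptions.series.turboThreshold
HIGHCHARTS_TURBO_THRESHOLD = int(os.environ.get("REDASH_HIGHCHARTS_TURBO_THRESHOLD", "1000"))

client_config = {
    "highChartsTurboThreshold": HIGHCHARTS_TURBO_THRESHOLD,
}
print(client_config)
```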
2015-10-11T12:30:47
getredash/redash
617
getredash__redash-617
[ "411" ]
a0c26c64f09eb73fbc02473972c88d61847aa91d
diff --git a/redash/handlers/static.py b/redash/handlers/static.py --- a/redash/handlers/static.py +++ b/redash/handlers/static.py @@ -36,14 +36,16 @@ def index(**kwargs): 'permissions': current_user.permissions } - features = { + client_config = { 'clientSideMetrics': settings.CLIENT_SIDE_METRICS, 'allowScriptsInUserInput': settings.ALLOW_SCRIPTS_IN_USER_INPUT, - 'highChartsTurboThreshold': settings.HIGHCHARTS_TURBO_THRESHOLD + 'highChartsTurboThreshold': settings.HIGHCHARTS_TURBO_THRESHOLD, + 'dateFormat': settings.DATE_FORMAT, + 'dateTimeFormat': "{0} HH:mm".format(settings.DATE_FORMAT) } return render_template("index.html", user=json.dumps(user), name=settings.NAME, - features=json.dumps(features), + client_config=json.dumps(client_config), analytics=settings.ANALYTICS) diff --git a/redash/settings.py b/redash/settings.py --- a/redash/settings.py +++ b/redash/settings.py @@ -143,6 +143,7 @@ def all_settings(): CLIENT_SIDE_METRICS = parse_boolean(os.environ.get("REDASH_CLIENT_SIDE_METRICS", "false")) # http://api.highcharts.com/highcharts#plotOptions.series.turboThreshold HIGHCHARTS_TURBO_THRESHOLD = int(os.environ.get("REDASH_HIGHCHARTS_TURBO_THRESHOLD", "1000")) +DATE_FORMAT = os.environ.get("REDASH_DATE_FORMAT", "DD/MM/YY") # Features: FEATURE_ALLOW_ALL_TO_EDIT_QUERIES = parse_boolean(os.environ.get("REDASH_FEATURE_ALLOW_ALL_TO_EDIT", "true"))
diff --git a/rd_ui/test/mocks/redash_mocks.js b/rd_ui/test/mocks/redash_mocks.js --- a/rd_ui/test/mocks/redash_mocks.js +++ b/rd_ui/test/mocks/redash_mocks.js @@ -1,4 +1,4 @@ -featureFlags = []; +clientConfig = {}; currentUser = { id: 1, name: 'John Mock',
Add time zone support Right now re:dash assumes that all timestamps are in the local time zone. However, Redshift (and many other databases, depending on the configuration) default to UTC, and thus re:dash parses the data incorrectly. What's confusing is that Highcharts also formats the data in local time, leading to this awkward display: ![image](https://cloud.githubusercontent.com/assets/963826/7282512/4c9a1d38-e8e5-11e4-9bbf-eb52707d71ff.png) Notice how the data is highlighted at the beginning of April 23 (which hasn't happened yet in GMT), but the date is showing April 22 4:00 pm (which hasn't happened yet in Pacific Standard Time). I temporarily worked around this by patching ng_highchart.js: ``` diff if (moment.isMoment(this.x)) { - var s = '<b>' + this.x.toDate().toLocaleString() + '</b>', + var s = '<b>' + this.x.toDate().toISOString() + '</b>', pointsCount = this.points.length; ``` resources.js: ``` diff if (angular.isNumber(v)) { columnTypes[k] = 'float'; } else if (_.isString(v) && v.match(/^\d{4}-\d{2}-\d{2}T/)) { - row[k] = moment(v); + row[k] = moment.utc(v); columnTypes[k] = 'datetime'; } else if (_.isString(v) && v.match(/^\d{4}-\d{2}-\d{2}/)) { - row[k] = moment(v); + row[k] = moment.utc(v); columnTypes[k] = 'date'; } else if (typeof(v) == 'object' && v !== null) { row[k] = JSON.stringify(v); ``` I'd imagine there will be cases where some users have their database configured to output a specific standard time and others will use UTC. What's the best way to support this feature? Options: 1. Store the UTC offset in the `query_result` (so moment will parse the right value) 2. Allow a configuration option in re:dash for time zone (e.g. UTC, America/Los_Angeles) What do you think, @arikfr?
Oh, I see 267c32b39003241368b34f47dfc8975fe58d8ae3 at least fixed the time to be stored UTC internally, but it didn't fix my use case. We still need to consider how the time is displayed in the chart. Actually, there is no assumption about the timezone of the data, and the plan was to show the dates/times as they are returned from the database to avoid such issues and let the querying user control the timezone. About a month ago, I started using the `toLocaleString` method to print dates and times, to make sure that dates are presented with correct formatting (MM/DD for US, DD/MM for Israel for example). But it looks like this resulted in two bugs: 1. Inconsistency between how the time is presented in the chart axis (which is handled by HighCharts) and the rest of the system (tooltips, table). 2. In some configurations, it will show the wrong time. From what I can see, as long as the data is UTC and your machine has the correct time zone it should actually work. I think that the best thing to do is to revert to the previous method of handling this (just print the time as is) and find a way to correctly print the dates without using `toLocaleString`. But just to verify my theory in #2 above, can you check what time zone you have configured on your machine? @arikfr, my browser is configured for Pacific Standard Time (UTC-0700), and the database usually uses UTC. `moment()` assumes parsing of the local time of the browser if no time zone information is available, which can happen quite frequently when you do a regular SQL query (e.g. extracting a day via `SELECT round(extract('epoch' FROM timestamp) / 3600`). Requiring that the SQL output the time zone information in all cases seems unrealistic; either re:dash needs to determine this automatically or allow the user to specify it. > Requiring that the SQL output the time zone information in all cases seems unrealistic I agree. I think that we should make as few assumptions about the data as possible and just show the data as is, regardless of the time zone. Does this sound right to you? @arikfr I like the idea, but I'd imagine this may still lead to confusion if a UTC offset or time zone isn't shown somewhere. Also, Moment.js will enforce either UTC or local time zone. Is it really practical to create a timestamp that has no time zone associated with it? Maybe there should be a configuration option for the default timezone? And we would apply this to timestamps without a timezone (using UTC by default)? Yes, definitely. I was looking into finally fixing this because this is an annoying bug. I think it would be easier to fix this issue if this comment were addressed: ``` javascript // TODO: we should stop manipulating incoming data, and switch to relaying on the column type set by the backend. // This logic is prone to errors, and better be removed. Kept for now, for backward compatability. ``` That way we can tell if a value is a TIMESTAMP type and parse accordingly. Should we fix this by storing the column type mappings as a field in the `query_result` table? Oh, it looks like `data_type` is available in the query result.
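The resolution in the patch above is to make the display format a server-side setting rather than guess at timezones. Here is a rough sketch of how such a setting reaches the client, using the names from the patch; the `build_client_config` wrapper is illustrative.

```python
import json
import os

# Controls how dates are rendered client-side (a moment.js format string).
DATE_FORMAT = os.environ.get("REDASH_DATE_FORMAT", "DD/MM/YY")

def build_client_config():
    # Bundled once and embedded in index.html, so charts, tooltips and tables
    # all format dates the same way instead of relying on toLocaleString().
    return {
        "dateFormat": DATE_FORMAT,
        "dateTimeFormat": "{0} HH:mm".format(DATE_FORMAT),
    }

print(json.dumps(build_client_config()))
```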
2015-10-20T13:28:32
getredash/redash
620
getredash__redash-620
[ "450" ]
feabc46da4a22098b2c8a777636a3c291bfc6728
diff --git a/bin/release_manager.py b/bin/release_manager.py --- a/bin/release_manager.py +++ b/bin/release_manager.py @@ -7,7 +7,7 @@ github_token = os.environ['GITHUB_TOKEN'] auth = (github_token, 'x-oauth-basic') -repo = 'EverythingMe/redash' +repo = 'getredash/redash' def _github_request(method, path, params=None, headers={}): if not path.startswith('https://api.github.com'): diff --git a/setup/latest_release_url.py b/setup/latest_release_url.py deleted file mode 100644 --- a/setup/latest_release_url.py +++ /dev/null @@ -1,6 +0,0 @@ -import urllib2 -import json - -latest = json.load(urllib2.urlopen("https://api.github.com/repos/EverythingMe/redash/releases/latest")) - -print latest['assets'][0]['browser_download_url']
diff --git a/tests/query_runner/test_mongodb.py b/tests/query_runner/test_mongodb.py --- a/tests/query_runner/test_mongodb.py +++ b/tests/query_runner/test_mongodb.py @@ -52,7 +52,7 @@ def test_parses_isodate_in_nested_fields(self): self.assertEqual(query_data['test_dict']['b']['date'], datetime.datetime(2014, 10, 4, 0, 0)) def test_handles_nested_fields(self): - # https://github.com/EverythingMe/redash/issues/597 + # https://github.com/getredash/redash/issues/597 query = { "collection": "bus", "aggregate": [
docker support It would be nice to have a Dockerfile in the repo, so we can build a container automatically
It is in the backlog. Can't commit to an ETA, but I will comment here when it's available. @arikfr @saritasa I plan on working on it next week. Is anybody still working on this? I am currently looking into adding a Dockerfile to redash too. I would like to follow up if anybody has made a working version ;) @tjwudi I was planning on this but got drawn to another project internally. I don't have a working version yet, but it's definitely something I am planning on doing since we are dockerizing a lot of the infrastructure right now and redash is becoming a major part of our admin interfaces. @KensoDev thanks for your info. Then I will start to work on it. Once I get it working I will send in a PR. I'm adding some thoughts on how to add Docker support for re:dash, in case someone would like to pick up the gauntlet :) Basically the whole setup process is "documented" in the [bootstrap script](https://github.com/EverythingMe/redash/blob/master/setup/bootstrap.sh), but the Docker setup should only take inspiration from it rather than reuse it entirely. Basically what I would do is: 1. Use the official PostgreSQL container. 2. Use the official Redis container. 3. Use the official Nginx container. 4. Create a "base" re:dash container, which will be used for the workers and web server. 5. Move the [database bootstrap](https://github.com/EverythingMe/redash/blob/master/setup/bootstrap.sh#L143-L151) part into a script in the `bin` folder. Then tie everything together with Docker compose and some setup script to create the database once everything is up. Basic re:dash setup should have the following containers: PostgreSQL, Redis, Nginx, re:dash web server, re:dash "master" celery worker (with --beat option) & one additional re:dash celery worker. I'm quite new to Docker, so if someone has other suggestions, I'll be happy to hear them. @arikfr Oh you use nginx in a separate container? Is there any reason not to use nginx in the same container with redash? @tjwudi That's a Docker best practice, but you need to understand what your purpose is in doing this. If the purpose is to make redash easily distributed across machines, use @arikfr's suggestion. This way, you can compose the workload more easily and not worry about processes running inside Docker; each Docker container will be a process that you can monitor using upstart or anything like that. I'm not using re:dash with Docker yet, the above outline was just a suggestion. As for reasoning, @KensoDev summed it up nicely. But I will add that it's not only about distributing across machines (which is not really relevant for re:dash) but more about reusing existing work (in this case - the official nginx container) and following best practices. @arikfr yes. Also, when you have a single point of entry to a docker machine there's less complexity when you run it and monitor it. This way, you upstart a docker container and the entry point is nginx, another is celery etc... Proper docker support would be nice. I did get re:dash running in docker via the naive method of running the provision script on top of a basic ubuntu instance (actually wget and libffi-dev need to be installed before the provision script will run as it is - also I had to add a 'service postgresql start' in the middle of the script, otherwise it didn't see the database at all). So... it is possible to do anyway. But following docker best practices would be nice. Docker support is now in master from #588. I will build "official" images later this week. But whoever wants can use the Dockerfile directly.
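As one concrete piece of the outline above, the database bootstrap could be moved into a standalone script that the containers run once the services are up. The following is a hypothetical sketch; the service hostnames and the `manage.py` arguments are assumptions rather than something taken from the bootstrap script.

```python
import socket
import subprocess
import time

def wait_for(host, port, timeout=60):
    # Block until the linked service container accepts TCP connections.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            socket.create_connection((host, port), timeout=2).close()
            return
        except OSError:
            time.sleep(1)
    raise RuntimeError("%s:%d never became reachable" % (host, port))

if __name__ == "__main__":
    wait_for("postgres", 5432)
    wait_for("redis", 6379)
    # Assumed command: the real bootstrap drives manage.py to create the schema and admin user.
    subprocess.check_call(["python", "manage.py", "database", "create_tables"])
```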
2015-10-21T06:00:12
getredash/redash
630
getredash__redash-630
[ "627" ]
67aecc0201ed0a323a0deb1bb0bd3f57cf4bc15d
diff --git a/redash/query_runner/sqlite.py b/redash/query_runner/sqlite.py new file mode 100644 --- /dev/null +++ b/redash/query_runner/sqlite.py @@ -0,0 +1,96 @@ +import json +import logging +import sqlite3 +import sys + +from redash.query_runner import BaseQueryRunner +from redash.query_runner import TYPE_STRING +from redash.query_runner import register + +from redash.utils import JSONEncoder + +logger = logging.getLogger(__name__) + +class Sqlite(BaseQueryRunner): + @classmethod + def configuration_schema(cls): + return { + "type": "object", + "properties": { + "dbpath": { + "type": "string", + "title": "Database Path" + } + }, + "required": ["dbpath"], + } + + @classmethod + def type(cls): + return "sqlite" + + def __init__(self, configuration_json): + super(Sqlite, self).__init__(configuration_json) + + self._dbpath = self.configuration['dbpath'] + + def get_schema(self): + query_table = "select tbl_name from sqlite_master where type='table'" + query_columns = "PRAGMA table_info(%s)" + + results, error = self.run_query(query_table) + + if error is not None: + raise Exception("Failed getting schema.") + + results = json.loads(results) + + schema = {} + for row in results['rows']: + table_name = row['tbl_name'] + schema[table_name] = {'name': table_name, 'columns': []} + results_table, error = self.run_query(query_columns % (table_name,)) + if error is not None: + raise Exception("Failed getting schema.") + + results_table = json.loads(results_table) + for row_column in results_table['rows']: + schema[table_name]['columns'].append(row_column['name']) + + return schema.values() + + def run_query(self, query): + connection = sqlite3.connect(self._dbpath) + + cursor = connection.cursor() + + try: + cursor.execute(query) + + if cursor.description is not None: + columns = self.fetch_columns([(i[0], None) for i in cursor.description]) + rows = [dict(zip((c['name'] for c in columns), row)) for row in cursor] + + data = {'columns': columns, 'rows': rows} + error = None + json_data = json.dumps(data, cls=JSONEncoder) + else: + error = 'Query completed but it returned no data.' + json_data = None + except KeyboardInterrupt: + connection.cancel() + error = "Query cancelled by user." + json_data = None + except Exception as e: + raise sys.exc_info()[1], None, sys.exc_info()[2] + finally: + connection.close() + return json_data, error + +register(Sqlite) + + + + + + diff --git a/redash/settings.py b/redash/settings.py --- a/redash/settings.py +++ b/redash/settings.py @@ -129,6 +129,7 @@ def all_settings(): 'redash.query_runner.vertica', 'redash.query_runner.treasuredata', 'redash.query_runner.oracle', + 'redash.query_runner.sqlite', ] enabled_query_runners = array_from_string(os.environ.get("REDASH_ENABLED_QUERY_RUNNERS", ",".join(default_query_runners)))
sqlite support Is support for sqlite databases feasible?
+1 I've checked the code, there should not be much to do, I'll try to contribute that change. :+1: indeed it should be simple enough, and similar to the MySQL or Postgres query runners. But I'm curious: in what use case do you find it useful to connect re:dash to a sqlite datasource?
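To illustrate the query-runner contract that the patch above implements, here is a hypothetical standalone usage of the new runner. The import path matches the patch; the database path and query are made up for the example.

```python
import json

# The runner added by the patch above (requires redash to be importable).
from redash.query_runner.sqlite import Sqlite

runner = Sqlite({"dbpath": "/tmp/example.db"})

json_data, error = runner.run_query("SELECT 42 AS answer")
if error is None:
    print(json.loads(json_data)["rows"])  # e.g. [{'answer': 42}]
else:
    print("query failed:", error)
```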
2015-10-28T18:54:13
getredash/redash
661
getredash__redash-661
[ "598" ]
1bdc1bef733eb2d7d10e0ef5d622679273e01b31
diff --git a/redash/query_runner/__init__.py b/redash/query_runner/__init__.py --- a/redash/query_runner/__init__.py +++ b/redash/query_runner/__init__.py @@ -9,6 +9,7 @@ __all__ = [ 'ValidationError', 'BaseQueryRunner', + 'InterruptException', 'TYPE_DATETIME', 'TYPE_BOOLEAN', 'TYPE_INTEGER', @@ -38,6 +39,9 @@ TYPE_DATE ]) +class InterruptException(Exception): + pass + class BaseQueryRunner(object): def __init__(self, configuration): jsonschema.validate(configuration, self.configuration_schema()) diff --git a/redash/query_runner/pg.py b/redash/query_runner/pg.py --- a/redash/query_runner/pg.py +++ b/redash/query_runner/pg.py @@ -142,7 +142,7 @@ def run_query(self, query): logging.exception(e) error = e.message json_data = None - except KeyboardInterrupt: + except (KeyboardInterrupt, InterruptException): connection.cancel() error = "Query cancelled by user." json_data = None diff --git a/redash/tasks.py b/redash/tasks.py --- a/redash/tasks.py +++ b/redash/tasks.py @@ -1,5 +1,6 @@ import time import logging +import signal from flask.ext.mail import Message import redis from celery import Task @@ -8,7 +9,7 @@ from redash import redis_connection, models, statsd_client, settings, utils, mail from redash.utils import gen_query_hash from redash.worker import celery -from redash.query_runner import get_query_runner +from redash.query_runner import get_query_runner, InterruptException logger = get_task_logger(__name__) @@ -132,7 +133,7 @@ def ready(self): return self._async_result.ready() def cancel(self): - return self._async_result.revoke(terminate=True) + return self._async_result.revoke(terminate=True, signal='SIGINT') @staticmethod def _job_lock_id(query_hash, data_source_id): @@ -263,9 +264,12 @@ def check_alerts_for_query(self, query_id): mail.send(message) +def signal_handler(*args): + raise InterruptException @celery.task(bind=True, base=BaseTask, track_started=True) def execute_query(self, query, data_source_id, metadata): + signal.signal(signal.SIGINT, signal_handler) start_time = time.time() logger.info("Loading data source (%d)...", data_source_id)
Cancel doesn't actually cancel a query in redshift. I was using redash and wrote a pretty bad query. Soon after I hit cancel and proceeded to run other queries but they wouldn't go through. I suspected redshift was still busy running my previous erroneous query, so I logged into the AWS console and sure enough it was running. I had to terminate the query manually there.
Cancel is implemented by terminating the process that is running the query. I've noticed that it sometimes takes time for Celery (the background workers framework) to process the cancel request. How much time passed between when you cancelled in re:dash and when you checked the console? Hmm it had to be around 10 to 15 min since I let it run for a while as I worked on other things. We've had a similar issue with Redshift while using redash. I think the problem is that terminating the process doesn't actually kill the query in Redshift. You need to send a cancel request to Redshift by sending the INT signal to the process. To test this, I fired off a query that I knew would take a long time in psql. In another window, I killed it with TERM. When I checked running queries, that query was still running, even though the psql process had been killed. I then tried the same thing but sent an INT signal. This caused a cancel request to be sent and the query was cancelled. The easy way to do this would be to add `signal=INT` to the [revoke method](https://github.com/EverythingMe/redash/blob/5d1c75df1cc22978b4fb6e9e56ef5492a76e6447/redash/tasks.py#L135). However, I don't know enough about how that interacts with non-Redshift tasks. I'm happy to help implement this if needed. @alexdebrie is correct, the Postgres/Redshift adapter sends a cancel request when it receives the `INT` signal. I was under the impression that revoking a task would send an `INT` signal, but from looking at the Celery documentation it looks like the default is indeed `TERM`. I'll double check this again tomorrow, and fix (and test with other adapters) accordingly. Thanks for the debugging, @alexdebrie ! No problem, thanks for the awesome project! No, thank You! @arikfr -- Just checking to see if you've had a chance to test this with other adapters. We like to stay in sync with the official redash repo as much as possible, but we'll just make this fix in our repo if changing it will break other runners. Started testing it now, and even when I use the INT signal (`self._async_result.revoke(terminate=True, signal="SIGINT"`), I see that it just kills the process but doesn't call my code that tries to catch the INT signal. Did you have the chance to try it out? Ahh darn. I haven't tried it yet. I should have some time later this week, and I'll let you know what I find.
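Below is a stripped-down sketch of the mechanism the patch above lands on: revoke the Celery task with `SIGINT`, convert the signal into an exception inside the task, and let the adapter's exception handler call `connection.cancel()` so Redshift actually receives the cancel request. Celery wiring and result serialization are omitted, and the connection is assumed to be a psycopg2 connection.

```python
import signal

class InterruptException(Exception):
    """Raised when the worker process receives SIGINT."""

def _signal_handler(signum, frame):
    raise InterruptException()

def execute_query(connection, query):
    # Installed at the start of the task, so that
    # async_result.revoke(terminate=True, signal='SIGINT') surfaces here as an exception.
    signal.signal(signal.SIGINT, _signal_handler)
    cursor = connection.cursor()
    try:
        cursor.execute(query)
        return cursor.fetchall(), None
    except (KeyboardInterrupt, InterruptException):
        # psycopg2's cancel() is what makes Redshift abort the running query.
        connection.cancel()
        return None, "Query cancelled by user."
```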
2015-11-18T19:23:42
getredash/redash
685
getredash__redash-685
[ "683" ]
51deb8f75d2fc5124811a8b96f8ce8f539eb9013
diff --git a/redash/models.py b/redash/models.py --- a/redash/models.py +++ b/redash/models.py @@ -369,10 +369,10 @@ def to_dict(self): } @classmethod - def unused(cls): - week_ago = datetime.datetime.now() - datetime.timedelta(days=7) + def unused(cls, days=7): + age_threshold = datetime.datetime.now() - datetime.timedelta(days=days) - unused_results = cls.select().where(Query.id == None, cls.retrieved_at < week_ago)\ + unused_results = cls.select().where(Query.id == None, cls.retrieved_at < age_threshold)\ .join(Query, join_type=peewee.JOIN_LEFT_OUTER) return unused_results diff --git a/redash/settings.py b/redash/settings.py --- a/redash/settings.py +++ b/redash/settings.py @@ -69,6 +69,8 @@ def all_settings(): # The following enables periodic job (every 5 minutes) of removing unused query results. QUERY_RESULTS_CLEANUP_ENABLED = parse_boolean(os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_ENABLED", "true")) +QUERY_RESULTS_CLEANUP_COUNT = int(os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_COUNT", "100")) +QUERY_RESULTS_CLEANUP_MAX_AGE = int(os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_MAX_AGE", "7")) AUTH_TYPE = os.environ.get("REDASH_AUTH_TYPE", "api_key") PASSWORD_LOGIN_ENABLED = parse_boolean(os.environ.get("REDASH_PASSWORD_LOGIN_ENABLED", "true")) diff --git a/redash/tasks.py b/redash/tasks.py --- a/redash/tasks.py +++ b/redash/tasks.py @@ -221,7 +221,10 @@ def cleanup_query_results(): Each time the job deletes only 100 query results so it won't choke the database in case of many such results. """ - unused_query_results = models.QueryResult.unused().limit(100) + logging.info("Running query results clean up (removing maximum of %d unused results, that are %d days old or more)", + settings.QUERY_RESULTS_CLEANUP_COUNT, settings.QUERY_RESULTS_CLEANUP_MAX_AGE) + + unused_query_results = models.QueryResult.unused(settings.QUERY_RESULTS_CLEANUP_MAX_AGE).limit(settings.QUERY_RESULTS_CLEANUP_COUNT) total_unused_query_results = models.QueryResult.unused().count() deleted_count = models.QueryResult.delete().where(models.QueryResult.id << unused_query_results).execute()
config request - cleanup_query_results The cleanup cron is great because it reduces the size of the redash database. This also allows for smaller, quicker backups of its data. If possible, could someone add a little configuration? In tasks.py, make it configurable to set a value instead of the hardcoded 100. For example, I might want 1000 here: ``` unused_query_results = models.QueryResult.unused().limit(100) ``` And in models.py, make it configurable by date range instead of the hardcoded week_ago. For example, I might only want 1 day here: ``` week_ago = datetime.datetime.now() - datetime.timedelta(days=7) ```
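The merged patch above answers this with two environment variables. The following condensed sketch shows how they could drive the cleanup task, reusing the peewee-style query from the patch; passing the model in as an argument is only to keep the snippet self-contained.

```python
import os

# Maximum number of unused results removed per run, and the minimum age in days.
QUERY_RESULTS_CLEANUP_COUNT = int(os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_COUNT", "100"))
QUERY_RESULTS_CLEANUP_MAX_AGE = int(os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_MAX_AGE", "7"))

def cleanup_query_results(QueryResult):
    # Delete at most COUNT unused results that are MAX_AGE days old or more,
    # so a single run never chokes the database.
    unused = QueryResult.unused(QUERY_RESULTS_CLEANUP_MAX_AGE).limit(QUERY_RESULTS_CLEANUP_COUNT)
    return QueryResult.delete().where(QueryResult.id << unused).execute()
```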
2015-12-03T09:10:38
getredash/redash
716
getredash__redash-716
[ "708" ]
3d178f9a6036c236ca254528fc4d4cde2e0f7fc8
diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py --- a/redash/handlers/widgets.py +++ b/redash/handlers/widgets.py @@ -46,5 +46,7 @@ def delete(self, widget_id): widget = models.Widget.get(models.Widget.id == widget_id) widget.delete_instance() + return {'layout': widget.dashboard.layout } + api.add_resource(WidgetListAPI, '/api/widgets', endpoint='widgets') api.add_resource(WidgetAPI, '/api/widgets/<int:widget_id>', endpoint='widget')
Deleting and re-adding widgets to a dashboard breaks it There's a specific set of steps that has messed up some of our users' dashboards: 1. Create a new dashboard 2. Add multiple widgets to it. 3. Remove all those widgets from the dashboard 4. Re-add some widgets 5. Click the "Edit Dashboard (Name/Layout)" button 6. Click Save without changing anything. 7. Refresh the page This makes none of the widgets appear and causes the "Layout" array in the admin panel to contain one or more "null" values (depending on how many widgets you added/deleted): ![image](https://cloud.githubusercontent.com/assets/844493/11826038/76c749fa-a348-11e5-9f46-bbae43c4b6f6.png) The only way to recover from this state is to manually delete the "null" values through the admin interface. This is on re:dash version 0.8.2
**Thanks!** I've been trying to find the steps to reproduce this issue for such a long time. Expect a fix in v0.9.0.
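As a hypothetical illustration of the manual recovery described above (the actual fix in the patch instead returns the updated layout when a widget is deleted), pruning the stray nulls from a stored layout might look like this, assuming the layout is a JSON list of rows of widget ids:

```python
import json

def prune_layout(layout_json):
    # Drop null widget references, and drop any rows left empty afterwards.
    layout = json.loads(layout_json)
    cleaned = [[wid for wid in row if wid is not None] for row in layout]
    return json.dumps([row for row in cleaned if row])

print(prune_layout("[[3, null], [null], [4, 5]]"))  # -> [[3], [4, 5]]
```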
2015-12-20T11:17:13
getredash/redash
725
getredash__redash-725
[ "720" ]
1aba777b61801718c3ca4ea14077cd881535808f
diff --git a/redash/google_oauth.py b/redash/google_oauth.py --- a/redash/google_oauth.py +++ b/redash/google_oauth.py @@ -1,8 +1,8 @@ import logging from flask.ext.login import login_user import requests -from flask import redirect, url_for, Blueprint, flash -from flask_oauth import OAuth +from flask import redirect, url_for, Blueprint, flash, request +from flask_oauthlib.client import OAuth from redash import models, settings logger = logging.getLogger('google_oauth') @@ -18,11 +18,9 @@ request_token_url=None, request_token_params={ 'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile', - 'response_type': 'code' }, access_token_url='https://accounts.google.com/o/oauth2/token', access_token_method='POST', - access_token_params={'grant_type': 'authorization_code'}, consumer_key=settings.GOOGLE_CLIENT_ID, consumer_secret=settings.GOOGLE_CLIENT_SECRET) @@ -65,10 +63,10 @@ def create_and_login_user(name, email): @blueprint.route('/oauth/google', endpoint="authorize") def login(): - # TODO, suport next + next = request.args.get('next','/') callback=url_for('.callback', _external=True) logger.debug("Callback url: %s", callback) - return google.authorize(callback=callback) + return google.authorize(callback=callback, state=next) @blueprint.route('/oauth/google_callback', endpoint="callback") @@ -93,4 +91,6 @@ def authorized(resp): create_and_login_user(profile['name'], profile['email']) - return redirect(url_for('index')) \ No newline at end of file + next = request.args.get('state','/') + + return redirect(next)
diff --git a/tests/__init__.py b/tests/__init__.py --- a/tests/__init__.py +++ b/tests/__init__.py @@ -3,6 +3,10 @@ # Use different url for Celery to avoid DB being cleaned up: os.environ['REDASH_CELERY_BROKER'] = "redis://localhost:6379/6" +# Dummy values for oauth login +os.environ['REDASH_GOOGLE_CLIENT_ID'] = "dummy" +os.environ['REDASH_GOOGLE_CLIENT_SECRET'] = "dummy" + import logging from unittest import TestCase
User should be redirected to his original destination after login with Google OAuth If the user tried to open a page before being logged in, he should be redirected to this page after successful login.
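The patch above carries the original destination through the OAuth `state` parameter. Below is a condensed Flask-flavored sketch of that round trip; the remote-app object is stubbed out here, whereas in the patch it is the flask-oauthlib Google client, and the token exchange is omitted.

```python
from flask import Flask, redirect, request, url_for

app = Flask(__name__)

class _StubRemoteApp:
    # Stand-in for the flask-oauthlib remote app; only the state round trip matters here.
    def authorize(self, callback, state):
        return redirect(
            "https://accounts.google.com/o/oauth2/auth?redirect_uri={0}&state={1}".format(callback, state)
        )

google = _StubRemoteApp()

@app.route("/oauth/google")
def login():
    # Remember where the user was originally headed before sending them to Google.
    next_path = request.args.get("next", "/")
    callback = url_for("callback", _external=True)
    return google.authorize(callback=callback, state=next_path)

@app.route("/oauth/google_callback")
def callback():
    # ...token exchange and user login would happen here in the real handler...
    next_path = request.args.get("state", "/")
    return redirect(next_path)
```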
2015-12-27T06:23:42