repo (string, 856 classes) | pull_number (int64, 3 to 127k) | instance_id (string, length 12 to 58) | issue_numbers (sequence, length 1 to 5) | base_commit (string, length 40) | patch (string, length 67 to 1.54M) | test_patch (string, length 0 to 107M) | problem_statement (string, length 3 to 307k) | hints_text (string, length 0 to 908k) | created_at (timestamp[s])
---|---|---|---|---|---|---|---|---|---|
pulp/pulpcore | 2,569 | pulp__pulpcore-2569 | [
"2137"
] | cc535cc4fc1eebec4e9e06fd2d08e7e309cd75f4 | diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py
--- a/pulpcore/download/base.py
+++ b/pulpcore/download/base.py
@@ -7,7 +7,6 @@
import tempfile
from pulpcore.app import pulp_hashlib
-from pulpcore.app.loggers import deprecation_logger
from pulpcore.app.models import Artifact
from pulpcore.exceptions import (
DigestValidationError,
@@ -53,10 +52,7 @@ class BaseDownloader:
data later.
The :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` method by default
- writes to a random file in the current working directory or you can pass in your own file
- object. See the ``custom_file_object`` keyword argument for more details. Allowing the download
- instantiator to define the file to receive data allows the streamer to receive the data instead
- of having it written to disk.
+ writes to a random file in the current working directory.
The call to :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` ensures that all
data written to the file-like object is quiesced to disk before the file-like object has
@@ -67,14 +63,12 @@ class BaseDownloader:
expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the
value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}
expected_size (int): The number of bytes the download is expected to have.
- path (str): The full path to the file containing the downloaded data if no
- ``custom_file_object`` option was specified, otherwise None.
+ path (str): The full path to the file containing the downloaded data.
"""
def __init__(
self,
url,
- custom_file_object=None,
expected_digests=None,
expected_size=None,
semaphore=None,
@@ -86,23 +80,15 @@ def __init__(
Args:
url (str): The url to download.
- custom_file_object (file object): An open, writable file object that downloaded data
- can be written to by
- :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the
value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}
expected_size (int): The number of bytes the download is expected to have.
semaphore (asyncio.Semaphore): A semaphore the downloader must acquire before running.
Useful for limiting the number of outstanding downloaders in various ways.
"""
- if custom_file_object:
- deprecation_logger.warn(
- "The 'custom_file_object' argument to 'BaseDownloader' is"
- "deprecated and will be removed in pulpcore==3.20; stop using it."
- )
self.url = url
- self._writer = custom_file_object
+ self._writer = None
self.path = None
self.expected_digests = expected_digests
self.expected_size = expected_size
| diff --git a/pulpcore/tests/unit/download/test_download_base.py b/pulpcore/tests/unit/download/test_download_base.py
--- a/pulpcore/tests/unit/download/test_download_base.py
+++ b/pulpcore/tests/unit/download/test_download_base.py
@@ -17,7 +17,6 @@ def test_no_trusted_digest(self, mock_DIGEST_FIELDS):
with self.assertRaises(UnsupportedDigestValidationError):
BaseDownloader(
url,
- custom_file_object=None,
expected_digests={"sha1": "912ec803b2ce49e4a541068d495ab570"},
expected_size=None,
semaphore=None,
@@ -32,7 +31,6 @@ def test_no_expected_digests(self, mock_DIGEST_FIELDS):
url = "http://example.com"
downloader = BaseDownloader(
url,
- custom_file_object=None,
expected_digests=None,
expected_size=None,
semaphore=None,
@@ -52,7 +50,6 @@ def test_expected_digests(self, mock_DIGEST_FIELDS):
}
downloader = BaseDownloader(
url,
- custom_file_object=None,
expected_digests=digests,
expected_size=None,
semaphore=None,
| Remove deprecated custom_file_object
| 2022-04-20T19:23:39 |
|
pulp/pulpcore | 2,570 | pulp__pulpcore-2570 | [
"2307"
] | 656136c54daeb29629640614a2305790551b28b3 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -10,7 +10,8 @@
from django.core.files.storage import default_storage
from django.db.models import F
-
+from naya.json import stream_array, tokenize
+from io import StringIO
from pkg_resources import DistributionNotFound, get_distribution
from rest_framework.serializers import ValidationError
from tablib import Dataset
@@ -43,6 +44,8 @@
CA_FILE = "pulpcore.app.modelresource.ContentArtifactResource.json"
VERSIONS_FILE = "versions.json"
CONTENT_MAPPING_FILE = "content_mapping.json"
+# How many entities from an import-file should be processed at one time
+IMPORT_BATCH_SIZE = 100
# Concurrent imports w/ overlapping content can collide - how many attempts are we willing to
# make before we decide this is a fatal error?
@@ -58,22 +61,52 @@ def _destination_repo(importer, source_repo_name):
return Repository.objects.get(name=dest_repo_name)
+def _impfile_iterator(fd):
+ """
+ Iterate over an import-file returning batches of rows as a json-array-string.
+
+ We use naya.json.stream_array() to get individual rows; once a batch is gathered,
+ we yield the result of json.dumps() for that batch. Repeat until all rows have been
+ called for.
+ """
+ eof = False
+ batch = []
+ rows = stream_array(tokenize(fd))
+ while not eof:
+ try:
+ while len(batch) < IMPORT_BATCH_SIZE:
+ batch.append(next(rows))
+ except StopIteration:
+ eof = True
+ yield json.dumps(batch)
+ batch.clear()
+
+
def _import_file(fpath, resource_class, retry=False):
+ """
+ Import the specified resource-file in batches to limit memory-use.
+
+ We process resource-files one "batch" at a time. Because of the way django-import's
+ internals work, we have to feed it batches as StringIO-streams of json-formatted strings.
+ The file-to-json-to-string-to-import is overhead, but it lets us put an upper bound on the
+ number of entities in memory at any one time at import-time.
+ """
try:
- log.info(_("Importing file {}.").format(fpath))
+ log.info(f"Importing file {fpath}.")
with open(fpath, "r") as json_file:
- data = Dataset().load(json_file, format="json")
resource = resource_class()
- log.info(_("...Importing resource {}.").format(resource.__class__.__name__))
- if retry:
- curr_attempt = 1
- while curr_attempt < MAX_ATTEMPTS:
- curr_attempt += 1
+ log.info("...Importing resource {resource.__class__.__name__}.")
+ # Load one batch-sized chunk of the specified import-file at a time. If requested,
+ # retry a batch if it looks like we collided with some other repo being imported with
+ # overlapping content.
+ for batch_str in _impfile_iterator(json_file):
+ data = Dataset().load(StringIO(batch_str))
+ if retry:
# django import-export can have a problem with concurrent-imports that are
# importing the same 'thing' (e.g., a Package that exists in two different
# repo-versions that are being imported at the same time). If we're asked to
# retry, we will try an import that will simply record errors as they happen
- # (rather than failing with an exception) first. If errors happen, we'll
+ # (rather than failing with an exception) first. If errors happen, we'll do one
# retry before we give up on this repo-version's import.
a_result = resource.import_data(data, raise_errors=False)
if a_result.has_errors():
@@ -82,16 +115,16 @@ def _import_file(fpath, resource_class, retry=False):
f"...{total_errors} import-errors encountered importing "
"{fpath}, attempt {curr_attempt}, retrying"
)
- # Last attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
+ # Second attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
+ else:
a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(f"FATAL import-failure importing {fpath}")
- raise
- else:
- a_result = resource.import_data(data, raise_errors=True)
- return a_result
+ yield a_result
except AttributeError:
log.error(f"FAILURE loading import-file {fpath}!")
raise
@@ -194,15 +227,19 @@ def import_repository_version(importer_pk, destination_repo_pk, source_repo_name
resulting_content_ids = []
for res_class in cfg.exportable_classes:
+ content_count = 0
filename = f"{res_class.__module__}.{res_class.__name__}.json"
- a_result = _import_file(os.path.join(rv_path, filename), res_class, retry=True)
- resulting_content_ids.extend(
- row.object_id for row in a_result.rows if row.import_type in ("new", "update")
- )
+ for a_result in _import_file(os.path.join(rv_path, filename), res_class, retry=True):
+ content_count += len(a_result.rows)
+ resulting_content_ids.extend(
+ row.object_id for row in a_result.rows if row.import_type in ("new", "update")
+ )
# Once all content exists, create the ContentArtifact links
ca_path = os.path.join(rv_path, CA_FILE)
- _import_file(ca_path, ContentArtifactResource, retry=True)
+ # We don't do anything with the imported batches, we just need to get them imported
+ for a_batch in _import_file(ca_path, ContentArtifactResource, retry=True):
+ pass
# see if we have a content mapping
mapping_path = f"{rv_name}/{CONTENT_MAPPING_FILE}"
@@ -399,20 +436,24 @@ def validate_and_assemble(toc_filename):
_check_versions(version_json)
# Artifacts
- ar_result = _import_file(os.path.join(temp_dir, ARTIFACT_FILE), ArtifactResource)
data = dict(
- message="Importing Artifacts", code="import.artifacts", total=len(ar_result.rows)
+ message="Importing Artifacts",
+ code="import.artifacts",
)
with ProgressReport(**data) as pb:
- for row in pb.iter(ar_result.rows):
- artifact = Artifact.objects.get(pk=row.object_id)
- base_path = os.path.join("artifact", artifact.sha256[0:2], artifact.sha256[2:])
- src = os.path.join(temp_dir, base_path)
-
- if not default_storage.exists(base_path):
- with open(src, "rb") as f:
- default_storage.save(base_path, f)
-
+ # Import artifacts, and place their binary blobs, one batch at a time.
+ # Skip artifacts that already exist in storage.
+ for ar_result in _import_file(os.path.join(temp_dir, ARTIFACT_FILE), ArtifactResource):
+ for row in pb.iter(ar_result.rows):
+ artifact = Artifact.objects.get(pk=row.object_id)
+ base_path = os.path.join("artifact", artifact.sha256[0:2], artifact.sha256[2:])
+ src = os.path.join(temp_dir, base_path)
+
+ if not default_storage.exists(base_path):
+ with open(src, "rb") as f:
+ default_storage.save(base_path, f)
+
+ # Now import repositories, in parallel.
with open(os.path.join(temp_dir, REPO_FILE), "r") as repo_data_file:
data = json.load(repo_data_file)
gpr = GroupProgressReport(
@@ -436,7 +477,7 @@ def validate_and_assemble(toc_filename):
dispatch(
import_repository_version,
exclusive_resources=[dest_repo],
- args=[importer.pk, dest_repo.pk, src_repo["name"], path],
+ args=(importer.pk, dest_repo.pk, src_repo["name"], path),
task_group=task_group,
)
| Memory error when importing large repository
**Description of problem:**
Getting MemoryError when importing large repository, such as rhel-7-server-rpms repo.
The PackageResource.json file of the rhel-7-server-rpms repo is about 5.5GB. Pulp uses the "json.load" method to decode the json file. When decoding the json, it will use up to 18GB+ of memory and return a 5.5GB python dictionary. If the system doesn't have enough memory, the json decode will fail with a MemoryError.
hammer content-import version --path /var/lib/pulp/imports/Default_Organization/rhel-7-imported/1.0/2022-03-04T17-17-09-11-00/ --organization-id 1
[................................................ ] [24%]
Error: 1 subtask(s) failed for task group /pulp/api/v3/task-groups/ebc7514a-f606-4965-a599-eab74101e9b0/.
**/var/log/messages**
pulpcore-worker-2: pulp [6eda5a34-529f-42b9-909b-fb526c9f35e0]: pulpcore.tasking.pulpcore_worker:INFO: Task d5f26164-087a-4acb-aca1-7aed30851040 failed ()
pulpcore-worker-2: pulp [6eda5a34-529f-42b9-909b-fb526c9f35e0]: pulpcore.tasking.pulpcore_worker:INFO: File "/usr/lib/python3.6/site-packages/pulpcore/tasking/pulpcore_worker.py", line 317, in _perform_task
pulpcore-worker-2: result = func(*args, **kwargs)
pulpcore-worker-2: File "/usr/lib/python3.6/site-packages/pulpcore/app/tasks/importer.py", line 161, in import_repository_version
pulpcore-worker-2: a_result = _import_file(os.path.join(rv_path, filename), res_class, do_raise=False)
pulpcore-worker-2: File "/usr/lib/python3.6/site-packages/pulpcore/app/tasks/importer.py", line 62, in _import_file
pulpcore-worker-2: data = Dataset().load(json_file.read(), format="json")
pulpcore-worker-2: File "/usr/lib64/python3.6/codecs.py", line 321, in decode
pulpcore-worker-2: (result, consumed) = self._buffer_decode(data, self.errors, final)
**Output of top command**
16011 pulp 20 0 5924132 2.5g 2496 D 25.0 16.7 21:43.78 pulpcore-worker
16011 pulp 20 0 5924132 2.8g 2496 D 6.2 18.7 21:44.76 pulpcore-worker
16011 pulp 20 0 5924132 3.1g 2496 D 12.5 20.7 21:45.55 pulpcore-worker
16011 pulp 20 0 5924132 3.5g 2496 D 12.5 23.2 21:46.38 pulpcore-worker
16011 pulp 20 0 5924132 3.9g 2496 R 25.0 25.7 21:47.39 pulpcore-worker
16011 pulp 20 0 5924132 4.2g 2496 D 12.5 27.7 21:48.38 pulpcore-worker
16011 pulp 20 0 5924132 4.5g 2496 R 66.7 29.5 21:49.35 pulpcore-worker
16011 pulp 20 0 5924132 4.9g 2496 D 18.8 32.2 21:50.58 pulpcore-worker
16011 pulp 20 0 5924132 5.3g 2496 R 26.7 34.7 21:51.53 pulpcore-worker
**Output of free command**
total used free shared buff/cache available
Mem: 15879256 10213472 167648 193168 5498136 5135336
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 10516964 162816 193168 5199476 4832256
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 10774956 179184 193168 4925116 4573800
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11009476 168300 193168 4701480 4339684
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11354424 179644 193168 4345188 3994460
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11653416 154880 193168 4070960 3695840
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11960864 150900 193168 3767492 3388684
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 12341972 150652 193168 3386632 3007040
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 12741088 157716 193168 2980452 2608400
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 13012212 159016 193168 2708028 2337108
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 13309296 171804 193168 2398156 2039872
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 13726688 180136 193168 1972432 1622400
total used free shared buff/cache available
Mem: 15879256 14151480 169480 193168 1558296 1197956
Swap: 8060924 352 8060572
**Test to decode the PackageResource.json file in the python console.**
```
python3
>>> fh = open("/var/lib/pulp/imports/Default_Organization/rhel-7-imported/1.0/2022-03-04T17-17-09-11-00/repository-Default_Organization-Red_Hat_Enterprise_Linux_Server-Red_Hat_Enterprise_Linux_7_Server_RPMs_x86_64_7Server_13/pulp_rpm.app.modelresource.PackageResource.json")
>>> import json
>>> json.load(fh)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib64/python3.6/json/__init__.py", line 296, in load
return loads(fp.read(),
File "/usr/lib64/python3.6/codecs.py", line 321, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
MemoryError <===========
```
```
top -o %MEM
top - 11:50:42 up 1 day, 14:42, 3 users, load average: 0.49, 0.15, 0.09
Tasks: 134 total, 3 running, 131 sleeping, 0 stopped, 0 zombie
%Cpu0 : 0.0 us, 3.3 sy, 0.0 ni, 96.7 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu1 : 0.0 us, 3.7 sy, 0.0 ni, 96.3 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu2 : 0.0 us, 7.0 sy, 0.0 ni, 93.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu3 : 31.8 us, 68.2 sy, 0.0 ni, 0.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
KiB Mem : 99.2/19906644 [||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||]
KiB Swap: 1.0/8060924 [| ]
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
10034 root 20 0 20.8g 18.1g 3268 R 100.0 95.4 1:23.00 python3 <==========
```
**Steps to Reproduce:**
1. Prepare a disconnected Satellite with only 15GB RAM to easily reproduce the issue
2. Import a content view with only the rhel-7-server-rpms repo to the disconnected Satellite
Actual results:
Failed with memory error
Expected results:
No error, and the import should consume only a reasonable amount of memory.
**Research and proposed potential solutions:**
1) Split the resource.json file into a number of chunk files when exporting the contents.
2) Or keep one large resource.json (a non-standard json file) with each chunk written on its own line, so that the import can read it line by line.
3) Or use a streaming json library, such as ijson, to read the json file in chunks and save memory (a minimal sketch of this option follows below).
https://bugzilla.redhat.com/show_bug.cgi?id=2061224
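To make option 3 concrete, here is a minimal sketch of streaming a large JSON-array file in bounded memory. It assumes the third-party `ijson` package; the batch size and file path are illustrative, and this is not the approach the patch above actually merged (that patch batches rows with `naya.json.stream_array` instead).
```python
import ijson

IMPORT_BATCH_SIZE = 100  # illustrative value


def iter_batches(path, batch_size=IMPORT_BATCH_SIZE):
    """Yield lists of at most ``batch_size`` rows from a top-level JSON array."""
    with open(path, "rb") as fp:
        batch = []
        # ijson parses incrementally, so only one row plus the current batch is
        # held in memory instead of the whole multi-gigabyte document.
        for row in ijson.items(fp, "item"):
            batch.append(row)
            if len(batch) >= batch_size:
                yield batch
                batch = []
        if batch:
            yield batch


# Hypothetical usage; the path is an example only:
# for batch in iter_batches("pulp_rpm.app.modelresource.PackageResource.json"):
#     import_rows(batch)
```
Processed this way, peak memory grows with the batch size rather than with the size of the export file.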
| Is this issue distinct from https://github.com/pulp/pulpcore/issues/2072?
What version are you seeing this on? Is there a BZ that can be attached?
@dralley: Yes. This is a different issue from #2072. #2072 fixed only the export part but unfortunately not the import part. The import still has a memory issue even after the fix in the line below.
https://github.com/pulp/pulpcore/pull/1782/files#diff-06badc442eb525dc6f2de7f4e19a7762ce20a972b79b6a90cf1ee80211053ab5L62-R62
I had created a bugzilla for it:
https://bugzilla.redhat.com/show_bug.cgi?id=2061224 | 2022-04-20T21:01:30 |
|
pulp/pulpcore | 2,575 | pulp__pulpcore-2575 | [
"2550"
] | 11cf32d395c7f897fc0cfc020cbdce626a9095e6 | diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py
--- a/pulpcore/download/base.py
+++ b/pulpcore/download/base.py
@@ -102,9 +102,10 @@ def __init__(
if not set(self.expected_digests).intersection(set(Artifact.DIGEST_FIELDS)):
raise UnsupportedDigestValidationError(
_(
- "Content at the url {} does not contain at least one trusted hasher which"
- " is specified in 'ALLOWED_CONTENT_CHECKSUMS' setting."
- ).format(self.url)
+ "Content at the URL '{}' does not contain at least one trusted hasher which"
+ " is specified in the 'ALLOWED_CONTENT_CHECKSUMS' setting ({}). The"
+ " downloader expected one of the following hashers: {}"
+ ).format(self.url, Artifact.DIGEST_FIELDS, set(self.expected_digests))
)
def _ensure_writer_has_open_file(self):
| Make UnsupportedDigestValidationError more informative
**Version**
Current main branch in dev box.
**Describe the bug**
I recently tried to sync a ULN remote, and got the following error:
```
'Content at the url uln://ovm2_2.1.1_i386_patch/repodata/other.xml.gz does not contain at least one trusted hasher which is specified in 'ALLOWED_CONTENT_CHECKSUMS' setting.'
```
The error message comes from the following lines of code:
```
if self.expected_digests:
if not set(self.expected_digests).intersection(set(Artifact.DIGEST_FIELDS)):
raise UnsupportedDigestValidationError(
_(
"Content at the url {} does not contain at least one trusted hasher which"
" is specified in 'ALLOWED_CONTENT_CHECKSUMS' setting."
).format(self.url)
)
```
**Expected behavior**
It would be great if we could add the `self.expected_digests` (and possibly the `Artifact.DIGEST_FIELDS`) to the error message here, that way the user knows exactly what checksums the repo in question provides. That way I know what checksum I need to enable.
**Additional context**
It looks like `self.expected_digests` is the digests provided by the repo, while `Artifact.DIGEST_FIELDS` is the same as `ALLOWED_CONTENT_CHECKSUMS`.
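As a rough, standalone sketch of what the request amounts to (the allowed set and sample digests below are invented, and `ValueError` stands in for `UnsupportedDigestValidationError`):
```python
ALLOWED_CONTENT_CHECKSUMS = {"sha256", "sha512"}  # example setting value


def check_digests(url, expected_digests):
    """Fail early when a remote offers no checksum type the server trusts,
    reporting both sets so the user knows which checksum to enable."""
    if expected_digests and not set(expected_digests) & ALLOWED_CONTENT_CHECKSUMS:
        raise ValueError(
            f"Content at the URL '{url}' does not contain at least one trusted "
            f"hasher which is specified in the 'ALLOWED_CONTENT_CHECKSUMS' setting "
            f"({ALLOWED_CONTENT_CHECKSUMS}). The downloader expected one of the "
            f"following hashers: {set(expected_digests)}"
        )


# check_digests("uln://ovm2_2.1.1_i386_patch/repodata/other.xml.gz",
#               {"md5": "912ec803b2ce49e4a541068d495ab570"})
# -> the error now names both the allowed set and the offered {'md5'} set.
```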
| 2022-04-21T15:19:16 |
||
pulp/pulpcore | 2,590 | pulp__pulpcore-2590 | [
"2589"
] | 6beca40cd5f40294a04fe24622cc2c1b163afe65 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -44,6 +44,10 @@
VERSIONS_FILE = "versions.json"
CONTENT_MAPPING_FILE = "content_mapping.json"
+# Concurrent imports w/ overlapping content can collide - how many attempts are we willing to
+# make before we decide this is a fatal error?
+MAX_ATTEMPTS = 3
+
def _destination_repo(importer, source_repo_name):
"""Find the destination repository based on source repo's name."""
@@ -62,31 +66,34 @@ def _import_file(fpath, resource_class, retry=False):
resource = resource_class()
log.info(_("...Importing resource {}.").format(resource.__class__.__name__))
if retry:
- # django import-export can have a problem with concurrent-imports that are
- # importing the same 'thing' (e.g., a Package that exists in two different
- # repo-versions that are being imported at the same time). If we're asked to
- # retry, we will try an import that will simply record errors as they happen
- # (rather than failing with an exception) first. If errors happen, we'll do one
- # retry before we give up on this repo-version's import.
- a_result = resource.import_data(data, raise_errors=False)
- if a_result.has_errors():
- log.info(
- "...{} import-errors encountered importing {}, retrying".format(
- a_result.totals["error"], fpath
+ curr_attempt = 1
+ while curr_attempt < MAX_ATTEMPTS:
+ curr_attempt += 1
+ # django import-export can have a problem with concurrent-imports that are
+ # importing the same 'thing' (e.g., a Package that exists in two different
+ # repo-versions that are being imported at the same time). If we're asked to
+ # retry, we will try an import that will simply record errors as they happen
+ # (rather than failing with an exception) first. If errors happen, we'll
+ # retry before we give up on this repo-version's import.
+ a_result = resource.import_data(data, raise_errors=False)
+ if a_result.has_errors():
+ total_errors = a_result.totals["error"]
+ log.info(
+ f"...{total_errors} import-errors encountered importing "
+ "{fpath}, attempt {curr_attempt}, retrying"
)
- )
- # Second attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
- a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(_("FATAL import-failure importing {}").format(fpath))
- raise
+ # Last attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
else:
a_result = resource.import_data(data, raise_errors=True)
return a_result
except AttributeError:
- log.error(_("FAILURE importing file {}!").format(fpath))
+ log.error(f"FAILURE loading import-file {fpath}!")
raise
| Concurrent PulpImports with overlapping content can still fail.
**Version**
core/3.14
**Describe the bug**
PulpImport imports repositories in parallel. If the repos in a single import-file have overlapping content, two repo-import-threads can attempt to create the same object at the same time, and collide. This is handled by retry-logic, that notices the collision and retries in the failing task.
HOWEVER - if "enough" repositories, with "enough" overlapping content, are imported in a system with "enough" available pulpcore-workers, then **more than two** tasks can collide. The first one "wins", the second fails, retries, and "wins", the third fails *twice* and aborts. We need to let PulpImport retry more than once.
Note that there needs to be a finite number of retries, because it's better to eventually give up, than to fall into an infinite retry-hole due to some equally-rare edge case.
**To Reproduce**
Steps to reproduce the behavior:
See discussion in https://bugzilla.redhat.com/show_bug.cgi?id=2056702 . We have seen this happen to exactly one user to date. We have not been able to reproduce it "in captivity" even once (but the hole clearly exists).
**Expected behavior**
PulpImport should retry an import more than once, in the event of an object-creation collision.
**Additional context**
Original collision-problem-report : https://pulp.plan.io/issues/8633
Retry fix : https://github.com/pulp/pulpcore/pull/1278
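The shape of the requested fix, a bounded retry loop, can be sketched on its own. Here `do_import` is a stand-in for `resource.import_data` and is an assumption, not part of the pulpcore API:
```python
MAX_ATTEMPTS = 3  # finite, so a persistent failure still aborts


def import_with_retry(do_import, max_attempts=MAX_ATTEMPTS):
    """Run an import up to ``max_attempts`` times.

    Early attempts only record row errors (a concurrent import of overlapping
    content may have collided with us); the final attempt raises, so a genuine
    failure surfaces instead of looping forever.
    """
    for attempt in range(1, max_attempts + 1):
        final = attempt == max_attempts
        result = do_import(raise_errors=final)
        if not result.has_errors():
            return result
        # otherwise: log the collision and loop around for another attempt
    return result  # only reached if the final attempt reported (not raised) errors
```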
| 2022-04-22T17:59:37 |
||
pulp/pulpcore | 2,591 | pulp__pulpcore-2591 | [
"2269"
] | 78571f751eeb9f541224533c2a904216d2e7bb9f | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -92,7 +92,12 @@ def _import_file(fpath, resource_class, retry=False):
def _check_versions(version_json):
- """Compare the export version_json to the installed components."""
+ """
+ Compare the export version_json to the installed components.
+
+ An upstream whose db-metadata doesn't match the downstream won't import successfully; check
+ for compatibility and raise a ValidationError if incompatible versions are found.
+ """
error_messages = []
for component in version_json:
try:
@@ -102,10 +107,13 @@ def _check_versions(version_json):
_("Export uses {} which is not installed.").format(component["component"])
)
else:
- if version != component["version"]:
+ # Check that versions are compatible. Currently, "compatible" is defined as "same X.Y".
+ # Versions are strings that generally look like "X.Y.Z" or "X.Y.Z.dev"; we check that
+ # first two places are the same.
+ if version.split(".")[:2] != component["version"].split(".")[:2]:
error_messages.append(
_(
- "Export version {export_ver} of {component} does not match "
+ "Export version {export_ver} of {component} incompatible with "
"installed version {ver}."
).format(
export_ver=component["version"],
@@ -114,8 +122,8 @@ def _check_versions(version_json):
)
)
- if error_messages:
- raise ValidationError((" ".join(error_messages)))
+ if error_messages:
+ raise ValidationError((" ".join(error_messages)))
def import_repository_version(importer_pk, destination_repo_pk, source_repo_name, tar_path):
| diff --git a/pulpcore/tests/unit/test_import_checks.py b/pulpcore/tests/unit/test_import_checks.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/tests/unit/test_import_checks.py
@@ -0,0 +1,28 @@
+from unittest.mock import patch
+
+from django.test import TestCase
+from rest_framework.serializers import ValidationError
+
+from pulpcore.app.tasks.importer import _check_versions
+
+
+class TestObject:
+ version = "1.2.3" # Every component is vers 1.2.3
+
+
+class TestCheckVersions(TestCase):
+ @patch("pulpcore.app.tasks.importer.get_distribution", return_value=TestObject())
+ def test_vers_check(self, mock_get_distribution):
+ export_json = [{"component": "xyz", "version": "1.2.3"}]
+ _check_versions(export_json)
+
+ export_json = [{"component": "xy", "version": "1.2"}]
+ _check_versions(export_json)
+
+ export_json = [{"component": "x_noty_z", "version": "1.4.3"}]
+ with self.assertRaises(ValidationError):
+ _check_versions(export_json)
+
+ export_json = [{"component": "notx_y_z", "version": "2.2.3"}]
+ with self.assertRaises(ValidationError):
+ _check_versions(export_json)
| Version Matching for Import/Export too stringent
**Is your feature request related to a problem? Please describe.**
When importing previously exported content, pulp runs a check on the pulpcore, pulp_rpm and other plugin versions of both the export archive and the currently installed system. For example, if my exporter is using pulp 3.16.1 and the import server 3.16.2, I'd get an error like
`Export version 3.16.1 of pulpcore does not match installed version 3.16.2`
While checking compatibility is a good thing, we need to be mindful of the fact customers in disconnected environments will often have different minor releases of pulp. Expecting the z-stream to also match will make it unwieldy and very hard for us to control the environment.
Location of the check -> https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/importer.py#L93-L118
As described in https://hackmd.io/HLptudH9R6S4PCmm8nRmmg?view#Open-Questions
- [DONE] How will backwards incompatible changes in the exported data format be handled over time? e.g. I exported this data a loooong time ago and now the system I'm importing into expects a newer, different format?
- idea 1: Tie the data format to the exported system's pulpcore version number. Put that version number into the exported data somehow. Then have systems importing know the oldest version they support and refuse to import older exports.
- idea 2: Same idea as (1) except use it's own numbering scheme, probably semver based
- **Current Status**: versions.json included in export, with core and plugin versions for all
plugins involved in the export. Import checks for **exact match** to versions.json on import,
and errors if there are diffs.
We definitely need more flexibility on this.
**Describe the solution you'd like**
Discuss various approaches to solve this in both short term and long term.
- Only require match of x.y releases and not x.y.z when checking compatibility (not sure that is sufficient).
- As described in the design docs use a unique numbering/version scheme that changes only when 2 versions are incompatible. That way if there are no breaking changes between 3.16-3.18 the version will be identical and hence compatible
**Additional context**
Further discussion pending on this topic. Pulpcore needs to be careful not to make it too stringent.
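The first option (compare only X.Y) is what the patch above ends up doing; in isolation the check is just a prefix comparison. A minimal sketch, with made-up version strings:
```python
def versions_compatible(installed, exported):
    """Treat versions as compatible when their X.Y prefixes match.

    Version strings generally look like "X.Y.Z" or "X.Y.Z.dev", so comparing
    the first two dotted components ignores z-stream differences.
    """
    return installed.split(".")[:2] == exported.split(".")[:2]


assert versions_compatible("3.16.2", "3.16.1")       # z-stream difference is fine
assert not versions_compatible("3.17.0", "3.16.1")   # different Y is rejected
```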
| https://bugzilla.redhat.com/show_bug.cgi?id=2067301 | 2022-04-22T18:08:37 |
pulp/pulpcore | 2,592 | pulp__pulpcore-2592 | [
"2269"
] | 32af2e93bf992ea15c90099fbb15e7a91b159754 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -91,7 +91,12 @@ def _import_file(fpath, resource_class, retry=False):
def _check_versions(version_json):
- """Compare the export version_json to the installed components."""
+ """
+ Compare the export version_json to the installed components.
+
+ An upstream whose db-metadata doesn't match the downstream won't import successfully; check
+ for compatibility and raise a ValidationError if incompatible versions are found.
+ """
error_messages = []
for component in version_json:
try:
@@ -101,10 +106,13 @@ def _check_versions(version_json):
_("Export uses {} which is not installed.").format(component["component"])
)
else:
- if version != component["version"]:
+ # Check that versions are compatible. Currently, "compatible" is defined as "same X.Y".
+ # Versions are strings that generally look like "X.Y.Z" or "X.Y.Z.dev"; we check that
+ # first two places are the same.
+ if version.split(".")[:2] != component["version"].split(".")[:2]:
error_messages.append(
_(
- "Export version {export_ver} of {component} does not match "
+ "Export version {export_ver} of {component} incompatible with "
"installed version {ver}."
).format(
export_ver=component["version"],
@@ -113,8 +121,8 @@ def _check_versions(version_json):
)
)
- if error_messages:
- raise ValidationError((" ".join(error_messages)))
+ if error_messages:
+ raise ValidationError((" ".join(error_messages)))
def import_repository_version(importer_pk, destination_repo_pk, source_repo_name, tar_path):
| diff --git a/pulpcore/tests/unit/test_import_checks.py b/pulpcore/tests/unit/test_import_checks.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/tests/unit/test_import_checks.py
@@ -0,0 +1,28 @@
+from unittest.mock import patch
+
+from django.test import TestCase
+from rest_framework.serializers import ValidationError
+
+from pulpcore.app.tasks.importer import _check_versions
+
+
+class TestObject:
+ version = "1.2.3" # Every component is vers 1.2.3
+
+
+class TestCheckVersions(TestCase):
+ @patch("pulpcore.app.tasks.importer.get_distribution", return_value=TestObject())
+ def test_vers_check(self, mock_get_distribution):
+ export_json = [{"component": "xyz", "version": "1.2.3"}]
+ _check_versions(export_json)
+
+ export_json = [{"component": "xy", "version": "1.2"}]
+ _check_versions(export_json)
+
+ export_json = [{"component": "x_noty_z", "version": "1.4.3"}]
+ with self.assertRaises(ValidationError):
+ _check_versions(export_json)
+
+ export_json = [{"component": "notx_y_z", "version": "2.2.3"}]
+ with self.assertRaises(ValidationError):
+ _check_versions(export_json)
| Version Matching for Import/Export too stringent
**Is your feature request related to a problem? Please describe.**
When importing previously exported content, pulp runs a check on the pulpcore, pulp_rpm and other plugin versions of both the export archive and the currently installed system. For example, if my exporter is using pulp 3.16.1 and the import server 3.16.2, I'd get an error like
`Export version 3.16.1 of pulpcore does not match installed version 3.16.2`
While checking compatibility is a good thing, we need to be mindful of the fact customers in disconnected environments will often have different minor releases of pulp. Expecting the z-stream to also match will make it unwieldy and very hard for us to control the environment.
Location of the check -> https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/importer.py#L93-L118
As described in https://hackmd.io/HLptudH9R6S4PCmm8nRmmg?view#Open-Questions
- [DONE] How will backwards incompatible changes in the exported data format be handled over time? e.g. I exported this data a loooong time ago and now the system I'm importing into expects a newer, different format?
- idea 1: Tie the data format to the exported system's pulpcore version number. Put that version number into the exported data somehow. Then have systems importing know the oldest version they support and refuse to import older exports.
- idea 2: Same idea as (1) except use it's own numbering scheme, probably semver based
- **Current Status**: versions.json included in export, with core and plugin versions for all
plugins involved in the export. Import checks for **exact match** to versions.json on import,
and errors if there are diffs.
We definitely need more flexibility on this.
**Describe the solution you'd like**
Discuss various approaches to solve this in both short term and long term.
- Only require match of x.y releases and not x.y.z when checking compatibility (not sure that is sufficient).
- As described in the design docs use a unique numbering/version scheme that changes only when 2 versions are incompatible. That way if there are no breaking changes between 3.16-3.18 the version will be identical and hence compatible
**Additional context**
Further discussion pending on this topic. Pulpcore needs to be careful not to make it too stringent.
| https://bugzilla.redhat.com/show_bug.cgi?id=2067301 | 2022-04-22T18:09:09 |
pulp/pulpcore | 2,633 | pulp__pulpcore-2633 | [
"1812"
] | fa541c5db3b6739616d054fd60f4d08571a287e0 | diff --git a/pulpcore/exceptions/__init__.py b/pulpcore/exceptions/__init__.py
--- a/pulpcore/exceptions/__init__.py
+++ b/pulpcore/exceptions/__init__.py
@@ -5,7 +5,6 @@
TimeoutException,
exception_to_dict,
)
-from .http import MissingResource # noqa
from .validation import ( # noqa
DigestValidationError,
SizeValidationError,
diff --git a/pulpcore/exceptions/http.py b/pulpcore/exceptions/http.py
deleted file mode 100644
--- a/pulpcore/exceptions/http.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import http.client
-from gettext import gettext as _
-
-from .base import PulpException
-
-
-class MissingResource(PulpException):
- """
- Base class for missing resource exceptions.
-
- Exceptions that are raised due to requests for resources that do not exist should inherit
- from this base class.
- """
-
- http_status_code = http.client.NOT_FOUND
-
- def __init__(self, **resources):
- """
- :param resources: keyword arguments of resource_type=resource_id
- :type resources: dict
- """
- super().__init__("PLP0001")
- self.resources = resources
-
- def __str__(self):
- resources_str = ", ".join("%s=%s" % (k, v) for k, v in self.resources.items())
- msg = _("The following resources are missing: %s") % resources_str
- return msg.encode("utf-8")
diff --git a/pulpcore/tasking/util.py b/pulpcore/tasking/util.py
--- a/pulpcore/tasking/util.py
+++ b/pulpcore/tasking/util.py
@@ -8,7 +8,6 @@
from pulpcore.app.models import Task
from pulpcore.app.util import get_view_name_for_model
from pulpcore.constants import TASK_FINAL_STATES, TASK_INCOMPLETE_STATES, TASK_STATES
-from pulpcore.exceptions import MissingResource
_logger = logging.getLogger(__name__)
@@ -24,12 +23,9 @@ def cancel(task_id):
task_id (str): The ID of the task you wish to cancel
Raises:
- MissingResource: if a task with given task_id does not exist
+ rest_framework.exceptions.NotFound: If a task with given task_id does not exist
"""
- try:
- task_status = Task.objects.get(pk=task_id)
- except Task.DoesNotExist:
- raise MissingResource(task=task_id)
+ task_status = Task.objects.get(pk=task_id)
if task_status.state in TASK_FINAL_STATES:
# If the task is already done, just stop
| diff --git a/pulpcore/tests/functional/__init__.py b/pulpcore/tests/functional/__init__.py
--- a/pulpcore/tests/functional/__init__.py
+++ b/pulpcore/tests/functional/__init__.py
@@ -4,6 +4,8 @@
from pulp_smash.config import get_config
from pulp_smash.pulp3.bindings import delete_orphans
+from pulp_smash.utils import get_pulp_setting
+
from pulpcore.client.pulpcore import (
ApiClient,
@@ -141,3 +143,13 @@ def delete_orphans_pre(request):
raise pytest.UsageError("This test is not suitable to be marked parallel.")
delete_orphans()
yield
+
+
[email protected](scope="session")
+def pulp_api_v3_path(cli_client):
+ v3_api_root = get_pulp_setting(cli_client, "V3_API_ROOT")
+ if v3_api_root is None:
+ raise RuntimeError(
+ "This fixture requires the server to have the `V3_API_ROOT` setting set."
+ )
+ return v3_api_root
diff --git a/pulpcore/tests/functional/api/test_tasks.py b/pulpcore/tests/functional/api/test_tasks.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/tests/functional/api/test_tasks.py
@@ -0,0 +1,17 @@
+from uuid import uuid4
+
+import pytest
+
+from pulpcore.client.pulpcore.exceptions import ApiException
+
+
+def test_cancel_invalid_task_raises_404(pulp_api_v3_path, tasks_api_client):
+ patched_task_cancel = {"state": "canceled"}
+
+ missing_task_url = f"{pulp_api_v3_path}tasks/{uuid4()}/"
+
+ with pytest.raises(ApiException) as e_info:
+ tasks_api_client.tasks_cancel(
+ task_href=missing_task_url, patched_task_cancel=patched_task_cancel
+ )
+ assert e_info.value.status == 404
| Replace MissingResource exception with a subclass of rest_framework.exceptions.APIException
Author: @dkliban ([email protected])
Redmine Issue: 2382, https://pulp.plan.io/issues/2382
---
DRF provides an APIException[0] base class that should be used to create custom exceptions for Pulp's API. The new MissingResource exception should take two arguments: resource_type and resource_id.
[0] http://www.django-rest-framework.org/api-guide/exceptions/#apiexception
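What the request describes might look roughly like the sketch below. This is only an illustration of a DRF `APIException` subclass with the two requested arguments; as the discussion below explains, the existing `MissingResource` was ultimately deleted rather than rewritten, so code like this was never part of pulpcore.
```python
from rest_framework import status
from rest_framework.exceptions import APIException


class MissingResource(APIException):
    """404-producing exception for requests that reference a nonexistent resource."""

    status_code = status.HTTP_404_NOT_FOUND
    default_code = "missing_resource"

    def __init__(self, resource_type, resource_id):
        detail = f"The {resource_type} resource with id '{resource_id}' does not exist."
        super().__init__(detail=detail)
```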
| I think we should still do this and do it in 3.20.
We should not port this, but instead delete it. It's used in zero places. Here's my audit:
https://github.com/pulp/pulpcore/search?q=MissingResource <--- lives here, and used in exactly one place
https://github.com/pulp/pulp_ansible/search?q=MissingResource
https://github.com/pulp/pulp-certguard/search?q=MissingResource
https://github.com/pulp/pulp_cookbook/search?q=MissingResource
https://github.com/pulp/pulp_container/search?q=MissingResource
https://github.com/pulp/pulp_deb/search?q=MissingResource
https://github.com/pulp/pulp_file/search?q=MissingResource
https://github.com/ansible/galaxy_ng/search?q=MissingResource
https://github.com/pulp/pulp_maven/search?q=MissingResource
https://github.com/pulp/pulp_npm/search?q=MissingResource
https://github.com/pulp/pulp_ostree/search?q=MissingResource
https://github.com/pulp/pulp_python/search?q=MissingResource
https://github.com/pulp/pulp_rpm/search?q=MissingResource
The one place is [here](https://github.com/pulp/pulpcore/blob/fcb355c1f8fec6780524c010cf5bba7e1aa1d51b/pulpcore/tasking/util.py#L32). I think we should just switch that single line to ApiException and not have Pulp's plugin API offer a custom exception. For something to be so broadly offered and unused tells me, at least right now, we don't need this.
Don't even catch the DoesNotExist exception in that place. It should do the proper 404 on its own. | 2022-04-28T18:45:04 |
pulp/pulpcore | 2,635 | pulp__pulpcore-2635 | [
"2634"
] | 3310845db073b58de521f8d33cab135c34da86d8 | diff --git a/pulpcore/app/models/base.py b/pulpcore/app/models/base.py
--- a/pulpcore/app/models/base.py
+++ b/pulpcore/app/models/base.py
@@ -8,8 +8,6 @@
from django.db.models.base import ModelBase
from django_lifecycle import LifecycleModel
-from pulpcore.app.loggers import deprecation_logger
-
class Label(LifecycleModel):
"""Model for handling resource labels.
@@ -69,14 +67,6 @@ class BaseModel(LifecycleModel):
class Meta:
abstract = True
- def __init__(self, *args, **kwargs):
- if hasattr(self, "ACCESS_POLICY_VIEWSET_NAME"):
- deprecation_logger.warn(
- f"The model {self.__class__} defines the 'ACCESS_POLICY_VIEWSET_NAME' class "
- f"attribute which is no longer required and is discouraged to be set."
- )
- return super().__init__(*args, **kwargs)
-
def __str__(self):
try:
# if we have a name, use it
| Remove deprecation warning about `ACCESS_POLICY_VIEWSET_NAME` attribute
In the pulpcore 3.18 and 3.19 releases, a deprecation warning was emitted if plugin writers defined a Model attribute named `ACCESS_POLICY_VIEWSET_NAME`. The CI shows deprecation warnings as a failure, so we believe all pulp plugins have discontinued setting this attribute. It's time to remove this deprecation warning with 3.20.
| 2022-04-28T19:46:48 |
||
pulp/pulpcore | 2,644 | pulp__pulpcore-2644 | [
"2642"
] | 80314fb1a402333042cd8bba1c02e935f608b321 | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -261,52 +261,10 @@
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
-extlinks = {'github': ('https://github.com/pulp/pulpcore/issues/%s', '#'),
- 'redmine': ('https://pulp.plan.io/issues/%s', '#'),
- 'fixedbugs_pulp': ('https://pulp.plan.io/projects/pulp/issues?c%%5B%%5D=tracker&c%%5B%'
- '%5D=status&c%%5B%%5D=priority&c%%5B%%5D=cf_5&c%%5B%%5D=subject&c%%'
- '5B%%5D=author&c%%5B%%5D=assigned_to&c%%5B%%5D=cf_3&f%%5B%%5D=cf_4&'
- 'f%%5B%%5D=tracker_id&f%%5B%%5D=&group_by=&op%%5Bcf_4%%5D=%%3D&op%%'
- '5Btracker_id%%5D=%%3D&set_filter=1&sort=priority%%3Adesc%%2Ccf_5%%'
- '3Adesc%%2Cid%%3Adesc&utf8=%%E2%%9C%%93&v%%5Bcf_4%%5D%%5B%%5D=%s&v%'
- '%5Btracker_id%%5D%%5B%%5D=1', 'bugs fixed in '),
- 'fixedbugs_pulp_rpm': ('https://pulp.plan.io/projects/pulp_rpm/issues?c%%5B%%5D=tracke'
- 'r&c%%5B%%5D=status&c%%5B%%5D=priority&c%%5B%%5D=cf_5&c%%5B%%5D'
- '=subject&c%%5B%%5D=author&c%%5B%%5D=assigned_to&c%%5B%%5D=cf_3'
- '&f%%5B%%5D=cf_4&f%%5B%%5D=tracker_id&f%%5B%%5D=&group_by=&op%%'
- '5Bcf_4%%5D=%%3D&op%%5Btracker_id%%5D=%%3D&set_filter=1&sort=pr'
- 'iority%%3Adesc%%2Ccf_5%%3Adesc%%2Cstatus&utf8=%%E2%%9C%%93&v%%'
- '5Bcf_4%%5D%%5B%%5D=%s&v%%5Btracker_id%%5D%%5B%%5D=1',
- 'bugs fixed in '),
- 'fixedbugs_pulp_puppet': ('https://pulp.plan.io/projects/pulp_puppet/issues?utf8=%%E2%'
- '%9C%%93&set_filter=1&f%%5B%%5D=cf_4&op%%5Bcf_4%%5D=%%3D&v%%'
- '5Bcf_4%%5D%%5B%%5D=%s&f%%5B%%5D=tracker_id&op%%5Btracker_id'
- '%%5D=%%3D&v%%5Btracker_id%%5D%%5B%%5D=1&f%%5B%%5D=&c%%5B%%5'
- 'D=tracker&c%%5B%%5D=status&c%%5B%%5D=priority&c%%5B%%5D=cf_'
- '5&c%%5B%%5D=subject&c%%5B%%5D=author&c%%5B%%5D=assigned_to&'
- 'c%%5B%%5D=cf_3&group_by=', 'bugs fixed in '),
- 'fixedbugs_pulp_python': ('https://pulp.plan.io/projects/pulp_python/issues?c%%5B%%5D='
- 'tracker&c%%5B%%5D=status&c%%5B%%5D=priority&c%%5B%%5D=cf_5&'
- 'c%%5B%%5D=subject&c%%5B%%5D=author&c%%5B%%5D=assigned_to&c%'
- '%5B%%5D=cf_3&f%%5B%%5D=cf_11&f%%5B%%5D=tracker_id&f%%5B%%5D'
- '=&group_by=&op%%5Bcf_11%%5D=%%3D&op%%5Btracker_id%%5D=%%3D&'
- 'set_filter=1&sort=priority%%3Adesc%%2Ccf_5%%3Adesc%%2Cstatu'
- 's&utf8=%%E2%%9C%%93&v%%5Bcf_11%%5D%%5B%%5D=%s&v%%5Btracker_'
- 'id%%5D%%5B%%5D=1', 'bugs fixed in '),
- 'fixedbugs_pulp_docker': ('https://pulp.plan.io/projects/pulp_docker/issues?utf8=%%E2%'
- '%9C%%93&set_filter=1&f%%5B%%5D=cf_12&op%%5Bcf_12%%5D=%%3D&v'
- '%%5Bcf_12%%5D%%5B%%5D=%s&f%%5B%%5D=tracker_id&op%%5Btracker'
- '_id%%5D=%%3D&v%%5Btracker_id%%5D%%5B%%5D=1&f%%5B%%5D=&c%%5B'
- '%%5D=tracker&c%%5B%%5D=status&c%%5B%%5D=priority&c%%5B%%5D='
- 'cf_5&c%%5B%%5D=subject&c%%5B%%5D=author&c%%5B%%5D=assigned_'
- 'to&c%%5B%%5D=cf_3&group_by=', 'bugs fixed in '),
- 'fixedbugs_pulp_ostree': ('https://pulp.plan.io/projects/pulp_ostree/issues?utf8=%%E2%'
- '%9C%%93&set_filter=1&f%%5B%%5D=cf_17&op%%5Bcf_17%%5D=%%3D&v'
- '%%5Bcf_17%%5D%%5B%%5D=%s&f%%5B%%5D=tracker_id&op%%5Btracker'
- '_id%%5D=%%3D&v%%5Btracker_id%%5D%%5B%%5D=1&f%%5B%%5D=&c%%5B'
- '%%5D=tracker&c%%5B%%5D=status&c%%5B%%5D=priority&c%%5B%%5D='
- 'cf_5&c%%5B%%5D=subject&c%%5B%%5D=author&c%%5B%%5D=assigned_'
- 'to&c%%5B%%5D=cf_3&group_by=', 'bugs fixed in '),}
+extlinks = {
+ 'github': ('https://github.com/pulp/pulpcore/issues/%s', '#'),
+ 'redmine': ('https://pulp.plan.io/issues/%s', '#'),
+}
# napoleon uses .. attribute by default, but :ivar: is more succinct and looks better,
# particularly on classes with a lot of attributes, like django models and related objects
| Remove some old redmine aspects from our docs
If you search this query, you'll see some things that need cleaning up
https://docs.pulpproject.org/pulpcore/search.html?q=Redmine&check_keywords=yes&area=default
| 2022-04-29T19:49:01 |
||
pulp/pulpcore | 2,655 | pulp__pulpcore-2655 | [
"2654"
] | a2db378f00dee28f366cec81d079364df462de58 | diff --git a/pulpcore/download/http.py b/pulpcore/download/http.py
--- a/pulpcore/download/http.py
+++ b/pulpcore/download/http.py
@@ -256,6 +256,17 @@ async def download_wrapper():
return await self._run(extra_data=extra_data)
except asyncio.TimeoutError:
raise TimeoutException(self.url)
+ except aiohttp.ClientHttpProxyError as e:
+ log.error(
+ "Proxy {!r} rejected connection request during a request to "
+ "{!r}, status={}, message={!r}".format(
+ e.request_info.real_url,
+ e.request_info.url,
+ e.status,
+ e.message,
+ )
+ )
+ raise e
return await download_wrapper()
| Improve proxy connection failure error message
Raise a more informative error message when the proxy rejects requests from Pulp
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=2047485
| 2022-05-03T16:37:45 |
||
pulp/pulpcore | 2,657 | pulp__pulpcore-2657 | [
"2589"
] | 12d8fa40b63118d0aeb90552fb9d7d3b7fb934c0 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -44,6 +44,10 @@
VERSIONS_FILE = "versions.json"
CONTENT_MAPPING_FILE = "content_mapping.json"
+# Concurrent imports w/ overlapping content can collide - how many attempts are we willing to
+# make before we decide this is a fatal error?
+MAX_ATTEMPTS = 3
+
def _destination_repo(importer, source_repo_name):
"""Find the destination repository based on source repo's name."""
@@ -62,31 +66,34 @@ def _import_file(fpath, resource_class, retry=False):
resource = resource_class()
log.info(_("...Importing resource {}.").format(resource.__class__.__name__))
if retry:
- # django import-export can have a problem with concurrent-imports that are
- # importing the same 'thing' (e.g., a Package that exists in two different
- # repo-versions that are being imported at the same time). If we're asked to
- # retry, we will try an import that will simply record errors as they happen
- # (rather than failing with an exception) first. If errors happen, we'll do one
- # retry before we give up on this repo-version's import.
- a_result = resource.import_data(data, raise_errors=False)
- if a_result.has_errors():
- log.info(
- _("...{} import-errors encountered importing {}, retrying").format(
- a_result.totals["error"], fpath
+ curr_attempt = 1
+ while curr_attempt < MAX_ATTEMPTS:
+ curr_attempt += 1
+ # django import-export can have a problem with concurrent-imports that are
+ # importing the same 'thing' (e.g., a Package that exists in two different
+ # repo-versions that are being imported at the same time). If we're asked to
+ # retry, we will try an import that will simply record errors as they happen
+ # (rather than failing with an exception) first. If errors happen, we'll
+ # retry before we give up on this repo-version's import.
+ a_result = resource.import_data(data, raise_errors=False)
+ if a_result.has_errors():
+ total_errors = a_result.totals["error"]
+ log.info(
+ f"...{total_errors} import-errors encountered importing "
+ "{fpath}, attempt {curr_attempt}, retrying"
)
- )
- # Second attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
- a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(_("FATAL import-failure importing {}").format(fpath))
- raise
+ # Last attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
else:
a_result = resource.import_data(data, raise_errors=True)
return a_result
except AttributeError:
- log.error(_("FAILURE importing file {}!").format(fpath))
+ log.error(f"FAILURE loading import-file {fpath}!")
raise
| Concurrent PulpImports with overlapping content can still fail.
**Version**
core/3.14
**Describe the bug**
PulpImport imports repositories in parallel. If the repos in a single import-file have overlapping content, two repo-import-threads can attempt to create the same object at the same time, and collide. This is handled by retry-logic, that notices the collision and retries in the failing task.
HOWEVER - if "enough" repositories, with "enough" overlapping content, are imported in a system with "enough" available pulpcore-workers, then **more than two** tasks can collide. The first one "wins", the second fails, retries, and "wins", the third fails *twice* and aborts. We need to let PulpImport retry more than once.
Note that there needs to be a finite number of retries, because it's better to eventually give up, than to fall into an infinite retry-hole due to some equally-rare edge case.
**To Reproduce**
Steps to reproduce the behavior:
See discussion in https://bugzilla.redhat.com/show_bug.cgi?id=2056702 . We have seen this happen to exactly one user to date. We have not been able to reproduce it "in captivity" even once (but the hole clearly exists).
**Expected behavior**
PulpImport should retry an import more than once, in the event of an object-creation collision.
**Additional context**
Original collision-problem-report : https://pulp.plan.io/issues/8633
Retry fix : https://github.com/pulp/pulpcore/pull/1278
| 2022-05-03T18:23:49 |
||
pulp/pulpcore | 2,658 | pulp__pulpcore-2658 | [
"2589"
] | 4dfb710a39262fdf559f7957e8badcbf5c3923e0 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -44,6 +44,10 @@
VERSIONS_FILE = "versions.json"
CONTENT_MAPPING_FILE = "content_mapping.json"
+# Concurrent imports w/ overlapping content can collide - how many attempts are we willing to
+# make before we decide this is a fatal error?
+MAX_ATTEMPTS = 3
+
def _destination_repo(importer, source_repo_name):
"""Find the destination repository based on source repo's name."""
@@ -62,31 +66,34 @@ def _import_file(fpath, resource_class, retry=False):
resource = resource_class()
log.info(_("...Importing resource {}.").format(resource.__class__.__name__))
if retry:
- # django import-export can have a problem with concurrent-imports that are
- # importing the same 'thing' (e.g., a Package that exists in two different
- # repo-versions that are being imported at the same time). If we're asked to
- # retry, we will try an import that will simply record errors as they happen
- # (rather than failing with an exception) first. If errors happen, we'll do one
- # retry before we give up on this repo-version's import.
- a_result = resource.import_data(data, raise_errors=False)
- if a_result.has_errors():
- log.info(
- _("...{} import-errors encountered importing {}, retrying").format(
- a_result.totals["error"], fpath
+ curr_attempt = 1
+ while curr_attempt < MAX_ATTEMPTS:
+ curr_attempt += 1
+ # django import-export can have a problem with concurrent-imports that are
+ # importing the same 'thing' (e.g., a Package that exists in two different
+ # repo-versions that are being imported at the same time). If we're asked to
+ # retry, we will try an import that will simply record errors as they happen
+ # (rather than failing with an exception) first. If errors happen, we'll
+ # retry before we give up on this repo-version's import.
+ a_result = resource.import_data(data, raise_errors=False)
+ if a_result.has_errors():
+ total_errors = a_result.totals["error"]
+ log.info(
+ f"...{total_errors} import-errors encountered importing "
+ "{fpath}, attempt {curr_attempt}, retrying"
)
- )
- # Second attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
- a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(_("FATAL import-failure importing {}").format(fpath))
- raise
+ # Last attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
else:
a_result = resource.import_data(data, raise_errors=True)
return a_result
except AttributeError:
- log.error(_("FAILURE importing file {}!").format(fpath))
+ log.error(f"FAILURE loading import-file {fpath}!")
raise
| Concurrent PulpImports with iverlapping content can still fail.
**Version**
core/3.14
**Describe the bug**
PulpImport imports repositories in parallel. If the repos in a single import-file have overlapping content, two repo-import-threads can attempt to create the same object at the same time, and collide. This is handled by retry-logic, that notices the collision and retries in the failing task.
HOWEVER - if "enough" repositories, with "enough" overlapping content, are imported in a system with "enough" available pulpcore-workers, then **more than two** tasks can collide. The first one "wins", the second fails, retries, and "wins", the third fails *twice* and aborts. We need to let PulpImport retry more than once.
Note that there needs to be a finite number of retries, because it's better to eventually give up, than to fall into an infinite retry-hole due to some equally-rare edge case.
**To Reproduce**
Steps to reproduce the behavior:
See discussion in https://bugzilla.redhat.com/show_bug.cgi?id=2056702 . We have seen this happen to exactly one user to date. We have not been able to reproduce it "in captivity" even once (but the hole clearly exists).
**Expected behavior**
PulpImport should retry an import more than once, in the event of an object-creation collision.
**Additional context**
Original collision-problem-report : https://pulp.plan.io/issues/8633
Retry fix : https://github.com/pulp/pulpcore/pull/1278
| 2022-05-03T18:25:34 |
||
pulp/pulpcore | 2,659 | pulp__pulpcore-2659 | [
"2589"
] | 6fc666ecb2951baf8fad45b156af16558b2ba555 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -45,6 +45,10 @@
VERSIONS_FILE = "versions.json"
CONTENT_MAPPING_FILE = "content_mapping.json"
+# Concurrent imports w/ overlapping content can collide - how many attempts are we willing to
+# make before we decide this is a fatal error?
+MAX_ATTEMPTS = 3
+
def _destination_repo(importer, source_repo_name):
"""Find the destination repository based on source repo's name."""
@@ -63,31 +67,34 @@ def _import_file(fpath, resource_class, retry=False):
resource = resource_class()
log.info(_("...Importing resource {}.").format(resource.__class__.__name__))
if retry:
- # django import-export can have a problem with concurrent-imports that are
- # importing the same 'thing' (e.g., a Package that exists in two different
- # repo-versions that are being imported at the same time). If we're asked to
- # retry, we will try an import that will simply record errors as they happen
- # (rather than failing with an exception) first. If errors happen, we'll do one
- # retry before we give up on this repo-version's import.
- a_result = resource.import_data(data, raise_errors=False)
- if a_result.has_errors():
- log.info(
- _("...{} import-errors encountered importing {}, retrying").format(
- a_result.totals["error"], fpath
+ curr_attempt = 1
+ while curr_attempt < MAX_ATTEMPTS:
+ curr_attempt += 1
+ # django import-export can have a problem with concurrent-imports that are
+ # importing the same 'thing' (e.g., a Package that exists in two different
+ # repo-versions that are being imported at the same time). If we're asked to
+ # retry, we will try an import that will simply record errors as they happen
+ # (rather than failing with an exception) first. If errors happen, we'll
+ # retry before we give up on this repo-version's import.
+ a_result = resource.import_data(data, raise_errors=False)
+ if a_result.has_errors():
+ total_errors = a_result.totals["error"]
+ log.info(
+ f"...{total_errors} import-errors encountered importing "
+ "{fpath}, attempt {curr_attempt}, retrying"
)
- )
- # Second attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
- a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(_("FATAL import-failure importing {}").format(fpath))
- raise
+ # Last attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
else:
a_result = resource.import_data(data, raise_errors=True)
return a_result
except AttributeError:
- log.error(_("FAILURE importing file {}!").format(fpath))
+ log.error(f"FAILURE loading import-file {fpath}!")
raise
| Concurrent PulpImports with overlapping content can still fail.
**Version**
core/3.14
**Describe the bug**
PulpImport imports repositories in parallel. If the repos in a single import-file have overlapping content, two repo-import threads can attempt to create the same object at the same time and collide. This is handled by retry logic that notices the collision and retries in the failing task.
HOWEVER - if "enough" repositories, with "enough" overlapping content, are imported in a system with "enough" available pulpcore-workers, then **more than two** tasks can collide. The first one "wins", the second fails, retries, and "wins", the third fails *twice* and aborts. We need to let PulpImport retry more than once.
Note that there needs to be a finite number of retries, because it's better to eventually give up than to fall into an infinite retry-hole due to some equally rare edge case.
**To Reproduce**
Steps to reproduce the behavior:
See discussion in https://bugzilla.redhat.com/show_bug.cgi?id=2056702 . We have seen this happen to exactly one user to date. We have not been able to reproduce it "in captivity" even once (but the hole clearly exists).
**Expected behavior**
PulpImport should retry an import more than once, in the event of an object-creation collision.
**Additional context**
Original collision-problem-report : https://pulp.plan.io/issues/8633
Retry fix : https://github.com/pulp/pulpcore/pull/1278
| 2022-05-03T18:28:03 |
||
pulp/pulpcore | 2,660 | pulp__pulpcore-2660 | [
"2589"
] | c5bfdc1a758b511e229833f356cb2c66684341da | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -44,6 +44,10 @@
VERSIONS_FILE = "versions.json"
CONTENT_MAPPING_FILE = "content_mapping.json"
+# Concurrent imports w/ overlapping content can collide - how many attempts are we willing to
+# make before we decide this is a fatal error?
+MAX_ATTEMPTS = 3
+
def _destination_repo(importer, source_repo_name):
"""Find the destination repository based on source repo's name."""
@@ -62,31 +66,34 @@ def _import_file(fpath, resource_class, retry=False):
resource = resource_class()
log.info(_("...Importing resource {}.").format(resource.__class__.__name__))
if retry:
- # django import-export can have a problem with concurrent-imports that are
- # importing the same 'thing' (e.g., a Package that exists in two different
- # repo-versions that are being imported at the same time). If we're asked to
- # retry, we will try an import that will simply record errors as they happen
- # (rather than failing with an exception) first. If errors happen, we'll do one
- # retry before we give up on this repo-version's import.
- a_result = resource.import_data(data, raise_errors=False)
- if a_result.has_errors():
- log.info(
- _("...{} import-errors encountered importing {}, retrying").format(
- a_result.totals["error"], fpath
+ curr_attempt = 1
+ while curr_attempt < MAX_ATTEMPTS:
+ curr_attempt += 1
+ # django import-export can have a problem with concurrent-imports that are
+ # importing the same 'thing' (e.g., a Package that exists in two different
+ # repo-versions that are being imported at the same time). If we're asked to
+ # retry, we will try an import that will simply record errors as they happen
+ # (rather than failing with an exception) first. If errors happen, we'll
+ # retry before we give up on this repo-version's import.
+ a_result = resource.import_data(data, raise_errors=False)
+ if a_result.has_errors():
+ total_errors = a_result.totals["error"]
+ log.info(
+ f"...{total_errors} import-errors encountered importing "
+ "{fpath}, attempt {curr_attempt}, retrying"
)
- )
- # Second attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
- a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(_("FATAL import-failure importing {}").format(fpath))
- raise
+ # Last attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
else:
a_result = resource.import_data(data, raise_errors=True)
return a_result
except AttributeError:
- log.error(_("FAILURE importing file {}!").format(fpath))
+ log.error(f"FAILURE loading import-file {fpath}!")
raise
| Concurrent PulpImports with overlapping content can still fail.
**Version**
core/3.14
**Describe the bug**
PulpImport imports repositories in parallel. If the repos in a single import-file have overlapping content, two repo-import threads can attempt to create the same object at the same time and collide. This is handled by retry logic that notices the collision and retries in the failing task.
HOWEVER - if "enough" repositories, with "enough" overlapping content, are imported in a system with "enough" available pulpcore-workers, then **more than two** tasks can collide. The first one "wins", the second fails, retries, and "wins", the third fails *twice* and aborts. We need to let PulpImport retry more than once.
Note that there needs to be a finite number of retries, because it's better to eventually give up than to fall into an infinite retry-hole due to some equally rare edge case.
**To Reproduce**
Steps to reproduce the behavior:
See discussion in https://bugzilla.redhat.com/show_bug.cgi?id=2056702 . We have seen this happen to exactly one user to date. We have not been able to reproduce it "in captivity" even once (but the hole clearly exists).
**Expected behavior**
PulpImport should retry an import more than once, in the event of an object-creation collision.
**Additional context**
Original collision-problem-report : https://pulp.plan.io/issues/8633
Retry fix : https://github.com/pulp/pulpcore/pull/1278
| 2022-05-03T18:29:41 |
||
pulp/pulpcore | 2,661 | pulp__pulpcore-2661 | [
"2589"
] | e96d9d842b0169e2f6f33f2935c9de0b5e91791c | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -44,6 +44,10 @@
VERSIONS_FILE = "versions.json"
CONTENT_MAPPING_FILE = "content_mapping.json"
+# Concurrent imports w/ overlapping content can collide - how many attempts are we willing to
+# make before we decide this is a fatal error?
+MAX_ATTEMPTS = 3
+
def _destination_repo(importer, source_repo_name):
"""Find the destination repository based on source repo's name."""
@@ -62,31 +66,34 @@ def _import_file(fpath, resource_class, retry=False):
resource = resource_class()
log.info(_("...Importing resource {}.").format(resource.__class__.__name__))
if retry:
- # django import-export can have a problem with concurrent-imports that are
- # importing the same 'thing' (e.g., a Package that exists in two different
- # repo-versions that are being imported at the same time). If we're asked to
- # retry, we will try an import that will simply record errors as they happen
- # (rather than failing with an exception) first. If errors happen, we'll do one
- # retry before we give up on this repo-version's import.
- a_result = resource.import_data(data, raise_errors=False)
- if a_result.has_errors():
- log.info(
- _("...{} import-errors encountered importing {}, retrying").format(
- a_result.totals["error"], fpath
+ curr_attempt = 1
+ while curr_attempt < MAX_ATTEMPTS:
+ curr_attempt += 1
+ # django import-export can have a problem with concurrent-imports that are
+ # importing the same 'thing' (e.g., a Package that exists in two different
+ # repo-versions that are being imported at the same time). If we're asked to
+ # retry, we will try an import that will simply record errors as they happen
+ # (rather than failing with an exception) first. If errors happen, we'll
+ # retry before we give up on this repo-version's import.
+ a_result = resource.import_data(data, raise_errors=False)
+ if a_result.has_errors():
+ total_errors = a_result.totals["error"]
+ log.info(
+ f"...{total_errors} import-errors encountered importing "
+ "{fpath}, attempt {curr_attempt}, retrying"
)
- )
- # Second attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
- a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(_("FATAL import-failure importing {}").format(fpath))
- raise
+ # Last attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
else:
a_result = resource.import_data(data, raise_errors=True)
return a_result
except AttributeError:
- log.error(_("FAILURE importing file {}!").format(fpath))
+ log.error(f"FAILURE loading import-file {fpath}!")
raise
| Concurrent PulpImports with overlapping content can still fail.
**Version**
core/3.14
**Describe the bug**
PulpImport imports repositories in parallel. If the repos in a single import-file have overlapping content, two repo-import threads can attempt to create the same object at the same time and collide. This is handled by retry logic that notices the collision and retries in the failing task.
HOWEVER - if "enough" repositories, with "enough" overlapping content, are imported in a system with "enough" available pulpcore-workers, then **more than two** tasks can collide. The first one "wins", the second fails, retries, and "wins", the third fails *twice* and aborts. We need to let PulpImport retry more than once.
Note that there needs to be a finite number of retries, because it's better to eventually give up than to fall into an infinite retry-hole due to some equally rare edge case.
**To Reproduce**
Steps to reproduce the behavior:
See discussion in https://bugzilla.redhat.com/show_bug.cgi?id=2056702 . We have seen this happen to exactly one user to date. We have not been able to reproduce it "in captivity" even once (but the hole clearly exists).
**Expected behavior**
PulpImport should retry an import more than once, in the event of an object-creation collision.
**Additional context**
Original collision-problem-report : https://pulp.plan.io/issues/8633
Retry fix : https://github.com/pulp/pulpcore/pull/1278
| 2022-05-03T18:31:20 |
||
pulp/pulpcore | 2,662 | pulp__pulpcore-2662 | [
"2589"
] | e70ea082d2d9dc2f71575366a4394b234e472937 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -44,6 +44,10 @@
VERSIONS_FILE = "versions.json"
CONTENT_MAPPING_FILE = "content_mapping.json"
+# Concurrent imports w/ overlapping content can collide - how many attempts are we willing to
+# make before we decide this is a fatal error?
+MAX_ATTEMPTS = 3
+
def _destination_repo(importer, source_repo_name):
"""Find the destination repository based on source repo's name."""
@@ -62,31 +66,34 @@ def _import_file(fpath, resource_class, retry=False):
resource = resource_class()
log.info(_("...Importing resource {}.").format(resource.__class__.__name__))
if retry:
- # django import-export can have a problem with concurrent-imports that are
- # importing the same 'thing' (e.g., a Package that exists in two different
- # repo-versions that are being imported at the same time). If we're asked to
- # retry, we will try an import that will simply record errors as they happen
- # (rather than failing with an exception) first. If errors happen, we'll do one
- # retry before we give up on this repo-version's import.
- a_result = resource.import_data(data, raise_errors=False)
- if a_result.has_errors():
- log.info(
- _("...{} import-errors encountered importing {}, retrying").format(
- a_result.totals["error"], fpath
+ curr_attempt = 1
+ while curr_attempt < MAX_ATTEMPTS:
+ curr_attempt += 1
+ # django import-export can have a problem with concurrent-imports that are
+ # importing the same 'thing' (e.g., a Package that exists in two different
+ # repo-versions that are being imported at the same time). If we're asked to
+ # retry, we will try an import that will simply record errors as they happen
+ # (rather than failing with an exception) first. If errors happen, we'll
+ # retry before we give up on this repo-version's import.
+ a_result = resource.import_data(data, raise_errors=False)
+ if a_result.has_errors():
+ total_errors = a_result.totals["error"]
+ log.info(
+ f"...{total_errors} import-errors encountered importing "
+ "{fpath}, attempt {curr_attempt}, retrying"
)
- )
- # Second attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
- a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(_("FATAL import-failure importing {}").format(fpath))
- raise
+ # Last attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
else:
a_result = resource.import_data(data, raise_errors=True)
return a_result
except AttributeError:
- log.error(_("FAILURE importing file {}!").format(fpath))
+ log.error(f"FAILURE loading import-file {fpath}!")
raise
| Concurrent PulpImports with overlapping content can still fail.
**Version**
core/3.14
**Describe the bug**
PulpImport imports repositories in parallel. If the repos in a single import-file have overlapping content, two repo-import threads can attempt to create the same object at the same time and collide. This is handled by retry logic that notices the collision and retries in the failing task.
HOWEVER - if "enough" repositories, with "enough" overlapping content, are imported in a system with "enough" available pulpcore-workers, then **more than two** tasks can collide. The first one "wins", the second fails, retries, and "wins", the third fails *twice* and aborts. We need to let PulpImport retry more than once.
Note that there needs to be a finite number of retries, because it's better to eventually give up than to fall into an infinite retry-hole due to some equally rare edge case.
**To Reproduce**
Steps to reproduce the behavior:
See discussion in https://bugzilla.redhat.com/show_bug.cgi?id=2056702 . We have seen this happen to exactly one user to date. We have not been able to reproduce it "in captivity" even once (but the hole clearly exists).
**Expected behavior**
PulpImport should retry an import more than once, in the event of an object-creation collision.
**Additional context**
Original collision-problem-report : https://pulp.plan.io/issues/8633
Retry fix : https://github.com/pulp/pulpcore/pull/1278
| 2022-05-03T18:33:34 |
||
pulp/pulpcore | 2,663 | pulp__pulpcore-2663 | [
"2654"
] | f5b0cb2a39eb909934c1b6935d139dc895838683 | diff --git a/pulpcore/download/http.py b/pulpcore/download/http.py
--- a/pulpcore/download/http.py
+++ b/pulpcore/download/http.py
@@ -256,6 +256,17 @@ async def download_wrapper():
return await self._run(extra_data=extra_data)
except asyncio.TimeoutError:
raise TimeoutException(self.url)
+ except aiohttp.ClientHttpProxyError as e:
+ log.error(
+ "Proxy {!r} rejected connection request during a request to "
+ "{!r}, status={}, message={!r}".format(
+ e.request_info.real_url,
+ e.request_info.url,
+ e.status,
+ e.message,
+ )
+ )
+ raise e
return await download_wrapper()
| Improve proxy connection failure error message
Raise a more informative error message when the proxy rejects requests from Pulp
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=2047485
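For illustration, a minimal standalone sketch of surfacing the proxy rejection (not Pulp code; the URL and proxy below are placeholders, and the attributes used are aiohttp's `ClientHttpProxyError.request_info`, `.status` and `.message`):
```python
import logging

import aiohttp

log = logging.getLogger(__name__)


async def fetch_via_proxy(url, proxy):
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(url, proxy=proxy) as response:
                return await response.read()
    except aiohttp.ClientHttpProxyError as exc:
        # Say which proxy rejected the request, and why, before re-raising.
        log.error(
            "Proxy %r rejected connection request during a request to %r, status=%s, message=%r",
            exc.request_info.real_url,
            exc.request_info.url,
            exc.status,
            exc.message,
        )
        raise


# Example usage (placeholder addresses):
# asyncio.run(fetch_via_proxy("https://example.com/some.rpm", "http://proxy.example:3128"))
```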
| 2022-05-03T19:58:00 |
||
pulp/pulpcore | 2,664 | pulp__pulpcore-2664 | [
"2654"
] | 3140e913d515e043a894fe65e1bb818b432ceb55 | diff --git a/pulpcore/download/http.py b/pulpcore/download/http.py
--- a/pulpcore/download/http.py
+++ b/pulpcore/download/http.py
@@ -256,6 +256,17 @@ async def download_wrapper():
return await self._run(extra_data=extra_data)
except asyncio.TimeoutError:
raise TimeoutException(self.url)
+ except aiohttp.ClientHttpProxyError as e:
+ log.error(
+ "Proxy {!r} rejected connection request during a request to "
+ "{!r}, status={}, message={!r}".format(
+ e.request_info.real_url,
+ e.request_info.url,
+ e.status,
+ e.message,
+ )
+ )
+ raise e
return await download_wrapper()
| Improve proxy connection failure error message
Raise a more informative error message when the proxy rejects requests from Pulp
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=2047485
| 2022-05-03T19:58:14 |
||
pulp/pulpcore | 2,665 | pulp__pulpcore-2665 | [
"2654"
] | 9dae4c0c95bbb7a232b144c83c553dd91b489a20 | diff --git a/pulpcore/download/http.py b/pulpcore/download/http.py
--- a/pulpcore/download/http.py
+++ b/pulpcore/download/http.py
@@ -256,6 +256,17 @@ async def download_wrapper():
return await self._run(extra_data=extra_data)
except asyncio.TimeoutError:
raise TimeoutException(self.url)
+ except aiohttp.ClientHttpProxyError as e:
+ log.error(
+ "Proxy {!r} rejected connection request during a request to "
+ "{!r}, status={}, message={!r}".format(
+ e.request_info.real_url,
+ e.request_info.url,
+ e.status,
+ e.message,
+ )
+ )
+ raise e
return await download_wrapper()
| Improve proxy connection failure error message
Raise a more informative error message when the proxy rejects requests from Pulp
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=2047485
| 2022-05-03T19:58:29 |
||
pulp/pulpcore | 2,666 | pulp__pulpcore-2666 | [
"2654"
] | 64a3cf43c2210269629a9455b9251f7082f89717 | diff --git a/pulpcore/download/http.py b/pulpcore/download/http.py
--- a/pulpcore/download/http.py
+++ b/pulpcore/download/http.py
@@ -256,6 +256,17 @@ async def download_wrapper():
return await self._run(extra_data=extra_data)
except asyncio.TimeoutError:
raise TimeoutException(self.url)
+ except aiohttp.ClientHttpProxyError as e:
+ log.error(
+ "Proxy {!r} rejected connection request during a request to "
+ "{!r}, status={}, message={!r}".format(
+ e.request_info.real_url,
+ e.request_info.url,
+ e.status,
+ e.message,
+ )
+ )
+ raise e
return await download_wrapper()
| Improve proxy connection failure error message
Raise a more informative error message when the proxy rejects requests from Pulp
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=2047485
| 2022-05-03T19:58:59 |
||
pulp/pulpcore | 2,684 | pulp__pulpcore-2684 | [
"2075"
] | 966c1e072ac2dedea4b60d123a067a3be38f798a | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -790,7 +790,7 @@ async def _serve_content_artifact(self, content_artifact, headers, request):
elif not settings.REDIRECT_TO_OBJECT_STORAGE:
return ArtifactResponse(content_artifact.artifact, headers=headers)
elif settings.DEFAULT_FILE_STORAGE == "storages.backends.s3boto3.S3Boto3Storage":
- content_disposition = f"attachment;filename={content_artifact.relative_path}"
+ content_disposition = f"attachment%3Bfilename={content_artifact.relative_path}"
parameters = {"ResponseContentDisposition": content_disposition}
if headers.get("Content-Type"):
parameters["ResponseContentType"] = headers.get("Content-Type")
@@ -802,7 +802,7 @@ async def _serve_content_artifact(self, content_artifact, headers, request):
)
raise HTTPFound(url)
elif settings.DEFAULT_FILE_STORAGE == "storages.backends.azure_storage.AzureStorage":
- content_disposition = f"attachment;filename={artifact_name}"
+ content_disposition = f"attachment%3Bfilename={artifact_name}"
parameters = {"content_disposition": content_disposition}
if headers.get("Content-Type"):
parameters["content_type"] = headers.get("Content-Type")
| S3 URL generation for content artifacts is broken for apt clients when using Minio behind a load-balancer
Author: jlsm-se (jlsm-se)
Redmine Issue: 9669, https://pulp.plan.io/issues/9669
---
Here's the simplified setup I've used to reproduce this issue:
~~~
Pulp server:
CentOS 8
pulp_installer/pulpcore 3.17.2
pulp_deb 2.16
Minio server:
Ubuntu 18.04.6
minio 2022-01-08T03:11:54Z
HTTP load-balancer in front of Minio:
CentOS 8
haproxy 1.8.27 (also tried with nginx and sidekick)
apt client:
Ubuntu 18.04.6
apt 1.16.14
~~~
When I run `apt-get update` on a client configured to use the Pulp server, Pulp responds with the 302 redirect pointing to the HTTP load-balancer I've set up in front of Minio. So far so good.
The problem is that the redirect URL contains a semicolon as a query separator, which none of the load-balancers I've tried seem to handle correctly (the `filename` parameter in `response-content-disposition` seems to get discarded). The apt client always gets a 4XX error (e.g. `401 unauthorized`).
This seems to happen because content-proxies (that is, the load-balancers) will strip semicolons from query parameters, [because that is what the WHATWG has recommended since December 2017](https://www.w3.org/TR/2017/REC-html52-20171214/), and the somewhat recently discovered [cache poisoning attacks](https://snyk.io/blog/cache-poisoning-in-popular-open-source-packages/) seem to have sped up efforts to follow this recommendation among languages and frameworks (see [CVE-2021-23336](https://bugs.python.org/issue42967)).
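As a small illustration (a made-up query string, parsed with Python's `urllib.parse.parse_qs`; the `separator` keyword exists on interpreter builds that contain the CVE-2021-23336 fix):
```python
from urllib.parse import parse_qs

# Made-up presigned-URL query with a ';' inside the response-content-disposition value.
query = "response-content-disposition=attachment;filename=Release&X-Amz-Signature=abc"

# Patched default: only '&' separates parameters, so the ';' stays inside the value.
print(parse_qs(query))
# {'response-content-disposition': ['attachment;filename=Release'], 'X-Amz-Signature': ['abc']}

# Anything that still treats ';' as a separator splits the value apart, so the
# filename piece is no longer part of the parameter that was originally signed.
print(parse_qs(query, separator=";"))
# {'response-content-disposition': ['attachment'], 'filename': ['Release&X-Amz-Signature=abc']}
```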
These two comments in the golang issue tracker helped me come to this conclusion:
https://github.com/golang/go/issues/25192#issuecomment-385662789
https://github.com/golang/go/issues/25192#issuecomment-789799446
I've managed to hackishly solve the issue (apt clients can now use my repos!) with the patch below, but I'm not sure it's actually the correct solution, or even the safest, since it still involves a semicolon as a query separator. Ideally the semicolon would be avoided entirely, but I'm not sure whether the AWS S3 specs allow for that.
~~~ diff
diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
index 1d8e834c6..0db26d1eb 100644
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -773,7 +773,8 @@ class Handler:
if settings.DEFAULT_FILE_STORAGE == "pulpcore.app.models.storage.FileSystem":
return FileResponse(os.path.join(settings.MEDIA_ROOT, artifact_name), headers=headers)
elif settings.DEFAULT_FILE_STORAGE == "storages.backends.s3boto3.S3Boto3Storage":
- content_disposition = f"attachment;filename={content_artifact.relative_path}"
+ content_disposition = f"attachment%3Bfilename={content_artifact.relative_path}"
parameters = {"ResponseContentDisposition": content_disposition}
if headers.get("Content-Type"):
parameters["ResponseContentType"] = headers.get("Content-Type")
~~~
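For anyone reproducing this, a quick way to see the exact redirect URL apt receives is to ask Pulp for the file without following the redirect (placeholder distribution URL; assumes the `requests` library):
```python
import requests

# Placeholder distribution URL pointing at a pulp_deb Release file.
url = "https://pulp.example.com/pulp/content/ubuntu/dists/stable/Release"

resp = requests.get(url, allow_redirects=False)
print(resp.status_code)              # expect 302 when redirecting to object storage
print(resp.headers.get("Location"))  # the presigned URL, including
                                     # response-content-disposition, as the proxy will see it
```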
| I confirm.
Without the above-mentioned patch, apt gets the wrong link to the Release file in S3. Curl and browsers auto-correct the link themselves, but apt does not. | 2022-05-06T17:07:11 |
|
pulp/pulpcore | 2,685 | pulp__pulpcore-2685 | [
"2307"
] | fe923775a52ebe33250a216101a0ddbfedb3c914 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -10,7 +10,8 @@
from django.core.files.storage import default_storage
from django.db.models import F
-
+from naya.json import stream_array, tokenize
+from io import StringIO
from pkg_resources import DistributionNotFound, get_distribution
from rest_framework.serializers import ValidationError
from tablib import Dataset
@@ -43,6 +44,8 @@
CA_FILE = "pulpcore.app.modelresource.ContentArtifactResource.json"
VERSIONS_FILE = "versions.json"
CONTENT_MAPPING_FILE = "content_mapping.json"
+# How many entities from an import-file should be processed at one time
+IMPORT_BATCH_SIZE = 100
# Concurrent imports w/ overlapping content can collide - how many attempts are we willing to
# make before we decide this is a fatal error?
@@ -58,22 +61,52 @@ def _destination_repo(importer, source_repo_name):
return Repository.objects.get(name=dest_repo_name)
+def _impfile_iterator(fd):
+ """
+ Iterate over an import-file returning batches of rows as a json-array-string.
+
+ We use naya.json.stream_array() to get individual rows; once a batch is gathered,
+ we yield the result of json.dumps() for that batch. Repeat until all rows have been
+ called for.
+ """
+ eof = False
+ batch = []
+ rows = stream_array(tokenize(fd))
+ while not eof:
+ try:
+ while len(batch) < IMPORT_BATCH_SIZE:
+ batch.append(next(rows))
+ except StopIteration:
+ eof = True
+ yield json.dumps(batch)
+ batch.clear()
+
+
def _import_file(fpath, resource_class, retry=False):
+ """
+ Import the specified resource-file in batches to limit memory-use.
+
+ We process resource-files one "batch" at a time. Because of the way django-import's
+ internals work, we have to feed it batches as StringIO-streams of json-formatted strings.
+ The file-to-json-to-string-to-import is overhead, but it lets us put an upper bound on the
+ number of entities in memory at any one time at import-time.
+ """
try:
- log.info(_("Importing file {}.").format(fpath))
+ log.info(f"Importing file {fpath}.")
with open(fpath, "r") as json_file:
- data = Dataset().load(json_file, format="json")
resource = resource_class()
- log.info(_("...Importing resource {}.").format(resource.__class__.__name__))
- if retry:
- curr_attempt = 1
- while curr_attempt < MAX_ATTEMPTS:
- curr_attempt += 1
+ log.info("...Importing resource {resource.__class__.__name__}.")
+ # Load one batch-sized chunk of the specified import-file at a time. If requested,
+ # retry a batch if it looks like we collided with some other repo being imported with
+ # overlapping content.
+ for batch_str in _impfile_iterator(json_file):
+ data = Dataset().load(StringIO(batch_str))
+ if retry:
# django import-export can have a problem with concurrent-imports that are
# importing the same 'thing' (e.g., a Package that exists in two different
# repo-versions that are being imported at the same time). If we're asked to
# retry, we will try an import that will simply record errors as they happen
- # (rather than failing with an exception) first. If errors happen, we'll
+ # (rather than failing with an exception) first. If errors happen, we'll do one
# retry before we give up on this repo-version's import.
a_result = resource.import_data(data, raise_errors=False)
if a_result.has_errors():
@@ -82,16 +115,16 @@ def _import_file(fpath, resource_class, retry=False):
f"...{total_errors} import-errors encountered importing "
"{fpath}, attempt {curr_attempt}, retrying"
)
- # Last attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
+ # Second attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
+ else:
a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(f"FATAL import-failure importing {fpath}")
- raise
- else:
- a_result = resource.import_data(data, raise_errors=True)
- return a_result
+ yield a_result
except AttributeError:
log.error(f"FAILURE loading import-file {fpath}!")
raise
@@ -194,15 +227,19 @@ def import_repository_version(importer_pk, destination_repo_pk, source_repo_name
resulting_content_ids = []
for res_class in cfg.exportable_classes:
+ content_count = 0
filename = f"{res_class.__module__}.{res_class.__name__}.json"
- a_result = _import_file(os.path.join(rv_path, filename), res_class, retry=True)
- resulting_content_ids.extend(
- row.object_id for row in a_result.rows if row.import_type in ("new", "update")
- )
+ for a_result in _import_file(os.path.join(rv_path, filename), res_class, retry=True):
+ content_count += len(a_result.rows)
+ resulting_content_ids.extend(
+ row.object_id for row in a_result.rows if row.import_type in ("new", "update")
+ )
# Once all content exists, create the ContentArtifact links
ca_path = os.path.join(rv_path, CA_FILE)
- _import_file(ca_path, ContentArtifactResource, retry=True)
+ # We don't do anything with the imported batches, we just need to get them imported
+ for a_batch in _import_file(ca_path, ContentArtifactResource, retry=True):
+ pass
# see if we have a content mapping
mapping_path = f"{rv_name}/{CONTENT_MAPPING_FILE}"
@@ -399,20 +436,24 @@ def validate_and_assemble(toc_filename):
_check_versions(version_json)
# Artifacts
- ar_result = _import_file(os.path.join(temp_dir, ARTIFACT_FILE), ArtifactResource)
data = dict(
- message="Importing Artifacts", code="import.artifacts", total=len(ar_result.rows)
+ message="Importing Artifacts",
+ code="import.artifacts",
)
with ProgressReport(**data) as pb:
- for row in pb.iter(ar_result.rows):
- artifact = Artifact.objects.get(pk=row.object_id)
- base_path = os.path.join("artifact", artifact.sha256[0:2], artifact.sha256[2:])
- src = os.path.join(temp_dir, base_path)
-
- if not default_storage.exists(base_path):
- with open(src, "rb") as f:
- default_storage.save(base_path, f)
-
+ # Import artifacts, and place their binary blobs, one batch at a time.
+ # Skip artifacts that already exist in storage.
+ for ar_result in _import_file(os.path.join(temp_dir, ARTIFACT_FILE), ArtifactResource):
+ for row in pb.iter(ar_result.rows):
+ artifact = Artifact.objects.get(pk=row.object_id)
+ base_path = os.path.join("artifact", artifact.sha256[0:2], artifact.sha256[2:])
+ src = os.path.join(temp_dir, base_path)
+
+ if not default_storage.exists(base_path):
+ with open(src, "rb") as f:
+ default_storage.save(base_path, f)
+
+ # Now import repositories, in parallel.
with open(os.path.join(temp_dir, REPO_FILE), "r") as repo_data_file:
data = json.load(repo_data_file)
gpr = GroupProgressReport(
@@ -438,7 +479,7 @@ def validate_and_assemble(toc_filename):
dispatch(
import_repository_version,
exclusive_resources=[dest_repo],
- args=[importer.pk, dest_repo.pk, src_repo["name"], path],
+ args=(importer.pk, dest_repo.pk, src_repo["name"], path),
task_group=task_group,
)
| Memory error when importing large repository
**Description of problem:**
Getting a MemoryError when importing a large repository, such as the rhel-7-server-rpms repo.
The PackageResource.json file of the rhel-7-server-rpms repo is about 5.5GB. Pulp uses the `json.load` method to decode the json file. Decoding the json uses 18GB+ of memory and returns a 5.5GB python dictionary. If the system doesn't have enough memory, the json decode will fail with a MemoryError.
hammer content-import version --path /var/lib/pulp/imports/Default_Organization/rhel-7-imported/1.0/2022-03-04T17-17-09-11-00/ --organization-id 1
[................................................ ] [24%]
Error: 1 subtask(s) failed for task group /pulp/api/v3/task-groups/ebc7514a-f606-4965-a599-eab74101e9b0/.
**/var/log/messages**
pulpcore-worker-2: pulp [6eda5a34-529f-42b9-909b-fb526c9f35e0]: pulpcore.tasking.pulpcore_worker:INFO: Task d5f26164-087a-4acb-aca1-7aed30851040 failed ()
pulpcore-worker-2: pulp [6eda5a34-529f-42b9-909b-fb526c9f35e0]: pulpcore.tasking.pulpcore_worker:INFO: File "/usr/lib/python3.6/site-packages/pulpcore/tasking/pulpcore_worker.py", line 317, in _perform_task
pulpcore-worker-2: result = func(*args, **kwargs)
pulpcore-worker-2: File "/usr/lib/python3.6/site-packages/pulpcore/app/tasks/importer.py", line 161, in import_repository_version
pulpcore-worker-2: a_result = _import_file(os.path.join(rv_path, filename), res_class, do_raise=False)
pulpcore-worker-2: File "/usr/lib/python3.6/site-packages/pulpcore/app/tasks/importer.py", line 62, in _import_file
pulpcore-worker-2: data = Dataset().load(json_file.read(), format="json")
pulpcore-worker-2: File "/usr/lib64/python3.6/codecs.py", line 321, in decode
pulpcore-worker-2: (result, consumed) = self._buffer_decode(data, self.errors, final)
**Output of top command**
16011 pulp 20 0 5924132 2.5g 2496 D 25.0 16.7 21:43.78 pulpcore-worker
16011 pulp 20 0 5924132 2.8g 2496 D 6.2 18.7 21:44.76 pulpcore-worker
16011 pulp 20 0 5924132 3.1g 2496 D 12.5 20.7 21:45.55 pulpcore-worker
16011 pulp 20 0 5924132 3.5g 2496 D 12.5 23.2 21:46.38 pulpcore-worker
16011 pulp 20 0 5924132 3.9g 2496 R 25.0 25.7 21:47.39 pulpcore-worker
16011 pulp 20 0 5924132 4.2g 2496 D 12.5 27.7 21:48.38 pulpcore-worker
16011 pulp 20 0 5924132 4.5g 2496 R 66.7 29.5 21:49.35 pulpcore-worker
16011 pulp 20 0 5924132 4.9g 2496 D 18.8 32.2 21:50.58 pulpcore-worker
16011 pulp 20 0 5924132 5.3g 2496 R 26.7 34.7 21:51.53 pulpcore-worker
**Output of free command**
total used free shared buff/cache available
Mem: 15879256 10213472 167648 193168 5498136 5135336
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 10516964 162816 193168 5199476 4832256
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 10774956 179184 193168 4925116 4573800
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11009476 168300 193168 4701480 4339684
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11354424 179644 193168 4345188 3994460
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11653416 154880 193168 4070960 3695840
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11960864 150900 193168 3767492 3388684
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 12341972 150652 193168 3386632 3007040
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 12741088 157716 193168 2980452 2608400
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 13012212 159016 193168 2708028 2337108
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 13309296 171804 193168 2398156 2039872
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 13726688 180136 193168 1972432 1622400
total used free shared buff/cache available
Mem: 15879256 14151480 169480 193168 1558296 1197956
Swap: 8060924 352 8060572
**Test to decode the PackageResource.json file in the python console.**
```
python3
>>> fh = open("/var/lib/pulp/imports/Default_Organization/rhel-7-imported/1.0/2022-03-04T17-17-09-11-00/repository-Default_Organization-Red_Hat_Enterprise_Linux_Server-Red_Hat_Enterprise_Linux_7_Server_RPMs_x86_64_7Server_13/pulp_rpm.app.modelresource.PackageResource.json")
>>> import json
>>> json.load(fh)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib64/python3.6/json/__init__.py", line 296, in load
return loads(fp.read(),
File "/usr/lib64/python3.6/codecs.py", line 321, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
MemoryError <===========
```
```
top -o %MEM
top - 11:50:42 up 1 day, 14:42, 3 users, load average: 0.49, 0.15, 0.09
Tasks: 134 total, 3 running, 131 sleeping, 0 stopped, 0 zombie
%Cpu0 : 0.0 us, 3.3 sy, 0.0 ni, 96.7 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu1 : 0.0 us, 3.7 sy, 0.0 ni, 96.3 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu2 : 0.0 us, 7.0 sy, 0.0 ni, 93.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu3 : 31.8 us, 68.2 sy, 0.0 ni, 0.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
KiB Mem : 99.2/19906644 [||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||]
KiB Swap: 1.0/8060924 [| ]
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
10034 root 20 0 20.8g 18.1g 3268 R 100.0 95.4 1:23.00 python3 <==========
```
**Steps to Reproduce:**
1. Prepare a disconnected Satellite with only 15GB RAM to easily reproduce the issue
2. Import a content view with only the rhel-7-server-rpms repo to the disconnected Satellite
Actual results:
Failed with memory error
Expected results:
No error and should consume only reasonable amount of memory.
**Research and proposed potential solutions:**
1) Split the resource.json file into a number of chunk files when exporting the contents.
2) Or keep one large resource.json (a non-standard json file) in which each chunk is separated by a newline, so that the import can read it line by line.
3) Or use a third-party streaming json library, such as ijson, to read the json file in chunks and save memory (see the sketch below).
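A minimal sketch of option 3, assuming the third-party `ijson` package (the path is the file from the reproducer above and the batch size is arbitrary):
```python
import ijson

path = "pulp_rpm.app.modelresource.PackageResource.json"  # placeholder path
batch = []

with open(path, "rb") as fh:
    # "item" yields one element of the top-level JSON array at a time, so memory
    # use is bounded by the batch size instead of the size of the whole file.
    for package in ijson.items(fh, "item"):
        batch.append(package)
        if len(batch) >= 100:
            # hand the batch to the importer here
            batch.clear()

if batch:
    # hand the final partial batch to the importer here
    pass
```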
https://bugzilla.redhat.com/show_bug.cgi?id=2061224
| Is this issue distinct from https://github.com/pulp/pulpcore/issues/2072?
What version are you seeing this on? Is there a BZ that can be attached?
@dralley: Yes. This is a different issue from #2072. #2072 fixed only the export part, but unfortunately not the import part. The import still has the memory issue after the fix in the line below.
https://github.com/pulp/pulpcore/pull/1782/files#diff-06badc442eb525dc6f2de7f4e19a7762ce20a972b79b6a90cf1ee80211053ab5L62-R62
I have created a Bugzilla for it:
https://bugzilla.redhat.com/show_bug.cgi?id=2061224 | 2022-05-06T17:13:38 |
|
pulp/pulpcore | 2,686 | pulp__pulpcore-2686 | [
"2307"
] | 157842021ea51613f052276caaef26ecabf60bda | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -10,7 +10,8 @@
from django.core.files.storage import default_storage
from django.db.models import F
-
+from naya.json import stream_array, tokenize
+from io import StringIO
from pkg_resources import DistributionNotFound, get_distribution
from rest_framework.serializers import ValidationError
from tablib import Dataset
@@ -43,6 +44,8 @@
CA_FILE = "pulpcore.app.modelresource.ContentArtifactResource.json"
VERSIONS_FILE = "versions.json"
CONTENT_MAPPING_FILE = "content_mapping.json"
+# How many entities from an import-file should be processed at one time
+IMPORT_BATCH_SIZE = 100
# Concurrent imports w/ overlapping content can collide - how many attempts are we willing to
# make before we decide this is a fatal error?
@@ -58,22 +61,52 @@ def _destination_repo(importer, source_repo_name):
return Repository.objects.get(name=dest_repo_name)
+def _impfile_iterator(fd):
+ """
+ Iterate over an import-file returning batches of rows as a json-array-string.
+
+ We use naya.json.stream_array() to get individual rows; once a batch is gathered,
+ we yield the result of json.dumps() for that batch. Repeat until all rows have been
+ called for.
+ """
+ eof = False
+ batch = []
+ rows = stream_array(tokenize(fd))
+ while not eof:
+ try:
+ while len(batch) < IMPORT_BATCH_SIZE:
+ batch.append(next(rows))
+ except StopIteration:
+ eof = True
+ yield json.dumps(batch)
+ batch.clear()
+
+
def _import_file(fpath, resource_class, retry=False):
+ """
+ Import the specified resource-file in batches to limit memory-use.
+
+ We process resource-files one "batch" at a time. Because of the way django-import's
+ internals work, we have to feed it batches as StringIO-streams of json-formatted strings.
+ The file-to-json-to-string-to-import is overhead, but it lets us put an upper bound on the
+ number of entities in memory at any one time at import-time.
+ """
try:
- log.info(_("Importing file {}.").format(fpath))
+ log.info(f"Importing file {fpath}.")
with open(fpath, "r") as json_file:
- data = Dataset().load(json_file, format="json")
resource = resource_class()
- log.info(_("...Importing resource {}.").format(resource.__class__.__name__))
- if retry:
- curr_attempt = 1
- while curr_attempt < MAX_ATTEMPTS:
- curr_attempt += 1
+ log.info("...Importing resource {resource.__class__.__name__}.")
+ # Load one batch-sized chunk of the specified import-file at a time. If requested,
+ # retry a batch if it looks like we collided with some other repo being imported with
+ # overlapping content.
+ for batch_str in _impfile_iterator(json_file):
+ data = Dataset().load(StringIO(batch_str))
+ if retry:
# django import-export can have a problem with concurrent-imports that are
# importing the same 'thing' (e.g., a Package that exists in two different
# repo-versions that are being imported at the same time). If we're asked to
# retry, we will try an import that will simply record errors as they happen
- # (rather than failing with an exception) first. If errors happen, we'll
+ # (rather than failing with an exception) first. If errors happen, we'll do one
# retry before we give up on this repo-version's import.
a_result = resource.import_data(data, raise_errors=False)
if a_result.has_errors():
@@ -82,16 +115,16 @@ def _import_file(fpath, resource_class, retry=False):
f"...{total_errors} import-errors encountered importing "
"{fpath}, attempt {curr_attempt}, retrying"
)
- # Last attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
+ # Second attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
+ else:
a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(f"FATAL import-failure importing {fpath}")
- raise
- else:
- a_result = resource.import_data(data, raise_errors=True)
- return a_result
+ yield a_result
except AttributeError:
log.error(f"FAILURE loading import-file {fpath}!")
raise
@@ -194,15 +227,19 @@ def import_repository_version(importer_pk, destination_repo_pk, source_repo_name
resulting_content_ids = []
for res_class in cfg.exportable_classes:
+ content_count = 0
filename = f"{res_class.__module__}.{res_class.__name__}.json"
- a_result = _import_file(os.path.join(rv_path, filename), res_class, retry=True)
- resulting_content_ids.extend(
- row.object_id for row in a_result.rows if row.import_type in ("new", "update")
- )
+ for a_result in _import_file(os.path.join(rv_path, filename), res_class, retry=True):
+ content_count += len(a_result.rows)
+ resulting_content_ids.extend(
+ row.object_id for row in a_result.rows if row.import_type in ("new", "update")
+ )
# Once all content exists, create the ContentArtifact links
ca_path = os.path.join(rv_path, CA_FILE)
- _import_file(ca_path, ContentArtifactResource, retry=True)
+ # We don't do anything with the imported batches, we just need to get them imported
+ for a_batch in _import_file(ca_path, ContentArtifactResource, retry=True):
+ pass
# see if we have a content mapping
mapping_path = f"{rv_name}/{CONTENT_MAPPING_FILE}"
@@ -399,20 +436,24 @@ def validate_and_assemble(toc_filename):
_check_versions(version_json)
# Artifacts
- ar_result = _import_file(os.path.join(temp_dir, ARTIFACT_FILE), ArtifactResource)
data = dict(
- message="Importing Artifacts", code="import.artifacts", total=len(ar_result.rows)
+ message="Importing Artifacts",
+ code="import.artifacts",
)
with ProgressReport(**data) as pb:
- for row in pb.iter(ar_result.rows):
- artifact = Artifact.objects.get(pk=row.object_id)
- base_path = os.path.join("artifact", artifact.sha256[0:2], artifact.sha256[2:])
- src = os.path.join(temp_dir, base_path)
-
- if not default_storage.exists(base_path):
- with open(src, "rb") as f:
- default_storage.save(base_path, f)
-
+ # Import artifacts, and place their binary blobs, one batch at a time.
+ # Skip artifacts that already exist in storage.
+ for ar_result in _import_file(os.path.join(temp_dir, ARTIFACT_FILE), ArtifactResource):
+ for row in pb.iter(ar_result.rows):
+ artifact = Artifact.objects.get(pk=row.object_id)
+ base_path = os.path.join("artifact", artifact.sha256[0:2], artifact.sha256[2:])
+ src = os.path.join(temp_dir, base_path)
+
+ if not default_storage.exists(base_path):
+ with open(src, "rb") as f:
+ default_storage.save(base_path, f)
+
+ # Now import repositories, in parallel.
with open(os.path.join(temp_dir, REPO_FILE), "r") as repo_data_file:
data = json.load(repo_data_file)
gpr = GroupProgressReport(
@@ -438,7 +479,7 @@ def validate_and_assemble(toc_filename):
dispatch(
import_repository_version,
exclusive_resources=[dest_repo],
- args=[importer.pk, dest_repo.pk, src_repo["name"], path],
+ args=(importer.pk, dest_repo.pk, src_repo["name"], path),
task_group=task_group,
)
| Memory error when importing large repository
**Description of problem:**
Getting a MemoryError when importing a large repository, such as the rhel-7-server-rpms repo.
The PackageResource.json file of the rhel-7-server-rpms repo is about 5.5GB. Pulp uses the `json.load` method to decode the json file. Decoding the json uses 18GB+ of memory and returns a 5.5GB python dictionary. If the system doesn't have enough memory, the json decode will fail with a MemoryError.
hammer content-import version --path /var/lib/pulp/imports/Default_Organization/rhel-7-imported/1.0/2022-03-04T17-17-09-11-00/ --organization-id 1
[................................................ ] [24%]
Error: 1 subtask(s) failed for task group /pulp/api/v3/task-groups/ebc7514a-f606-4965-a599-eab74101e9b0/.
**/var/log/messages**
pulpcore-worker-2: pulp [6eda5a34-529f-42b9-909b-fb526c9f35e0]: pulpcore.tasking.pulpcore_worker:INFO: Task d5f26164-087a-4acb-aca1-7aed30851040 failed ()
pulpcore-worker-2: pulp [6eda5a34-529f-42b9-909b-fb526c9f35e0]: pulpcore.tasking.pulpcore_worker:INFO: File "/usr/lib/python3.6/site-packages/pulpcore/tasking/pulpcore_worker.py", line 317, in _perform_task
pulpcore-worker-2: result = func(*args, **kwargs)
pulpcore-worker-2: File "/usr/lib/python3.6/site-packages/pulpcore/app/tasks/importer.py", line 161, in import_repository_version
pulpcore-worker-2: a_result = _import_file(os.path.join(rv_path, filename), res_class, do_raise=False)
pulpcore-worker-2: File "/usr/lib/python3.6/site-packages/pulpcore/app/tasks/importer.py", line 62, in _import_file
pulpcore-worker-2: data = Dataset().load(json_file.read(), format="json")
pulpcore-worker-2: File "/usr/lib64/python3.6/codecs.py", line 321, in decode
pulpcore-worker-2: (result, consumed) = self._buffer_decode(data, self.errors, final)
**Output of top command**
16011 pulp 20 0 5924132 2.5g 2496 D 25.0 16.7 21:43.78 pulpcore-worker
16011 pulp 20 0 5924132 2.8g 2496 D 6.2 18.7 21:44.76 pulpcore-worker
16011 pulp 20 0 5924132 3.1g 2496 D 12.5 20.7 21:45.55 pulpcore-worker
16011 pulp 20 0 5924132 3.5g 2496 D 12.5 23.2 21:46.38 pulpcore-worker
16011 pulp 20 0 5924132 3.9g 2496 R 25.0 25.7 21:47.39 pulpcore-worker
16011 pulp 20 0 5924132 4.2g 2496 D 12.5 27.7 21:48.38 pulpcore-worker
16011 pulp 20 0 5924132 4.5g 2496 R 66.7 29.5 21:49.35 pulpcore-worker
16011 pulp 20 0 5924132 4.9g 2496 D 18.8 32.2 21:50.58 pulpcore-worker
16011 pulp 20 0 5924132 5.3g 2496 R 26.7 34.7 21:51.53 pulpcore-worker
**Output of free command**
total used free shared buff/cache available
Mem: 15879256 10213472 167648 193168 5498136 5135336
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 10516964 162816 193168 5199476 4832256
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 10774956 179184 193168 4925116 4573800
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11009476 168300 193168 4701480 4339684
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11354424 179644 193168 4345188 3994460
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11653416 154880 193168 4070960 3695840
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11960864 150900 193168 3767492 3388684
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 12341972 150652 193168 3386632 3007040
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 12741088 157716 193168 2980452 2608400
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 13012212 159016 193168 2708028 2337108
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 13309296 171804 193168 2398156 2039872
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 13726688 180136 193168 1972432 1622400
total used free shared buff/cache available
Mem: 15879256 14151480 169480 193168 1558296 1197956
Swap: 8060924 352 8060572
**Test to decode the PackageResource.json file in the python console.**
```
python3
>>> fh = open("/var/lib/pulp/imports/Default_Organization/rhel-7-imported/1.0/2022-03-04T17-17-09-11-00/repository-Default_Organization-Red_Hat_Enterprise_Linux_Server-Red_Hat_Enterprise_Linux_7_Server_RPMs_x86_64_7Server_13/pulp_rpm.app.modelresource.PackageResource.json")
>>> import json
>>> json.load(fh)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib64/python3.6/json/__init__.py", line 296, in load
return loads(fp.read(),
File "/usr/lib64/python3.6/codecs.py", line 321, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
MemoryError <===========
```
```
top -o %MEM
top - 11:50:42 up 1 day, 14:42, 3 users, load average: 0.49, 0.15, 0.09
Tasks: 134 total, 3 running, 131 sleeping, 0 stopped, 0 zombie
%Cpu0 : 0.0 us, 3.3 sy, 0.0 ni, 96.7 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu1 : 0.0 us, 3.7 sy, 0.0 ni, 96.3 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu2 : 0.0 us, 7.0 sy, 0.0 ni, 93.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu3 : 31.8 us, 68.2 sy, 0.0 ni, 0.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
KiB Mem : 99.2/19906644 [||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||]
KiB Swap: 1.0/8060924 [| ]
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
10034 root 20 0 20.8g 18.1g 3268 R 100.0 95.4 1:23.00 python3 <==========
```
**Steps to Reproduce:**
1. Prepare a disconnected Satellite with only 15GB RAM to easily reproduce the issue
2. Import a content view with only the rhel-7-server-rpms repo to the disconnected Satellite
Actual results:
Failed with memory error
Expected results:
No error and should consume only reasonable amount of memory.
**Research and proposed potential solutions:**
1) Split the resource.json file into a number of chunk files when exporting the contents.
2) Or keep one large resource.json (a non-standard json file) in which each chunk is separated by a newline, so that the import can read it line by line.
3) Or use a third-party streaming json library, such as ijson, to read the json file in chunks and save memory.
https://bugzilla.redhat.com/show_bug.cgi?id=2061224
| Is this issue distinct from https://github.com/pulp/pulpcore/issues/2072?
What version are you seeing this on? Is there a BZ that can be attached?
@dralley: Yes. This is a different issue from #2072. #2072 fixed only the export part, but unfortunately not the import part. The import still has the memory issue after the fix in the line below.
https://github.com/pulp/pulpcore/pull/1782/files#diff-06badc442eb525dc6f2de7f4e19a7762ce20a972b79b6a90cf1ee80211053ab5L62-R62
I have created a Bugzilla for it:
https://bugzilla.redhat.com/show_bug.cgi?id=2061224 | 2022-05-06T17:15:18 |
|
pulp/pulpcore | 2,687 | pulp__pulpcore-2687 | [
"2307"
] | c01c61f550244dfe85960e99b720c52b2c07dd56 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -10,7 +10,8 @@
from django.core.files.storage import default_storage
from django.db.models import F
-
+from naya.json import stream_array, tokenize
+from io import StringIO
from pkg_resources import DistributionNotFound, get_distribution
from rest_framework.serializers import ValidationError
from tablib import Dataset
@@ -43,6 +44,8 @@
CA_FILE = "pulpcore.app.modelresource.ContentArtifactResource.json"
VERSIONS_FILE = "versions.json"
CONTENT_MAPPING_FILE = "content_mapping.json"
+# How many entities from an import-file should be processed at one time
+IMPORT_BATCH_SIZE = 100
# Concurrent imports w/ overlapping content can collide - how many attempts are we willing to
# make before we decide this is a fatal error?
@@ -58,22 +61,52 @@ def _destination_repo(importer, source_repo_name):
return Repository.objects.get(name=dest_repo_name)
+def _impfile_iterator(fd):
+ """
+ Iterate over an import-file returning batches of rows as a json-array-string.
+
+ We use naya.json.stream_array() to get individual rows; once a batch is gathered,
+ we yield the result of json.dumps() for that batch. Repeat until all rows have been
+ called for.
+ """
+ eof = False
+ batch = []
+ rows = stream_array(tokenize(fd))
+ while not eof:
+ try:
+ while len(batch) < IMPORT_BATCH_SIZE:
+ batch.append(next(rows))
+ except StopIteration:
+ eof = True
+ yield json.dumps(batch)
+ batch.clear()
+
+
def _import_file(fpath, resource_class, retry=False):
+ """
+ Import the specified resource-file in batches to limit memory-use.
+
+ We process resource-files one "batch" at a time. Because of the way django-import's
+ internals work, we have to feed it batches as StringIO-streams of json-formatted strings.
+ The file-to-json-to-string-to-import is overhead, but it lets us put an upper bound on the
+ number of entities in memory at any one time at import-time.
+ """
try:
- log.info(_("Importing file {}.").format(fpath))
+ log.info(f"Importing file {fpath}.")
with open(fpath, "r") as json_file:
- data = Dataset().load(json_file, format="json")
resource = resource_class()
- log.info(_("...Importing resource {}.").format(resource.__class__.__name__))
- if retry:
- curr_attempt = 1
- while curr_attempt < MAX_ATTEMPTS:
- curr_attempt += 1
+ log.info("...Importing resource {resource.__class__.__name__}.")
+ # Load one batch-sized chunk of the specified import-file at a time. If requested,
+ # retry a batch if it looks like we collided with some other repo being imported with
+ # overlapping content.
+ for batch_str in _impfile_iterator(json_file):
+ data = Dataset().load(StringIO(batch_str))
+ if retry:
# django import-export can have a problem with concurrent-imports that are
# importing the same 'thing' (e.g., a Package that exists in two different
# repo-versions that are being imported at the same time). If we're asked to
# retry, we will try an import that will simply record errors as they happen
- # (rather than failing with an exception) first. If errors happen, we'll
+ # (rather than failing with an exception) first. If errors happen, we'll do one
# retry before we give up on this repo-version's import.
a_result = resource.import_data(data, raise_errors=False)
if a_result.has_errors():
@@ -82,16 +115,16 @@ def _import_file(fpath, resource_class, retry=False):
f"...{total_errors} import-errors encountered importing "
"{fpath}, attempt {curr_attempt}, retrying"
)
- # Last attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
+ # Second attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
+ else:
a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(f"FATAL import-failure importing {fpath}")
- raise
- else:
- a_result = resource.import_data(data, raise_errors=True)
- return a_result
+ yield a_result
except AttributeError:
log.error(f"FAILURE loading import-file {fpath}!")
raise
@@ -194,15 +227,19 @@ def import_repository_version(importer_pk, destination_repo_pk, source_repo_name
resulting_content_ids = []
for res_class in cfg.exportable_classes:
+ content_count = 0
filename = f"{res_class.__module__}.{res_class.__name__}.json"
- a_result = _import_file(os.path.join(rv_path, filename), res_class, retry=True)
- resulting_content_ids.extend(
- row.object_id for row in a_result.rows if row.import_type in ("new", "update")
- )
+ for a_result in _import_file(os.path.join(rv_path, filename), res_class, retry=True):
+ content_count += len(a_result.rows)
+ resulting_content_ids.extend(
+ row.object_id for row in a_result.rows if row.import_type in ("new", "update")
+ )
# Once all content exists, create the ContentArtifact links
ca_path = os.path.join(rv_path, CA_FILE)
- _import_file(ca_path, ContentArtifactResource, retry=True)
+ # We don't do anything with the imported batches, we just need to get them imported
+ for a_batch in _import_file(ca_path, ContentArtifactResource, retry=True):
+ pass
# see if we have a content mapping
mapping_path = f"{rv_name}/{CONTENT_MAPPING_FILE}"
@@ -399,20 +436,24 @@ def validate_and_assemble(toc_filename):
_check_versions(version_json)
# Artifacts
- ar_result = _import_file(os.path.join(temp_dir, ARTIFACT_FILE), ArtifactResource)
data = dict(
- message="Importing Artifacts", code="import.artifacts", total=len(ar_result.rows)
+ message="Importing Artifacts",
+ code="import.artifacts",
)
with ProgressReport(**data) as pb:
- for row in pb.iter(ar_result.rows):
- artifact = Artifact.objects.get(pk=row.object_id)
- base_path = os.path.join("artifact", artifact.sha256[0:2], artifact.sha256[2:])
- src = os.path.join(temp_dir, base_path)
-
- if not default_storage.exists(base_path):
- with open(src, "rb") as f:
- default_storage.save(base_path, f)
-
+ # Import artifacts, and place their binary blobs, one batch at a time.
+ # Skip artifacts that already exist in storage.
+ for ar_result in _import_file(os.path.join(temp_dir, ARTIFACT_FILE), ArtifactResource):
+ for row in pb.iter(ar_result.rows):
+ artifact = Artifact.objects.get(pk=row.object_id)
+ base_path = os.path.join("artifact", artifact.sha256[0:2], artifact.sha256[2:])
+ src = os.path.join(temp_dir, base_path)
+
+ if not default_storage.exists(base_path):
+ with open(src, "rb") as f:
+ default_storage.save(base_path, f)
+
+ # Now import repositories, in parallel.
with open(os.path.join(temp_dir, REPO_FILE), "r") as repo_data_file:
data = json.load(repo_data_file)
gpr = GroupProgressReport(
@@ -438,7 +479,7 @@ def validate_and_assemble(toc_filename):
dispatch(
import_repository_version,
exclusive_resources=[dest_repo],
- args=[importer.pk, dest_repo.pk, src_repo["name"], path],
+ args=(importer.pk, dest_repo.pk, src_repo["name"], path),
task_group=task_group,
)
| Memory error when importing large repository
**Description of problem:**
Getting a MemoryError when importing a large repository, such as the rhel-7-server-rpms repo.
The PackageResource.json file of the rhel-7-server-rpms repo is about 5.5GB. Pulp uses the "json.load" method to decode the JSON file. Decoding the JSON uses up to 18GB+ of memory and returns a 5.5GB Python dictionary. If the system doesn't have enough memory, the JSON decode fails with a MemoryError.
hammer content-import version --path /var/lib/pulp/imports/Default_Organization/rhel-7-imported/1.0/2022-03-04T17-17-09-11-00/ --organization-id 1
[................................................ ] [24%]
Error: 1 subtask(s) failed for task group /pulp/api/v3/task-groups/ebc7514a-f606-4965-a599-eab74101e9b0/.
**/var/log/messages**
pulpcore-worker-2: pulp [6eda5a34-529f-42b9-909b-fb526c9f35e0]: pulpcore.tasking.pulpcore_worker:INFO: Task d5f26164-087a-4acb-aca1-7aed30851040 failed ()
pulpcore-worker-2: pulp [6eda5a34-529f-42b9-909b-fb526c9f35e0]: pulpcore.tasking.pulpcore_worker:INFO: File "/usr/lib/python3.6/site-packages/pulpcore/tasking/pulpcore_worker.py", line 317, in _perform_task
pulpcore-worker-2: result = func(*args, **kwargs)
pulpcore-worker-2: File "/usr/lib/python3.6/site-packages/pulpcore/app/tasks/importer.py", line 161, in import_repository_version
pulpcore-worker-2: a_result = _import_file(os.path.join(rv_path, filename), res_class, do_raise=False)
pulpcore-worker-2: File "/usr/lib/python3.6/site-packages/pulpcore/app/tasks/importer.py", line 62, in _import_file
pulpcore-worker-2: data = Dataset().load(json_file.read(), format="json")
pulpcore-worker-2: File "/usr/lib64/python3.6/codecs.py", line 321, in decode
pulpcore-worker-2: (result, consumed) = self._buffer_decode(data, self.errors, final)
**Output of top command**
16011 pulp 20 0 5924132 2.5g 2496 D 25.0 16.7 21:43.78 pulpcore-worker
16011 pulp 20 0 5924132 2.8g 2496 D 6.2 18.7 21:44.76 pulpcore-worker
16011 pulp 20 0 5924132 3.1g 2496 D 12.5 20.7 21:45.55 pulpcore-worker
16011 pulp 20 0 5924132 3.5g 2496 D 12.5 23.2 21:46.38 pulpcore-worker
16011 pulp 20 0 5924132 3.9g 2496 R 25.0 25.7 21:47.39 pulpcore-worker
16011 pulp 20 0 5924132 4.2g 2496 D 12.5 27.7 21:48.38 pulpcore-worker
16011 pulp 20 0 5924132 4.5g 2496 R 66.7 29.5 21:49.35 pulpcore-worker
16011 pulp 20 0 5924132 4.9g 2496 D 18.8 32.2 21:50.58 pulpcore-worker
16011 pulp 20 0 5924132 5.3g 2496 R 26.7 34.7 21:51.53 pulpcore-worker
**Output of free command**
total used free shared buff/cache available
Mem: 15879256 10213472 167648 193168 5498136 5135336
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 10516964 162816 193168 5199476 4832256
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 10774956 179184 193168 4925116 4573800
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11009476 168300 193168 4701480 4339684
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11354424 179644 193168 4345188 3994460
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11653416 154880 193168 4070960 3695840
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11960864 150900 193168 3767492 3388684
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 12341972 150652 193168 3386632 3007040
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 12741088 157716 193168 2980452 2608400
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 13012212 159016 193168 2708028 2337108
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 13309296 171804 193168 2398156 2039872
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 13726688 180136 193168 1972432 1622400
total used free shared buff/cache available
Mem: 15879256 14151480 169480 193168 1558296 1197956
Swap: 8060924 352 8060572
**Test to decode the PackageResource.json file in the python console.**
```
python3
>>> fh = open("/var/lib/pulp/imports/Default_Organization/rhel-7-imported/1.0/2022-03-04T17-17-09-11-00/repository-Default_Organization-Red_Hat_Enterprise_Linux_Server-Red_Hat_Enterprise_Linux_7_Server_RPMs_x86_64_7Server_13/pulp_rpm.app.modelresource.PackageResource.json")
>>> import json
>>> json.load(fh)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib64/python3.6/json/__init__.py", line 296, in load
return loads(fp.read(),
File "/usr/lib64/python3.6/codecs.py", line 321, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
MemoryError <===========
```
```
top -o %MEM
top - 11:50:42 up 1 day, 14:42, 3 users, load average: 0.49, 0.15, 0.09
Tasks: 134 total, 3 running, 131 sleeping, 0 stopped, 0 zombie
%Cpu0 : 0.0 us, 3.3 sy, 0.0 ni, 96.7 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu1 : 0.0 us, 3.7 sy, 0.0 ni, 96.3 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu2 : 0.0 us, 7.0 sy, 0.0 ni, 93.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu3 : 31.8 us, 68.2 sy, 0.0 ni, 0.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
KiB Mem : 99.2/19906644 [||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||]
KiB Swap: 1.0/8060924 [| ]
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
10034 root 20 0 20.8g 18.1g 3268 R 100.0 95.4 1:23.00 python3 <==========
```
**Steps to Reproduce:**
1. Prepare a disconnected Satellite with only 15GB RAM to easily reproduce the issue
2. Import a content view with only the rhel-7-server-rpms repo to the disconnected Satellite
Actual results:
Failed with memory error
Expected results:
No error; the import should consume only a reasonable amount of memory.
**Research and proposed potential solutions:**
1) Split the resource.json file into a number of chunk files when exporting the contents.
2) Or keep one large resource.json (a non-standard JSON file) with each record on its own line, so the import can read it line by line.
3) Or use a streaming JSON library, such as ijson, to read the JSON file in chunks and save memory (see the sketch below).
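For illustration, a minimal sketch of option 3: it assumes the export file is a single top-level JSON array of records; the path handling, batch size, and helper name are assumptions for the sketch, not part of the proposal.
```
# Minimal sketch of option 3: stream the JSON array with ijson so only one
# batch of records is held in memory at a time. Names and sizes are illustrative.
import ijson

def iter_batches(path, batch_size=100):
    """Yield lists of at most batch_size records from a top-level JSON array."""
    with open(path, "rb") as fh:
        batch = []
        for record in ijson.items(fh, "item"):  # one array element at a time
            batch.append(record)
            if len(batch) >= batch_size:
                yield batch
                batch = []
        if batch:
            yield batch
```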
https://bugzilla.redhat.com/show_bug.cgi?id=2061224
| Is this issue distinct from https://github.com/pulp/pulpcore/issues/2072?
What version are you seeing this on? Is there a BZ that can be attached?
@dralley: Yes, this is a different issue from #2072. #2072 fixed only the export part, unfortunately not the import part. The import still has a memory issue even after the fix to the line linked below.
https://github.com/pulp/pulpcore/pull/1782/files#diff-06badc442eb525dc6f2de7f4e19a7762ce20a972b79b6a90cf1ee80211053ab5L62-R62
I had created a bugzilla for it:
https://bugzilla.redhat.com/show_bug.cgi?id=2061224 | 2022-05-06T17:19:15 |
|
pulp/pulpcore | 2,688 | pulp__pulpcore-2688 | [
"2307"
] | cd4cdfba54af840d057e3eaa9f11e56235568328 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -10,7 +10,8 @@
from django.core.files.storage import default_storage
from django.db.models import F
-
+from naya.json import stream_array, tokenize
+from io import StringIO
from pkg_resources import DistributionNotFound, get_distribution
from rest_framework.serializers import ValidationError
from tablib import Dataset
@@ -43,6 +44,8 @@
CA_FILE = "pulpcore.app.modelresource.ContentArtifactResource.json"
VERSIONS_FILE = "versions.json"
CONTENT_MAPPING_FILE = "content_mapping.json"
+# How many entities from an import-file should be processed at one time
+IMPORT_BATCH_SIZE = 100
# Concurrent imports w/ overlapping content can collide - how many attempts are we willing to
# make before we decide this is a fatal error?
@@ -58,22 +61,52 @@ def _destination_repo(importer, source_repo_name):
return Repository.objects.get(name=dest_repo_name)
+def _impfile_iterator(fd):
+ """
+ Iterate over an import-file returning batches of rows as a json-array-string.
+
+ We use naya.json.stream_array() to get individual rows; once a batch is gathered,
+ we yield the result of json.dumps() for that batch. Repeat until all rows have been
+ called for.
+ """
+ eof = False
+ batch = []
+ rows = stream_array(tokenize(fd))
+ while not eof:
+ try:
+ while len(batch) < IMPORT_BATCH_SIZE:
+ batch.append(next(rows))
+ except StopIteration:
+ eof = True
+ yield json.dumps(batch)
+ batch.clear()
+
+
def _import_file(fpath, resource_class, retry=False):
+ """
+ Import the specified resource-file in batches to limit memory-use.
+
+ We process resource-files one "batch" at a time. Because of the way django-import's
+ internals work, we have to feed it batches as StringIO-streams of json-formatted strings.
+ The file-to-json-to-string-to-import is overhead, but it lets us put an upper bound on the
+ number of entities in memory at any one time at import-time.
+ """
try:
- log.info(_("Importing file {}.").format(fpath))
+ log.info(f"Importing file {fpath}.")
with open(fpath, "r") as json_file:
- data = Dataset().load(json_file, format="json")
resource = resource_class()
- log.info(_("...Importing resource {}.").format(resource.__class__.__name__))
- if retry:
- curr_attempt = 1
- while curr_attempt < MAX_ATTEMPTS:
- curr_attempt += 1
+ log.info("...Importing resource {resource.__class__.__name__}.")
+ # Load one batch-sized chunk of the specified import-file at a time. If requested,
+ # retry a batch if it looks like we collided with some other repo being imported with
+ # overlapping content.
+ for batch_str in _impfile_iterator(json_file):
+ data = Dataset().load(StringIO(batch_str))
+ if retry:
# django import-export can have a problem with concurrent-imports that are
# importing the same 'thing' (e.g., a Package that exists in two different
# repo-versions that are being imported at the same time). If we're asked to
# retry, we will try an import that will simply record errors as they happen
- # (rather than failing with an exception) first. If errors happen, we'll
+ # (rather than failing with an exception) first. If errors happen, we'll do one
# retry before we give up on this repo-version's import.
a_result = resource.import_data(data, raise_errors=False)
if a_result.has_errors():
@@ -82,16 +115,16 @@ def _import_file(fpath, resource_class, retry=False):
f"...{total_errors} import-errors encountered importing "
"{fpath}, attempt {curr_attempt}, retrying"
)
- # Last attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
+ # Second attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
+ else:
a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(f"FATAL import-failure importing {fpath}")
- raise
- else:
- a_result = resource.import_data(data, raise_errors=True)
- return a_result
+ yield a_result
except AttributeError:
log.error(f"FAILURE loading import-file {fpath}!")
raise
@@ -194,15 +227,19 @@ def import_repository_version(importer_pk, destination_repo_pk, source_repo_name
resulting_content_ids = []
for res_class in cfg.exportable_classes:
+ content_count = 0
filename = f"{res_class.__module__}.{res_class.__name__}.json"
- a_result = _import_file(os.path.join(rv_path, filename), res_class, retry=True)
- resulting_content_ids.extend(
- row.object_id for row in a_result.rows if row.import_type in ("new", "update")
- )
+ for a_result in _import_file(os.path.join(rv_path, filename), res_class, retry=True):
+ content_count += len(a_result.rows)
+ resulting_content_ids.extend(
+ row.object_id for row in a_result.rows if row.import_type in ("new", "update")
+ )
# Once all content exists, create the ContentArtifact links
ca_path = os.path.join(rv_path, CA_FILE)
- _import_file(ca_path, ContentArtifactResource, retry=True)
+ # We don't do anything with the imported batches, we just need to get them imported
+ for a_batch in _import_file(ca_path, ContentArtifactResource, retry=True):
+ pass
# see if we have a content mapping
mapping_path = f"{rv_name}/{CONTENT_MAPPING_FILE}"
@@ -399,20 +436,24 @@ def validate_and_assemble(toc_filename):
_check_versions(version_json)
# Artifacts
- ar_result = _import_file(os.path.join(temp_dir, ARTIFACT_FILE), ArtifactResource)
data = dict(
- message="Importing Artifacts", code="import.artifacts", total=len(ar_result.rows)
+ message="Importing Artifacts",
+ code="import.artifacts",
)
with ProgressReport(**data) as pb:
- for row in pb.iter(ar_result.rows):
- artifact = Artifact.objects.get(pk=row.object_id)
- base_path = os.path.join("artifact", artifact.sha256[0:2], artifact.sha256[2:])
- src = os.path.join(temp_dir, base_path)
-
- if not default_storage.exists(base_path):
- with open(src, "rb") as f:
- default_storage.save(base_path, f)
-
+ # Import artifacts, and place their binary blobs, one batch at a time.
+ # Skip artifacts that already exist in storage.
+ for ar_result in _import_file(os.path.join(temp_dir, ARTIFACT_FILE), ArtifactResource):
+ for row in pb.iter(ar_result.rows):
+ artifact = Artifact.objects.get(pk=row.object_id)
+ base_path = os.path.join("artifact", artifact.sha256[0:2], artifact.sha256[2:])
+ src = os.path.join(temp_dir, base_path)
+
+ if not default_storage.exists(base_path):
+ with open(src, "rb") as f:
+ default_storage.save(base_path, f)
+
+ # Now import repositories, in parallel.
with open(os.path.join(temp_dir, REPO_FILE), "r") as repo_data_file:
data = json.load(repo_data_file)
gpr = GroupProgressReport(
@@ -438,7 +479,7 @@ def validate_and_assemble(toc_filename):
dispatch(
import_repository_version,
exclusive_resources=[dest_repo],
- args=[importer.pk, dest_repo.pk, src_repo["name"], path],
+ args=(importer.pk, dest_repo.pk, src_repo["name"], path),
task_group=task_group,
)
| Memory error when importing large repository
**Description of problem:**
Getting a MemoryError when importing a large repository, such as the rhel-7-server-rpms repo.
The PackageResource.json file of the rhel-7-server-rpms repo is about 5.5GB. Pulp uses the "json.load" method to decode the JSON file. Decoding the JSON uses up to 18GB+ of memory and returns a 5.5GB Python dictionary. If the system doesn't have enough memory, the JSON decode fails with a MemoryError.
hammer content-import version --path /var/lib/pulp/imports/Default_Organization/rhel-7-imported/1.0/2022-03-04T17-17-09-11-00/ --organization-id 1
[................................................ ] [24%]
Error: 1 subtask(s) failed for task group /pulp/api/v3/task-groups/ebc7514a-f606-4965-a599-eab74101e9b0/.
**/var/log/messages**
pulpcore-worker-2: pulp [6eda5a34-529f-42b9-909b-fb526c9f35e0]: pulpcore.tasking.pulpcore_worker:INFO: Task d5f26164-087a-4acb-aca1-7aed30851040 failed ()
pulpcore-worker-2: pulp [6eda5a34-529f-42b9-909b-fb526c9f35e0]: pulpcore.tasking.pulpcore_worker:INFO: File "/usr/lib/python3.6/site-packages/pulpcore/tasking/pulpcore_worker.py", line 317, in _perform_task
pulpcore-worker-2: result = func(*args, **kwargs)
pulpcore-worker-2: File "/usr/lib/python3.6/site-packages/pulpcore/app/tasks/importer.py", line 161, in import_repository_version
pulpcore-worker-2: a_result = _import_file(os.path.join(rv_path, filename), res_class, do_raise=False)
pulpcore-worker-2: File "/usr/lib/python3.6/site-packages/pulpcore/app/tasks/importer.py", line 62, in _import_file
pulpcore-worker-2: data = Dataset().load(json_file.read(), format="json")
pulpcore-worker-2: File "/usr/lib64/python3.6/codecs.py", line 321, in decode
pulpcore-worker-2: (result, consumed) = self._buffer_decode(data, self.errors, final)
**Output of top command**
16011 pulp 20 0 5924132 2.5g 2496 D 25.0 16.7 21:43.78 pulpcore-worker
16011 pulp 20 0 5924132 2.8g 2496 D 6.2 18.7 21:44.76 pulpcore-worker
16011 pulp 20 0 5924132 3.1g 2496 D 12.5 20.7 21:45.55 pulpcore-worker
16011 pulp 20 0 5924132 3.5g 2496 D 12.5 23.2 21:46.38 pulpcore-worker
16011 pulp 20 0 5924132 3.9g 2496 R 25.0 25.7 21:47.39 pulpcore-worker
16011 pulp 20 0 5924132 4.2g 2496 D 12.5 27.7 21:48.38 pulpcore-worker
16011 pulp 20 0 5924132 4.5g 2496 R 66.7 29.5 21:49.35 pulpcore-worker
16011 pulp 20 0 5924132 4.9g 2496 D 18.8 32.2 21:50.58 pulpcore-worker
16011 pulp 20 0 5924132 5.3g 2496 R 26.7 34.7 21:51.53 pulpcore-worker
**Output of free command**
total used free shared buff/cache available
Mem: 15879256 10213472 167648 193168 5498136 5135336
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 10516964 162816 193168 5199476 4832256
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 10774956 179184 193168 4925116 4573800
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11009476 168300 193168 4701480 4339684
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11354424 179644 193168 4345188 3994460
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11653416 154880 193168 4070960 3695840
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11960864 150900 193168 3767492 3388684
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 12341972 150652 193168 3386632 3007040
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 12741088 157716 193168 2980452 2608400
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 13012212 159016 193168 2708028 2337108
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 13309296 171804 193168 2398156 2039872
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 13726688 180136 193168 1972432 1622400
total used free shared buff/cache available
Mem: 15879256 14151480 169480 193168 1558296 1197956
Swap: 8060924 352 8060572
**Test to decode the PackageResource.json file in the python console.**
```
python3
>>> fh = open("/var/lib/pulp/imports/Default_Organization/rhel-7-imported/1.0/2022-03-04T17-17-09-11-00/repository-Default_Organization-Red_Hat_Enterprise_Linux_Server-Red_Hat_Enterprise_Linux_7_Server_RPMs_x86_64_7Server_13/pulp_rpm.app.modelresource.PackageResource.json")
>>> import json
>>> json.load(fh)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib64/python3.6/json/__init__.py", line 296, in load
return loads(fp.read(),
File "/usr/lib64/python3.6/codecs.py", line 321, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
MemoryError <===========
```
```
top -o %MEM
top - 11:50:42 up 1 day, 14:42, 3 users, load average: 0.49, 0.15, 0.09
Tasks: 134 total, 3 running, 131 sleeping, 0 stopped, 0 zombie
%Cpu0 : 0.0 us, 3.3 sy, 0.0 ni, 96.7 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu1 : 0.0 us, 3.7 sy, 0.0 ni, 96.3 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu2 : 0.0 us, 7.0 sy, 0.0 ni, 93.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu3 : 31.8 us, 68.2 sy, 0.0 ni, 0.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
KiB Mem : 99.2/19906644 [||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||]
KiB Swap: 1.0/8060924 [| ]
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
10034 root 20 0 20.8g 18.1g 3268 R 100.0 95.4 1:23.00 python3 <==========
```
**Steps to Reproduce:**
1. Prepare a disconnected Satellite with only 15GB RAM to easily reproduce the issue
2. Import a content view with only the rhel-7-server-rpms repo to the disconnected Satellite
Actual results:
Failed with memory error
Expected results:
No error; the import should consume only a reasonable amount of memory.
**Research and proposed potential solutions:**
1) Split the resource.json file into a number of chunk files when exporting the contents.
2) Or keep one large resource.json (a non-standard JSON file) with each record on its own line, so the import can read it line by line (see the sketch after this list).
3) Or use a streaming JSON library, such as ijson, to read the JSON file in chunks and save memory.
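For illustration, a minimal sketch of option 2, assuming the exporter writes one JSON record per line (JSON Lines) instead of a single large array; the function name and path handling are assumptions for the sketch.
```
# Minimal sketch of option 2: read a JSON-lines export file one record at a
# time, so memory use stays bounded regardless of file size.
import json

def iter_records(path):
    """Yield one decoded record per non-empty line of a JSON-lines file."""
    with open(path, "r") as fh:
        for line in fh:
            line = line.strip()
            if line:
                yield json.loads(line)
```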
https://bugzilla.redhat.com/show_bug.cgi?id=2061224
| Is this issue distinct from https://github.com/pulp/pulpcore/issues/2072?
What version are you seeing this on? Is there a BZ that can be attached?
@dralley: Yes, this is a different issue from #2072. #2072 fixed only the export part, unfortunately not the import part. The import still has a memory issue even after the fix to the line linked below.
https://github.com/pulp/pulpcore/pull/1782/files#diff-06badc442eb525dc6f2de7f4e19a7762ce20a972b79b6a90cf1ee80211053ab5L62-R62
I had created a bugzilla for it:
https://bugzilla.redhat.com/show_bug.cgi?id=2061224 | 2022-05-06T17:22:08 |
|
pulp/pulpcore | 2,689 | pulp__pulpcore-2689 | [
"2307"
] | 56c0e4ee558b2394529a64d8e9ecfa37c6dffb3d | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -8,10 +8,10 @@
from gettext import gettext as _
from logging import getLogger
-from django.conf import settings
from django.core.files.storage import default_storage
from django.db.models import F
-
+from naya.json import stream_array, tokenize
+from io import StringIO
from pkg_resources import DistributionNotFound, get_distribution
from rest_framework.serializers import ValidationError
from tablib import Dataset
@@ -44,6 +44,8 @@
CA_FILE = "pulpcore.app.modelresource.ContentArtifactResource.json"
VERSIONS_FILE = "versions.json"
CONTENT_MAPPING_FILE = "content_mapping.json"
+# How many entities from an import-file should be processed at one time
+IMPORT_BATCH_SIZE = 100
# Concurrent imports w/ overlapping content can collide - how many attempts are we willing to
# make before we decide this is a fatal error?
@@ -59,22 +61,52 @@ def _destination_repo(importer, source_repo_name):
return Repository.objects.get(name=dest_repo_name)
+def _impfile_iterator(fd):
+ """
+ Iterate over an import-file returning batches of rows as a json-array-string.
+
+ We use naya.json.stream_array() to get individual rows; once a batch is gathered,
+ we yield the result of json.dumps() for that batch. Repeat until all rows have been
+ called for.
+ """
+ eof = False
+ batch = []
+ rows = stream_array(tokenize(fd))
+ while not eof:
+ try:
+ while len(batch) < IMPORT_BATCH_SIZE:
+ batch.append(next(rows))
+ except StopIteration:
+ eof = True
+ yield json.dumps(batch)
+ batch.clear()
+
+
def _import_file(fpath, resource_class, retry=False):
+ """
+ Import the specified resource-file in batches to limit memory-use.
+
+ We process resource-files one "batch" at a time. Because of the way django-import's
+ internals work, we have to feed it batches as StringIO-streams of json-formatted strings.
+ The file-to-json-to-string-to-import is overhead, but it lets us put an upper bound on the
+ number of entities in memory at any one time at import-time.
+ """
try:
- log.info(_("Importing file {}.").format(fpath))
+ log.info(f"Importing file {fpath}.")
with open(fpath, "r") as json_file:
- data = Dataset().load(json_file.read(), format="json")
resource = resource_class()
- log.info(_("...Importing resource {}.").format(resource.__class__.__name__))
- if retry:
- curr_attempt = 1
- while curr_attempt < MAX_ATTEMPTS:
- curr_attempt += 1
+ log.info("...Importing resource {resource.__class__.__name__}.")
+ # Load one batch-sized chunk of the specified import-file at a time. If requested,
+ # retry a batch if it looks like we collided with some other repo being imported with
+ # overlapping content.
+ for batch_str in _impfile_iterator(json_file):
+ data = Dataset().load(StringIO(batch_str))
+ if retry:
# django import-export can have a problem with concurrent-imports that are
# importing the same 'thing' (e.g., a Package that exists in two different
# repo-versions that are being imported at the same time). If we're asked to
# retry, we will try an import that will simply record errors as they happen
- # (rather than failing with an exception) first. If errors happen, we'll
+ # (rather than failing with an exception) first. If errors happen, we'll do one
# retry before we give up on this repo-version's import.
a_result = resource.import_data(data, raise_errors=False)
if a_result.has_errors():
@@ -83,16 +115,16 @@ def _import_file(fpath, resource_class, retry=False):
f"...{total_errors} import-errors encountered importing "
"{fpath}, attempt {curr_attempt}, retrying"
)
- # Last attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
+ # Second attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
+ else:
a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(f"FATAL import-failure importing {fpath}")
- raise
- else:
- a_result = resource.import_data(data, raise_errors=True)
- return a_result
+ yield a_result
except AttributeError:
log.error(f"FAILURE loading import-file {fpath}!")
raise
@@ -195,15 +227,19 @@ def import_repository_version(importer_pk, destination_repo_pk, source_repo_name
resulting_content_ids = []
for res_class in cfg.exportable_classes:
+ content_count = 0
filename = f"{res_class.__module__}.{res_class.__name__}.json"
- a_result = _import_file(os.path.join(rv_path, filename), res_class, retry=True)
- resulting_content_ids.extend(
- row.object_id for row in a_result.rows if row.import_type in ("new", "update")
- )
+ for a_result in _import_file(os.path.join(rv_path, filename), res_class, retry=True):
+ content_count += len(a_result.rows)
+ resulting_content_ids.extend(
+ row.object_id for row in a_result.rows if row.import_type in ("new", "update")
+ )
# Once all content exists, create the ContentArtifact links
ca_path = os.path.join(rv_path, CA_FILE)
- _import_file(ca_path, ContentArtifactResource, retry=True)
+ # We don't do anything with the imported batches, we just need to get them imported
+ for a_batch in _import_file(ca_path, ContentArtifactResource, retry=True):
+ pass
# see if we have a content mapping
mapping_path = f"{rv_name}/{CONTENT_MAPPING_FILE}"
@@ -404,21 +440,24 @@ def validate_and_assemble(toc_filename):
_check_versions(version_json)
# Artifacts
- ar_result = _import_file(os.path.join(temp_dir, ARTIFACT_FILE), ArtifactResource)
data = dict(
- message="Importing Artifacts", code="import.artifacts", total=len(ar_result.rows)
+ message="Importing Artifacts",
+ code="import.artifacts",
)
with ProgressReport(**data) as pb:
- for row in pb.iter(ar_result.rows):
- artifact = Artifact.objects.get(pk=row.object_id)
- base_path = os.path.join("artifact", artifact.sha256[0:2], artifact.sha256[2:])
- src = os.path.join(temp_dir, base_path)
- dest = os.path.join(settings.MEDIA_ROOT, base_path)
-
- if not default_storage.exists(dest):
- with open(src, "rb") as f:
- default_storage.save(dest, f)
-
+ # Import artifacts, and place their binary blobs, one batch at a time.
+ # Skip artifacts that already exist in storage.
+ for ar_result in _import_file(os.path.join(temp_dir, ARTIFACT_FILE), ArtifactResource):
+ for row in pb.iter(ar_result.rows):
+ artifact = Artifact.objects.get(pk=row.object_id)
+ base_path = os.path.join("artifact", artifact.sha256[0:2], artifact.sha256[2:])
+ src = os.path.join(temp_dir, base_path)
+
+ if not default_storage.exists(base_path):
+ with open(src, "rb") as f:
+ default_storage.save(base_path, f)
+
+ # Now import repositories, in parallel.
with open(os.path.join(temp_dir, REPO_FILE), "r") as repo_data_file:
data = json.load(repo_data_file)
gpr = GroupProgressReport(
@@ -444,7 +483,7 @@ def validate_and_assemble(toc_filename):
dispatch(
import_repository_version,
exclusive_resources=[dest_repo],
- args=[importer.pk, dest_repo.pk, src_repo["name"], path],
+ args=(importer.pk, dest_repo.pk, src_repo["name"], path),
task_group=task_group,
)
| Memory error when importing large repository
**Description of problem:**
Getting a MemoryError when importing a large repository, such as the rhel-7-server-rpms repo.
The PackageResource.json file of the rhel-7-server-rpms repo is about 5.5GB. Pulp uses the "json.load" method to decode the JSON file. Decoding the JSON uses up to 18GB+ of memory and returns a 5.5GB Python dictionary. If the system doesn't have enough memory, the JSON decode fails with a MemoryError.
hammer content-import version --path /var/lib/pulp/imports/Default_Organization/rhel-7-imported/1.0/2022-03-04T17-17-09-11-00/ --organization-id 1
[................................................ ] [24%]
Error: 1 subtask(s) failed for task group /pulp/api/v3/task-groups/ebc7514a-f606-4965-a599-eab74101e9b0/.
**/var/log/messages**
pulpcore-worker-2: pulp [6eda5a34-529f-42b9-909b-fb526c9f35e0]: pulpcore.tasking.pulpcore_worker:INFO: Task d5f26164-087a-4acb-aca1-7aed30851040 failed ()
pulpcore-worker-2: pulp [6eda5a34-529f-42b9-909b-fb526c9f35e0]: pulpcore.tasking.pulpcore_worker:INFO: File "/usr/lib/python3.6/site-packages/pulpcore/tasking/pulpcore_worker.py", line 317, in _perform_task
pulpcore-worker-2: result = func(*args, **kwargs)
pulpcore-worker-2: File "/usr/lib/python3.6/site-packages/pulpcore/app/tasks/importer.py", line 161, in import_repository_version
pulpcore-worker-2: a_result = _import_file(os.path.join(rv_path, filename), res_class, do_raise=False)
pulpcore-worker-2: File "/usr/lib/python3.6/site-packages/pulpcore/app/tasks/importer.py", line 62, in _import_file
pulpcore-worker-2: data = Dataset().load(json_file.read(), format="json")
pulpcore-worker-2: File "/usr/lib64/python3.6/codecs.py", line 321, in decode
pulpcore-worker-2: (result, consumed) = self._buffer_decode(data, self.errors, final)
**Output of top command**
16011 pulp 20 0 5924132 2.5g 2496 D 25.0 16.7 21:43.78 pulpcore-worker
16011 pulp 20 0 5924132 2.8g 2496 D 6.2 18.7 21:44.76 pulpcore-worker
16011 pulp 20 0 5924132 3.1g 2496 D 12.5 20.7 21:45.55 pulpcore-worker
16011 pulp 20 0 5924132 3.5g 2496 D 12.5 23.2 21:46.38 pulpcore-worker
16011 pulp 20 0 5924132 3.9g 2496 R 25.0 25.7 21:47.39 pulpcore-worker
16011 pulp 20 0 5924132 4.2g 2496 D 12.5 27.7 21:48.38 pulpcore-worker
16011 pulp 20 0 5924132 4.5g 2496 R 66.7 29.5 21:49.35 pulpcore-worker
16011 pulp 20 0 5924132 4.9g 2496 D 18.8 32.2 21:50.58 pulpcore-worker
16011 pulp 20 0 5924132 5.3g 2496 R 26.7 34.7 21:51.53 pulpcore-worker
**Output of free command**
total used free shared buff/cache available
Mem: 15879256 10213472 167648 193168 5498136 5135336
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 10516964 162816 193168 5199476 4832256
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 10774956 179184 193168 4925116 4573800
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11009476 168300 193168 4701480 4339684
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11354424 179644 193168 4345188 3994460
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11653416 154880 193168 4070960 3695840
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11960864 150900 193168 3767492 3388684
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 12341972 150652 193168 3386632 3007040
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 12741088 157716 193168 2980452 2608400
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 13012212 159016 193168 2708028 2337108
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 13309296 171804 193168 2398156 2039872
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 13726688 180136 193168 1972432 1622400
total used free shared buff/cache available
Mem: 15879256 14151480 169480 193168 1558296 1197956
Swap: 8060924 352 8060572
**Test to decode the PackageResource.json file in the python console.**
```
python3
>>> fh = open("/var/lib/pulp/imports/Default_Organization/rhel-7-imported/1.0/2022-03-04T17-17-09-11-00/repository-Default_Organization-Red_Hat_Enterprise_Linux_Server-Red_Hat_Enterprise_Linux_7_Server_RPMs_x86_64_7Server_13/pulp_rpm.app.modelresource.PackageResource.json")
>>> import json
>>> json.load(fh)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib64/python3.6/json/__init__.py", line 296, in load
return loads(fp.read(),
File "/usr/lib64/python3.6/codecs.py", line 321, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
MemoryError <===========
```
```
top -o %MEM
top - 11:50:42 up 1 day, 14:42, 3 users, load average: 0.49, 0.15, 0.09
Tasks: 134 total, 3 running, 131 sleeping, 0 stopped, 0 zombie
%Cpu0 : 0.0 us, 3.3 sy, 0.0 ni, 96.7 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu1 : 0.0 us, 3.7 sy, 0.0 ni, 96.3 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu2 : 0.0 us, 7.0 sy, 0.0 ni, 93.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu3 : 31.8 us, 68.2 sy, 0.0 ni, 0.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
KiB Mem : 99.2/19906644 [||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||]
KiB Swap: 1.0/8060924 [| ]
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
10034 root 20 0 20.8g 18.1g 3268 R 100.0 95.4 1:23.00 python3 <==========
```
**Steps to Reproduce:**
1. Prepare a disconnected Satellite with only 15GB RAM to easily reproduce the issue
2. Import a content view with only the rhel-7-server-rpms repo to the disconnected Satellite
Actual results:
Failed with memory error
Expected results:
No error; the import should consume only a reasonable amount of memory.
**Research and proposed potential solutions:**
1) Split the resource.json file into a number of chunk files when exporting the contents (see the sketch after this list).
2) Or keep one large resource.json (a non-standard JSON file) with each record on its own line, so the import can read it line by line.
3) Or use a streaming JSON library, such as ijson, to read the JSON file in chunks and save memory.
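For illustration, a minimal sketch of option 1, writing the export as numbered chunk files so the importer can process one file at a time; the chunk size, file naming, and helper names are assumptions for the sketch.
```
# Minimal sketch of option 1: split the exported records across numbered
# chunk files of bounded size instead of one huge resource.json.
import json
import os

def export_in_chunks(records, dest_dir, chunk_size=1000):
    """Write records to dest_dir as numbered JSON files of at most chunk_size each."""
    chunk, index = [], 0
    for record in records:
        chunk.append(record)
        if len(chunk) >= chunk_size:
            _write_chunk(chunk, dest_dir, index)
            chunk, index = [], index + 1
    if chunk:
        _write_chunk(chunk, dest_dir, index)

def _write_chunk(chunk, dest_dir, index):
    with open(os.path.join(dest_dir, f"PackageResource.{index:04d}.json"), "w") as fh:
        json.dump(chunk, fh)
```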
https://bugzilla.redhat.com/show_bug.cgi?id=2061224
| Is this issue distinct from https://github.com/pulp/pulpcore/issues/2072?
What version are you seeing this on? Is there a BZ that can be attached?
@dralley: Yes, this is a different issue from #2072. #2072 fixed only the export part, unfortunately not the import part. The import still has a memory issue even after the fix to the line linked below.
https://github.com/pulp/pulpcore/pull/1782/files#diff-06badc442eb525dc6f2de7f4e19a7762ce20a972b79b6a90cf1ee80211053ab5L62-R62
I had created a bugzilla for it:
https://bugzilla.redhat.com/show_bug.cgi?id=2061224 | 2022-05-06T17:28:06 |
|
pulp/pulpcore | 2,690 | pulp__pulpcore-2690 | [
"2307"
] | df6787d320735d614f38ec61bdd493461b0e85f5 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -10,7 +10,8 @@
from django.core.files.storage import default_storage
from django.db.models import F
-
+from naya.json import stream_array, tokenize
+from io import StringIO
from pkg_resources import DistributionNotFound, get_distribution
from rest_framework.serializers import ValidationError
from tablib import Dataset
@@ -43,6 +44,8 @@
CA_FILE = "pulpcore.app.modelresource.ContentArtifactResource.json"
VERSIONS_FILE = "versions.json"
CONTENT_MAPPING_FILE = "content_mapping.json"
+# How many entities from an import-file should be processed at one time
+IMPORT_BATCH_SIZE = 100
# Concurrent imports w/ overlapping content can collide - how many attempts are we willing to
# make before we decide this is a fatal error?
@@ -58,22 +61,52 @@ def _destination_repo(importer, source_repo_name):
return Repository.objects.get(name=dest_repo_name)
+def _impfile_iterator(fd):
+ """
+ Iterate over an import-file returning batches of rows as a json-array-string.
+
+ We use naya.json.stream_array() to get individual rows; once a batch is gathered,
+ we yield the result of json.dumps() for that batch. Repeat until all rows have been
+ called for.
+ """
+ eof = False
+ batch = []
+ rows = stream_array(tokenize(fd))
+ while not eof:
+ try:
+ while len(batch) < IMPORT_BATCH_SIZE:
+ batch.append(next(rows))
+ except StopIteration:
+ eof = True
+ yield json.dumps(batch)
+ batch.clear()
+
+
def _import_file(fpath, resource_class, retry=False):
+ """
+ Import the specified resource-file in batches to limit memory-use.
+
+ We process resource-files one "batch" at a time. Because of the way django-import's
+ internals work, we have to feed it batches as StringIO-streams of json-formatted strings.
+ The file-to-json-to-string-to-import is overhead, but it lets us put an upper bound on the
+ number of entities in memory at any one time at import-time.
+ """
try:
- log.info(_("Importing file {}.").format(fpath))
+ log.info(f"Importing file {fpath}.")
with open(fpath, "r") as json_file:
- data = Dataset().load(json_file, format="json")
resource = resource_class()
- log.info(_("...Importing resource {}.").format(resource.__class__.__name__))
- if retry:
- curr_attempt = 1
- while curr_attempt < MAX_ATTEMPTS:
- curr_attempt += 1
+ log.info("...Importing resource {resource.__class__.__name__}.")
+ # Load one batch-sized chunk of the specified import-file at a time. If requested,
+ # retry a batch if it looks like we collided with some other repo being imported with
+ # overlapping content.
+ for batch_str in _impfile_iterator(json_file):
+ data = Dataset().load(StringIO(batch_str))
+ if retry:
# django import-export can have a problem with concurrent-imports that are
# importing the same 'thing' (e.g., a Package that exists in two different
# repo-versions that are being imported at the same time). If we're asked to
# retry, we will try an import that will simply record errors as they happen
- # (rather than failing with an exception) first. If errors happen, we'll
+ # (rather than failing with an exception) first. If errors happen, we'll do one
# retry before we give up on this repo-version's import.
a_result = resource.import_data(data, raise_errors=False)
if a_result.has_errors():
@@ -82,16 +115,16 @@ def _import_file(fpath, resource_class, retry=False):
f"...{total_errors} import-errors encountered importing "
"{fpath}, attempt {curr_attempt}, retrying"
)
- # Last attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
+ # Second attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
+ else:
a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(f"FATAL import-failure importing {fpath}")
- raise
- else:
- a_result = resource.import_data(data, raise_errors=True)
- return a_result
+ yield a_result
except AttributeError:
log.error(f"FAILURE loading import-file {fpath}!")
raise
@@ -194,15 +227,19 @@ def import_repository_version(importer_pk, destination_repo_pk, source_repo_name
resulting_content_ids = []
for res_class in cfg.exportable_classes:
+ content_count = 0
filename = f"{res_class.__module__}.{res_class.__name__}.json"
- a_result = _import_file(os.path.join(rv_path, filename), res_class, retry=True)
- resulting_content_ids.extend(
- row.object_id for row in a_result.rows if row.import_type in ("new", "update")
- )
+ for a_result in _import_file(os.path.join(rv_path, filename), res_class, retry=True):
+ content_count += len(a_result.rows)
+ resulting_content_ids.extend(
+ row.object_id for row in a_result.rows if row.import_type in ("new", "update")
+ )
# Once all content exists, create the ContentArtifact links
ca_path = os.path.join(rv_path, CA_FILE)
- _import_file(ca_path, ContentArtifactResource, retry=True)
+ # We don't do anything with the imported batches, we just need to get them imported
+ for a_batch in _import_file(ca_path, ContentArtifactResource, retry=True):
+ pass
# see if we have a content mapping
mapping_path = f"{rv_name}/{CONTENT_MAPPING_FILE}"
@@ -403,20 +440,24 @@ def validate_and_assemble(toc_filename):
_check_versions(version_json)
# Artifacts
- ar_result = _import_file(os.path.join(temp_dir, ARTIFACT_FILE), ArtifactResource)
data = dict(
- message="Importing Artifacts", code="import.artifacts", total=len(ar_result.rows)
+ message="Importing Artifacts",
+ code="import.artifacts",
)
with ProgressReport(**data) as pb:
- for row in pb.iter(ar_result.rows):
- artifact = Artifact.objects.get(pk=row.object_id)
- base_path = os.path.join("artifact", artifact.sha256[0:2], artifact.sha256[2:])
- src = os.path.join(temp_dir, base_path)
-
- if not default_storage.exists(base_path):
- with open(src, "rb") as f:
- default_storage.save(base_path, f)
-
+ # Import artifacts, and place their binary blobs, one batch at a time.
+ # Skip artifacts that already exist in storage.
+ for ar_result in _import_file(os.path.join(temp_dir, ARTIFACT_FILE), ArtifactResource):
+ for row in pb.iter(ar_result.rows):
+ artifact = Artifact.objects.get(pk=row.object_id)
+ base_path = os.path.join("artifact", artifact.sha256[0:2], artifact.sha256[2:])
+ src = os.path.join(temp_dir, base_path)
+
+ if not default_storage.exists(base_path):
+ with open(src, "rb") as f:
+ default_storage.save(base_path, f)
+
+ # Now import repositories, in parallel.
with open(os.path.join(temp_dir, REPO_FILE), "r") as repo_data_file:
data = json.load(repo_data_file)
gpr = GroupProgressReport(
| Memory error when importing large repository
**Description of problem:**
Getting a MemoryError when importing a large repository, such as the rhel-7-server-rpms repo.
The PackageResource.json file of the rhel-7-server-rpms repo is about 5.5GB. Pulp uses the "json.load" method to decode the JSON file. Decoding the JSON uses up to 18GB+ of memory and returns a 5.5GB Python dictionary. If the system doesn't have enough memory, the JSON decode fails with a MemoryError.
hammer content-import version --path /var/lib/pulp/imports/Default_Organization/rhel-7-imported/1.0/2022-03-04T17-17-09-11-00/ --organization-id 1
[................................................ ] [24%]
Error: 1 subtask(s) failed for task group /pulp/api/v3/task-groups/ebc7514a-f606-4965-a599-eab74101e9b0/.
**/var/log/messages**
pulpcore-worker-2: pulp [6eda5a34-529f-42b9-909b-fb526c9f35e0]: pulpcore.tasking.pulpcore_worker:INFO: Task d5f26164-087a-4acb-aca1-7aed30851040 failed ()
pulpcore-worker-2: pulp [6eda5a34-529f-42b9-909b-fb526c9f35e0]: pulpcore.tasking.pulpcore_worker:INFO: File "/usr/lib/python3.6/site-packages/pulpcore/tasking/pulpcore_worker.py", line 317, in _perform_task
pulpcore-worker-2: result = func(*args, **kwargs)
pulpcore-worker-2: File "/usr/lib/python3.6/site-packages/pulpcore/app/tasks/importer.py", line 161, in import_repository_version
pulpcore-worker-2: a_result = _import_file(os.path.join(rv_path, filename), res_class, do_raise=False)
pulpcore-worker-2: File "/usr/lib/python3.6/site-packages/pulpcore/app/tasks/importer.py", line 62, in _import_file
pulpcore-worker-2: data = Dataset().load(json_file.read(), format="json")
pulpcore-worker-2: File "/usr/lib64/python3.6/codecs.py", line 321, in decode
pulpcore-worker-2: (result, consumed) = self._buffer_decode(data, self.errors, final)
**Output of top command**
16011 pulp 20 0 5924132 2.5g 2496 D 25.0 16.7 21:43.78 pulpcore-worker
16011 pulp 20 0 5924132 2.8g 2496 D 6.2 18.7 21:44.76 pulpcore-worker
16011 pulp 20 0 5924132 3.1g 2496 D 12.5 20.7 21:45.55 pulpcore-worker
16011 pulp 20 0 5924132 3.5g 2496 D 12.5 23.2 21:46.38 pulpcore-worker
16011 pulp 20 0 5924132 3.9g 2496 R 25.0 25.7 21:47.39 pulpcore-worker
16011 pulp 20 0 5924132 4.2g 2496 D 12.5 27.7 21:48.38 pulpcore-worker
16011 pulp 20 0 5924132 4.5g 2496 R 66.7 29.5 21:49.35 pulpcore-worker
16011 pulp 20 0 5924132 4.9g 2496 D 18.8 32.2 21:50.58 pulpcore-worker
16011 pulp 20 0 5924132 5.3g 2496 R 26.7 34.7 21:51.53 pulpcore-worker
**Output of free command**
total used free shared buff/cache available
Mem: 15879256 10213472 167648 193168 5498136 5135336
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 10516964 162816 193168 5199476 4832256
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 10774956 179184 193168 4925116 4573800
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11009476 168300 193168 4701480 4339684
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11354424 179644 193168 4345188 3994460
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11653416 154880 193168 4070960 3695840
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 11960864 150900 193168 3767492 3388684
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 12341972 150652 193168 3386632 3007040
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 12741088 157716 193168 2980452 2608400
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 13012212 159016 193168 2708028 2337108
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 13309296 171804 193168 2398156 2039872
Swap: 8060924 352 8060572
total used free shared buff/cache available
Mem: 15879256 13726688 180136 193168 1972432 1622400
total used free shared buff/cache available
Mem: 15879256 14151480 169480 193168 1558296 1197956
Swap: 8060924 352 8060572
**Test to decode the PackageResource.json file in the python console.**
```
python3
>>> fh = open("/var/lib/pulp/imports/Default_Organization/rhel-7-imported/1.0/2022-03-04T17-17-09-11-00/repository-Default_Organization-Red_Hat_Enterprise_Linux_Server-Red_Hat_Enterprise_Linux_7_Server_RPMs_x86_64_7Server_13/pulp_rpm.app.modelresource.PackageResource.json")
>>> import json
>>> json.load(fh)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib64/python3.6/json/__init__.py", line 296, in load
return loads(fp.read(),
File "/usr/lib64/python3.6/codecs.py", line 321, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
MemoryError <===========
```
```
top -o %MEM
top - 11:50:42 up 1 day, 14:42, 3 users, load average: 0.49, 0.15, 0.09
Tasks: 134 total, 3 running, 131 sleeping, 0 stopped, 0 zombie
%Cpu0 : 0.0 us, 3.3 sy, 0.0 ni, 96.7 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu1 : 0.0 us, 3.7 sy, 0.0 ni, 96.3 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu2 : 0.0 us, 7.0 sy, 0.0 ni, 93.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu3 : 31.8 us, 68.2 sy, 0.0 ni, 0.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
KiB Mem : 99.2/19906644 [||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||]
KiB Swap: 1.0/8060924 [| ]
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
10034 root 20 0 20.8g 18.1g 3268 R 100.0 95.4 1:23.00 python3 <==========
```
**Steps to Reproduce:**
1. Prepare a disconnected Satellite with only 15GB RAM to easily reproduce the issue
2. Import a content view with only the rhel-7-server-rpms repo to the disconnected Satellite
Actual results:
Failed with memory error
Expected results:
No error; the import should consume only a reasonable amount of memory.
**Research and proposed potential solutions:**
1) Split the resource.json file into a number of chunk files when exporting the contents.
2) Or keep one large resource.json (a non-standard JSON file) with each record on its own line, so the import can read it line by line.
3) Or use a streaming JSON library, such as ijson, to read the JSON file in chunks and save memory.
https://bugzilla.redhat.com/show_bug.cgi?id=2061224
| Is this issue distinct from https://github.com/pulp/pulpcore/issues/2072?
What version are you seeing this on? Is there a BZ that can be attached?
@dralley: Yes, this is a different issue from #2072. #2072 fixed only the export part, unfortunately not the import part. The import still has a memory issue even after the fix to the line linked below.
https://github.com/pulp/pulpcore/pull/1782/files#diff-06badc442eb525dc6f2de7f4e19a7762ce20a972b79b6a90cf1ee80211053ab5L62-R62
I had created a bugzilla for it:
https://bugzilla.redhat.com/show_bug.cgi?id=2061224 | 2022-05-06T17:31:29 |
|
pulp/pulpcore | 2,719 | pulp__pulpcore-2719 | [
"2175"
] | 3279dba615441136e99aa27bf83f4c4c9d80626e | diff --git a/pulpcore/plugin/stages/artifact_stages.py b/pulpcore/plugin/stages/artifact_stages.py
--- a/pulpcore/plugin/stages/artifact_stages.py
+++ b/pulpcore/plugin/stages/artifact_stages.py
@@ -477,7 +477,9 @@ async def run(self):
if getattr(d_artifact.artifact, checksum_type):
checksum = getattr(d_artifact.artifact, checksum_type)
if checksum in existing_ras_dict:
- d_artifact.url = existing_ras_dict[checksum]["url"]
+ d_artifact.urls = [
+ existing_ras_dict[checksum]["url"]
+ ] + d_artifact.urls
d_artifact.remote = existing_ras_dict[checksum]["remote"]
for d_content in batch:
diff --git a/pulpcore/plugin/stages/models.py b/pulpcore/plugin/stages/models.py
--- a/pulpcore/plugin/stages/models.py
+++ b/pulpcore/plugin/stages/models.py
@@ -22,6 +22,8 @@ class DeclarativeArtifact:
:class:`~pulpcore.plugin.models.Artifact` either saved or unsaved. If unsaved, it
may have partial digest information attached to it.
url (str): the url to fetch the :class:`~pulpcore.plugin.models.Artifact` from.
+ urls (List[str]): A list of many possible URLs to fetch the
+ :class:`~pulpcore.plugin.models.Artifact` from.
relative_path (str): the relative_path this :class:`~pulpcore.plugin.models.Artifact`
should be published at for any Publication.
remote (:class:`~pulpcore.plugin.models.Remote`): The remote used to fetch this
@@ -35,19 +37,29 @@ class DeclarativeArtifact:
specified and `artifact` doesn't have a file.
"""
- __slots__ = ("artifact", "url", "relative_path", "remote", "extra_data", "deferred_download")
+ __slots__ = (
+ "artifact",
+ "urls",
+ "relative_path",
+ "remote",
+ "extra_data",
+ "deferred_download",
+ )
def __init__(
self,
artifact=None,
url=None,
+ urls=None,
relative_path=None,
remote=None,
extra_data=None,
deferred_download=False,
):
- if not url:
- raise ValueError(_("DeclarativeArtifact must have a 'url'"))
+ if not (url or urls):
+ raise ValueError(_("DeclarativeArtifact must have a at least one 'url' provided"))
+ if url and urls:
+ raise ValueError(_("DeclarativeArtifact must not provide both 'url' and 'urls'"))
if not relative_path:
raise ValueError(_("DeclarativeArtifact must have a 'relative_path'"))
if not artifact:
@@ -60,12 +72,16 @@ def __init__(
)
)
self.artifact = artifact
- self.url = url
+ self.urls = [url] if url else urls
self.relative_path = relative_path
self.remote = remote
self.extra_data = extra_data or {}
self.deferred_download = deferred_download
+ @property
+ def url(self):
+ return self.urls[0]
+
async def download(self):
"""
Download content and update the associated Artifact.
@@ -84,11 +100,27 @@ async def download(self):
if self.artifact.size:
expected_size = self.artifact.size
validation_kwargs["expected_size"] = expected_size
- downloader = self.remote.get_downloader(url=self.url, **validation_kwargs)
- # Custom downloaders may need extra information to complete the request.
- download_result = await downloader.run(extra_data=self.extra_data)
- self.artifact = Artifact(**download_result.artifact_attributes, file=download_result.path)
- return download_result
+
+ urls = iter(self.urls)
+ url = next(urls)
+
+ while True:
+ downloader = self.remote.get_downloader(url=url, **validation_kwargs)
+ try:
+ # Custom downloaders may need extra information to complete the request.
+ download_result = await downloader.run(extra_data=self.extra_data)
+ except Exception as e:
+ if url := next(urls, None):
+ # If there's more mirrors to try, ignore the error and move on
+ continue
+ else:
+ # There's no more mirrors to try, we need to raise the error instead of
+ # swallowing it
+ raise e
+ self.artifact = Artifact(
+ **download_result.artifact_attributes, file=download_result.path
+ )
+ return download_result
class DeclarativeContent:
| As a plugin writer, I can provide many different possible sources for one DeclarativeArtifact
Instead of providing a single URL for a given DeclarativeArtifact, it should be possible to provide many, and have the sync pipeline produce RemoteArtifacts accordingly.
Primary use case: mirrorlist support, where a user might expect that Pulp could fall back appropriately to other mirrors if a single mirror fails, rather than raising an error. Currently there is no way to facilitate this inside the sync pipeline.
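A minimal sketch (an editor's illustration, not part of the original issue) of how a plugin's first stage might use the `urls` argument introduced by the patch above; the mirror URLs, the digest placeholder, and the `remote`/`content` names are assumptions rather than working values.

```python
from pulpcore.plugin.models import Artifact
from pulpcore.plugin.stages import DeclarativeArtifact, DeclarativeContent

# "remote" and "content" stand in for the plugin's Remote and Content instances.
da = DeclarativeArtifact(
    artifact=Artifact(sha256="..."),  # unsaved Artifact carrying only partial digest info
    urls=[
        "https://mirror-a.example.com/pkg-1.0.tar.gz",  # tried first
        "https://mirror-b.example.com/pkg-1.0.tar.gz",  # tried only if the first download fails
    ],
    relative_path="pkg-1.0.tar.gz",
    remote=remote,
)
dc = DeclarativeContent(content=content, d_artifacts=[da])
```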
| 2022-05-11T17:41:28 |
||
pulp/pulpcore | 2,728 | pulp__pulpcore-2728 | [
"2739"
] | 6c2c93b4de1695639782b0ee71c48d7153443a8e | diff --git a/pulpcore/app/role_util.py b/pulpcore/app/role_util.py
--- a/pulpcore/app/role_util.py
+++ b/pulpcore/app/role_util.py
@@ -220,12 +220,17 @@ def get_objects_for_group(group, perms, qs, any_perm=False, accept_global_perms=
def get_users_with_perms_roles(
- obj, with_superusers=False, with_group_users=True, only_with_perms_in=None
+ obj,
+ with_superusers=False,
+ with_group_users=True,
+ only_with_perms_in=None,
+ include_model_permissions=True,
+ for_concrete_model=False,
):
qs = User.objects.none()
if with_superusers:
qs |= User.objects.filter(is_superuser=True)
- ctype = ContentType.objects.get_for_model(obj, for_concrete_model=False)
+ ctype = ContentType.objects.get_for_model(obj, for_concrete_model=for_concrete_model)
perms = Permission.objects.filter(content_type__pk=ctype.id)
if only_with_perms_in:
codenames = [
@@ -234,9 +239,12 @@ def get_users_with_perms_roles(
if len(split_perm) == 1 or split_perm[0] == ctype.app_label
]
perms = perms.filter(codename__in=codenames)
- user_roles = UserRole.objects.filter(role__permissions__in=perms).filter(
- Q(object_id=None) | Q(content_type=ctype, object_id=obj.pk)
- )
+
+ object_query = Q(content_type=ctype, object_id=obj.pk)
+ if include_model_permissions:
+ object_query = Q(object_id=None) | object_query
+
+ user_roles = UserRole.objects.filter(role__permissions__in=perms).filter(object_query)
qs |= User.objects.filter(Exists(user_roles.filter(user=OuterRef("pk"))))
if with_group_users:
group_roles = GroupRole.objects.filter(role__permissions__in=perms).filter(
@@ -248,9 +256,14 @@ def get_users_with_perms_roles(
def get_users_with_perms_attached_perms(
- obj, with_superusers=False, with_group_users=True, only_with_perms_in=None
+ obj,
+ with_superusers=False,
+ with_group_users=True,
+ only_with_perms_in=None,
+ include_model_permissions=True,
+ for_concrete_model=False,
):
- ctype = ContentType.objects.get_for_model(obj, for_concrete_model=False)
+ ctype = ContentType.objects.get_for_model(obj, for_concrete_model=for_concrete_model)
perms = Permission.objects.filter(content_type__pk=ctype.id)
if only_with_perms_in:
codenames = [
@@ -259,9 +272,12 @@ def get_users_with_perms_attached_perms(
if len(split_perm) == 1 or split_perm[0] == ctype.app_label
]
perms = perms.filter(codename__in=codenames)
- user_roles = UserRole.objects.filter(role__permissions__in=perms).filter(
- Q(object_id=None) | Q(content_type=ctype, object_id=obj.pk)
- )
+
+ object_query = Q(content_type=ctype, object_id=obj.pk)
+ if include_model_permissions:
+ object_query = Q(object_id=None) | object_query
+
+ user_roles = UserRole.objects.filter(role__permissions__in=perms).filter(object_query)
res = defaultdict(set)
if with_superusers:
for user in User.objects.filter(is_superuser=True):
@@ -271,9 +287,7 @@ def get_users_with_perms_attached_perms(
user_role.role.permissions.filter(pk__in=perms).values_list("codename", flat=True)
)
if with_group_users:
- group_roles = GroupRole.objects.filter(role__permissions__in=perms).filter(
- Q(object_id=None) | Q(content_type=ctype, object_id=obj.pk)
- )
+ group_roles = GroupRole.objects.filter(role__permissions__in=perms).filter(object_query)
for group_role in group_roles:
for user in group_role.group.user_set.all():
res[user].update(
@@ -284,8 +298,14 @@ def get_users_with_perms_attached_perms(
return {k: list(v) for k, v in res.items()}
-def get_users_with_perms_attached_roles(obj, with_group_users=True, only_with_perms_in=None):
- ctype = ContentType.objects.get_for_model(obj, for_concrete_model=False)
+def get_users_with_perms_attached_roles(
+ obj,
+ with_group_users=True,
+ only_with_perms_in=None,
+ include_model_permissions=True,
+ for_concrete_model=False,
+):
+ ctype = ContentType.objects.get_for_model(obj, for_concrete_model=for_concrete_model)
perms = Permission.objects.filter(content_type__pk=ctype.id)
if only_with_perms_in:
codenames = [
@@ -294,16 +314,17 @@ def get_users_with_perms_attached_roles(obj, with_group_users=True, only_with_pe
if len(split_perm) == 1 or split_perm[0] == ctype.app_label
]
perms = perms.filter(codename__in=codenames)
- user_roles = UserRole.objects.filter(role__permissions__in=perms).filter(
- Q(object_id=None) | Q(content_type=ctype, object_id=obj.pk)
- )
+
+ object_query = Q(content_type=ctype, object_id=obj.pk)
+ if include_model_permissions:
+ object_query = Q(object_id=None) | object_query
+
+ user_roles = UserRole.objects.filter(role__permissions__in=perms).filter(object_query)
res = defaultdict(set)
for user_role in user_roles:
res[user_role.user].update(user_role.role.name)
if with_group_users:
- group_roles = GroupRole.objects.filter(role__permissions__in=perms).filter(
- Q(object_id=None) | Q(content_type=ctype, object_id=obj.pk)
- )
+ group_roles = GroupRole.objects.filter(role__permissions__in=perms).filter(object_query)
for group_role in group_roles:
for user in group_role.group.user_set.all():
res[user].update(group_role.role.name)
@@ -312,7 +333,13 @@ def get_users_with_perms_attached_roles(obj, with_group_users=True, only_with_pe
# Interface copied from django guardian
def get_users_with_perms(
- obj, attach_perms=False, with_superusers=False, with_group_users=True, only_with_perms_in=None
+ obj,
+ attach_perms=False,
+ with_superusers=False,
+ with_group_users=True,
+ only_with_perms_in=None,
+ include_model_permissions=True,
+ for_concrete_model=False,
):
if attach_perms:
res = defaultdict(set)
@@ -322,6 +349,8 @@ def get_users_with_perms(
with_superusers=with_superusers,
with_group_users=with_group_users,
only_with_perms_in=only_with_perms_in,
+ include_model_permissions=include_model_permissions,
+ for_concrete_model=for_concrete_model,
).items():
res[key].update(value)
return {k: list(v) for k, v in res.items()}
@@ -332,12 +361,16 @@ def get_users_with_perms(
with_superusers=with_superusers,
with_group_users=with_group_users,
only_with_perms_in=only_with_perms_in,
+ include_model_permissions=include_model_permissions,
+ for_concrete_model=for_concrete_model,
)
return qs.distinct()
-def get_groups_with_perms_roles(obj, only_with_perms_in=None):
- ctype = ContentType.objects.get_for_model(obj, for_concrete_model=False)
+def get_groups_with_perms_roles(
+ obj, only_with_perms_in=None, include_model_permissions=True, for_concrete_model=False
+):
+ ctype = ContentType.objects.get_for_model(obj, for_concrete_model=for_concrete_model)
perms = Permission.objects.filter(content_type__pk=ctype.id)
if only_with_perms_in:
codenames = [
@@ -346,15 +379,20 @@ def get_groups_with_perms_roles(obj, only_with_perms_in=None):
if len(split_perm) == 1 or split_perm[0] == ctype.app_label
]
perms = perms.filter(codename__in=codenames)
- group_roles = GroupRole.objects.filter(role__permissions__in=perms).filter(
- Q(object_id=None) | Q(content_type=ctype, object_id=obj.pk)
- )
+
+ object_query = Q(content_type=ctype, object_id=obj.pk)
+ if include_model_permissions:
+ object_query = Q(object_id=None) | object_query
+
+ group_roles = GroupRole.objects.filter(role__permissions__in=perms).filter(object_query)
qs = Group.objects.filter(Exists(group_roles.filter(group=OuterRef("pk")))).distinct()
return qs
-def get_groups_with_perms_attached_perms(obj, only_with_perms_in=None):
- ctype = ContentType.objects.get_for_model(obj, for_concrete_model=False)
+def get_groups_with_perms_attached_perms(
+ obj, only_with_perms_in=None, include_model_permissions=True, for_concrete_model=False
+):
+ ctype = ContentType.objects.get_for_model(obj, for_concrete_model=for_concrete_model)
perms = Permission.objects.filter(content_type__pk=ctype.id)
if only_with_perms_in:
codenames = [
@@ -363,9 +401,12 @@ def get_groups_with_perms_attached_perms(obj, only_with_perms_in=None):
if len(split_perm) == 1 or split_perm[0] == ctype.app_label
]
perms = perms.filter(codename__in=codenames)
- group_roles = GroupRole.objects.filter(role__permissions__in=perms).filter(
- Q(object_id=None) | Q(content_type=ctype, object_id=obj.pk)
- )
+
+ object_query = Q(content_type=ctype, object_id=obj.pk)
+ if include_model_permissions:
+ object_query = Q(object_id=None) | object_query
+
+ group_roles = GroupRole.objects.filter(role__permissions__in=perms).filter(object_query)
res = defaultdict(set)
for group_role in group_roles:
res[group_role.group].update(
@@ -374,8 +415,10 @@ def get_groups_with_perms_attached_perms(obj, only_with_perms_in=None):
return {k: list(v) for k, v in res.items()}
-def get_groups_with_perms_attached_roles(obj, only_with_perms_in=None):
- ctype = ContentType.objects.get_for_model(obj, for_concrete_model=False)
+def get_groups_with_perms_attached_roles(
+ obj, only_with_perms_in=None, include_model_permissions=True, for_concrete_model=False
+):
+ ctype = ContentType.objects.get_for_model(obj, for_concrete_model=for_concrete_model)
perms = Permission.objects.filter(content_type__pk=ctype.id)
if only_with_perms_in:
codenames = [
@@ -384,9 +427,12 @@ def get_groups_with_perms_attached_roles(obj, only_with_perms_in=None):
if len(split_perm) == 1 or split_perm[0] == ctype.app_label
]
perms = perms.filter(codename__in=codenames)
- group_roles = GroupRole.objects.filter(role__permissions__in=perms).filter(
- Q(object_id=None) | Q(content_type=ctype, object_id=obj.pk)
- )
+
+ object_query = Q(content_type=ctype, object_id=obj.pk)
+ if include_model_permissions:
+ object_query = Q(object_id=None) | object_query
+
+ group_roles = GroupRole.objects.filter(role__permissions__in=perms).filter(object_query)
res = defaultdict(set)
for group_role in group_roles:
res[group_role.group].add(group_role.role.name)
@@ -394,15 +440,28 @@ def get_groups_with_perms_attached_roles(obj, only_with_perms_in=None):
# Interface copied from django guardian
-def get_groups_with_perms(obj, attach_perms=False):
+def get_groups_with_perms(
+ obj,
+ attach_perms=False,
+ include_model_permissions=True,
+ for_concrete_model=False,
+):
if attach_perms:
res = defaultdict(set)
if "pulpcore.backends.ObjectRolePermissionBackend" in settings.AUTHENTICATION_BACKENDS:
- for key, value in get_groups_with_perms_attached_perms(obj).items():
+ for key, value in get_groups_with_perms_attached_perms(
+ obj,
+ include_model_permissions=include_model_permissions,
+ for_concrete_model=for_concrete_model,
+ ).items():
res[key].update(value)
return {k: list(v) for k, v in res.items()}
else:
qs = Group.objects.none()
if "pulpcore.backends.ObjectRolePermissionBackend" in settings.AUTHENTICATION_BACKENDS:
- qs |= get_groups_with_perms_roles(obj)
+ qs |= get_groups_with_perms_roles(
+ obj,
+ include_model_permissions=include_model_permissions,
+ for_concrete_model=for_concrete_model,
+ )
return qs.distinct()
| Make role utils behave like django guardian
**Version**
3.18
**Describe the bug**
`get_[users|groups]_with_perms` doesn't have the same behavior as django guardian. It:
- Includes model permissions along with object permissions
- Doesn't pass through proxy models.
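As an editor's sketch (not from the issue): with the keyword arguments added by this patch, a caller can request guardian-like behavior, i.e. object-level permissions only and proxy models resolved to their concrete model; `repository` is a hypothetical object.

```python
from pulpcore.app.role_util import get_users_with_perms

users = get_users_with_perms(
    repository,                       # any object carrying object-level role assignments
    with_group_users=True,
    include_model_permissions=False,  # skip model-wide (object_id=None) role assignments
    for_concrete_model=True,          # look up the concrete model behind a proxy
)
```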
| 2022-05-13T21:28:33 |
||
pulp/pulpcore | 2,759 | pulp__pulpcore-2759 | [
"2758"
] | b8c918e83e1a0912566bd7be55a8e4fb6ec2c6c1 | diff --git a/pulpcore/app/serializers/user.py b/pulpcore/app/serializers/user.py
--- a/pulpcore/app/serializers/user.py
+++ b/pulpcore/app/serializers/user.py
@@ -236,7 +236,10 @@ class UserRoleSerializer(ModelSerializer, NestedHyperlinkedModelSerializer):
)
content_object = ContentObjectField(
- help_text=_("Optional pulp_href of the object the permissions are to be asserted on."),
+ help_text=_(
+ "pulp_href of the object for which role permissions should be asserted. "
+ "If set to 'null', permissions will act on the model-level."
+ ),
source="*",
allow_null=True,
)
@@ -300,7 +303,10 @@ class GroupRoleSerializer(ModelSerializer, NestedHyperlinkedModelSerializer):
)
content_object = ContentObjectField(
- help_text=_("Optional pulp_href of the object the permissions are to be asserted on."),
+ help_text=_(
+ "pulp_href of the object for which role permissions should be asserted. "
+ "If set to 'null', permissions will act on the model-level."
+ ),
source="*",
allow_null=True,
)
| Add a note about passing `null` to `content_object`
| Actually they can. The client needs to explicitly pass `content_object=None` in the POST request body.
i.e. `http <...> content_object:=null`
@pulp/core, please change the name of the issue to "Add a note about passing `null` to `content_object`" or something similar. | 2022-05-23T15:08:28 |
|
pulp/pulpcore | 2,766 | pulp__pulpcore-2766 | [
"2765"
] | e4fa85a8baa28d2386cfc31de5eda2cdcb554d6f | diff --git a/pulpcore/app/serializers/user.py b/pulpcore/app/serializers/user.py
--- a/pulpcore/app/serializers/user.py
+++ b/pulpcore/app/serializers/user.py
@@ -1,3 +1,5 @@
+import typing
+
from gettext import gettext as _
from django.contrib.auth import get_user_model
@@ -239,6 +241,15 @@ class UserRoleSerializer(ModelSerializer, NestedHyperlinkedModelSerializer):
allow_null=True,
)
+ description = serializers.SerializerMethodField()
+ permissions = serializers.SerializerMethodField()
+
+ def get_description(self, obj):
+ return obj.role.description
+
+ def get_permissions(self, obj) -> typing.List[str]:
+ return [f"{p.content_type.app_label}.{p.codename}" for p in obj.role.permissions.all()]
+
def validate(self, data):
data = super().validate(data)
data["user"] = User.objects.get(pk=self.context["request"].resolver_match.kwargs["user_pk"])
@@ -268,7 +279,12 @@ def validate(self, data):
class Meta:
model = UserRole
- fields = ModelSerializer.Meta.fields + ("role", "content_object")
+ fields = ModelSerializer.Meta.fields + (
+ "role",
+ "content_object",
+ "description",
+ "permissions",
+ )
class GroupRoleSerializer(ModelSerializer, NestedHyperlinkedModelSerializer):
@@ -289,6 +305,15 @@ class GroupRoleSerializer(ModelSerializer, NestedHyperlinkedModelSerializer):
allow_null=True,
)
+ description = serializers.SerializerMethodField()
+ permissions = serializers.SerializerMethodField()
+
+ def get_description(self, obj):
+ return obj.role.description
+
+ def get_permissions(self, obj) -> typing.List[str]:
+ return [f"{p.content_type.app_label}.{p.codename}" for p in obj.role.permissions.all()]
+
def validate(self, data):
data = super().validate(data)
data["group"] = Group.objects.get(
@@ -320,7 +345,12 @@ def validate(self, data):
class Meta:
model = GroupRole
- fields = ModelSerializer.Meta.fields + ("role", "content_object")
+ fields = ModelSerializer.Meta.fields + (
+ "role",
+ "content_object",
+ "description",
+ "permissions",
+ )
class NestedRoleSerializer(serializers.Serializer):
diff --git a/pulpcore/app/viewsets/user.py b/pulpcore/app/viewsets/user.py
--- a/pulpcore/app/viewsets/user.py
+++ b/pulpcore/app/viewsets/user.py
@@ -365,6 +365,14 @@ def content_object_filter_function(self, queryset, name, value):
obj_type = ContentType.objects.get_for_model(obj, for_concrete_model=False)
return queryset.filter(content_type_id=obj_type.id, object_id=obj.pk)
+ sort = filters.OrderingFilter(
+ fields=(
+ ("role__name", "role"),
+ ("pulp_created", "pulp_created"),
+ ("role__description", "description"),
+ )
+ )
+
class Meta:
fields = (
"role",
| As a user, I want to see role details for roles that are assigned to users and groups
**Is your feature request related to a problem? Please describe.**
The group and user role lists currently look like this:
```json
{
"pulp_href": "/pulp/api/v3/groups/3/roles/6205b617-b825-4d89-a87f-433795bb6248/",
"pulp_created": "2022-05-24T18:31:33.683333Z",
"role": "_permission:container.add_containernamespace",
"content_object": null,
},
```
The role name is included, but the permissions and description for the role are missing. It would be nice to be able to browse the API and see exactly what permissions a user or group has for each role.
**Describe the solution you'd like**
Add the role description and permissions to the serializer.
```json
{
"pulp_href": "/pulp/api/v3/groups/3/roles/6205b617-b825-4d89-a87f-433795bb6248/",
"pulp_created": "2022-05-24T18:31:33.683333Z",
"role": "_permission:container.add_containernamespace",
"content_object": null,
"description": "Auto generated role for permission container.add_containernamespace.",
"permissions": [
"container.add_containernamespace"
]
},
```
| 2022-05-24T20:55:37 |
||
pulp/pulpcore | 2,767 | pulp__pulpcore-2767 | [
"2068"
] | 98673b911069ff4cc2d4b946d02e7d34320a7621 | diff --git a/pulpcore/app/serializers/repository.py b/pulpcore/app/serializers/repository.py
--- a/pulpcore/app/serializers/repository.py
+++ b/pulpcore/app/serializers/repository.py
@@ -115,10 +115,14 @@ class RemoteSerializer(ModelSerializer):
write_only=True,
)
proxy_password = serializers.CharField(
- help_text="The password to authenticte to the proxy.",
+ help_text=_(
+ "The password to authenticate to the proxy. Extra leading and trailing whitespace "
+ "characters are not trimmed."
+ ),
required=False,
allow_null=True,
write_only=True,
+ trim_whitespace=False,
style={"input_type": "password"},
)
username = serializers.CharField(
@@ -128,10 +132,14 @@ class RemoteSerializer(ModelSerializer):
write_only=True,
)
password = serializers.CharField(
- help_text="The password to be used for authentication when syncing.",
+ help_text=_(
+ "The password to be used for authentication when syncing. Extra leading and trailing "
+ "whitespace characters are not trimmed."
+ ),
required=False,
allow_null=True,
write_only=True,
+ trim_whitespace=False,
style={"input_type": "password"},
)
pulp_last_updated = serializers.DateTimeField(
| Passwords stored within remotes are being truncated
Author: @lubosmj (lmjachky)
Redmine Issue: 9617, https://pulp.plan.io/issues/9617
---
Passwords with trailing whitespace characters are truncated automatically. This feature is not advertised at https://docs.pulpproject.org/pulpcore/restapi.html#operation/remotes_file_file_create or anywhere else.
```
(pulp) [vagrant@pulp3-source-fedora34 ~]$ http :24817/pulp/api/v3/remotes/file/file/ url="https://docker.io" username="lmjachky" password="pasword " name=foo
HTTP/1.1 201 Created
Access-Control-Expose-Headers: Correlation-ID
Allow: GET, POST, HEAD, OPTIONS
Connection: close
Content-Length: 506
Content-Type: application/json
Correlation-ID: 10166cb9b1eb4a9eb341525d413472bf
Date: Wed, 08 Dec 2021 13:57:44 GMT
Location: /pulp/api/v3/remotes/file/file/e0299ca2-80d5-4491-8174-8e468cc696af/
Referrer-Policy: same-origin
Server: gunicorn
Vary: Accept, Cookie
X-Content-Type-Options: nosniff
X-Frame-Options: DENY
{
"ca_cert": null,
"client_cert": null,
"connect_timeout": null,
"download_concurrency": null,
"headers": null,
"max_retries": null,
"name": "foo",
"policy": "immediate",
"proxy_url": null,
"pulp_created": "2021-12-08T13:57:44.659583Z",
"pulp_href": "/pulp/api/v3/remotes/file/file/e0299ca2-80d5-4491-8174-8e468cc696af/",
"pulp_labels": {},
"pulp_last_updated": "2021-12-08T13:57:44.659601Z",
"rate_limit": null,
"sock_connect_timeout": null,
"sock_read_timeout": null,
"tls_validation": true,
"total_timeout": null,
"url": "https://docker.io"
}
(pulp) [vagrant@pulp3-source-fedora34 ~]$ django-admin shell_plus
/usr/local/lib/pulp/lib64/python3.9/site-packages/redis/connection.py:72: UserWarning: redis-py works best with hiredis. Please consider installing
warnings.warn(msg)
# Shell Plus Model Imports
from django.contrib.admin.models import LogEntry
from pulpcore.app.models.access_policy import AccessPolicy, Group
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.contrib.sessions.models import Session
from guardian.models.models import GroupObjectPermission, UserObjectPermission
from pulp_container.app.models import Blob, BlobManifest, ContainerDistribution, ContainerNamespace, ContainerPushRepository, ContainerRemote, ContainerRepository, ContentRedirectContentGuard, Manifest, ManifestListManifest, Tag
from pulpcore.app.models.upload import Upload, UploadChunk
from pulp_file.app.models import FileAlternateContentSource, FileContent, FileDistribution, FilePublication, FileRemote, FileRepository
from pulp_ostree.app.models import OstreeCommit, OstreeCommitObject, OstreeConfig, OstreeDistribution, OstreeObject, OstreeRef, OstreeRemote, OstreeRepository, OstreeSummary
from pulp_rpm.app.models.acs import RpmAlternateContentSource
from pulp_rpm.app.models.advisory import UpdateCollection, UpdateCollectionPackage, UpdateRecord, UpdateReference
from pulp_rpm.app.models.comps import PackageCategory, PackageEnvironment, PackageGroup, PackageLangpacks
from pulp_rpm.app.models.custom_metadata import RepoMetadataFile
from pulp_rpm.app.models.distribution import Addon, Checksum, DistributionTree, Image, Variant
from pulp_rpm.app.models.modulemd import Modulemd, ModulemdDefaults
from pulp_rpm.app.models.package import Package
from pulp_rpm.app.models.repository import RpmDistribution, RpmPublication, RpmRemote, RpmRepository, UlnRemote
from pulpcore.app.models.acs import AlternateContentSource, AlternateContentSourcePath
from pulpcore.app.models.base import Label
from pulpcore.app.models.content import Artifact, AsciiArmoredDetachedSigningService, Content, ContentArtifact, PulpTemporaryFile, RemoteArtifact, SigningService
from pulpcore.app.models.exporter import Export, ExportedResource, Exporter, FilesystemExport, FilesystemExporter, PulpExport, PulpExporter
from pulpcore.app.models.importer import Import, Importer, PulpImport, PulpImporter, PulpImporterRepository
from pulpcore.app.models.progress import GroupProgressReport, ProgressReport
from pulpcore.app.models.publication import BaseDistribution, ContentGuard, Distribution, Publication, PublishedArtifact, PublishedMetadata, RBACContentGuard
from pulpcore.app.models.repository import Remote, Repository, RepositoryContent, RepositoryVersion, RepositoryVersionContentDetails
from pulpcore.app.models.role import GroupRole, Role, UserRole
from pulpcore.app.models.status import ContentAppStatus
from pulpcore.app.models.task import CreatedResource, Task, TaskGroup, Worker
# Shell Plus Django Imports
from django.core.cache import cache
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import transaction
from django.db.models import Avg, Case, Count, F, Max, Min, Prefetch, Q, Sum, When
from django.utils import timezone
from django.urls import reverse
from django.db.models import Exists, OuterRef, Subquery
Python 3.9.7 (default, Aug 30 2021, 00:00:00)
Type 'copyright', 'credits' or 'license' for more information
IPython 7.29.0 -- An enhanced Interactive Python. Type '?' for help.
In [1]: FileRemote.objects.first().password
Out[1]: 'pasword'
```
| This issue has been marked 'stale' due to lack of recent activity. If there is no further activity, the issue will be closed in another 30 days. Thank you for your contribution! | 2022-05-25T09:44:12 |
|
pulp/pulpcore | 2,768 | pulp__pulpcore-2768 | [
"2052"
] | 83defcf69030683f4a86187d52330191ac779fdc | diff --git a/pulpcore/plugin/actions.py b/pulpcore/plugin/actions.py
--- a/pulpcore/plugin/actions.py
+++ b/pulpcore/plugin/actions.py
@@ -1,4 +1,5 @@
from gettext import gettext as _
+
from drf_spectacular.utils import extend_schema
from rest_framework.decorators import action
from rest_framework.serializers import ValidationError
@@ -48,7 +49,7 @@ def modify(self, request, pk):
existing_content_units = Content.objects.filter(pk__in=content_units_pks)
existing_content_units.touch()
- self.verify_content_units(existing_content_units, add_content_units)
+ raise_for_unknown_content_units(existing_content_units, add_content_units)
add_content_units = list(add_content_units.keys())
@@ -60,7 +61,7 @@ def modify(self, request, pk):
remove_content_units[NamedModelViewSet.extract_pk(url)] = url
content_units_pks = set(remove_content_units.keys())
existing_content_units = Content.objects.filter(pk__in=content_units_pks)
- self.verify_content_units(existing_content_units, remove_content_units)
+ raise_for_unknown_content_units(existing_content_units, remove_content_units)
remove_content_units = list(remove_content_units.keys())
task = dispatch(
@@ -75,14 +76,24 @@ def modify(self, request, pk):
)
return OperationPostponedResponse(task, request)
- def verify_content_units(self, content_units, all_content_units):
- """Verify referenced content units."""
- existing_content_units_pks = content_units.values_list("pk", flat=True)
- existing_content_units_pks = {str(pk) for pk in existing_content_units_pks}
-
- missing_pks = set(all_content_units.keys()) - existing_content_units_pks
- if missing_pks:
- missing_hrefs = [all_content_units[pk] for pk in missing_pks]
- raise ValidationError(
- _("Could not find the following content units: {}").format(missing_hrefs)
- )
+
+def raise_for_unknown_content_units(existing_content_units, content_units_pks_hrefs):
+ """Verify if all the specified content units were found in the database.
+
+ Args:
+ existing_content_units (pulpcore.plugin.models.Content): Content filtered by
+ specified_content_units.
+ content_units_pks_hrefs (dict): An original dictionary of pk-href pairs that
+ are used for the verification.
+ Raises:
+ ValidationError: If some of the referenced content units are not present in the database
+ """
+ existing_content_units_pks = existing_content_units.values_list("pk", flat=True)
+ existing_content_units_pks = set(map(str, existing_content_units_pks))
+
+ missing_pks = set(content_units_pks_hrefs.keys()) - existing_content_units_pks
+ if missing_pks:
+ missing_hrefs = [content_units_pks_hrefs[pk] for pk in missing_pks]
+ raise ValidationError(
+ _("Could not find the following content units: {}").format(missing_hrefs)
+ )
| As a plugin writer, I want to have a function for touching content units
Author: @lubosmj (lmjachky)
Redmine Issue: 9419, https://pulp.plan.io/issues/9419
---
In the PR https://github.com/pulp/pulpcore/pull/1624, we introduced a method that uses `bulk_touch` for updating timestamps of content units. We should expose this method to all plugin writers (e.g., pulp_container currently implements the same method - this creates unnecessary duplicates).
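An editor's sketch (not from the issue) of how a plugin could reuse the helper factored out in the patch above instead of re-implementing the pk/href verification; `content_units` is assumed to be a pk-to-href dict built the same way as in `modify()`.

```python
from pulpcore.plugin.actions import raise_for_unknown_content_units
from pulpcore.plugin.models import Content

existing = Content.objects.filter(pk__in=content_units.keys())
existing.touch()  # bump timestamps so orphan cleanup does not reap these units meanwhile
raise_for_unknown_content_units(existing, content_units)
```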
| This issue has been marked 'stale' due to lack of recent activity. If there is no further activity, the issue will be closed in another 30 days. Thank you for your contribution! | 2022-05-25T13:00:41 |
|
pulp/pulpcore | 2,779 | pulp__pulpcore-2779 | [
"2890"
] | 5c84ab71928aef83a37bc97b5eda7d2a60463807 | diff --git a/pulpcore/app/models/upload.py b/pulpcore/app/models/upload.py
--- a/pulpcore/app/models/upload.py
+++ b/pulpcore/app/models/upload.py
@@ -1,6 +1,8 @@
import hashlib
import os
+from gettext import gettext as _
+
from django.core.files.base import ContentFile
from django.db import models
from django.db.models.signals import post_delete
@@ -26,17 +28,18 @@ def append(self, chunk, offset, sha256=None):
Append a chunk to an upload.
Args:
- chunk (File): Binary file to append to the upload file.
+ chunk (File): Binary data to append to the upload file.
offset (int): First byte position to write chunk to.
"""
- chunk_read = chunk.read()
- current_sha256 = hashlib.sha256(chunk_read).hexdigest()
- if sha256 and sha256 != current_sha256:
- raise serializers.ValidationError("Checksum does not match chunk upload.")
+ chunk = chunk.read()
+ if sha256:
+ current_sha256 = hashlib.sha256(chunk).hexdigest()
+ if sha256 != current_sha256:
+ raise serializers.ValidationError(_("Checksum does not match chunk upload."))
upload_chunk = UploadChunk(upload=self, offset=offset, size=len(chunk))
filename = os.path.basename(upload_chunk.storage_path(""))
- upload_chunk.file.save(filename, ContentFile(chunk_read))
+ upload_chunk.file.save(filename, ContentFile(chunk))
class UploadChunk(BaseModel):
| Append of chunked upload processes raw data
**Version**
Please provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
Add any other context about the problem here. Please provide links to any previous discussions via Discourse or Bugzilla.
| 2022-05-30T10:29:11 |
||
pulp/pulpcore | 2,783 | pulp__pulpcore-2783 | [
"2782"
] | 0425bfe3c7df362aeea3e2e6f638f0f3efc41533 | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -262,8 +262,8 @@
#texinfo_show_urls = 'footnote'
extlinks = {
- 'github': ('https://github.com/pulp/pulpcore/issues/%s', '#'),
- 'redmine': ('https://pulp.plan.io/issues/%s', '#'),
+ 'github': ('https://github.com/pulp/pulpcore/issues/%s', '#%s'),
+ 'redmine': ('https://pulp.plan.io/issues/%s', '#%s'),
}
# napoleon uses .. attribute by default, but :ivar: is more succinct and looks better,
| Sphinx 5.0 causes docs not to build
Sphinx 5.0.0 was released on May 29th https://pypi.org/project/Sphinx/#history and emits the warning below:
```
extlinks: Sphinx-6.0 will require a caption string to contain exactly one '%s' and all other '%' need to be escaped as '%%'.
```
This is treated as an error due to the Makefile almost all release branches use, e.g. [here's the one for pulpcore:main](https://github.com/pulp/pulpcore/blob/main/docs/Makefile#L6).
## Solutions
We could...
1) have warnings not be treated as errors
2) Fix the extlinks references to get the warnings to stop
3) Pin Sphinx <5.0.0
I am going to try for (2). (1) Seems like a step backwards, and (3) likely won't work because Sphinx gets included from a variety of places as it's installed in the CI currently.
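For reference, a sketch of the Sphinx 5 compatible `extlinks` form that option (2) amounts to, mirroring the patch above: the caption must contain exactly one `%s`, and any other literal `%` has to be doubled.

```python
# docs/conf.py
extlinks = {
    "github": ("https://github.com/pulp/pulpcore/issues/%s", "#%s"),
    "redmine": ("https://pulp.plan.io/issues/%s", "#%s"),
}
```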
| 2022-05-31T17:42:54 |
||
pulp/pulpcore | 2,793 | pulp__pulpcore-2793 | [
"2340"
] | d77289cf1c2dad38c628bce5b7905376f4a8942f | diff --git a/pulpcore/app/urls.py b/pulpcore/app/urls.py
--- a/pulpcore/app/urls.py
+++ b/pulpcore/app/urls.py
@@ -10,6 +10,7 @@
SpectacularSwaggerView,
)
from rest_framework_nested import routers
+from rest_framework.routers import APIRootView
from pulpcore.app.apps import pulp_plugin_configs
from pulpcore.app.views import OrphansView, PulpImporterImportCheckView, RepairView, StatusView
@@ -104,6 +105,19 @@ def __repr__(self):
return str(self.viewset)
+class PulpAPIRootView(APIRootView):
+ """A Pulp-defined APIRootView class with no authentication requirements."""
+
+ authentication_classes = []
+ permission_classes = []
+
+
+class PulpDefaultRouter(routers.DefaultRouter):
+ """A DefaultRouter class that benefits from the customized PulpAPIRootView class."""
+
+ APIRootView = PulpAPIRootView
+
+
all_viewsets = []
plugin_patterns = []
# Iterate over each app, including pulpcore and the plugins.
@@ -118,9 +132,6 @@ def __repr__(self):
for viewset in sorted_by_depth:
vs_tree.add_decendent(ViewSetNode(viewset))
-#: The Pulp Platform v3 API router, which can be used to manually register ViewSets with the API.
-root_router = routers.DefaultRouter()
-
urlpatterns = [
path(f"{API_ROOT}repair/", RepairView.as_view()),
path(f"{API_ROOT}status/", StatusView.as_view()),
@@ -184,6 +195,8 @@ def __repr__(self):
)
)
+#: The Pulp Platform v3 API router, which can be used to manually register ViewSets with the API.
+root_router = PulpDefaultRouter()
all_routers = [root_router] + vs_tree.register_with(root_router)
for router in all_routers:
| diff --git a/pulpcore/tests/functional/__init__.py b/pulpcore/tests/functional/__init__.py
--- a/pulpcore/tests/functional/__init__.py
+++ b/pulpcore/tests/functional/__init__.py
@@ -249,6 +249,11 @@ def pulp_api_v3_path(cli_client):
return v3_api_root
[email protected](scope="session")
+def pulp_api_v3_url(pulp_cfg, pulp_api_v3_path):
+ return f"{pulp_cfg.get_base_url()}{pulp_api_v3_path}"
+
+
@pytest.fixture
def random_artifact(random_artifact_factory):
return random_artifact_factory()
diff --git a/pulpcore/tests/functional/api/test_root_endpoint.py b/pulpcore/tests/functional/api/test_root_endpoint.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/tests/functional/api/test_root_endpoint.py
@@ -0,0 +1,10 @@
+import asyncio
+import pytest
+
+from pulpcore.tests.functional.utils import get_response
+
+
[email protected]
+def test_anonymous_access_to_root(pulp_api_v3_url):
+ response = asyncio.run(get_response(pulp_api_v3_url))
+ assert response.ok
diff --git a/pulpcore/tests/functional/utils.py b/pulpcore/tests/functional/utils.py
--- a/pulpcore/tests/functional/utils.py
+++ b/pulpcore/tests/functional/utils.py
@@ -1,4 +1,6 @@
"""Utilities for Pulpcore tests."""
+import aiohttp
+
from functools import partial
from unittest import SkipTest
@@ -7,6 +9,11 @@
from pulpcore.client.pulpcore import ApiClient
+async def get_response(url):
+ async with aiohttp.ClientSession() as session:
+ return await session.get(url)
+
+
skip_if = partial(selectors.skip_if, exc=SkipTest) # pylint:disable=invalid-name
"""The ``@skip_if`` decorator, customized for unittest.
| accessing /pulp/api/v3/ requires auth since 3.16, while it didn't before
**Version**
3.16.3
**Describe the bug**
since 3.16 accessing `/pulp/api/v3/` requires auth, while before that it did not.
I *think* this is a side effect of https://github.com/pulp/pulpcore/pull/1593, just not sure if that was intentional or not.
**To Reproduce**
Steps to reproduce the behavior:
- set up a 3.16.3 install
- `curl -k https://localhost/pulp/api/v3/`
**Expected behavior**
Until 3.15, this would return 200, now it returns 403, unless I do provide auth.
Accessing `/pulp/api/v3/status/` still works fine without auth.
**Additional context**
Originally found while working on Puppet integration of 3.16+ in https://github.com/theforeman/puppet-pulpcore/pull/249
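A programmatic version of the reproducer above (editor's sketch; the base URL is illustrative and `verify=False` mirrors `curl -k`).

```python
import requests

resp = requests.get("https://localhost/pulp/api/v3/", verify=False)
assert resp.status_code == 200, f"expected anonymous access, got {resp.status_code}"
```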
| 2022-06-01T14:54:02 |
|
pulp/pulpcore | 2,800 | pulp__pulpcore-2800 | [
"2798"
] | dd352a39473bc8ccf37850ce0da568a813088975 | diff --git a/pulpcore/app/models/content.py b/pulpcore/app/models/content.py
--- a/pulpcore/app/models/content.py
+++ b/pulpcore/app/models/content.py
@@ -855,7 +855,7 @@ def validate(self):
Raises:
RuntimeError: If the validation has failed.
"""
- with tempfile.TemporaryDirectory(dir=".") as temp_directory_name:
+ with tempfile.TemporaryDirectory(dir=settings.WORKING_DIRECTORY) as temp_directory_name:
with tempfile.NamedTemporaryFile(dir=temp_directory_name) as temp_file:
temp_file.write(b"arbitrary data")
temp_file.flush()
diff --git a/pulpcore/app/util.py b/pulpcore/app/util.py
--- a/pulpcore/app/util.py
+++ b/pulpcore/app/util.py
@@ -3,6 +3,7 @@
import gnupg
+from django.conf import settings
from django.apps import apps
from django.contrib.contenttypes.models import ContentType
from pkg_resources import get_distribution
@@ -174,7 +175,7 @@ def verify_signature(filepath, public_key, detached_data=None):
When dealing with a detached signature (referenced by the 'filepath' argument), one have to pass
the reference to a data file that was signed by that signature.
"""
- with tempfile.TemporaryDirectory(dir=".") as temp_directory_name:
+ with tempfile.TemporaryDirectory(dir=settings.WORKING_DIRECTORY) as temp_directory_name:
gpg = gnupg.GPG(gnupghome=temp_directory_name)
gpg.import_keys(public_key)
imported_keys = gpg.list_keys()
| Can't create signing service
**Version**
3.20
**Describe the bug**
When attempting to create a signing service using the `pulpcore-manager add-signing-service` command I get the following error:
```
api_1 | Traceback (most recent call last):
api_1 | File "/venv/bin/django-admin", line 8, in <module>
api_1 | sys.exit(execute_from_command_line())
api_1 | File "/venv/lib64/python3.8/site-packages/django/core/management/__init__.py", line 419, in execute_from_command_line
api_1 | utility.execute()
api_1 | File "/venv/lib64/python3.8/site-packages/django/core/management/__init__.py", line 413, in execute
api_1 | self.fetch_command(subcommand).run_from_argv(self.argv)
api_1 | File "/venv/lib64/python3.8/site-packages/django/core/management/base.py", line 354, in run_from_argv
api_1 | self.execute(*args, **cmd_options)
api_1 | File "/venv/lib64/python3.8/site-packages/django/core/management/base.py", line 398, in execute
api_1 | output = self.handle(*args, **options)
api_1 | File "/venv/lib64/python3.8/site-packages/pulpcore/app/management/commands/add-signing-service.py", line 83, in handle
api_1 | SigningService.objects.create(
api_1 | File "/venv/lib64/python3.8/site-packages/django/db/models/manager.py", line 85, in manager_method
api_1 | return getattr(self.get_queryset(), name)(*args, **kwargs)
api_1 | File "/venv/lib64/python3.8/site-packages/django/db/models/query.py", line 453, in create
api_1 | obj.save(force_insert=True, using=self.db)
api_1 | File "/venv/lib64/python3.8/site-packages/pulpcore/app/models/content.py", line 826, in save
api_1 | self.validate()
api_1 | File "/venv/lib64/python3.8/site-packages/pulpcore/app/models/content.py", line 858, in validate
api_1 | with tempfile.TemporaryDirectory(dir=".") as temp_directory_name:
api_1 | File "/usr/lib64/python3.8/tempfile.py", line 780, in __init__
api_1 | self.name = mkdtemp(suffix, prefix, dir)
api_1 | File "/usr/lib64/python3.8/tempfile.py", line 358, in mkdtemp
api_1 | _os.mkdir(file, 0o700)
api_1 | PermissionError: [Errno 13] Permission denied: './tmpcmbnyssa'
```
The error seems to be caused by this change https://github.com/pulp/pulpcore/commit/d32c62a2da520e696e30504f6de98c1dfc4406e7#diff-81f6a78175bb93934b6beff952646d3ca1ef3731f1ff14492d4ec77bfc3fdf82R847, which attempts to create a new temporary directory in `.`, which my deployment doesn't have permissions to write to. I have no idea what directory `.` corresponds to here, so I can't make it writable.
**To Reproduce**
Steps to reproduce the behavior:
- attempt to create a signing service with `pulpcore-manager add-signing-service`
**Expected behavior**
I should be able to add a signing service
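The pattern applied by the fix above, shown as an editor's sketch: scratch directories are created under the configured `WORKING_DIRECTORY` setting instead of the process working directory, which may not be writable.

```python
import tempfile

from django.conf import settings

with tempfile.TemporaryDirectory(dir=settings.WORKING_DIRECTORY) as tmp:
    ...  # e.g. import GPG keys or write throw-away files for validation here
```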
| If `.` corresponds to the directory that pulp is running in, it seems like giving the user that's running pulp write access in that directory could potentially be dangerous.
This is coming from [here](https://github.com/pulp/pulpcore/blob/af4dfa1021ea6fbbbd331098d703c18a6f56201b/pulpcore/app/models/content.py#L858), which is the validation that is done as part of `save()`.
One idea is we could port the implementation to use Python tooling to validate the signature. That would avoid the need to write to a filesystem at `'.'`. | 2022-06-03T11:13:20 |
|
pulp/pulpcore | 2,812 | pulp__pulpcore-2812 | [
"2810"
] | 6e44fb2fe609f92dc1f502b19c67abd08879148f | diff --git a/pulpcore/app/serializers/orphans.py b/pulpcore/app/serializers/orphans.py
--- a/pulpcore/app/serializers/orphans.py
+++ b/pulpcore/app/serializers/orphans.py
@@ -2,8 +2,6 @@
from rest_framework import fields, serializers
-from django.conf import settings
-
from pulpcore.app.models import Content
from pulpcore.app.serializers import ValidateFieldsMixin
@@ -24,7 +22,6 @@ class OrphansCleanupSerializer(serializers.Serializer, ValidateFieldsMixin):
),
allow_null=True,
required=False,
- default=settings.ORPHAN_PROTECTION_TIME,
)
def validate_content_hrefs(self, value):
diff --git a/pulpcore/app/viewsets/orphans.py b/pulpcore/app/viewsets/orphans.py
--- a/pulpcore/app/viewsets/orphans.py
+++ b/pulpcore/app/viewsets/orphans.py
@@ -1,4 +1,5 @@
from drf_spectacular.utils import extend_schema
+from django.conf import settings
from rest_framework.viewsets import ViewSet
from pulpcore.app.response import OperationPostponedResponse
@@ -22,7 +23,9 @@ def cleanup(self, request):
serializer.is_valid(raise_exception=True)
content_pks = serializer.validated_data.get("content_hrefs", None)
- orphan_protection_time = serializer.validated_data.get("orphan_protection_time")
+ orphan_protection_time = serializer.validated_data.get(
+ "orphan_protection_time", settings.ORPHAN_PROTECTION_TIME
+ )
task = dispatch(
orphan_cleanup,
| Doc for default orphan protection time seems conflicting
Reading [the rest api docs for orphan cleanup endpoint](https://docs.pulpproject.org/pulpcore/restapi.html#operation/orphans_cleanup_cleanup), it seems there are conflicting messages. It says both that the default for `orphan_protection_time` is 0 and that, if `orphan_protection_time` is not specified, ORPHAN_PROTECTION_TIME will be used.
| 2022-06-08T19:06:57 |
||
pulp/pulpcore | 2,814 | pulp__pulpcore-2814 | [
"2817"
] | f33077b572eff81fb5d9ab7e302a63fd32a13605 | diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -50,7 +50,7 @@ def _export_to_file_system(path, content_artifacts, method=FS_EXPORT_METHODS.WRI
RuntimeError: If Artifacts are not downloaded or when trying to link non-fs files
"""
if content_artifacts.filter(artifact=None).exists():
- RuntimeError(_("Cannot export artifacts that haven't been downloaded."))
+ raise RuntimeError(_("Cannot export artifacts that haven't been downloaded."))
if (
settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem"
@@ -370,7 +370,7 @@ def _do_export(pulp_exporter, tar, the_export):
# an on_demand repo
content_artifacts = ContentArtifact.objects.filter(content__in=version.content)
if content_artifacts.filter(artifact=None).exists():
- RuntimeError(_("Remote artifacts cannot be exported."))
+ raise RuntimeError(_("Remote artifacts cannot be exported."))
if do_incremental:
vers_artifacts = version.artifacts.difference(vers_match[version].artifacts).all()
| An error is not raised when exporting remote artifacts
| 2022-06-09T08:55:38 |
||
pulp/pulpcore | 2,818 | pulp__pulpcore-2818 | [
"2642"
] | f6c15acb9db6c1da5ed9b92cc46c385a3fc4be71 | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -261,52 +261,10 @@
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
-extlinks = {'github': ('https://github.com/pulp/pulpcore/issues/%s', '#'),
- 'redmine': ('https://pulp.plan.io/issues/%s', '#'),
- 'fixedbugs_pulp': ('https://pulp.plan.io/projects/pulp/issues?c%%5B%%5D=tracker&c%%5B%'
- '%5D=status&c%%5B%%5D=priority&c%%5B%%5D=cf_5&c%%5B%%5D=subject&c%%'
- '5B%%5D=author&c%%5B%%5D=assigned_to&c%%5B%%5D=cf_3&f%%5B%%5D=cf_4&'
- 'f%%5B%%5D=tracker_id&f%%5B%%5D=&group_by=&op%%5Bcf_4%%5D=%%3D&op%%'
- '5Btracker_id%%5D=%%3D&set_filter=1&sort=priority%%3Adesc%%2Ccf_5%%'
- '3Adesc%%2Cid%%3Adesc&utf8=%%E2%%9C%%93&v%%5Bcf_4%%5D%%5B%%5D=%s&v%'
- '%5Btracker_id%%5D%%5B%%5D=1', 'bugs fixed in '),
- 'fixedbugs_pulp_rpm': ('https://pulp.plan.io/projects/pulp_rpm/issues?c%%5B%%5D=tracke'
- 'r&c%%5B%%5D=status&c%%5B%%5D=priority&c%%5B%%5D=cf_5&c%%5B%%5D'
- '=subject&c%%5B%%5D=author&c%%5B%%5D=assigned_to&c%%5B%%5D=cf_3'
- '&f%%5B%%5D=cf_4&f%%5B%%5D=tracker_id&f%%5B%%5D=&group_by=&op%%'
- '5Bcf_4%%5D=%%3D&op%%5Btracker_id%%5D=%%3D&set_filter=1&sort=pr'
- 'iority%%3Adesc%%2Ccf_5%%3Adesc%%2Cstatus&utf8=%%E2%%9C%%93&v%%'
- '5Bcf_4%%5D%%5B%%5D=%s&v%%5Btracker_id%%5D%%5B%%5D=1',
- 'bugs fixed in '),
- 'fixedbugs_pulp_puppet': ('https://pulp.plan.io/projects/pulp_puppet/issues?utf8=%%E2%'
- '%9C%%93&set_filter=1&f%%5B%%5D=cf_4&op%%5Bcf_4%%5D=%%3D&v%%'
- '5Bcf_4%%5D%%5B%%5D=%s&f%%5B%%5D=tracker_id&op%%5Btracker_id'
- '%%5D=%%3D&v%%5Btracker_id%%5D%%5B%%5D=1&f%%5B%%5D=&c%%5B%%5'
- 'D=tracker&c%%5B%%5D=status&c%%5B%%5D=priority&c%%5B%%5D=cf_'
- '5&c%%5B%%5D=subject&c%%5B%%5D=author&c%%5B%%5D=assigned_to&'
- 'c%%5B%%5D=cf_3&group_by=', 'bugs fixed in '),
- 'fixedbugs_pulp_python': ('https://pulp.plan.io/projects/pulp_python/issues?c%%5B%%5D='
- 'tracker&c%%5B%%5D=status&c%%5B%%5D=priority&c%%5B%%5D=cf_5&'
- 'c%%5B%%5D=subject&c%%5B%%5D=author&c%%5B%%5D=assigned_to&c%'
- '%5B%%5D=cf_3&f%%5B%%5D=cf_11&f%%5B%%5D=tracker_id&f%%5B%%5D'
- '=&group_by=&op%%5Bcf_11%%5D=%%3D&op%%5Btracker_id%%5D=%%3D&'
- 'set_filter=1&sort=priority%%3Adesc%%2Ccf_5%%3Adesc%%2Cstatu'
- 's&utf8=%%E2%%9C%%93&v%%5Bcf_11%%5D%%5B%%5D=%s&v%%5Btracker_'
- 'id%%5D%%5B%%5D=1', 'bugs fixed in '),
- 'fixedbugs_pulp_docker': ('https://pulp.plan.io/projects/pulp_docker/issues?utf8=%%E2%'
- '%9C%%93&set_filter=1&f%%5B%%5D=cf_12&op%%5Bcf_12%%5D=%%3D&v'
- '%%5Bcf_12%%5D%%5B%%5D=%s&f%%5B%%5D=tracker_id&op%%5Btracker'
- '_id%%5D=%%3D&v%%5Btracker_id%%5D%%5B%%5D=1&f%%5B%%5D=&c%%5B'
- '%%5D=tracker&c%%5B%%5D=status&c%%5B%%5D=priority&c%%5B%%5D='
- 'cf_5&c%%5B%%5D=subject&c%%5B%%5D=author&c%%5B%%5D=assigned_'
- 'to&c%%5B%%5D=cf_3&group_by=', 'bugs fixed in '),
- 'fixedbugs_pulp_ostree': ('https://pulp.plan.io/projects/pulp_ostree/issues?utf8=%%E2%'
- '%9C%%93&set_filter=1&f%%5B%%5D=cf_17&op%%5Bcf_17%%5D=%%3D&v'
- '%%5Bcf_17%%5D%%5B%%5D=%s&f%%5B%%5D=tracker_id&op%%5Btracker'
- '_id%%5D=%%3D&v%%5Btracker_id%%5D%%5B%%5D=1&f%%5B%%5D=&c%%5B'
- '%%5D=tracker&c%%5B%%5D=status&c%%5B%%5D=priority&c%%5B%%5D='
- 'cf_5&c%%5B%%5D=subject&c%%5B%%5D=author&c%%5B%%5D=assigned_'
- 'to&c%%5B%%5D=cf_3&group_by=', 'bugs fixed in '),}
+extlinks = {
+ 'github': ('https://github.com/pulp/pulpcore/issues/%s', '#'),
+ 'redmine': ('https://pulp.plan.io/issues/%s', '#'),
+}
# napoleon uses .. attribute by default, but :ivar: is more succinct and looks better,
# particularly on classes with a lot of attributes, like django models and related objects
| Remove some old redmine aspects from our docs
If you search this query, you'll see some things that need cleaning up
https://docs.pulpproject.org/pulpcore/search.html?q=Redmine&check_keywords=yes&area=default
| 2022-06-09T16:49:39 |
||
pulp/pulpcore | 2,819 | pulp__pulpcore-2819 | [
"2642"
] | 439a3d712a4f0e0da9c7a2e04b36d78a3a534db2 | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -261,52 +261,10 @@
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
-extlinks = {'github': ('https://github.com/pulp/pulpcore/issues/%s', '#'),
- 'redmine': ('https://pulp.plan.io/issues/%s', '#'),
- 'fixedbugs_pulp': ('https://pulp.plan.io/projects/pulp/issues?c%%5B%%5D=tracker&c%%5B%'
- '%5D=status&c%%5B%%5D=priority&c%%5B%%5D=cf_5&c%%5B%%5D=subject&c%%'
- '5B%%5D=author&c%%5B%%5D=assigned_to&c%%5B%%5D=cf_3&f%%5B%%5D=cf_4&'
- 'f%%5B%%5D=tracker_id&f%%5B%%5D=&group_by=&op%%5Bcf_4%%5D=%%3D&op%%'
- '5Btracker_id%%5D=%%3D&set_filter=1&sort=priority%%3Adesc%%2Ccf_5%%'
- '3Adesc%%2Cid%%3Adesc&utf8=%%E2%%9C%%93&v%%5Bcf_4%%5D%%5B%%5D=%s&v%'
- '%5Btracker_id%%5D%%5B%%5D=1', 'bugs fixed in '),
- 'fixedbugs_pulp_rpm': ('https://pulp.plan.io/projects/pulp_rpm/issues?c%%5B%%5D=tracke'
- 'r&c%%5B%%5D=status&c%%5B%%5D=priority&c%%5B%%5D=cf_5&c%%5B%%5D'
- '=subject&c%%5B%%5D=author&c%%5B%%5D=assigned_to&c%%5B%%5D=cf_3'
- '&f%%5B%%5D=cf_4&f%%5B%%5D=tracker_id&f%%5B%%5D=&group_by=&op%%'
- '5Bcf_4%%5D=%%3D&op%%5Btracker_id%%5D=%%3D&set_filter=1&sort=pr'
- 'iority%%3Adesc%%2Ccf_5%%3Adesc%%2Cstatus&utf8=%%E2%%9C%%93&v%%'
- '5Bcf_4%%5D%%5B%%5D=%s&v%%5Btracker_id%%5D%%5B%%5D=1',
- 'bugs fixed in '),
- 'fixedbugs_pulp_puppet': ('https://pulp.plan.io/projects/pulp_puppet/issues?utf8=%%E2%'
- '%9C%%93&set_filter=1&f%%5B%%5D=cf_4&op%%5Bcf_4%%5D=%%3D&v%%'
- '5Bcf_4%%5D%%5B%%5D=%s&f%%5B%%5D=tracker_id&op%%5Btracker_id'
- '%%5D=%%3D&v%%5Btracker_id%%5D%%5B%%5D=1&f%%5B%%5D=&c%%5B%%5'
- 'D=tracker&c%%5B%%5D=status&c%%5B%%5D=priority&c%%5B%%5D=cf_'
- '5&c%%5B%%5D=subject&c%%5B%%5D=author&c%%5B%%5D=assigned_to&'
- 'c%%5B%%5D=cf_3&group_by=', 'bugs fixed in '),
- 'fixedbugs_pulp_python': ('https://pulp.plan.io/projects/pulp_python/issues?c%%5B%%5D='
- 'tracker&c%%5B%%5D=status&c%%5B%%5D=priority&c%%5B%%5D=cf_5&'
- 'c%%5B%%5D=subject&c%%5B%%5D=author&c%%5B%%5D=assigned_to&c%'
- '%5B%%5D=cf_3&f%%5B%%5D=cf_11&f%%5B%%5D=tracker_id&f%%5B%%5D'
- '=&group_by=&op%%5Bcf_11%%5D=%%3D&op%%5Btracker_id%%5D=%%3D&'
- 'set_filter=1&sort=priority%%3Adesc%%2Ccf_5%%3Adesc%%2Cstatu'
- 's&utf8=%%E2%%9C%%93&v%%5Bcf_11%%5D%%5B%%5D=%s&v%%5Btracker_'
- 'id%%5D%%5B%%5D=1', 'bugs fixed in '),
- 'fixedbugs_pulp_docker': ('https://pulp.plan.io/projects/pulp_docker/issues?utf8=%%E2%'
- '%9C%%93&set_filter=1&f%%5B%%5D=cf_12&op%%5Bcf_12%%5D=%%3D&v'
- '%%5Bcf_12%%5D%%5B%%5D=%s&f%%5B%%5D=tracker_id&op%%5Btracker'
- '_id%%5D=%%3D&v%%5Btracker_id%%5D%%5B%%5D=1&f%%5B%%5D=&c%%5B'
- '%%5D=tracker&c%%5B%%5D=status&c%%5B%%5D=priority&c%%5B%%5D='
- 'cf_5&c%%5B%%5D=subject&c%%5B%%5D=author&c%%5B%%5D=assigned_'
- 'to&c%%5B%%5D=cf_3&group_by=', 'bugs fixed in '),
- 'fixedbugs_pulp_ostree': ('https://pulp.plan.io/projects/pulp_ostree/issues?utf8=%%E2%'
- '%9C%%93&set_filter=1&f%%5B%%5D=cf_17&op%%5Bcf_17%%5D=%%3D&v'
- '%%5Bcf_17%%5D%%5B%%5D=%s&f%%5B%%5D=tracker_id&op%%5Btracker'
- '_id%%5D=%%3D&v%%5Btracker_id%%5D%%5B%%5D=1&f%%5B%%5D=&c%%5B'
- '%%5D=tracker&c%%5B%%5D=status&c%%5B%%5D=priority&c%%5B%%5D='
- 'cf_5&c%%5B%%5D=subject&c%%5B%%5D=author&c%%5B%%5D=assigned_'
- 'to&c%%5B%%5D=cf_3&group_by=', 'bugs fixed in '),}
+extlinks = {
+ 'github': ('https://github.com/pulp/pulpcore/issues/%s', '#'),
+ 'redmine': ('https://pulp.plan.io/issues/%s', '#'),
+}
# napoleon uses .. attribute by default, but :ivar: is more succinct and looks better,
# particularly on classes with a lot of attributes, like django models and related objects
| Remove some old redmine aspects from our docs
If you search this query, you'll see some things that need cleaning up
https://docs.pulpproject.org/pulpcore/search.html?q=Redmine&check_keywords=yes&area=default
| 2022-06-09T16:49:39 |
||
pulp/pulpcore | 2,820 | pulp__pulpcore-2820 | [
"2782"
] | 657d97ae9dead89a47a2a2639dfdbffecb92ef1b | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -262,8 +262,8 @@
#texinfo_show_urls = 'footnote'
extlinks = {
- 'github': ('https://github.com/pulp/pulpcore/issues/%s', '#'),
- 'redmine': ('https://pulp.plan.io/issues/%s', '#'),
+ 'github': ('https://github.com/pulp/pulpcore/issues/%s', '#%s'),
+ 'redmine': ('https://pulp.plan.io/issues/%s', '#%s'),
}
# napoleon uses .. attribute by default, but :ivar: is more succinct and looks better,
| Sphinx 5.0 causes docs not to build
Sphinx 5.0.0 was released on May 29th https://pypi.org/project/Sphinx/#history and emits the warning below:
```
extlinks: Sphinx-6.0 will require a caption string to contain exactly one '%s' and all other '%' need to be escaped as '%%'.
```
This is treated as an error due to the Makefile almost all release branches use, e.g. [here's the one for pulpcore:main](https://github.com/pulp/pulpcore/blob/main/docs/Makefile#L6).
## Solutions
We could...
1) have warnings not be treated as errors
2) Fix the extlinks references to get the warnings to stop
3) Pin Sphinx <5.0.0
I am going to try for (2). (1) Seems like a step backwards, and (3) likely won't work because Sphinx gets included from a variety of places as it's installed in the CI currently.
| 2022-06-09T16:57:21 |
||
pulp/pulpcore | 2,821 | pulp__pulpcore-2821 | [
"2782"
] | 0c6a5cde1f6a05c316a5d21f8f14c894330378ce | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -262,8 +262,8 @@
#texinfo_show_urls = 'footnote'
extlinks = {
- 'github': ('https://github.com/pulp/pulpcore/issues/%s', '#'),
- 'redmine': ('https://pulp.plan.io/issues/%s', '#'),
+ 'github': ('https://github.com/pulp/pulpcore/issues/%s', '#%s'),
+ 'redmine': ('https://pulp.plan.io/issues/%s', '#%s'),
}
# napoleon uses .. attribute by default, but :ivar: is more succinct and looks better,
| Sphinx 5.0 causes docs not to build
Sphinx 5.0.0 was released on May 29th https://pypi.org/project/Sphinx/#history and emits the warning below:
```
extlinks: Sphinx-6.0 will require a caption string to contain exactly one '%s' and all other '%' need to be escaped as '%%'.
```
This is treated as an error due to the Makefile almost all release branches use, e.g. [here's the one for pulpcore:main](https://github.com/pulp/pulpcore/blob/main/docs/Makefile#L6).
## Solutions
We could...
1) have warnings not be treated as errors
2) Fix the extlinks references to get the warnings to stop
3) Pin Sphinx <5.0.0
I am going to try for (2). (1) Seems like a step backwards, and (3) likely won't work because Sphinx gets included from a variety of places as it's installed in the CI currently.
| 2022-06-09T16:57:31 |
||
pulp/pulpcore | 2,823 | pulp__pulpcore-2823 | [
"1982"
] | 83defcf69030683f4a86187d52330191ac779fdc | diff --git a/pulpcore/app/apps.py b/pulpcore/app/apps.py
--- a/pulpcore/app/apps.py
+++ b/pulpcore/app/apps.py
@@ -70,6 +70,17 @@ def __init__(self, app_name, app_module):
)
raise ImproperlyConfigured(msg.format(self.label))
+ try:
+ self.python_package_name
+ except AttributeError:
+ msg = (
+ "The plugin `{}` is missing a `python_package_name` declaration. Starting with "
+ "pulpcore==3.20, plugins are required to define the python package name providing "
+ "the Pulp plugin on the PulpPluginAppConfig subclass as the `python_package_name` "
+ "attribute."
+ )
+ raise ImproperlyConfigured(msg.format(self.label))
+
# Module containing viewsets eg. <module 'pulp_plugin.app.viewsets'
# from 'pulp_plugin/app/viewsets.py'>. Set by import_viewsets().
# None if the application doesn't have a viewsets module, automatically set
@@ -192,6 +203,9 @@ class PulpAppConfig(PulpPluginAppConfig):
# The version of this app
version = "3.20.0.dev"
+ # The python package name providing this app
+ python_package_name = "pulpcore"
+
def ready(self):
super().ready()
from . import checks # noqa
diff --git a/pulpcore/app/serializers/status.py b/pulpcore/app/serializers/status.py
--- a/pulpcore/app/serializers/status.py
+++ b/pulpcore/app/serializers/status.py
@@ -14,6 +14,8 @@ class VersionSerializer(serializers.Serializer):
version = serializers.CharField(help_text=_("Version of the component (e.g. 3.0.0)"))
+ package = serializers.CharField(help_text=_("Python package name providing the component"))
+
class DatabaseConnectionSerializer(serializers.Serializer):
"""
diff --git a/pulpcore/app/views/status.py b/pulpcore/app/views/status.py
--- a/pulpcore/app/views/status.py
+++ b/pulpcore/app/views/status.py
@@ -55,7 +55,9 @@ def get(self, request):
"""
versions = []
for app in pulp_plugin_configs():
- versions.append({"component": app.label, "version": app.version})
+ versions.append(
+ {"component": app.label, "version": app.version, "package": app.python_package_name}
+ )
if settings.CACHE_ENABLED:
redis_status = {"connected": self._get_redis_conn_status()}
| diff --git a/pulpcore/tests/functional/api/test_status.py b/pulpcore/tests/functional/api/test_status.py
--- a/pulpcore/tests/functional/api/test_status.py
+++ b/pulpcore/tests/functional/api/test_status.py
@@ -28,7 +28,11 @@
"type": "array",
"items": {
"type": "object",
- "properties": {"component": {"type": "string"}, "version": {"type": "string"}},
+ "properties": {
+ "component": {"type": "string"},
+ "version": {"type": "string"},
+ "package": {"type": "string"},
+ },
},
},
"storage": {
| Pulp users don't understand app labels in the status api/cli
Author: daviddavis (daviddavis)
Redmine Issue: 8434, https://pulp.plan.io/issues/8434
---
It's kind of confusing and perhaps nonobvious what the components in the status api mean. Suppose for example you have a package/plugin called pulp_foo that has app labels bar and baz. Users may not understand where bar and baz come from or what they mean.
I think this may have contributed to this user's confusion: https://github.com/pulp/pulp-cli/issues/184
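An editor's sketch, reusing the hypothetical pulp_foo/bar names from this issue, of where an app label comes from and how the `python_package_name` attribute required by the patch above would be declared:

```python
from pulpcore.plugin import PulpPluginAppConfig


class PulpFooBarAppConfig(PulpPluginAppConfig):
    name = "pulp_foo.app"             # Django app module (hypothetical)
    label = "bar"                     # shown as "component" in the /status/ output
    version = "5.6.0.dev"
    python_package_name = "pulp_foo"  # reported as "package" alongside the component
```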
| From: daviddavis (daviddavis)
Date: 2021-03-22T14:02:52Z
---
One possible solution is to show the package name with the app label:
```
{
"versions": [
{
"component": "core",
"package": "pulpcore",
"version": "3.12.0.dev"
},
{
"component": "bar",
"package": "pulp_foo",
"version": "5.6.0.dev"
},
...
```
From: @bmbouter (bmbouter)
Date: 2021-03-23T14:40:34Z
---
I'm reading that it would be helpful for the status API to list the actual python package providing the Django app name. That makes sense to me. That field could be added whereby the plugin provides that data explicitly.
Is there any concern about a django app being packaged multiple times? I don't think that would happen per se, but if it did, the repackager would have to modify the code during packaging. I think that's ok tho, but I wanted to share it here.
From: daviddavis (daviddavis)
Date: 2021-03-26T13:39:46Z
---
> I'm reading that it would be helpful for the status API to list the actual python package providing the Django app name. That makes sense to me. That field could be added whereby the plugin provides that data explicitly.
That sounds great.
> Is there any concern about a Django app being packaged multiple times? I don't think that would happen per se, but if it did, the repackager would have to modify the code during packaging. I think that's ok, but I wanted to share it here.
Hmm, I can't think of a case where a Django app would be shipped in two different packages. It seems theoretically possible, but perhaps not practically so. Then again, I am not super familiar with Django app stuff.
This issue has been marked 'stale' due to lack of recent activity. If there is no further activity, the issue will be closed in another 30 days. Thank you for your contribution!
I think we should consider requiring plugins to start providing this in 3.20+. I put it onto the 3.20 milestone for discussion. | 2022-06-10T13:49:13 |
pulp/pulpcore | 2,837 | pulp__pulpcore-2837 | [
"2817"
] | 5ffad84b2c39aa32ee72334d1976bc087cb6a5d0 | diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -50,7 +50,7 @@ def _export_to_file_system(path, content_artifacts, method=FS_EXPORT_METHODS.WRI
RuntimeError: If Artifacts are not downloaded or when trying to link non-fs files
"""
if content_artifacts.filter(artifact=None).exists():
- RuntimeError(_("Cannot export artifacts that haven't been downloaded."))
+ raise RuntimeError(_("Cannot export artifacts that haven't been downloaded."))
if (
settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem"
@@ -369,7 +369,7 @@ def _do_export(pulp_exporter, tar, the_export):
# an on_demand repo
content_artifacts = ContentArtifact.objects.filter(content__in=version.content)
if content_artifacts.filter(artifact=None).exists():
- RuntimeError(_("Remote artifacts cannot be exported."))
+ raise RuntimeError(_("Remote artifacts cannot be exported."))
if do_incremental:
vers_artifacts = version.artifacts.difference(vers_match[version].artifacts).all()
| An error is not raised when exporting remote artifacts
| 2022-06-13T19:57:07 |
||
pulp/pulpcore | 2,838 | pulp__pulpcore-2838 | [
"2817"
] | e5c9109c837c0d4fcf471d2eefa2bd4c8b7ca3c8 | diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -296,7 +296,7 @@ def _do_export(pulp_exporter, tar, the_export):
# an on_demand repo
content_artifacts = ContentArtifact.objects.filter(content__in=version.content)
if content_artifacts.filter(artifact=None).exists():
- RuntimeError(_("Remote artifacts cannot be exported."))
+ raise RuntimeError(_("Remote artifacts cannot be exported."))
if do_incremental:
vers_artifacts = version.artifacts.difference(vers_match[version].artifacts).all()
| An error is not raised when exporting remote artifacts
| 2022-06-13T20:04:09 |
||
pulp/pulpcore | 2,840 | pulp__pulpcore-2840 | [
"2817"
] | 9ad89de6916928db73248ec9bda3c717191e1c83 | diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -50,7 +50,7 @@ def _export_to_file_system(path, content_artifacts, method=FS_EXPORT_METHODS.WRI
RuntimeError: If Artifacts are not downloaded or when trying to link non-fs files
"""
if content_artifacts.filter(artifact=None).exists():
- RuntimeError(_("Cannot export artifacts that haven't been downloaded."))
+ raise RuntimeError(_("Cannot export artifacts that haven't been downloaded."))
if (
settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem"
@@ -369,7 +369,7 @@ def _do_export(pulp_exporter, tar, the_export):
# an on_demand repo
content_artifacts = ContentArtifact.objects.filter(content__in=version.content)
if content_artifacts.filter(artifact=None).exists():
- RuntimeError(_("Remote artifacts cannot be exported."))
+ raise RuntimeError(_("Remote artifacts cannot be exported."))
if do_incremental:
vers_artifacts = version.artifacts.difference(vers_match[version].artifacts).all()
| An error is not raised when exporting remote artifacts
| 2022-06-14T14:49:33 |
||
pulp/pulpcore | 2,848 | pulp__pulpcore-2848 | [
"2816"
] | 3e684a7dd5b67e2c01ca51ad101143d99ddfccfe | diff --git a/pulpcore/content/__init__.py b/pulpcore/content/__init__.py
--- a/pulpcore/content/__init__.py
+++ b/pulpcore/content/__init__.py
@@ -12,10 +12,6 @@
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pulpcore.app.settings")
-# Until Django supports async ORM natively this is the best we can do given these parts of Pulp
-# run in coroutines. We try to ensure it is safe by never passing ORM data between co-routines.
-os.environ.setdefault("DJANGO_ALLOW_ASYNC_UNSAFE", "true")
-
django.setup()
from django.conf import settings # noqa: E402: module level not at top of file
diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -648,7 +648,7 @@ async def _stream_content_artifact(self, request, response, content_artifact):
"""
def get_remote_artifacts_blocking():
- return list(content_artifact.remoteartifact_set.order_by_acs())
+ return list(content_artifact.remoteartifact_set.select_related("remote").order_by_acs())
remote_artifacts = await sync_to_async(get_remote_artifacts_blocking)()
for remote_artifact in remote_artifacts:
| on-demand content results in unclosed, idle, database connections
**Version**
* core/main, pulp_rpm/main
* core/3.16, pulp_rpm/3.17
* pulp3-source-centos7
**Describe the bug**
A client asking for content from an "on_demand" remote results in a database connection remaining open for each piece of RemoteArtifact content that was streamed back to the client. This can run a pulp instance out of database connections, if (for example) a downstream Pulp attempts to sync 'immediate' from an on_demand upstream.
The idle connections all show an identical last-query, where they asked for the remote of the not-yet-streamed content. Here's an example:
```
SELECT "core_remote"."pulp_id", "core_remote"."pulp_created", "core_remote"."pulp_last_updated", "core_remote"."pulp_type", "core_remote"."name", "core_remote"."url", "core
_remote"."ca_cert", "core_remote"."client_cert", "core_remote"."client_key", "core_remote"."tls_validation", "core_remote"."username", "core_remote"."password", "core_remote
"."proxy_url", "core_remote"."proxy_username", "core_remote"."proxy_password", "core_remote"."download_concurrency", "core_remote"."max_retries", "core_remote"."policy", "co
re_remote"."total_timeout", "core_remote"."connect_timeout", "core_remote"."sock_connect_timeout", "core_remote"."sock_read_timeout", "core_remote"."headers", "core_remote".
"rate_limit" FROM "core_remote" WHERE "core_remote"."pulp_id" = '949036ee-cb03-4bfd-aaf8-739e2af1a0fc'::uuid LIMIT 21
```
If multiple clients ask for the same content at the same time, you can end up with more idle connections than content units, due to occasional concurrency collisions.
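To illustrate the mechanism (a sketch based on the patch above, not the full handler code): the per-artifact lazy foreign-key access is what issues the `core_remote` SELECT shown above from inside a coroutine, and joining the remote up front avoids it.
```python
# Sketch only; mirrors the handler change in the patch above.
from asgiref.sync import sync_to_async


async def remote_artifacts_for(content_artifact):
    def _blocking():
        # select_related("remote") joins core_remote into the same query, so the
        # streaming coroutine never triggers the lazy remote_artifact.remote lookup
        # (the SELECT shown above) that leaves a connection idling per artifact.
        return list(
            content_artifact.remoteartifact_set.select_related("remote").order_by_acs()
        )

    return await sync_to_async(_blocking)()
```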
**To Reproduce**
The following bash script will result in one connection to the database for each piece of content in the /rpm-signed fixture:
```
#!/usr/bin/bash
pulp rpm remote create --name test --policy on_demand --url https://fixtures.pulpproject.org/rpm-signed/
pulp rpm repository create --name test --remote test --autopublish
pulp rpm repository sync --name test
pulp rpm distribution create --name test --base-path test --repository test
echo "Connections at start:"
psql -U pulp -d pulp --host 127.0.0.1 -c "SELECT count(pid) FROM pg_stat_activity WHERE datname = 'pulp' and query like '%remote%';"
FILES=(\
b/bear-4.1-1.noarch.rpm \
c/camel-0.1-1.noarch.rpm \
c/cat-1.0-1.noarch.rpm \
c/cheetah-1.25.3-5.noarch.rpm \
c/chimpanzee-0.21-1.noarch.rpm \
c/cockateel-3.1-1.noarch.rpm \
c/cow-2.2-3.noarch.rpm \
c/crow-0.8-1.noarch.rpm \
d/dog-4.23-1.noarch.rpm \
d/dolphin-3.10.232-1.noarch.rpm \
d/duck-0.6-1.noarch.rpm \
d/duck-0.7-1.noarch.rpm \
d/duck-0.8-1.noarch.rpm \
e/elephant-8.3-1.noarch.rpm \
f/fox-1.1-2.noarch.rpm \
f/frog-0.1-1.noarch.rpm \
g/giraffe-0.67-2.noarch.rpm \
g/gorilla-0.62-1.noarch.rpm \
h/horse-0.22-2.noarch.rpm \
k/kangaroo-0.2-1.noarch.rpm \
k/kangaroo-0.3-1.noarch.rpm \
l/lion-0.4-1.noarch.rpm \
m/mouse-0.1.12-1.noarch.rpm \
p/penguin-0.9.1-1.noarch.rpm \
p/pike-2.2-1.noarch.rpm \
s/shark-0.1-1.noarch.rpm \
s/squirrel-0.1-1.noarch.rpm \
s/stork-0.12-2.noarch.rpm \
t/tiger-1.0-4.noarch.rpm \
t/trout-0.12-1.noarch.rpm \
w/walrus-0.71-1.noarch.rpm \
w/walrus-5.21-1.noarch.rpm \
w/whale-0.2-1.noarch.rpm \
w/wolf-9.4-2.noarch.rpm \
z/zebra-0.1-2.noarch.rpm \
)
for f in ${FILES[@]}; do
echo "FILE ${f}"
FPATH="test/Packages/${f}"
( http GET :/pulp/content/${FPATH} ) &
done
sleep 10
echo "Connections at end:"
psql -U pulp -d pulp --host 127.0.0.1 -c "SELECT count(pid) FROM pg_stat_activity WHERE datname = 'pulp' and query like '%remote%';"
```
**Notes**
* Restarting pulpcore-content releases the idle connections.
* Issuing the GET calls in the script above sequentially results in fewer stale connections (e.g., 22)
**Expected behavior**
The database connection must be closed as soon as the streamed Artifact has been successfully saved.
**Additional context**
See the discussion starting at https://bugzilla.redhat.com/show_bug.cgi?id=2062526#c19 for the investigation that led us here. The BZ says "deadlock"; this problem interferes with verifying that fix, and will get its own BZ "soon". Some of the discussion on '2526 contains machine and access info, and is alas private. I have made as much of it public as I reasonably can.
| 2022-06-14T20:14:36 |
||
pulp/pulpcore | 2,855 | pulp__pulpcore-2855 | [
"2854"
] | 778a7304e0f5e87dffbdd3d80cc8d57801adcce2 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -95,33 +95,42 @@ def _import_file(fpath, resource_class, retry=False):
log.info(f"Importing file {fpath}.")
with open(fpath, "r") as json_file:
resource = resource_class()
- log.info("...Importing resource {resource.__class__.__name__}.")
+ log.info(f"...Importing resource {resource.__class__.__name__}.")
# Load one batch-sized chunk of the specified import-file at a time. If requested,
# retry a batch if it looks like we collided with some other repo being imported with
# overlapping content.
for batch_str in _impfile_iterator(json_file):
data = Dataset().load(StringIO(batch_str))
if retry:
- # django import-export can have a problem with concurrent-imports that are
- # importing the same 'thing' (e.g., a Package that exists in two different
- # repo-versions that are being imported at the same time). If we're asked to
- # retry, we will try an import that will simply record errors as they happen
- # (rather than failing with an exception) first. If errors happen, we'll do one
- # retry before we give up on this repo-version's import.
- a_result = resource.import_data(data, raise_errors=False)
- if a_result.has_errors():
- total_errors = a_result.totals["error"]
- log.info(
- f"...{total_errors} import-errors encountered importing "
- "{fpath}, attempt {curr_attempt}, retrying"
- )
- # Second attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
- a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(f"FATAL import-failure importing {fpath}")
- raise
+ curr_attempt = 1
+
+ while curr_attempt < MAX_ATTEMPTS:
+ curr_attempt += 1
+ # django import-export can have a problem with concurrent-imports that are
+ # importing the same 'thing' (e.g., a Package that exists in two different
+ # repo-versions that are being imported at the same time). If we're asked to
+ # retry, we will try an import that will simply record errors as they happen
+ # (rather than failing with an exception) first. If errors happen, we'll
+ # retry before we give up on this repo-version's import.
+ a_result = resource.import_data(data, raise_errors=False)
+ if a_result.has_errors():
+ total_errors = a_result.totals["error"]
+ log.info(
+ "...{total_errors} import-errors encountered importing {fpath}, "
+ "attempt {curr_attempt}, retrying".format(
+ total_errors=total_errors,
+ fpath=fpath,
+ curr_attempt=curr_attempt,
+ )
+ )
+
+ # Last attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
else:
a_result = resource.import_data(data, raise_errors=True)
yield a_result
| Fix retry logic for the PulpImport process.
**Version**
main, branches back to 3.14
**Describe the bug**
Conflicting PR merges removed the fix for #2589 from the pulp-import process. Put it back.
**Additional context**
PRs #2590 and #2570 collided in bad ways that somehow didn't break the code. This is Bad.
| 2022-06-16T12:36:06 |
||
pulp/pulpcore | 2,857 | pulp__pulpcore-2857 | [
"2854"
] | 6d463bbc4c9f69467e7a0c08c16cec650737e37a | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -95,33 +95,42 @@ def _import_file(fpath, resource_class, retry=False):
log.info(f"Importing file {fpath}.")
with open(fpath, "r") as json_file:
resource = resource_class()
- log.info("...Importing resource {resource.__class__.__name__}.")
+ log.info(f"...Importing resource {resource.__class__.__name__}.")
# Load one batch-sized chunk of the specified import-file at a time. If requested,
# retry a batch if it looks like we collided with some other repo being imported with
# overlapping content.
for batch_str in _impfile_iterator(json_file):
data = Dataset().load(StringIO(batch_str))
if retry:
- # django import-export can have a problem with concurrent-imports that are
- # importing the same 'thing' (e.g., a Package that exists in two different
- # repo-versions that are being imported at the same time). If we're asked to
- # retry, we will try an import that will simply record errors as they happen
- # (rather than failing with an exception) first. If errors happen, we'll do one
- # retry before we give up on this repo-version's import.
- a_result = resource.import_data(data, raise_errors=False)
- if a_result.has_errors():
- total_errors = a_result.totals["error"]
- log.info(
- f"...{total_errors} import-errors encountered importing "
- "{fpath}, attempt {curr_attempt}, retrying"
- )
- # Second attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
- a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(f"FATAL import-failure importing {fpath}")
- raise
+ curr_attempt = 1
+
+ while curr_attempt < MAX_ATTEMPTS:
+ curr_attempt += 1
+ # django import-export can have a problem with concurrent-imports that are
+ # importing the same 'thing' (e.g., a Package that exists in two different
+ # repo-versions that are being imported at the same time). If we're asked to
+ # retry, we will try an import that will simply record errors as they happen
+ # (rather than failing with an exception) first. If errors happen, we'll
+ # retry before we give up on this repo-version's import.
+ a_result = resource.import_data(data, raise_errors=False)
+ if a_result.has_errors():
+ total_errors = a_result.totals["error"]
+ log.info(
+ "...{total_errors} import-errors encountered importing {fpath}, "
+ "attempt {curr_attempt}, retrying".format(
+ total_errors=total_errors,
+ fpath=fpath,
+ curr_attempt=curr_attempt,
+ )
+ )
+
+ # Last attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
else:
a_result = resource.import_data(data, raise_errors=True)
yield a_result
| Fix retry logic for the PulpImport process.
**Version**
main, branches back to 3.14
**Describe the bug**
Conflicting PR merges removed the fix for #2589 from the pulp-import process. Put it back.
**Additional context**
PRs #2590 and #2570 collided in bad ways that somehow didn't break the code. This is Bad.
| 2022-06-16T14:22:13 |
||
pulp/pulpcore | 2,858 | pulp__pulpcore-2858 | [
"2854"
] | 555e211ef3c1422d5317307bcd644b8a1ca1ab2e | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -95,33 +95,42 @@ def _import_file(fpath, resource_class, retry=False):
log.info(f"Importing file {fpath}.")
with open(fpath, "r") as json_file:
resource = resource_class()
- log.info("...Importing resource {resource.__class__.__name__}.")
+ log.info(f"...Importing resource {resource.__class__.__name__}.")
# Load one batch-sized chunk of the specified import-file at a time. If requested,
# retry a batch if it looks like we collided with some other repo being imported with
# overlapping content.
for batch_str in _impfile_iterator(json_file):
data = Dataset().load(StringIO(batch_str))
if retry:
- # django import-export can have a problem with concurrent-imports that are
- # importing the same 'thing' (e.g., a Package that exists in two different
- # repo-versions that are being imported at the same time). If we're asked to
- # retry, we will try an import that will simply record errors as they happen
- # (rather than failing with an exception) first. If errors happen, we'll do one
- # retry before we give up on this repo-version's import.
- a_result = resource.import_data(data, raise_errors=False)
- if a_result.has_errors():
- total_errors = a_result.totals["error"]
- log.info(
- f"...{total_errors} import-errors encountered importing "
- "{fpath}, attempt {curr_attempt}, retrying"
- )
- # Second attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
- a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(f"FATAL import-failure importing {fpath}")
- raise
+ curr_attempt = 1
+
+ while curr_attempt < MAX_ATTEMPTS:
+ curr_attempt += 1
+ # django import-export can have a problem with concurrent-imports that are
+ # importing the same 'thing' (e.g., a Package that exists in two different
+ # repo-versions that are being imported at the same time). If we're asked to
+ # retry, we will try an import that will simply record errors as they happen
+ # (rather than failing with an exception) first. If errors happen, we'll
+ # retry before we give up on this repo-version's import.
+ a_result = resource.import_data(data, raise_errors=False)
+ if a_result.has_errors():
+ total_errors = a_result.totals["error"]
+ log.info(
+ "...{total_errors} import-errors encountered importing {fpath}, "
+ "attempt {curr_attempt}, retrying".format(
+ total_errors=total_errors,
+ fpath=fpath,
+ curr_attempt=curr_attempt,
+ )
+ )
+
+ # Last attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
else:
a_result = resource.import_data(data, raise_errors=True)
yield a_result
| Fix retry logic for the PulpImport process.
**Version**
main, branches back to 3.14
**Describe the bug**
Conflicting PR merges removed the fix for #2589 from the pulp-import process. Put it back.
**Additional context**
PRs #2590 and #2570 collided in bad ways that somehow didn't break the code. This is Bad.
| 2022-06-16T14:22:28 |
||
pulp/pulpcore | 2,859 | pulp__pulpcore-2859 | [
"2854"
] | ccccebcff63825e7ed18fda16a5ce27ad560c283 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -95,33 +95,42 @@ def _import_file(fpath, resource_class, retry=False):
log.info(f"Importing file {fpath}.")
with open(fpath, "r") as json_file:
resource = resource_class()
- log.info("...Importing resource {resource.__class__.__name__}.")
+ log.info(f"...Importing resource {resource.__class__.__name__}.")
# Load one batch-sized chunk of the specified import-file at a time. If requested,
# retry a batch if it looks like we collided with some other repo being imported with
# overlapping content.
for batch_str in _impfile_iterator(json_file):
data = Dataset().load(StringIO(batch_str))
if retry:
- # django import-export can have a problem with concurrent-imports that are
- # importing the same 'thing' (e.g., a Package that exists in two different
- # repo-versions that are being imported at the same time). If we're asked to
- # retry, we will try an import that will simply record errors as they happen
- # (rather than failing with an exception) first. If errors happen, we'll do one
- # retry before we give up on this repo-version's import.
- a_result = resource.import_data(data, raise_errors=False)
- if a_result.has_errors():
- total_errors = a_result.totals["error"]
- log.info(
- f"...{total_errors} import-errors encountered importing "
- "{fpath}, attempt {curr_attempt}, retrying"
- )
- # Second attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
- a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(f"FATAL import-failure importing {fpath}")
- raise
+ curr_attempt = 1
+
+ while curr_attempt < MAX_ATTEMPTS:
+ curr_attempt += 1
+ # django import-export can have a problem with concurrent-imports that are
+ # importing the same 'thing' (e.g., a Package that exists in two different
+ # repo-versions that are being imported at the same time). If we're asked to
+ # retry, we will try an import that will simply record errors as they happen
+ # (rather than failing with an exception) first. If errors happen, we'll
+ # retry before we give up on this repo-version's import.
+ a_result = resource.import_data(data, raise_errors=False)
+ if a_result.has_errors():
+ total_errors = a_result.totals["error"]
+ log.info(
+ "...{total_errors} import-errors encountered importing {fpath}, "
+ "attempt {curr_attempt}, retrying".format(
+ total_errors=total_errors,
+ fpath=fpath,
+ curr_attempt=curr_attempt,
+ )
+ )
+
+ # Last attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
else:
a_result = resource.import_data(data, raise_errors=True)
yield a_result
| Fix retry logic for the PulpImport process.
**Version**
main, branches back to 3.14
**Describe the bug**
Conflicting PR merges removed the fix for #2589 from the pulp-import process. Put it back.
**Additional context**
PRs #2590 and #2570 collided in bad ways that somehow didn't break the code. This is Bad.
| 2022-06-16T14:22:50 |
||
pulp/pulpcore | 2,860 | pulp__pulpcore-2860 | [
"2854"
] | 049fa4fe93f02ee9a4801dc559a935d5e2081ea8 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -95,33 +95,42 @@ def _import_file(fpath, resource_class, retry=False):
log.info(f"Importing file {fpath}.")
with open(fpath, "r") as json_file:
resource = resource_class()
- log.info("...Importing resource {resource.__class__.__name__}.")
+ log.info(f"...Importing resource {resource.__class__.__name__}.")
# Load one batch-sized chunk of the specified import-file at a time. If requested,
# retry a batch if it looks like we collided with some other repo being imported with
# overlapping content.
for batch_str in _impfile_iterator(json_file):
data = Dataset().load(StringIO(batch_str))
if retry:
- # django import-export can have a problem with concurrent-imports that are
- # importing the same 'thing' (e.g., a Package that exists in two different
- # repo-versions that are being imported at the same time). If we're asked to
- # retry, we will try an import that will simply record errors as they happen
- # (rather than failing with an exception) first. If errors happen, we'll do one
- # retry before we give up on this repo-version's import.
- a_result = resource.import_data(data, raise_errors=False)
- if a_result.has_errors():
- total_errors = a_result.totals["error"]
- log.info(
- f"...{total_errors} import-errors encountered importing "
- "{fpath}, attempt {curr_attempt}, retrying"
- )
- # Second attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
- a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(f"FATAL import-failure importing {fpath}")
- raise
+ curr_attempt = 1
+
+ while curr_attempt < MAX_ATTEMPTS:
+ curr_attempt += 1
+ # django import-export can have a problem with concurrent-imports that are
+ # importing the same 'thing' (e.g., a Package that exists in two different
+ # repo-versions that are being imported at the same time). If we're asked to
+ # retry, we will try an import that will simply record errors as they happen
+ # (rather than failing with an exception) first. If errors happen, we'll
+ # retry before we give up on this repo-version's import.
+ a_result = resource.import_data(data, raise_errors=False)
+ if a_result.has_errors():
+ total_errors = a_result.totals["error"]
+ log.info(
+ "...{total_errors} import-errors encountered importing {fpath}, "
+ "attempt {curr_attempt}, retrying".format(
+ total_errors=total_errors,
+ fpath=fpath,
+ curr_attempt=curr_attempt,
+ )
+ )
+
+ # Last attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
else:
a_result = resource.import_data(data, raise_errors=True)
yield a_result
| Fix retry logic for the PulpImport process.
**Version**
main, branches back to 3.14
**Describe the bug**
Conflicting PR merges removed the fix for #2589 from the pulp-import process. Put it back.
**Additional context**
PRs #2590 and #2570 collided in bad ways that somehow didn't break the code. This is Bad.
| 2022-06-16T14:23:04 |
||
pulp/pulpcore | 2,861 | pulp__pulpcore-2861 | [
"2854"
] | ec32e2d758e7ba099515cc0d08b681c5da693097 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -95,33 +95,42 @@ def _import_file(fpath, resource_class, retry=False):
log.info(f"Importing file {fpath}.")
with open(fpath, "r") as json_file:
resource = resource_class()
- log.info("...Importing resource {resource.__class__.__name__}.")
+ log.info(f"...Importing resource {resource.__class__.__name__}.")
# Load one batch-sized chunk of the specified import-file at a time. If requested,
# retry a batch if it looks like we collided with some other repo being imported with
# overlapping content.
for batch_str in _impfile_iterator(json_file):
data = Dataset().load(StringIO(batch_str))
if retry:
- # django import-export can have a problem with concurrent-imports that are
- # importing the same 'thing' (e.g., a Package that exists in two different
- # repo-versions that are being imported at the same time). If we're asked to
- # retry, we will try an import that will simply record errors as they happen
- # (rather than failing with an exception) first. If errors happen, we'll do one
- # retry before we give up on this repo-version's import.
- a_result = resource.import_data(data, raise_errors=False)
- if a_result.has_errors():
- total_errors = a_result.totals["error"]
- log.info(
- f"...{total_errors} import-errors encountered importing "
- "{fpath}, attempt {curr_attempt}, retrying"
- )
- # Second attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
- a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(f"FATAL import-failure importing {fpath}")
- raise
+ curr_attempt = 1
+
+ while curr_attempt < MAX_ATTEMPTS:
+ curr_attempt += 1
+ # django import-export can have a problem with concurrent-imports that are
+ # importing the same 'thing' (e.g., a Package that exists in two different
+ # repo-versions that are being imported at the same time). If we're asked to
+ # retry, we will try an import that will simply record errors as they happen
+ # (rather than failing with an exception) first. If errors happen, we'll
+ # retry before we give up on this repo-version's import.
+ a_result = resource.import_data(data, raise_errors=False)
+ if a_result.has_errors():
+ total_errors = a_result.totals["error"]
+ log.info(
+ "...{total_errors} import-errors encountered importing {fpath}, "
+ "attempt {curr_attempt}, retrying".format(
+ total_errors=total_errors,
+ fpath=fpath,
+ curr_attempt=curr_attempt,
+ )
+ )
+
+ # Last attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
else:
a_result = resource.import_data(data, raise_errors=True)
yield a_result
| Fix retry logic for the PulpImport process.
**Version**
main, branches back to 3.14
**Describe the bug**
Conflicting PR merges removed the fix for #2589 from the pulp-import process. Put it back.
**Additional context**
PRs #2590 and #2570 collided in bad ways that somehow didn't break the code. This is Bad.
| 2022-06-16T14:23:18 |
||
pulp/pulpcore | 2,862 | pulp__pulpcore-2862 | [
"2854"
] | 1c0804cdf543a0d955c53642d524403ad6d5c76a | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -95,33 +95,42 @@ def _import_file(fpath, resource_class, retry=False):
log.info(f"Importing file {fpath}.")
with open(fpath, "r") as json_file:
resource = resource_class()
- log.info("...Importing resource {resource.__class__.__name__}.")
+ log.info(f"...Importing resource {resource.__class__.__name__}.")
# Load one batch-sized chunk of the specified import-file at a time. If requested,
# retry a batch if it looks like we collided with some other repo being imported with
# overlapping content.
for batch_str in _impfile_iterator(json_file):
data = Dataset().load(StringIO(batch_str))
if retry:
- # django import-export can have a problem with concurrent-imports that are
- # importing the same 'thing' (e.g., a Package that exists in two different
- # repo-versions that are being imported at the same time). If we're asked to
- # retry, we will try an import that will simply record errors as they happen
- # (rather than failing with an exception) first. If errors happen, we'll do one
- # retry before we give up on this repo-version's import.
- a_result = resource.import_data(data, raise_errors=False)
- if a_result.has_errors():
- total_errors = a_result.totals["error"]
- log.info(
- f"...{total_errors} import-errors encountered importing "
- "{fpath}, attempt {curr_attempt}, retrying"
- )
- # Second attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
- a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(f"FATAL import-failure importing {fpath}")
- raise
+ curr_attempt = 1
+
+ while curr_attempt < MAX_ATTEMPTS:
+ curr_attempt += 1
+ # django import-export can have a problem with concurrent-imports that are
+ # importing the same 'thing' (e.g., a Package that exists in two different
+ # repo-versions that are being imported at the same time). If we're asked to
+ # retry, we will try an import that will simply record errors as they happen
+ # (rather than failing with an exception) first. If errors happen, we'll
+ # retry before we give up on this repo-version's import.
+ a_result = resource.import_data(data, raise_errors=False)
+ if a_result.has_errors():
+ total_errors = a_result.totals["error"]
+ log.info(
+ "...{total_errors} import-errors encountered importing {fpath}, "
+ "attempt {curr_attempt}, retrying".format(
+ total_errors=total_errors,
+ fpath=fpath,
+ curr_attempt=curr_attempt,
+ )
+ )
+
+ # Last attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
else:
a_result = resource.import_data(data, raise_errors=True)
yield a_result
| Fix retry logic for the PulpImport process.
**Version**
main, branches back to 3.14
**Describe the bug**
Conflicting PR merges removed the fix for #2589 from the pulp-import process. Put it back.
**Additional context**
PRs #2590 and #2570 collided in bad ways that somehow didn't break the code. This is Bad.
| 2022-06-16T14:23:33 |
||
pulp/pulpcore | 2,925 | pulp__pulpcore-2925 | [
"2924"
] | 7a1207cfaee6904cc4e528b16456bde8df9c9ac5 | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -138,6 +138,8 @@ def get_base_paths_blocking():
)
return base_paths
+ if request.method.lower() == "head":
+ return HTTPOk(headers={"Content-Type": "text/html"})
base_paths = await sync_to_async(get_base_paths_blocking)()
directory_list = ["{}/".format(path) for path in base_paths]
return HTTPOk(headers={"Content-Type": "text/html"}, body=self.render_html(directory_list))
| Content app is making queries on HEAD requests
```
(pulp) [vagrant@pulp3-source-centos8-stream-fips ~]$ nc 127.0.0.1 4444 [8/8]
> /home/vagrant/devel/pulpcore/pulpcore/content/handler.py(144)list_distributions()
-> return HTTPOk(headers={"Content-Type": "text/html"}, body=self.render_html(directory_list))
(Pdb) directory_list
['58016a04-8204-4ced-8f87-5318716e525d/']
(Pdb) base_paths
['58016a04-8204-4ced-8f87-5318716e525d']
(Pdb) request.method
'HEAD'
(Pdb) c
```
| 2022-07-04T12:06:59 |
||
pulp/pulpcore | 2,937 | pulp__pulpcore-2937 | [
"2924"
] | 21bf693d7919ac4a81f79bd9dc062b73c613b238 | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -132,6 +132,8 @@ def get_base_paths_blocking():
)
return base_paths
+ if request.method.lower() == "head":
+ return HTTPOk(headers={"Content-Type": "text/html"})
base_paths = await sync_to_async(get_base_paths_blocking)()
directory_list = ["{}/".format(path) for path in base_paths]
return HTTPOk(headers={"Content-Type": "text/html"}, body=self.render_html(directory_list))
| Content app is making queries on HEAD requests
```
(pulp) [vagrant@pulp3-source-centos8-stream-fips ~]$ nc 127.0.0.1 4444 [8/8]
> /home/vagrant/devel/pulpcore/pulpcore/content/handler.py(144)list_distributions()
-> return HTTPOk(headers={"Content-Type": "text/html"}, body=self.render_html(directory_list))
(Pdb) directory_list
['58016a04-8204-4ced-8f87-5318716e525d/']
(Pdb) base_paths
['58016a04-8204-4ced-8f87-5318716e525d']
(Pdb) request.method
'HEAD'
(Pdb) c
```
| 2022-07-06T13:22:01 |
||
pulp/pulpcore | 2,938 | pulp__pulpcore-2938 | [
"2924"
] | 182c77cbff66791ece3f682d33a0459fe0630454 | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -131,6 +131,8 @@ def get_base_paths_blocking():
)
return base_paths
+ if request.method.lower() == "head":
+ return HTTPOk(headers={"Content-Type": "text/html"})
base_paths = await sync_to_async(get_base_paths_blocking)()
directory_list = ["{}/".format(path) for path in base_paths]
return HTTPOk(headers={"Content-Type": "text/html"}, body=self.render_html(directory_list))
| Content app is making queries on HEAD requests
```
(pulp) [vagrant@pulp3-source-centos8-stream-fips ~]$ nc 127.0.0.1 4444 [8/8]
> /home/vagrant/devel/pulpcore/pulpcore/content/handler.py(144)list_distributions()
-> return HTTPOk(headers={"Content-Type": "text/html"}, body=self.render_html(directory_list))
(Pdb) directory_list
['58016a04-8204-4ced-8f87-5318716e525d/']
(Pdb) base_paths
['58016a04-8204-4ced-8f87-5318716e525d']
(Pdb) request.method
'HEAD'
(Pdb) c
```
| 2022-07-06T13:22:39 |
||
pulp/pulpcore | 2,939 | pulp__pulpcore-2939 | [
"2924"
] | 7c8866aa2407abb8754f7ce8bc63647457be63bc | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -132,6 +132,8 @@ def get_base_paths_blocking():
)
return base_paths
+ if request.method.lower() == "head":
+ return HTTPOk(headers={"Content-Type": "text/html"})
base_paths = await sync_to_async(get_base_paths_blocking)()
directory_list = ["{}/".format(path) for path in base_paths]
return HTTPOk(headers={"Content-Type": "text/html"}, body=self.render_html(directory_list))
| Content app is making queries on HEAD requests
```
(pulp) [vagrant@pulp3-source-centos8-stream-fips ~]$ nc 127.0.0.1 4444 [8/8]
> /home/vagrant/devel/pulpcore/pulpcore/content/handler.py(144)list_distributions()
-> return HTTPOk(headers={"Content-Type": "text/html"}, body=self.render_html(directory_list))
(Pdb) directory_list
['58016a04-8204-4ced-8f87-5318716e525d/']
(Pdb) base_paths
['58016a04-8204-4ced-8f87-5318716e525d']
(Pdb) request.method
'HEAD'
(Pdb) c
```
| 2022-07-06T13:22:58 |
||
pulp/pulpcore | 2,940 | pulp__pulpcore-2940 | [
"2924"
] | 6a4efbf67165bb746952ca04eb439779e0c9ee76 | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -132,6 +132,8 @@ def get_base_paths_blocking():
)
return base_paths
+ if request.method.lower() == "head":
+ return HTTPOk(headers={"Content-Type": "text/html"})
base_paths = await sync_to_async(get_base_paths_blocking)()
directory_list = ["{}/".format(path) for path in base_paths]
return HTTPOk(headers={"Content-Type": "text/html"}, body=self.render_html(directory_list))
| Content app is making queries on HEAD requests
```
(pulp) [vagrant@pulp3-source-centos8-stream-fips ~]$ nc 127.0.0.1 4444 [8/8]
> /home/vagrant/devel/pulpcore/pulpcore/content/handler.py(144)list_distributions()
-> return HTTPOk(headers={"Content-Type": "text/html"}, body=self.render_html(directory_list))
(Pdb) directory_list
['58016a04-8204-4ced-8f87-5318716e525d/']
(Pdb) base_paths
['58016a04-8204-4ced-8f87-5318716e525d']
(Pdb) request.method
'HEAD'
(Pdb) c
```
| 2022-07-06T13:23:21 |
||
pulp/pulpcore | 2,941 | pulp__pulpcore-2941 | [
"2924"
] | 6d76155fe039bc91cc6b11459f14bf823b7c962e | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -138,6 +138,8 @@ def get_base_paths_blocking():
)
return base_paths
+ if request.method.lower() == "head":
+ return HTTPOk(headers={"Content-Type": "text/html"})
base_paths = await sync_to_async(get_base_paths_blocking)()
directory_list = ["{}/".format(path) for path in base_paths]
return HTTPOk(headers={"Content-Type": "text/html"}, body=self.render_html(directory_list))
| Content app is making queries on HEAD requests
```
(pulp) [vagrant@pulp3-source-centos8-stream-fips ~]$ nc 127.0.0.1 4444 [8/8]
> /home/vagrant/devel/pulpcore/pulpcore/content/handler.py(144)list_distributions()
-> return HTTPOk(headers={"Content-Type": "text/html"}, body=self.render_html(directory_list))
(Pdb) directory_list
['58016a04-8204-4ced-8f87-5318716e525d/']
(Pdb) base_paths
['58016a04-8204-4ced-8f87-5318716e525d']
(Pdb) request.method
'HEAD'
(Pdb) c
```
| 2022-07-06T13:23:56 |
||
pulp/pulpcore | 2,951 | pulp__pulpcore-2951 | [
"2933"
] | 1e3c570df7ee0de41443c7dbf0a4fd8bffb12cfe | diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -37,31 +37,38 @@
log = logging.getLogger(__name__)
-def _export_to_file_system(path, content_artifacts, method=FS_EXPORT_METHODS.WRITE):
+def _validate_fs_export(content_artifacts):
"""
- Export a set of ContentArtifacts to the filesystem.
-
Args:
- path (str): A path to export the ContentArtifacts to
content_artifacts (django.db.models.QuerySet): Set of ContentArtifacts to export
Raises:
- ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
RuntimeError: If Artifacts are not downloaded or when trying to link non-fs files
"""
if content_artifacts.filter(artifact=None).exists():
raise RuntimeError(_("Cannot export artifacts that haven't been downloaded."))
+
+def _export_to_file_system(path, relative_paths_to_artifacts, method=FS_EXPORT_METHODS.WRITE):
+ """
+ Export a set of artifacts to the filesystem.
+
+ Args:
+ path (str): A path to export the ContentArtifacts to
+ relative_paths_to_artifacts: A dict with {relative_path: artifact} mapping
+
+ Raises:
+ ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
+ """
+
if (
settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem"
and method != FS_EXPORT_METHODS.WRITE
):
raise RuntimeError(_("Only write is supported for non-filesystem storage."))
- for ca in content_artifacts.select_related("artifact").iterator():
- artifact = ca.artifact
- dest = os.path.join(path, ca.relative_path)
-
+ for relative_path, artifact in relative_paths_to_artifacts.items():
+ dest = os.path.join(path, relative_path)
os.makedirs(os.path.split(dest)[0], exist_ok=True)
if method == FS_EXPORT_METHODS.SYMLINK:
@@ -74,7 +81,6 @@ def _export_to_file_system(path, content_artifacts, method=FS_EXPORT_METHODS.WRI
with open(dest, "wb") as f, artifact.file as af:
for chunk in af.chunks(1024 * 1024):
f.write(chunk)
-
else:
raise RuntimeError(_("Unsupported export method '{}'.").format(method))
@@ -112,7 +118,21 @@ def fs_publication_export(exporter_pk, publication_pk):
content__in=publication.repository_version.content
)
- _export_to_file_system(exporter.path, content_artifacts)
+ _validate_fs_export(content_artifacts)
+
+ relative_path_to_artifacts = {}
+ if publication.pass_through:
+ relative_path_to_artifacts = {
+ ca.relative_path: ca.artifact
+ for ca in content_artifacts.select_related("artifact").iterator()
+ }
+
+ for pa in publication.published_artifact.select_related(
+ "content_artifact", "content_artifact__artifact"
+ ).iterator():
+ relative_path_to_artifacts[pa.relative_path] = pa.content_artifact.artifact
+
+ _export_to_file_system(exporter.path, relative_path_to_artifacts, exporter.method)
def fs_repo_version_export(exporter_pk, repo_version_pk):
@@ -141,8 +161,14 @@ def fs_repo_version_export(exporter_pk, repo_version_pk):
)
content_artifacts = ContentArtifact.objects.filter(content__in=repo_version.content)
+ _validate_fs_export(content_artifacts)
+
+ relative_path_to_artifacts = {
+ ca.relative_path: ca.artifact
+ for ca in content_artifacts.select_related("artifact").iterator()
+ }
- _export_to_file_system(exporter.path, content_artifacts)
+ _export_to_file_system(exporter.path, relative_path_to_artifacts, exporter.method)
def _get_versions_to_export(the_exporter, the_export):
| File System Exporter needs to use the correct relative path.
**Version**
pulpcore 3.16 and above.
**Describe the bug**
Note: This is not exactly a bug, since this functionality is listed as "Tech Preview", but Katello and customers now need it to work. Moreover, this is a feature that's already there in the code, so marking it as a bug seems appropriate to me.
The file system exporter is not exporting publications correctly.
**To Reproduce**
Steps to reproduce the behavior:
- Sync a RH repo that has packages with the following structure (example repo here is `openstack-tools`)
```bash
$ ls content/dist/rhel/server/7/7.5/x86_64/openstack-tools/10/os/
Packages repodata
$ ls content/dist/rhel/server/7/7.5/x86_64/openstack-tools/10/os/Packages/
b listing o p
$ ls content/dist/rhel/server/7/7.5/x86_64/openstack-tools/10/os/Packages/b/
babel-2.3.4-1.el7ost.noarch.rpm listing
```
- Create a publication.
- Use the file system exporter to export this somewhere like `/var/lib/pulp/exports/foo` and give the publication href.
- Check the exported result
```
$ ls -l /var/lib/pulp/exports/foo
.....
babel-2.3.4-1.el7ost.noarch.rpm
.....
repodata
....
```
- The Packages directory is not retained, while the primary.xml in the repodata still points to `Packages/b/babel-2.3.4-1.el7ost.noarch.rpm`.
**Expected behavior**
The Packages directory is correctly maintained when exporting.
**Additional context**
The relevant code is at https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/export.py#L63 and https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/export.py#L82.
The actual paths are correct in core_publishedartifact, so we just need to make sure the relative_paths are correct in the case of a publication, as sketched below.
```sql
# select relative_path from core_publishedartifact;
.....
Packages/b/babel-2.3.4-1.el7ost.noarch.rpm
....
```
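For illustration, a sketch of what "correct in the case of a publication" means in practice (this mirrors the patch above and is not a drop-in function):
```python
# Sketch only: when exporting a publication, key the exported files on
# PublishedArtifact.relative_path (which keeps Packages/b/... intact) rather than
# the ContentArtifact relative_path used for plain repository-version exports.
relative_path_to_artifacts = {
    pa.relative_path: pa.content_artifact.artifact
    for pa in publication.published_artifact.select_related(
        "content_artifact", "content_artifact__artifact"
    ).iterator()
}
_export_to_file_system(exporter.path, relative_path_to_artifacts, exporter.method)
```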
| 2022-07-07T22:37:36 |
||
pulp/pulpcore | 2,965 | pulp__pulpcore-2965 | [
"2964"
] | 6287ef36d2fc20ca316e1e889296fc55072ff477 | diff --git a/pulpcore/download/factory.py b/pulpcore/download/factory.py
--- a/pulpcore/download/factory.py
+++ b/pulpcore/download/factory.py
@@ -142,7 +142,9 @@ def _make_aiohttp_session_from_remote(self):
timeout = aiohttp.ClientTimeout(
total=total, sock_connect=sock_connect, sock_read=sock_read, connect=connect
)
- return aiohttp.ClientSession(connector=conn, timeout=timeout, headers=headers)
+ return aiohttp.ClientSession(
+ connector=conn, timeout=timeout, headers=headers, requote_redirect_url=False
+ )
def build(self, url, **kwargs):
"""
diff --git a/pulpcore/download/http.py b/pulpcore/download/http.py
--- a/pulpcore/download/http.py
+++ b/pulpcore/download/http.py
@@ -166,7 +166,9 @@ def __init__(
else:
timeout = aiohttp.ClientTimeout(total=None, sock_connect=600, sock_read=600)
conn = aiohttp.TCPConnector({"force_close": True})
- self.session = aiohttp.ClientSession(connector=conn, timeout=timeout, headers=headers)
+ self.session = aiohttp.ClientSession(
+ connector=conn, timeout=timeout, headers=headers, requote_redirect_url=False
+ )
self._close_session_on_finalize = True
self.auth = auth
self.proxy = proxy
| Aiohttp rewrites redirect URLs, breaking some requests to Amazon Web servers, unless you tell it not to. Let's do that.
**Version**
All
**Describe the bug**
Unless you tell it otherwise, aiohttp can potentially alter the redirect URL it receives from a remote webserver. This is legal, as is the re-quoted encoding of the new URL, but some web servers are temperamental and expect the URL to be encoded in a particular way.
We ought to turn this off by default.
https://docs.aiohttp.org/en/stable/client_reference.html#aiohttp.ClientSession.requote_redirect_url
**To Reproduce**
```
In [1]: import aiohttp
...: import asyncio
...:
...: async def main():
...:
...: async with aiohttp.ClientSession() as session:
...: async with session.get('https://releases.jfrog.io/artifactory/artifactory-pro-rpms/repodata/8c87521e43dbed223c90
...: e23922c0c4bbe7159112-primary.xml.gz') as response:
...:
...: print("Status:", response.status)
...: print("Content-type:", response.headers['content-type'])
...:
...: html = await response.text()
...: print("Body:", html)
...:
...: loop = asyncio.get_event_loop()
...: loop.run_until_complete(main())
<ipython-input-1-8662e4a98f2f>:15: DeprecationWarning: There is no current event loop
loop = asyncio.get_event_loop()
Status: 403
Content-type: text/xml
Body: <?xml version="1.0" encoding="UTF-8"?><Error><Code>AccessDenied</Code><Message>Access denied</Message></Error>
In [2]: import aiohttp
...: import asyncio
...:
...: async def main():
...:
...: async with aiohttp.ClientSession(requote_redirect_url=False) as session:
...: async with session.get('https://releases.jfrog.io/artifactory/artifactory-pro-rpms/repodata/8c87521e43dbed223c90
...: e23922c0c4bbe7159112-primary.xml.gz') as response:
...:
...: print("Status:", response.status)
...: print("Content-type:", response.headers['content-type'])
...:
...: html = await response.text()
...: print("Body:", html)
...:
...: loop = asyncio.get_event_loop()
...: loop.run_until_complete(main())
<ipython-input-2-32fd9ff02673>:15: DeprecationWarning: There is no current event loop
loop = asyncio.get_event_loop()
Status: 200
Content-type: application/x-gzip
```
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2104498
| 2022-07-13T05:44:04 |
||
pulp/pulpcore | 2,968 | pulp__pulpcore-2968 | [
"2964"
] | 03366247d888f4feb51b22c941dedce3c672bcbe | diff --git a/pulpcore/download/factory.py b/pulpcore/download/factory.py
--- a/pulpcore/download/factory.py
+++ b/pulpcore/download/factory.py
@@ -142,7 +142,9 @@ def _make_aiohttp_session_from_remote(self):
timeout = aiohttp.ClientTimeout(
total=total, sock_connect=sock_connect, sock_read=sock_read, connect=connect
)
- return aiohttp.ClientSession(connector=conn, timeout=timeout, headers=headers)
+ return aiohttp.ClientSession(
+ connector=conn, timeout=timeout, headers=headers, requote_redirect_url=False
+ )
def build(self, url, **kwargs):
"""
diff --git a/pulpcore/download/http.py b/pulpcore/download/http.py
--- a/pulpcore/download/http.py
+++ b/pulpcore/download/http.py
@@ -177,7 +177,9 @@ def __init__(
else:
timeout = aiohttp.ClientTimeout(total=None, sock_connect=600, sock_read=600)
conn = aiohttp.TCPConnector({"force_close": True})
- self.session = aiohttp.ClientSession(connector=conn, timeout=timeout, headers=headers)
+ self.session = aiohttp.ClientSession(
+ connector=conn, timeout=timeout, headers=headers, requote_redirect_url=False
+ )
self._close_session_on_finalize = True
self.auth = auth
self.proxy = proxy
| Aiohttp rewrites redirect URLs, breaking some requests to Amazon Web servers, unless you tell it not to. Let's do that.
**Version**
All
**Describe the bug**
Unless you tell it otherwise, aiohttp can potentially alter the redirect URL it receives from a remote webserver. This is legal, as is the re-quoted encoding of the new URL, but some web servers are temperamental and expect the URL to be encoded in a particular way.
We ought to turn this off by default.
https://docs.aiohttp.org/en/stable/client_reference.html#aiohttp.ClientSession.requote_redirect_url
**To Reproduce**
```
In [1]: import aiohttp
...: import asyncio
...:
...: async def main():
...:
...: async with aiohttp.ClientSession() as session:
...: async with session.get('https://releases.jfrog.io/artifactory/artifactory-pro-rpms/repodata/8c87521e43dbed223c90
...: e23922c0c4bbe7159112-primary.xml.gz') as response:
...:
...: print("Status:", response.status)
...: print("Content-type:", response.headers['content-type'])
...:
...: html = await response.text()
...: print("Body:", html)
...:
...: loop = asyncio.get_event_loop()
...: loop.run_until_complete(main())
<ipython-input-1-8662e4a98f2f>:15: DeprecationWarning: There is no current event loop
loop = asyncio.get_event_loop()
Status: 403
Content-type: text/xml
Body: <?xml version="1.0" encoding="UTF-8"?><Error><Code>AccessDenied</Code><Message>Access denied</Message></Error>
In [2]: import aiohttp
...: import asyncio
...:
...: async def main():
...:
...: async with aiohttp.ClientSession(requote_redirect_url=False) as session:
...: async with session.get('https://releases.jfrog.io/artifactory/artifactory-pro-rpms/repodata/8c87521e43dbed223c90
...: e23922c0c4bbe7159112-primary.xml.gz') as response:
...:
...: print("Status:", response.status)
...: print("Content-type:", response.headers['content-type'])
...:
...: html = await response.text()
...: print("Body:", html)
...:
...: loop = asyncio.get_event_loop()
...: loop.run_until_complete(main())
<ipython-input-2-32fd9ff02673>:15: DeprecationWarning: There is no current event loop
loop = asyncio.get_event_loop()
Status: 200
Content-type: application/x-gzip
```
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2104498
| 2022-07-14T09:29:15 |
||
pulp/pulpcore | 2,969 | pulp__pulpcore-2969 | [
"2964"
] | 2d6cd6d5a81570c73c7496607e421b60f22c6000 | diff --git a/pulpcore/download/factory.py b/pulpcore/download/factory.py
--- a/pulpcore/download/factory.py
+++ b/pulpcore/download/factory.py
@@ -142,7 +142,9 @@ def _make_aiohttp_session_from_remote(self):
timeout = aiohttp.ClientTimeout(
total=total, sock_connect=sock_connect, sock_read=sock_read, connect=connect
)
- return aiohttp.ClientSession(connector=conn, timeout=timeout, headers=headers)
+ return aiohttp.ClientSession(
+ connector=conn, timeout=timeout, headers=headers, requote_redirect_url=False
+ )
def build(self, url, **kwargs):
"""
diff --git a/pulpcore/download/http.py b/pulpcore/download/http.py
--- a/pulpcore/download/http.py
+++ b/pulpcore/download/http.py
@@ -166,7 +166,9 @@ def __init__(
else:
timeout = aiohttp.ClientTimeout(total=None, sock_connect=600, sock_read=600)
conn = aiohttp.TCPConnector({"force_close": True})
- self.session = aiohttp.ClientSession(connector=conn, timeout=timeout, headers=headers)
+ self.session = aiohttp.ClientSession(
+ connector=conn, timeout=timeout, headers=headers, requote_redirect_url=False
+ )
self._close_session_on_finalize = True
self.auth = auth
self.proxy = proxy
| Aiohttp rewrites redirect URLs, breaking some requests to Amazon Web servers, unless you tell it not to. Let's do that.
**Version**
All
**Describe the bug**
Unless you tell it otherwise, aiohttp can potentially alter the redirect URL it receives from a remote webserver. This is legal, as is the re-quoted encoding of the new URL, but some web servers are temperamental and expect the URL to be encoded in a particular way.
We ought to turn this off by default.
https://docs.aiohttp.org/en/stable/client_reference.html#aiohttp.ClientSession.requote_redirect_url
**To Reproduce**
```
In [1]: import aiohttp
...: import asyncio
...:
...: async def main():
...:
...: async with aiohttp.ClientSession() as session:
...: async with session.get('https://releases.jfrog.io/artifactory/artifactory-pro-rpms/repodata/8c87521e43dbed223c90
...: e23922c0c4bbe7159112-primary.xml.gz') as response:
...:
...: print("Status:", response.status)
...: print("Content-type:", response.headers['content-type'])
...:
...: html = await response.text()
...: print("Body:", html)
...:
...: loop = asyncio.get_event_loop()
...: loop.run_until_complete(main())
<ipython-input-1-8662e4a98f2f>:15: DeprecationWarning: There is no current event loop
loop = asyncio.get_event_loop()
Status: 403
Content-type: text/xml
Body: <?xml version="1.0" encoding="UTF-8"?><Error><Code>AccessDenied</Code><Message>Access denied</Message></Error>
In [2]: import aiohttp
...: import asyncio
...:
...: async def main():
...:
...: async with aiohttp.ClientSession(requote_redirect_url=False) as session:
...: async with session.get('https://releases.jfrog.io/artifactory/artifactory-pro-rpms/repodata/8c87521e43dbed223c90
...: e23922c0c4bbe7159112-primary.xml.gz') as response:
...:
...: print("Status:", response.status)
...: print("Content-type:", response.headers['content-type'])
...:
...: html = await response.text()
...: print("Body:", html)
...:
...: loop = asyncio.get_event_loop()
...: loop.run_until_complete(main())
<ipython-input-2-32fd9ff02673>:15: DeprecationWarning: There is no current event loop
loop = asyncio.get_event_loop()
Status: 200
Content-type: application/x-gzip
```
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2104498
| 2022-07-14T09:29:27 |
||
pulp/pulpcore | 2,971 | pulp__pulpcore-2971 | [
"2964"
] | bd06e922544c08b9031a892ab333f7b45a5dbdd7 | diff --git a/pulpcore/download/factory.py b/pulpcore/download/factory.py
--- a/pulpcore/download/factory.py
+++ b/pulpcore/download/factory.py
@@ -142,7 +142,9 @@ def _make_aiohttp_session_from_remote(self):
timeout = aiohttp.ClientTimeout(
total=total, sock_connect=sock_connect, sock_read=sock_read, connect=connect
)
- return aiohttp.ClientSession(connector=conn, timeout=timeout, headers=headers)
+ return aiohttp.ClientSession(
+ connector=conn, timeout=timeout, headers=headers, requote_redirect_url=False
+ )
def build(self, url, **kwargs):
"""
diff --git a/pulpcore/download/http.py b/pulpcore/download/http.py
--- a/pulpcore/download/http.py
+++ b/pulpcore/download/http.py
@@ -166,7 +166,9 @@ def __init__(
else:
timeout = aiohttp.ClientTimeout(total=None, sock_connect=600, sock_read=600)
conn = aiohttp.TCPConnector({"force_close": True})
- self.session = aiohttp.ClientSession(connector=conn, timeout=timeout, headers=headers)
+ self.session = aiohttp.ClientSession(
+ connector=conn, timeout=timeout, headers=headers, requote_redirect_url=False
+ )
self._close_session_on_finalize = True
self.auth = auth
self.proxy = proxy
| Aiohttp rewrites redirect URLs, breaking some requests to Amazon Web servers, unless you tell it not to. Let's do that.
**Version**
All
**Describe the bug**
Unless you tell it otherwise, aiohttp can potentially alter the redirect URL it receives from a remote webserver. This is legal, as is the encoding of the new URL, but some web servers are temperamental and expect URLs to be encoded in a particular way.
We ought to turn this off by default.
https://docs.aiohttp.org/en/stable/client_reference.html#aiohttp.ClientSession.requote_redirect_url
**To Reproduce**
```
In [1]: import aiohttp
...: import asyncio
...:
...: async def main():
...:
...: async with aiohttp.ClientSession() as session:
...: async with session.get('https://releases.jfrog.io/artifactory/artifactory-pro-rpms/repodata/8c87521e43dbed223c90
...: e23922c0c4bbe7159112-primary.xml.gz') as response:
...:
...: print("Status:", response.status)
...: print("Content-type:", response.headers['content-type'])
...:
...: html = await response.text()
...: print("Body:", html)
...:
...: loop = asyncio.get_event_loop()
...: loop.run_until_complete(main())
<ipython-input-1-8662e4a98f2f>:15: DeprecationWarning: There is no current event loop
loop = asyncio.get_event_loop()
Status: 403
Content-type: text/xml
Body: <?xml version="1.0" encoding="UTF-8"?><Error><Code>AccessDenied</Code><Message>Access denied</Message></Error>
In [2]: import aiohttp
...: import asyncio
...:
...: async def main():
...:
...: async with aiohttp.ClientSession(requote_redirect_url=False) as session:
...: async with session.get('https://releases.jfrog.io/artifactory/artifactory-pro-rpms/repodata/8c87521e43dbed223c90
...: e23922c0c4bbe7159112-primary.xml.gz') as response:
...:
...: print("Status:", response.status)
...: print("Content-type:", response.headers['content-type'])
...:
...: html = await response.text()
...: print("Body:", html)
...:
...: loop = asyncio.get_event_loop()
...: loop.run_until_complete(main())
<ipython-input-2-32fd9ff02673>:15: DeprecationWarning: There is no current event loop
loop = asyncio.get_event_loop()
Status: 200
Content-type: application/x-gzip
```
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2104498
| 2022-07-14T09:29:48 |
||
pulp/pulpcore | 2,972 | pulp__pulpcore-2972 | [
"2964"
] | 79b1591574b217b00d9b7517664683d931cd3250 | diff --git a/pulpcore/download/factory.py b/pulpcore/download/factory.py
--- a/pulpcore/download/factory.py
+++ b/pulpcore/download/factory.py
@@ -142,7 +142,9 @@ def _make_aiohttp_session_from_remote(self):
timeout = aiohttp.ClientTimeout(
total=total, sock_connect=sock_connect, sock_read=sock_read, connect=connect
)
- return aiohttp.ClientSession(connector=conn, timeout=timeout, headers=headers)
+ return aiohttp.ClientSession(
+ connector=conn, timeout=timeout, headers=headers, requote_redirect_url=False
+ )
def build(self, url, **kwargs):
"""
diff --git a/pulpcore/download/http.py b/pulpcore/download/http.py
--- a/pulpcore/download/http.py
+++ b/pulpcore/download/http.py
@@ -166,7 +166,9 @@ def __init__(
else:
timeout = aiohttp.ClientTimeout(total=None, sock_connect=600, sock_read=600)
conn = aiohttp.TCPConnector({"force_close": True})
- self.session = aiohttp.ClientSession(connector=conn, timeout=timeout, headers=headers)
+ self.session = aiohttp.ClientSession(
+ connector=conn, timeout=timeout, headers=headers, requote_redirect_url=False
+ )
self._close_session_on_finalize = True
self.auth = auth
self.proxy = proxy
| Aiohttp rewrites redirect URLs, breaking some requests to Amazon Web servers, unless you tell it not to. Let's do that.
**Version**
All
**Describe the bug**
Unless you tell it otherwise, aiohttp can potentially alter the redirect URL it receives from a remote webserver. This is legal, as is the encoding of the new URL, but some web servers are temperamental and expect URLs to be encoded in a particular way.
We ought to turn this off by default.
https://docs.aiohttp.org/en/stable/client_reference.html#aiohttp.ClientSession.requote_redirect_url
**To Reproduce**
```
In [1]: import aiohttp
...: import asyncio
...:
...: async def main():
...:
...: async with aiohttp.ClientSession() as session:
...: async with session.get('https://releases.jfrog.io/artifactory/artifactory-pro-rpms/repodata/8c87521e43dbed223c90
...: e23922c0c4bbe7159112-primary.xml.gz') as response:
...:
...: print("Status:", response.status)
...: print("Content-type:", response.headers['content-type'])
...:
...: html = await response.text()
...: print("Body:", html)
...:
...: loop = asyncio.get_event_loop()
...: loop.run_until_complete(main())
<ipython-input-1-8662e4a98f2f>:15: DeprecationWarning: There is no current event loop
loop = asyncio.get_event_loop()
Status: 403
Content-type: text/xml
Body: <?xml version="1.0" encoding="UTF-8"?><Error><Code>AccessDenied</Code><Message>Access denied</Message></Error>
In [2]: import aiohttp
...: import asyncio
...:
...: async def main():
...:
...: async with aiohttp.ClientSession(requote_redirect_url=False) as session:
...: async with session.get('https://releases.jfrog.io/artifactory/artifactory-pro-rpms/repodata/8c87521e43dbed223c90
...: e23922c0c4bbe7159112-primary.xml.gz') as response:
...:
...: print("Status:", response.status)
...: print("Content-type:", response.headers['content-type'])
...:
...: html = await response.text()
...: print("Body:", html)
...:
...: loop = asyncio.get_event_loop()
...: loop.run_until_complete(main())
<ipython-input-2-32fd9ff02673>:15: DeprecationWarning: There is no current event loop
loop = asyncio.get_event_loop()
Status: 200
Content-type: application/x-gzip
```
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2104498
| 2022-07-14T09:30:02 |
||
pulp/pulpcore | 2,973 | pulp__pulpcore-2973 | [
"2964"
] | 099a54d880f388b8d0024bdb2200152cf936d74e | diff --git a/pulpcore/download/factory.py b/pulpcore/download/factory.py
--- a/pulpcore/download/factory.py
+++ b/pulpcore/download/factory.py
@@ -142,7 +142,9 @@ def _make_aiohttp_session_from_remote(self):
timeout = aiohttp.ClientTimeout(
total=total, sock_connect=sock_connect, sock_read=sock_read, connect=connect
)
- return aiohttp.ClientSession(connector=conn, timeout=timeout, headers=headers)
+ return aiohttp.ClientSession(
+ connector=conn, timeout=timeout, headers=headers, requote_redirect_url=False
+ )
def build(self, url, **kwargs):
"""
diff --git a/pulpcore/download/http.py b/pulpcore/download/http.py
--- a/pulpcore/download/http.py
+++ b/pulpcore/download/http.py
@@ -166,7 +166,9 @@ def __init__(
else:
timeout = aiohttp.ClientTimeout(total=None, sock_connect=600, sock_read=600)
conn = aiohttp.TCPConnector({"force_close": True})
- self.session = aiohttp.ClientSession(connector=conn, timeout=timeout, headers=headers)
+ self.session = aiohttp.ClientSession(
+ connector=conn, timeout=timeout, headers=headers, requote_redirect_url=False
+ )
self._close_session_on_finalize = True
self.auth = auth
self.proxy = proxy
| Aiohttp rewrites redirect URLs, breaking some requests to Amazon Web servers, unless you tell it not to. Let's do that.
**Version**
All
**Describe the bug**
Unless you tell it otherwise, aiohttp can potentially alter the redirect URL it receives from a remote webserver. This is legal, as is the encoding of the new URL, but some web servers are temperamental and expect URLs to be encoded in a particular way.
We ought to turn this off by default.
https://docs.aiohttp.org/en/stable/client_reference.html#aiohttp.ClientSession.requote_redirect_url
**To Reproduce**
```
In [1]: import aiohttp
...: import asyncio
...:
...: async def main():
...:
...: async with aiohttp.ClientSession() as session:
...: async with session.get('https://releases.jfrog.io/artifactory/artifactory-pro-rpms/repodata/8c87521e43dbed223c90
...: e23922c0c4bbe7159112-primary.xml.gz') as response:
...:
...: print("Status:", response.status)
...: print("Content-type:", response.headers['content-type'])
...:
...: html = await response.text()
...: print("Body:", html)
...:
...: loop = asyncio.get_event_loop()
...: loop.run_until_complete(main())
<ipython-input-1-8662e4a98f2f>:15: DeprecationWarning: There is no current event loop
loop = asyncio.get_event_loop()
Status: 403
Content-type: text/xml
Body: <?xml version="1.0" encoding="UTF-8"?><Error><Code>AccessDenied</Code><Message>Access denied</Message></Error>
In [2]: import aiohttp
...: import asyncio
...:
...: async def main():
...:
...: async with aiohttp.ClientSession(requote_redirect_url=False) as session:
...: async with session.get('https://releases.jfrog.io/artifactory/artifactory-pro-rpms/repodata/8c87521e43dbed223c90
...: e23922c0c4bbe7159112-primary.xml.gz') as response:
...:
...: print("Status:", response.status)
...: print("Content-type:", response.headers['content-type'])
...:
...: html = await response.text()
...: print("Body:", html)
...:
...: loop = asyncio.get_event_loop()
...: loop.run_until_complete(main())
<ipython-input-2-32fd9ff02673>:15: DeprecationWarning: There is no current event loop
loop = asyncio.get_event_loop()
Status: 200
Content-type: application/x-gzip
```
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2104498
| 2022-07-14T09:30:19 |
||
pulp/pulpcore | 2,974 | pulp__pulpcore-2974 | [
"2964"
] | 9a1bebd7ac76f988c3a17b9a3e644f8999fa7639 | diff --git a/pulpcore/download/factory.py b/pulpcore/download/factory.py
--- a/pulpcore/download/factory.py
+++ b/pulpcore/download/factory.py
@@ -142,7 +142,9 @@ def _make_aiohttp_session_from_remote(self):
timeout = aiohttp.ClientTimeout(
total=total, sock_connect=sock_connect, sock_read=sock_read, connect=connect
)
- return aiohttp.ClientSession(connector=conn, timeout=timeout, headers=headers)
+ return aiohttp.ClientSession(
+ connector=conn, timeout=timeout, headers=headers, requote_redirect_url=False
+ )
def build(self, url, **kwargs):
"""
diff --git a/pulpcore/download/http.py b/pulpcore/download/http.py
--- a/pulpcore/download/http.py
+++ b/pulpcore/download/http.py
@@ -166,7 +166,9 @@ def __init__(
else:
timeout = aiohttp.ClientTimeout(total=None, sock_connect=600, sock_read=600)
conn = aiohttp.TCPConnector({"force_close": True})
- self.session = aiohttp.ClientSession(connector=conn, timeout=timeout, headers=headers)
+ self.session = aiohttp.ClientSession(
+ connector=conn, timeout=timeout, headers=headers, requote_redirect_url=False
+ )
self._close_session_on_finalize = True
self.auth = auth
self.proxy = proxy
| Aiohttp rewrites redirect URLs, breaking some requests to Amazon Web servers, unless you tell it not to. Let's do that.
**Version**
All
**Describe the bug**
Unless you tell it otherwise, aiohttp can potentially alter the redirect URL it receives from a remote webserver. This is legal, as is the encoding of the new URL, but some web servers are temperamental and expect URLs to be encoded in a particular way.
We ought to turn this off by default.
https://docs.aiohttp.org/en/stable/client_reference.html#aiohttp.ClientSession.requote_redirect_url
**To Reproduce**
```
In [1]: import aiohttp
...: import asyncio
...:
...: async def main():
...:
...: async with aiohttp.ClientSession() as session:
...: async with session.get('https://releases.jfrog.io/artifactory/artifactory-pro-rpms/repodata/8c87521e43dbed223c90
...: e23922c0c4bbe7159112-primary.xml.gz') as response:
...:
...: print("Status:", response.status)
...: print("Content-type:", response.headers['content-type'])
...:
...: html = await response.text()
...: print("Body:", html)
...:
...: loop = asyncio.get_event_loop()
...: loop.run_until_complete(main())
<ipython-input-1-8662e4a98f2f>:15: DeprecationWarning: There is no current event loop
loop = asyncio.get_event_loop()
Status: 403
Content-type: text/xml
Body: <?xml version="1.0" encoding="UTF-8"?><Error><Code>AccessDenied</Code><Message>Access denied</Message></Error>
In [2]: import aiohttp
...: import asyncio
...:
...: async def main():
...:
...: async with aiohttp.ClientSession(requote_redirect_url=False) as session:
...: async with session.get('https://releases.jfrog.io/artifactory/artifactory-pro-rpms/repodata/8c87521e43dbed223c90
...: e23922c0c4bbe7159112-primary.xml.gz') as response:
...:
...: print("Status:", response.status)
...: print("Content-type:", response.headers['content-type'])
...:
...: html = await response.text()
...: print("Body:", html)
...:
...: loop = asyncio.get_event_loop()
...: loop.run_until_complete(main())
<ipython-input-2-32fd9ff02673>:15: DeprecationWarning: There is no current event loop
loop = asyncio.get_event_loop()
Status: 200
Content-type: application/x-gzip
```
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2104498
| 2022-07-14T09:30:33 |
||
pulp/pulpcore | 2,992 | pulp__pulpcore-2992 | [
"2952"
] | 90142202f0ecc2e47dafbc2c517691fdc60b0eef | diff --git a/pulpcore/app/viewsets/custom_filters.py b/pulpcore/app/viewsets/custom_filters.py
--- a/pulpcore/app/viewsets/custom_filters.py
+++ b/pulpcore/app/viewsets/custom_filters.py
@@ -2,17 +2,21 @@
This module contains custom filters that might be used by more than one ViewSet.
"""
import re
+
+from collections import defaultdict
+from itertools import chain
from gettext import gettext as _
from urllib.parse import urlparse
from uuid import UUID
from django.urls import Resolver404, resolve
+from django.db.models import ObjectDoesNotExist
from django_filters import BaseInFilter, CharFilter, DateTimeFilter, Filter
from django_filters.fields import IsoDateTimeField
from rest_framework import serializers
from rest_framework.serializers import ValidationError as DRFValidationError
-from pulpcore.app.models import ContentArtifact, Label, RepositoryVersion
+from pulpcore.app.models import ContentArtifact, Label, RepositoryVersion, Publication
from pulpcore.app.viewsets import NamedModelViewSet
@@ -311,7 +315,7 @@ def filter(self, qs, value):
"""
Args:
qs (django.db.models.query.QuerySet): The Model queryset
- value (string): label search querry
+ value (string): label search query
Returns:
Queryset of the Models filtered by label(s)
@@ -351,3 +355,57 @@ def filter(self, qs, value):
qs = qs.filter(pulp_labels__in=labels)
return qs
+
+
+class DistributionWithContentFilter(Filter):
+ """A Filter class enabling filtering by content units served by distributions."""
+
+ def __init__(self, *args, **kwargs):
+ """Initialize a help message for the filter."""
+ kwargs.setdefault(
+ "help_text", _("Filter distributions based on the content served by them")
+ )
+ super().__init__(*args, **kwargs)
+
+ def filter(self, qs, value):
+ """Filter distributions by the provided content unit."""
+ if value is None:
+ return qs
+
+ # the same repository version can be referenced from multiple distributions; therefore,
+ # we are later appending distributions to a list value representing a single repository
+ # version
+ versions_distributions = defaultdict(list)
+
+ for dist in qs.exclude(publication=None).values("publication__repository_version", "pk"):
+ versions_distributions[dist["publication__repository_version"]].append(dist["pk"])
+
+ for dist in qs.exclude(repository_version=None).values("repository_version", "pk"):
+ if not dist.cast().SERVE_FROM_PUBLICATION:
+ versions_distributions[dist["repository_version"]].append(dist["pk"])
+
+ for dist in qs.exclude(repository=None).prefetch_related("repository__versions"):
+ if dist.cast().SERVE_FROM_PUBLICATION:
+ versions = dist.repository.versions.values_list("pk", flat=True)
+ publications = Publication.objects.filter(
+ repository_version__in=versions, complete=True
+ )
+
+ try:
+ publication = publications.select_related("repository_version").latest(
+ "repository_version", "pulp_created"
+ )
+ except ObjectDoesNotExist:
+ pass
+ else:
+ repo_version = publication.repository_version
+ versions_distributions[repo_version.pk].append(dist.pk)
+ else:
+ repo_version = dist.repository.latest_version()
+ versions_distributions[repo_version.pk].append(dist.pk)
+
+ content = NamedModelViewSet.get_resource(value)
+ versions = RepositoryVersion.objects.with_content([content.pk]).values_list("pk", flat=True)
+
+ distributions = chain.from_iterable(versions_distributions[version] for version in versions)
+ return qs.filter(pk__in=distributions)
diff --git a/pulpcore/app/viewsets/publication.py b/pulpcore/app/viewsets/publication.py
--- a/pulpcore/app/viewsets/publication.py
+++ b/pulpcore/app/viewsets/publication.py
@@ -30,6 +30,7 @@
)
from pulpcore.app.viewsets.base import DATETIME_FILTER_OPTIONS, NAME_FILTER_OPTIONS
from pulpcore.app.viewsets.custom_filters import (
+ DistributionWithContentFilter,
IsoDateTimeFilter,
LabelSelectFilter,
RepositoryVersionFilter,
@@ -327,6 +328,7 @@ class DistributionFilter(BaseFilterSet):
name = filters.CharFilter()
base_path = filters.CharFilter()
pulp_label_select = LabelSelectFilter()
+ with_content = DistributionWithContentFilter()
class Meta:
model = Distribution
| As a user, I want to know which distributions some content is in.
**Is your feature request related to a problem? Please describe.**
There is currently no easy way for clients to find which distributions provide a specific piece of content.
**Describe the solution you'd like**
Add a `?content_in=` filter to the distributions APIs that allows users to specify a content HREF and get a list of distributions that contain the selected content.
**Additional context**
This is needed for repository management in galaxy ng.
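For illustration, a hypothetical client-side sketch of querying the filter added in the patch above; the host, credentials and HREF are placeholders, and the parameter name `with_content` follows the patch rather than the original `content_in` proposal:
```python
import requests

PULP_API = "https://pulp.example.com"  # placeholder
CONTENT_HREF = "/pulp/api/v3/content/file/files/<uuid>/"  # placeholder

response = requests.get(
    f"{PULP_API}/pulp/api/v3/distributions/",
    params={"with_content": CONTENT_HREF},
    auth=("admin", "password"),
)
response.raise_for_status()
for distribution in response.json()["results"]:
    print(distribution["name"], distribution["base_path"])
```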
| As this is a query to the Distribution, I'd name the filter something like `with_content`, `has_content` or `serves_content`.
@lubosmj will take a crack at this.
Should we close this issue then: https://github.com/pulp/pulpcore/issues/2865 ?
> Should we close this issue then: #2865 ?
No, the ask about querying repositories is a separate (and not as obvious) one. We may however cross the distribution off that other issue's name.
Does it make sense to also allow users to filter distributions by *not including* specific content? So, in the end, we will support queries like this: `distributions?content_in=!/pulp/href/123/`.
@lubosmj maybe in the future we will have a use case for this, and it will be a separate filter, but for now I'd focus on getting `distributions?with_content=pulp/href/123/` done. | 2022-07-20T14:01:48 |
|
pulp/pulpcore | 2,999 | pulp__pulpcore-2999 | [
"2998"
] | 2d68f26576aa344ade96947b81fe74772350974c | diff --git a/pulpcore/app/migrations/0093_add_info_field_repositoryversion.py b/pulpcore/app/migrations/0093_add_info_field_repositoryversion.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/app/migrations/0093_add_info_field_repositoryversion.py
@@ -0,0 +1,18 @@
+# Generated by Django 3.2.13 on 2022-08-02 15:11
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('core', '0092_alter_upload_options'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='repositoryversion',
+ name='info',
+ field=models.JSONField(default=dict),
+ ),
+ ]
diff --git a/pulpcore/app/models/repository.py b/pulpcore/app/models/repository.py
--- a/pulpcore/app/models/repository.py
+++ b/pulpcore/app/models/repository.py
@@ -605,6 +605,7 @@ class RepositoryVersion(BaseModel):
number = models.PositiveIntegerField(db_index=True)
complete = models.BooleanField(db_index=True, default=False)
base_version = models.ForeignKey("RepositoryVersion", null=True, on_delete=models.SET_NULL)
+ info = models.JSONField(default=dict)
class Meta:
default_related_name = "versions"
| Add another field to the `RepositoryVersion` model to store additional information.
**Is your feature request related to a problem? Please describe.**
We are looking to implement an optimized sync feature in `pulp_deb`. In order to do that, we want to store data in a `dict` that contains information about the remote options used in the latest sync as well as `sha256` values for the repository files. These will be compared when the repository is synced again, to see if something has changed, and certain steps will be skipped if it hasn't.
**Describe the solution you'd like**
Adding another field to the `RepositoryVersion` model declared as a `JSONField(default=dict)`. This way a repository can always look up the latest version to get the previous data.
**Describe alternatives you've considered**
`pulp_rpm` uses its `Repository` model to save this data, which works too. However, there were concerns about the mutability of the data when stored this way.
Another solution would be adding a subclass of `RepositoryVersion` to `pulp_deb`. Since no plugin subclasses this model so far, this seems like overkill just to add one additional field.
**Additional context**
For context the PR for this feature in `pulp_deb`: https://github.com/pulp/pulp_deb/pull/570
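As a rough sketch of how a plugin sync pipeline might use such a field (assuming the `info` attribute added by the patch above; the dictionary keys and the `release_file_sha256` argument are illustrative, not an agreed-upon schema):
```python
def record_sync_state(new_version, remote, release_file_sha256):
    # Persist whatever the next sync needs to decide whether work can be skipped.
    new_version.info = {
        "remote_url": remote.url,
        "release_file_sha256": release_file_sha256,
    }
    new_version.save()


def can_skip_sync(repository, remote, release_file_sha256):
    # Compare against the data stored on the latest version, if there is one.
    latest = repository.latest_version()
    if latest is None:
        return False
    previous = latest.info or {}
    return (
        previous.get("remote_url") == remote.url
        and previous.get("release_file_sha256") == release_file_sha256
    )
```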
| 2022-07-22T15:02:11 |
||
pulp/pulpcore | 3,001 | pulp__pulpcore-3001 | [
"3064"
] | 90142202f0ecc2e47dafbc2c517691fdc60b0eef | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -36,6 +36,8 @@
from pulpcore.constants import TASK_STATES
from pulpcore.tasking.tasks import dispatch
+from pulpcore.plugin.importexport import BaseContentResource
+
log = getLogger(__name__)
ARTIFACT_FILE = "pulpcore.app.modelresource.ArtifactResource.json"
@@ -236,13 +238,14 @@ def import_repository_version(importer_pk, destination_repo_pk, source_repo_name
resulting_content_ids = []
for res_class in cfg.exportable_classes:
- content_count = 0
filename = f"{res_class.__module__}.{res_class.__name__}.json"
for a_result in _import_file(os.path.join(rv_path, filename), res_class, retry=True):
- content_count += len(a_result.rows)
- resulting_content_ids.extend(
- row.object_id for row in a_result.rows if row.import_type in ("new", "update")
- )
+ if issubclass(res_class, BaseContentResource):
+ resulting_content_ids.extend(
+ row.object_id
+ for row in a_result.rows
+ if row.import_type in ("new", "update")
+ )
# Once all content exists, create the ContentArtifact links
ca_path = os.path.join(rv_path, CA_FILE)
| Import attempts to add everything, not only content, to the target repository
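A hedged illustration of the intended behavior: only rows coming from content resources should feed the new repository version. `BaseContentResource` is the base class the patch checks against; the helper below is made up for illustration:
```python
from pulpcore.plugin.importexport import BaseContentResource


def collect_content_ids(import_results):
    # import_results: iterable of (resource_class, import_result) pairs.
    content_ids = []
    for res_class, result in import_results:
        if issubclass(res_class, BaseContentResource):
            content_ids.extend(
                row.object_id for row in result.rows if row.import_type in ("new", "update")
            )
    return content_ids
```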
| 2022-07-25T16:44:08 |
||
pulp/pulpcore | 3,012 | pulp__pulpcore-3012 | [
"3011"
] | e66682d89305e272c0c7a30270c6864520f2fd95 | diff --git a/pulpcore/app/management/commands/dump-permissions.py b/pulpcore/app/management/commands/dump-permissions.py
--- a/pulpcore/app/management/commands/dump-permissions.py
+++ b/pulpcore/app/management/commands/dump-permissions.py
@@ -153,7 +153,7 @@ def _get_group_object_permissions():
obj = _get_url(content_type_id, object_pk)
except Exception:
obj = f"{app_label}.{model}:{object_pk}"
- yield {"groupname": groupname, "object": obj, "permissions": permissions}
+ yield {"groupname": groupname, "object": obj, "permissions": permissions}
class Command(BaseCommand):
| django-admin dump-permissions command fails
```
django-admin dump-permissions --settings=pulpcore.app.settings
Traceback (most recent call last):
File "/var/lib/pulp/pulpvenv/bin/django-admin", line 8, in <module>
sys.exit(execute_from_command_line())
File "/var/lib/pulp/pulpvenv/lib64/python3.8/site-packages/django/core/management/__init__.py", line 419, in execute_from_command_line
utility.execute()
File "/var/lib/pulp/pulpvenv/lib64/python3.8/site-packages/django/core/management/__init__.py", line 413, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/var/lib/pulp/pulpvenv/lib64/python3.8/site-packages/django/core/management/base.py", line 354, in run_from_argv
self.execute(*args, **cmd_options)
File "/var/lib/pulp/pulpvenv/lib64/python3.8/site-packages/django/core/management/base.py", line 398, in execute
output = self.handle(*args, **options)
File "/var/lib/pulp/pulpvenv/lib64/python3.8/site-packages/pulpcore/app/management/commands/dump-permissions.py", line 231, in handle
data["group_object_permissions"] = list(_get_group_object_permissions())
File "/var/lib/pulp/pulpvenv/lib64/python3.8/site-packages/pulpcore/app/management/commands/dump-permissions.py", line 156, in _get_group_object_permissions
yield {"groupname": groupname, "object": obj, "permissions": permissions}
UnboundLocalError: local variable 'groupname' referenced before assignment
```
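For context, a generic illustration of the failure class in this traceback (not the pulpcore code): a `yield` sitting at the wrong indentation level can reference loop variables that were never bound:
```python
def broken_rows(rows):
    for groupname, permission in rows:
        permissions = [permission]
    # Mis-indented: runs once, after the loop, and fails when `rows` is empty.
    yield {"groupname": groupname, "permissions": permissions}


def fixed_rows(rows):
    for groupname, permission in rows:
        permissions = [permission]
        yield {"groupname": groupname, "permissions": permissions}


list(fixed_rows([]))     # -> []
# list(broken_rows([]))  # -> UnboundLocalError: local variable 'groupname' ...
```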
| 2022-07-26T12:36:57 |
||
pulp/pulpcore | 3,013 | pulp__pulpcore-3013 | [
"3011"
] | 2a88af3591d144d614503ba87b2299c330de3b1b | diff --git a/pulpcore/app/management/commands/dump-permissions.py b/pulpcore/app/management/commands/dump-permissions.py
--- a/pulpcore/app/management/commands/dump-permissions.py
+++ b/pulpcore/app/management/commands/dump-permissions.py
@@ -153,7 +153,7 @@ def _get_group_object_permissions():
obj = _get_url(content_type_id, object_pk)
except Exception:
obj = f"{app_label}.{model}:{object_pk}"
- yield {"groupname": groupname, "object": obj, "permissions": permissions}
+ yield {"groupname": groupname, "object": obj, "permissions": permissions}
class Command(BaseCommand):
| django-admin dump-permissions command fails
```
django-admin dump-permissions --settings=pulpcore.app.settings
Traceback (most recent call last):
File "/var/lib/pulp/pulpvenv/bin/django-admin", line 8, in <module>
sys.exit(execute_from_command_line())
File "/var/lib/pulp/pulpvenv/lib64/python3.8/site-packages/django/core/management/__init__.py", line 419, in execute_from_command_line
utility.execute()
File "/var/lib/pulp/pulpvenv/lib64/python3.8/site-packages/django/core/management/__init__.py", line 413, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/var/lib/pulp/pulpvenv/lib64/python3.8/site-packages/django/core/management/base.py", line 354, in run_from_argv
self.execute(*args, **cmd_options)
File "/var/lib/pulp/pulpvenv/lib64/python3.8/site-packages/django/core/management/base.py", line 398, in execute
output = self.handle(*args, **options)
File "/var/lib/pulp/pulpvenv/lib64/python3.8/site-packages/pulpcore/app/management/commands/dump-permissions.py", line 231, in handle
data["group_object_permissions"] = list(_get_group_object_permissions())
File "/var/lib/pulp/pulpvenv/lib64/python3.8/site-packages/pulpcore/app/management/commands/dump-permissions.py", line 156, in _get_group_object_permissions
yield {"groupname": groupname, "object": obj, "permissions": permissions}
UnboundLocalError: local variable 'groupname' referenced before assignment
```
| 2022-07-26T13:28:49 |
||
pulp/pulpcore | 3,018 | pulp__pulpcore-3018 | [
"2933"
] | d1c47623718e316ccf14072f25d1c292376179a9 | diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -37,31 +37,38 @@
log = logging.getLogger(__name__)
-def _export_to_file_system(path, content_artifacts, method=FS_EXPORT_METHODS.WRITE):
+def _validate_fs_export(content_artifacts):
"""
- Export a set of ContentArtifacts to the filesystem.
-
Args:
- path (str): A path to export the ContentArtifacts to
content_artifacts (django.db.models.QuerySet): Set of ContentArtifacts to export
Raises:
- ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
RuntimeError: If Artifacts are not downloaded or when trying to link non-fs files
"""
if content_artifacts.filter(artifact=None).exists():
raise RuntimeError(_("Cannot export artifacts that haven't been downloaded."))
+
+def _export_to_file_system(path, relative_paths_to_artifacts, method=FS_EXPORT_METHODS.WRITE):
+ """
+ Export a set of artifacts to the filesystem.
+
+ Args:
+ path (str): A path to export the ContentArtifacts to
+ relative_paths_to_artifacts: A dict with {relative_path: artifact} mapping
+
+ Raises:
+ ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
+ """
+
if (
settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem"
and method != FS_EXPORT_METHODS.WRITE
):
raise RuntimeError(_("Only write is supported for non-filesystem storage."))
- for ca in content_artifacts.select_related("artifact").iterator():
- artifact = ca.artifact
- dest = os.path.join(path, ca.relative_path)
-
+ for relative_path, artifact in relative_paths_to_artifacts.items():
+ dest = os.path.join(path, relative_path)
os.makedirs(os.path.split(dest)[0], exist_ok=True)
if method == FS_EXPORT_METHODS.SYMLINK:
@@ -74,7 +81,6 @@ def _export_to_file_system(path, content_artifacts, method=FS_EXPORT_METHODS.WRI
with open(dest, "wb") as f, artifact.file as af:
for chunk in af.chunks(1024 * 1024):
f.write(chunk)
-
else:
raise RuntimeError(_("Unsupported export method '{}'.").format(method))
@@ -112,7 +118,21 @@ def fs_publication_export(exporter_pk, publication_pk):
content__in=publication.repository_version.content
)
- _export_to_file_system(exporter.path, content_artifacts)
+ _validate_fs_export(content_artifacts)
+
+ relative_path_to_artifacts = {}
+ if publication.pass_through:
+ relative_path_to_artifacts = {
+ ca.relative_path: ca.artifact
+ for ca in content_artifacts.select_related("artifact").iterator()
+ }
+
+ for pa in publication.published_artifact.select_related(
+ "content_artifact", "content_artifact__artifact"
+ ).iterator():
+ relative_path_to_artifacts[pa.relative_path] = pa.content_artifact.artifact
+
+ _export_to_file_system(exporter.path, relative_path_to_artifacts, exporter.method)
def fs_repo_version_export(exporter_pk, repo_version_pk):
@@ -140,8 +160,14 @@ def fs_repo_version_export(exporter_pk, repo_version_pk):
)
content_artifacts = ContentArtifact.objects.filter(content__in=repo_version.content)
+ _validate_fs_export(content_artifacts)
+
+ relative_path_to_artifacts = {
+ ca.relative_path: ca.artifact
+ for ca in content_artifacts.select_related("artifact").iterator()
+ }
- _export_to_file_system(exporter.path, content_artifacts)
+ _export_to_file_system(exporter.path, relative_path_to_artifacts, exporter.method)
def _get_versions_to_export(the_exporter, the_export):
| File System Exporter needs to use the correct relative path.
**Version**
Please provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.
pulpcore 3.16 and above.
**Describe the bug**
Note: This is not exactly a bug since this functionality is listed as "Tech Preview", but Katello and customers now need it to work. Moreover, this is a feature that's already there in the code, so marking it as a bug seems appropriate to me.
File system exporter is not exporting the publications correctly.
**To Reproduce**
Steps to reproduce the behavior:
- Sync a RH repo that has packages with the following structure (example repo here is `openstack-tools`)
```bash
$ ls content/dist/rhel/server/7/7.5/x86_64/openstack-tools/10/os/
Packages repodata
$ ls content/dist/rhel/server/7/7.5/x86_64/openstack-tools/10/os/Packages/
b listing o p
$ ls content/dist/rhel/server/7/7.5/x86_64/openstack-tools/10/os/Packages/b/
babel-2.3.4-1.el7ost.noarch.rpm listing
```
- Create a publication.
- Use the file system exporter to export this somewhere like `/var/lib/pulp/exports/foo` and give the publication href.
- Check the exported result
```
$ ls -l /var/lib/pulp/exports/foo
.....
babel-2.3.4-1.el7ost.noarch.rpm
.....
repodata
....
```
- The Packages directory is not retained, while the primary XML in the repodata points to `Packages/b/babel-2.3.4-1.el7ost.noarch.rpm`.
**Expected behavior**
The Packages directory is correctly maintained when exporting.
**Additional context**
The relevant code is at https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/export.py#L63 and https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/export.py#L82.
The actual paths are correct in core_publishedartifact, so we just need to make sure the relative_paths are correct in the case of a publication.
```sql
# select relative_path from core_publishedartifact;
.....
Packages/b/babel-2.3.4-1.el7ost.noarch.rpm
....
```
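For reference, a hedged sketch of driving the filesystem exporter through the REST API; the host, credentials, HREF and path are placeholders, and the exact endpoints should be double-checked against the pulpcore version in use:
```python
import requests

PULP_API = "https://pulp.example.com"  # placeholder
AUTH = ("admin", "password")           # placeholder
PUBLICATION_HREF = "/pulp/api/v3/publications/rpm/rpm/<uuid>/"  # placeholder

# Create a filesystem exporter; the path must be listed in ALLOWED_EXPORT_PATHS.
exporter = requests.post(
    f"{PULP_API}/pulp/api/v3/exporters/core/filesystem/",
    json={"name": "foo", "path": "/var/lib/pulp/exports/foo"},
    auth=AUTH,
).json()

# Export the publication; the result should preserve the Packages/<letter>/ layout
# recorded in core_publishedartifact rather than flattening it.
export_task = requests.post(
    f"{PULP_API}{exporter['pulp_href']}exports/",
    json={"publication": PUBLICATION_HREF},
    auth=AUTH,
).json()
print(export_task)
```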
| 2022-07-27T15:22:25 |
||
pulp/pulpcore | 3,021 | pulp__pulpcore-3021 | [
"2933"
] | 925f33f2ef45a01e5d7964b98b9dcfed71207990 | diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -37,31 +37,38 @@
log = logging.getLogger(__name__)
-def _export_to_file_system(path, content_artifacts, method=FS_EXPORT_METHODS.WRITE):
+def _validate_fs_export(content_artifacts):
"""
- Export a set of ContentArtifacts to the filesystem.
-
Args:
- path (str): A path to export the ContentArtifacts to
content_artifacts (django.db.models.QuerySet): Set of ContentArtifacts to export
Raises:
- ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
RuntimeError: If Artifacts are not downloaded or when trying to link non-fs files
"""
if content_artifacts.filter(artifact=None).exists():
raise RuntimeError(_("Cannot export artifacts that haven't been downloaded."))
+
+def _export_to_file_system(path, relative_paths_to_artifacts, method=FS_EXPORT_METHODS.WRITE):
+ """
+ Export a set of artifacts to the filesystem.
+
+ Args:
+ path (str): A path to export the ContentArtifacts to
+ relative_paths_to_artifacts: A dict with {relative_path: artifact} mapping
+
+ Raises:
+ ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
+ """
+
if (
settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem"
and method != FS_EXPORT_METHODS.WRITE
):
raise RuntimeError(_("Only write is supported for non-filesystem storage."))
- for ca in content_artifacts.select_related("artifact").iterator():
- artifact = ca.artifact
- dest = os.path.join(path, ca.relative_path)
-
+ for relative_path, artifact in relative_paths_to_artifacts.items():
+ dest = os.path.join(path, relative_path)
os.makedirs(os.path.split(dest)[0], exist_ok=True)
if method == FS_EXPORT_METHODS.SYMLINK:
@@ -74,7 +81,6 @@ def _export_to_file_system(path, content_artifacts, method=FS_EXPORT_METHODS.WRI
with open(dest, "wb") as f, artifact.file as af:
for chunk in af.chunks(1024 * 1024):
f.write(chunk)
-
else:
raise RuntimeError(_("Unsupported export method '{}'.").format(method))
@@ -112,7 +118,21 @@ def fs_publication_export(exporter_pk, publication_pk):
content__in=publication.repository_version.content
)
- _export_to_file_system(exporter.path, content_artifacts)
+ _validate_fs_export(content_artifacts)
+
+ relative_path_to_artifacts = {}
+ if publication.pass_through:
+ relative_path_to_artifacts = {
+ ca.relative_path: ca.artifact
+ for ca in content_artifacts.select_related("artifact").iterator()
+ }
+
+ for pa in publication.published_artifact.select_related(
+ "content_artifact", "content_artifact__artifact"
+ ).iterator():
+ relative_path_to_artifacts[pa.relative_path] = pa.content_artifact.artifact
+
+ _export_to_file_system(exporter.path, relative_path_to_artifacts, exporter.method)
def fs_repo_version_export(exporter_pk, repo_version_pk):
@@ -141,8 +161,14 @@ def fs_repo_version_export(exporter_pk, repo_version_pk):
)
content_artifacts = ContentArtifact.objects.filter(content__in=repo_version.content)
+ _validate_fs_export(content_artifacts)
+
+ relative_path_to_artifacts = {
+ ca.relative_path: ca.artifact
+ for ca in content_artifacts.select_related("artifact").iterator()
+ }
- _export_to_file_system(exporter.path, content_artifacts)
+ _export_to_file_system(exporter.path, relative_path_to_artifacts, exporter.method)
def _get_versions_to_export(the_exporter, the_export):
| File System Exporter needs to use the correct relative path.
**Version**
Please provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.
pulpcore 3.16 and above.
**Describe the bug**
Note: This is not exactly a bug since this functionality is listed as "Tech Preview", but Katello and customers now need it to work. Moreover, this is a feature that's already there in the code, so marking it as a bug seems appropriate to me.
File system exporter is not exporting the publications correctly.
**To Reproduce**
Steps to reproduce the behavior:
- Sync a RH repo that has packages with the following structure (example repo here is `openstack-tools`)
```bash
$ ls content/dist/rhel/server/7/7.5/x86_64/openstack-tools/10/os/
Packages repodata
$ ls content/dist/rhel/server/7/7.5/x86_64/openstack-tools/10/os/Packages/
b listing o p
$ ls content/dist/rhel/server/7/7.5/x86_64/openstack-tools/10/os/Packages/b/
babel-2.3.4-1.el7ost.noarch.rpm listing
```
- Create a publication.
- Use the file system exporter to export this somewhere like `/var/lib/pulp/exports/foo` and give the publication href.
- Check the exported result
```
$ ls -l /var/lib/pulp/exports/foo
.....
babel-2.3.4-1.el7ost.noarch.rpm
.....
repodata
....
```
- The Packages directory is not retained, while the primary XML in the repodata points to `Packages/b/babel-2.3.4-1.el7ost.noarch.rpm`.
**Expected behavior**
The Packages directory is correctly maintained when exporting.
**Additional context**
The relevant code is at https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/export.py#L63 and https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/export.py#L82.
The actual paths are correct in core_publishedartifact, so we just need to make sure the relative_paths are correct in the case of a publication.
```sql
# select relative_path from core_publishedartifact;
.....
Packages/b/babel-2.3.4-1.el7ost.noarch.rpm
....
```
| 2022-07-27T18:29:00 |
||
pulp/pulpcore | 3,022 | pulp__pulpcore-3022 | [
"2933"
] | cab70f922d5f15aaf79b87a52d299b4a6ee1855d | diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -37,31 +37,38 @@
log = logging.getLogger(__name__)
-def _export_to_file_system(path, content_artifacts, method=FS_EXPORT_METHODS.WRITE):
+def _validate_fs_export(content_artifacts):
"""
- Export a set of ContentArtifacts to the filesystem.
-
Args:
- path (str): A path to export the ContentArtifacts to
content_artifacts (django.db.models.QuerySet): Set of ContentArtifacts to export
Raises:
- ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
RuntimeError: If Artifacts are not downloaded or when trying to link non-fs files
"""
if content_artifacts.filter(artifact=None).exists():
RuntimeError(_("Cannot export artifacts that haven't been downloaded."))
+
+def _export_to_file_system(path, relative_paths_to_artifacts, method=FS_EXPORT_METHODS.WRITE):
+ """
+ Export a set of artifacts to the filesystem.
+
+ Args:
+ path (str): A path to export the ContentArtifacts to
+ relative_paths_to_artifacts: A dict with {relative_path: artifact} mapping
+
+ Raises:
+ ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
+ """
+
if (
settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem"
and method != FS_EXPORT_METHODS.WRITE
):
raise RuntimeError(_("Only write is supported for non-filesystem storage."))
- for ca in content_artifacts.select_related("artifact").iterator():
- artifact = ca.artifact
- dest = os.path.join(path, ca.relative_path)
-
+ for relative_path, artifact in relative_paths_to_artifacts.items():
+ dest = os.path.join(path, relative_path)
os.makedirs(os.path.split(dest)[0], exist_ok=True)
if method == FS_EXPORT_METHODS.SYMLINK:
@@ -74,7 +81,6 @@ def _export_to_file_system(path, content_artifacts, method=FS_EXPORT_METHODS.WRI
with open(dest, "wb") as f, artifact.file as af:
for chunk in af.chunks(1024 * 1024):
f.write(chunk)
-
else:
raise RuntimeError(_("Unsupported export method '{}'.").format(method))
@@ -112,7 +118,21 @@ def fs_publication_export(exporter_pk, publication_pk):
content__in=publication.repository_version.content
)
- _export_to_file_system(exporter.path, content_artifacts)
+ _validate_fs_export(content_artifacts)
+
+ relative_path_to_artifacts = {}
+ if publication.pass_through:
+ relative_path_to_artifacts = {
+ ca.relative_path: ca.artifact
+ for ca in content_artifacts.select_related("artifact").iterator()
+ }
+
+ for pa in publication.published_artifact.select_related(
+ "content_artifact", "content_artifact__artifact"
+ ).iterator():
+ relative_path_to_artifacts[pa.relative_path] = pa.content_artifact.artifact
+
+ _export_to_file_system(exporter.path, relative_path_to_artifacts, exporter.method)
def fs_repo_version_export(exporter_pk, repo_version_pk):
@@ -140,8 +160,14 @@ def fs_repo_version_export(exporter_pk, repo_version_pk):
)
content_artifacts = ContentArtifact.objects.filter(content__in=repo_version.content)
+ _validate_fs_export(content_artifacts)
+
+ relative_path_to_artifacts = {
+ ca.relative_path: ca.artifact
+ for ca in content_artifacts.select_related("artifact").iterator()
+ }
- _export_to_file_system(exporter.path, content_artifacts)
+ _export_to_file_system(exporter.path, relative_path_to_artifacts, exporter.method)
def _get_versions_to_export(the_exporter, the_export):
| File System Exporter needs to use the correct relative path.
**Version**
Please provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.
pulpcore 3.16 and above.
**Describe the bug**
Note: This is not exactly a bug since this functionality is listed as "Tech Preview", but Katello and customers now need it to work. Moreover, this is a feature that's already there in the code, so marking it as a bug seems appropriate to me.
File system exporter is not exporting the publications correctly.
**To Reproduce**
Steps to reproduce the behavior:
- Sync a RH repo that has packages with the following structure (example repo here is `openstack-tools`)
```bash
$ ls content/dist/rhel/server/7/7.5/x86_64/openstack-tools/10/os/
Packages repodata
$ ls content/dist/rhel/server/7/7.5/x86_64/openstack-tools/10/os/Packages/
b listing o p
$ ls content/dist/rhel/server/7/7.5/x86_64/openstack-tools/10/os/Packages/b/
babel-2.3.4-1.el7ost.noarch.rpm listing
```
- Create a publication.
- Use the file system exporter to export this somewhere like `/var/lib/pulp/exports/foo` and give the publication href.
- Check the exported result
```
$ ls -l /var/lib/pulp/exports/foo
.....
babel-2.3.4-1.el7ost.noarch.rpm
.....
repodata
....
```
- The Packages directory is not retained, while the primary XML in the repodata points to `Packages/b/babel-2.3.4-1.el7ost.noarch.rpm`.
**Expected behavior**
The Packages directory is correctly maintained when exporting.
**Additional context**
The relevant code is at https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/export.py#L63 and https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/export.py#L82.
The actual paths are correct in core_publishedartifact, so we just need to make sure the relative_paths are correct in the case of a publication.
```sql
# select relative_path from core_publishedartifact;
.....
Packages/b/babel-2.3.4-1.el7ost.noarch.rpm
....
```
| 2022-07-27T18:29:00 |
||
pulp/pulpcore | 3,031 | pulp__pulpcore-3031 | [
"3030"
] | 2d68f26576aa344ade96947b81fe74772350974c | diff --git a/pulpcore/app/views/orphans.py b/pulpcore/app/views/orphans.py
--- a/pulpcore/app/views/orphans.py
+++ b/pulpcore/app/views/orphans.py
@@ -19,6 +19,6 @@ def delete(self, request, format=None):
"""
Cleans up all the Content and Artifact orphans in the system
"""
- task = dispatch(orphan_cleanup)
+ task = dispatch(orphan_cleanup, exclusive_resources=["/pulp/api/v3/orphans/cleanup/"])
return OperationPostponedResponse(task, request)
diff --git a/pulpcore/app/viewsets/orphans.py b/pulpcore/app/viewsets/orphans.py
--- a/pulpcore/app/viewsets/orphans.py
+++ b/pulpcore/app/viewsets/orphans.py
@@ -29,6 +29,7 @@ def cleanup(self, request):
task = dispatch(
orphan_cleanup,
+ exclusive_resources=["/pulp/api/v3/orphans/cleanup/"],
kwargs={"content_pks": content_pks, "orphan_protection_time": orphan_protection_time},
)
| Remove orphans: Repository matching query does not exist.
**Version**
```
$ rpm -qa | grep tfm-rubygem-pulp
tfm-rubygem-pulp_certguard_client-1.5.0-1.el7.noarch
tfm-rubygem-pulp_ansible_client-0.13.1-1.el7.noarch
tfm-rubygem-pulp_deb_client-2.18.0-1.el7.noarch
tfm-rubygem-pulp_rpm_client-3.17.4-1.el7.noarch
tfm-rubygem-pulp_python_client-3.6.0-1.el7.noarch
tfm-rubygem-pulp_ostree_client-2.0.0-0.1.a1.el7.noarch
tfm-rubygem-pulpcore_client-3.18.5-1.el7.noarch
tfm-rubygem-pulp_container_client-2.10.3-1.el7.noarch
tfm-rubygem-pulp_file_client-1.10.0-1.el7.noarch
$ rpm -qa | grep katello-4
tfm-rubygem-katello-4.5.0-1.el7.noarch
katello-4.5.0-1.el7.noarch
```
**Describe the bug**
Running `foreman-rake katello:delete_orphaned_content RAILS_ENV=production` creates two tasks.
One of the tasks gets stuck in _paused_ with the following error: `Repository matching query does not exist.`
**To Reproduce**
- Run
`foreman-rake katello:delete_orphaned_content RAILS_ENV=production`.
- After a few minutes run
`hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "paused"'`
**Expected behavior**
We expected both tasks to run successfully.
**Additional context**
We had to manually apply this patch: https://github.com/pulp/pulp_rpm/pull/2619 - before that, we had this same issue: https://github.com/pulp/pulp_rpm/issues/2459.
```
$ hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "paused" '
[
{
"ID": "a8aae19c-7c62-4a60-ab12-935687e3723e",
"Action": "Remove orphans ",
"State": "paused",
"Result": "error",
"Started at": "2022-07-21 15:29:02 UTC",
"Ended at": null,
"Duration": "00:02:29.56437",
"Owner": "foreman_admin",
"Task errors": [
"Repository matching query does not exist."
]
}
]
```
```
$ http_pulp /pulp/api/v3/tasks/e7df82c7-cb38-4fac-a28a-edd8842b3b69/
HTTP/1.1 200 OK
Access-Control-Expose-Headers: Correlation-ID
Allow: GET, PATCH, DELETE, HEAD, OPTIONS
Connection: Keep-Alive
Content-Length: 3335
Content-Type: application/json
Correlation-ID: e1f2e082da6e401196007424799a05b5
Date: Thu, 21 Jul 2022 17:10:58 GMT
Keep-Alive: timeout=15, max=100
Referrer-Policy: same-origin
Server: gunicorn
Vary: Accept,Cookie
Via: 1.1 updates.eurotux.com
X-Content-Type-Options: nosniff
X-Frame-Options: DENY
{
"child_tasks": [],
"created_resources": [],
"error": {
"description": "Repository matching query does not exist.",
"traceback": " File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/tasking/pulpcore_worker.py\", line 410, in _perform_task\n result = func(*args, **kwargs)\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/tasks/orphan.py\", line 66, in orphan_cleanup\n c.delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 746, in delete\n deleted, _rows_count = collector.delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/deletion.py\", line 435, in delete\n signals.post_delete.send(\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 180, in send\n return [\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 181, in <listcomp>\n (receiver, receiver(signal=self, sender=sender, **named))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\", line 340, in cleanup_subrepos\n Variant.objects.filter(repository=subrepo).delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 746, in delete\n deleted, _rows_count = collector.delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/deletion.py\", line 435, in delete\n signals.post_delete.send(\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 180, in send\n return [\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 181, in <listcomp>\n (receiver, receiver(signal=self, sender=sender, **named))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\", line 338, in cleanup_subrepos\n subrepo = instance.repository\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 187, in __get__\n rel_obj = self.get_object(instance)\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 154, in get_object\n return qs.get(self.field.get_reverse_related_filter(instance))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 435, in get\n raise self.model.DoesNotExist(\n"
},
"finished_at": "2022-07-21T15:30:11.075104Z",
"logging_cid": "ac4070d63c764578bb854f97873ba88a",
"name": "pulpcore.app.tasks.orphan.orphan_cleanup",
"parent_task": null,
"progress_reports": [
{
"code": "clean-up.content",
"done": 2000,
"message": "Clean up orphan Content",
"state": "running",
"suffix": null,
"total": 17098
}
],
"pulp_created": "2022-07-21T15:29:58.868921Z",
"pulp_href": "/pulp/api/v3/tasks/e7df82c7-cb38-4fac-a28a-edd8842b3b69/",
"reserved_resources_record": [],
"started_at": "2022-07-21T15:29:59.335110Z",
"state": "failed",
"task_group": null,
"worker": "/pulp/api/v3/workers/6ff07070-3e14-49b3-b508-a0c300683f28/"
}
```
edit: related to https://community.theforeman.org/t/delete-orphaned-content-fails/29494/6
https://bugzilla.redhat.com/show_bug.cgi?id=2115881
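A hedged sketch of the eventual serialization approach: every orphan-cleanup dispatch claims the same `exclusive_resources` entry, so the tasking system never runs two cleanups concurrently; the resource string is just an agreed-upon label, and the kwargs are illustrative:
```python
from pulpcore.plugin.tasking import dispatch
from pulpcore.app.tasks import orphan_cleanup

# Two tasks that claim the same exclusive resource cannot run at the same time,
# which closes the window where parallel cleanups delete each other's subrepos.
task = dispatch(
    orphan_cleanup,
    exclusive_resources=["/pulp/api/v3/orphans/cleanup/"],
    kwargs={"content_pks": None, "orphan_protection_time": 1440},
)
```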
| Based on the traceback this seems to be a pulp-rpm issue, transferring.
```
File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\", line 338, in cleanup_subrepos\n subrepo = instance.repository\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 187, in __get__\n rel_obj = self.get_object(instance)\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 154, in get_object\n return qs.get(self.field.get_reverse_related_filter(instance))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 435, in get\n raise self.model.DoesNotExist(\n"
```
You said there are two tasks created but listed only one; what's the other task? Is it two orphan cleanup tasks? It could be a race condition.
It does not appear that we use exclusive resources to prevent more than one instance of orphan cleanup running simultaneously.
Hello @dralley,
Yes, it's another orphan cleanup task.
```
$ foreman-rake katello:delete_orphaned_content RAILS_ENV=production -n -v
** Invoke katello:delete_orphaned_content (first_time)
** Invoke dynflow:client (first_time)
** Invoke environment (first_time)
** Execute (dry run) environment
** Execute (dry run) dynflow:client
** Execute (dry run) katello:delete_orphaned_content
$ hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "running" or state = "paused" '
[
{
"ID": "078a72dc-8002-46bd-a988-7383f9604c42",
"Action": "Remove orphans ",
"State": "running",
"Result": "pending",
"Started at": "2022-07-26 16:35:51 UTC",
"Ended at": null,
"Duration": "00:00:09.569835",
"Owner": "foreman_admin",
"Task errors": [
]
},
{
"ID": "9fb6a3ee-5428-4c5e-b42f-22de91545a4d",
"Action": "Remove orphans ",
"State": "running",
"Result": "pending",
"Started at": "2022-07-26 16:35:50 UTC",
"Ended at": null,
"Duration": "00:00:09.974835",
"Owner": "foreman_admin",
"Task errors": [
]
}
]
```
A few minutes later:
```
$ hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "paused" or state = "running" '
[
{
"ID": "9fb6a3ee-5428-4c5e-b42f-22de91545a4d",
"Action": "Remove orphans ",
"State": "paused",
"Result": "error",
"Started at": "2022-07-26 16:35:50 UTC",
"Ended at": null,
"Duration": "00:02:28.169247",
"Owner": "foreman_admin",
"Task errors": [
"Repository matching query does not exist."
]
}
]
```
Apparently the task that was first launched got stuck.
So you're probably right and there's a race condition.
I also tried to run only one task at a time:
```
$ foreman-rake katello:delete_orphaned_content RAILS_ENV=production -v --jobs 1
```
But I still have the same issue.
Any ideas on how to avoid (or debug) this possible race condition? | 2022-08-04T05:09:05 |
|
pulp/pulpcore | 3,032 | pulp__pulpcore-3032 | [
"3015"
] | a492fb7eaf753937891501710806184550d1d057 | diff --git a/pulpcore/app/apps.py b/pulpcore/app/apps.py
--- a/pulpcore/app/apps.py
+++ b/pulpcore/app/apps.py
@@ -212,14 +212,11 @@ def ready(self):
super().ready()
from . import checks # noqa
+ _configure_telemetry(self.apps)
+
post_migrate.connect(
_populate_system_id, sender=self, dispatch_uid="populate_system_id_identifier"
)
- post_migrate.connect(
- _populate_telemetry_periodic_task,
- sender=self,
- dispatch_uid="populate_telemetry_periodic_task_identifier",
- )
def _populate_access_policies(sender, apps, verbosity, **kwargs):
@@ -265,14 +262,24 @@ def _populate_system_id(sender, apps, verbosity, **kwargs):
SystemID().save()
-def _populate_telemetry_periodic_task(sender, apps, **kwargs):
- TaskSchedule = apps.get_model("core", "TaskSchedule")
- task_name = "pulpcore.app.tasks.telemetry.post_telemetry"
- dispatch_interval = timedelta(days=1)
- name = "Post Anonymous Telemetry Periodically"
- TaskSchedule.objects.update_or_create(
- name=name, defaults={"task_name": task_name, "dispatch_interval": dispatch_interval}
- )
+def _configure_telemetry(apps):
+ from django.db import connection
+ from pulpcore.app.util import get_telemetry_posting_url, PRODUCTION_URL
+
+ if "core_taskschedule" in connection.introspection.table_names():
+ url = get_telemetry_posting_url()
+ TaskSchedule = apps.get_model("core", "TaskSchedule")
+ task_name = "pulpcore.app.tasks.telemetry.post_telemetry"
+ dispatch_interval = timedelta(days=1)
+ name = "Post Anonymous Telemetry Periodically"
+ # Initially only dev systems receive posted data.
+ if url == PRODUCTION_URL:
+ TaskSchedule.objects.filter(task_name=task_name).delete()
+ else:
+ TaskSchedule.objects.update_or_create(
+ name=name, defaults={"task_name": task_name, "dispatch_interval": dispatch_interval}
+ )
+ connection.close()
def _populate_roles(sender, apps, verbosity, **kwargs):
diff --git a/pulpcore/app/tasks/telemetry.py b/pulpcore/app/tasks/telemetry.py
--- a/pulpcore/app/tasks/telemetry.py
+++ b/pulpcore/app/tasks/telemetry.py
@@ -7,6 +7,7 @@
from asgiref.sync import sync_to_async
from pulpcore.app.apps import pulp_plugin_configs
+from pulpcore.app.util import get_telemetry_posting_url
from pulpcore.app.models import SystemID
from pulpcore.app.models.status import ContentAppStatus
from pulpcore.app.models.task import Worker
@@ -15,10 +16,6 @@
logger = logging.getLogger(__name__)
-PRODUCTION_URL = "https://analytics-pulpproject-org.pulpproject.workers.dev/"
-DEV_URL = "https://dev-analytics-pulpproject-org.pulpproject.workers.dev/"
-
-
async def _num_hosts(qs):
hosts = set()
items = await sync_to_async(list)(qs.all())
@@ -67,20 +64,8 @@ async def _system_id():
return {"system_id": str(system_id_entry.pk)}
-def _get_posting_url():
- for app in pulp_plugin_configs():
- if ".dev" in app.version:
- return DEV_URL
-
- return PRODUCTION_URL
-
-
async def post_telemetry():
- url = _get_posting_url()
-
- if url == PRODUCTION_URL:
- return # Initially only dev systems receive posted data. If we got here, bail.
-
+ url = get_telemetry_posting_url()
data = {}
awaitables = (
diff --git a/pulpcore/app/util.py b/pulpcore/app/util.py
--- a/pulpcore/app/util.py
+++ b/pulpcore/app/util.py
@@ -19,6 +19,9 @@
# a little cache so viewset_for_model doesn't have iterate over every app every time
_model_viewset_cache = {}
+PRODUCTION_URL = "https://analytics-pulpproject-org.pulpproject.workers.dev/"
+DEV_URL = "https://dev-analytics-pulpproject-org.pulpproject.workers.dev/"
+
def get_url(model):
"""
@@ -243,3 +246,11 @@ def gpg_verify(public_keys, signature, detached_data=None):
if not verified.valid:
raise InvalidSignatureError(_("The signature is not valid."), verified=verified)
return verified
+
+
+def get_telemetry_posting_url():
+ for app in pulp_plugin_configs():
+ if ".dev" in app.version:
+ return DEV_URL
+
+ return PRODUCTION_URL
| Telemetry task gives false impression that telemetry data is being sent
I see in the logs that a telemetry task is being executed regularly (every day):
```
{
"child_tasks": [],
"created_resources": [],
"error": null,
"finished_at": "2022-07-25T21:32:09.435330Z",
"logging_cid": "",
"name": "pulpcore.app.tasks.telemetry.post_telemetry",
"parent_task": null,
"progress_reports": [],
"pulp_created": "2022-07-25T21:32:09.383372Z",
"pulp_href": "/pulp/api/v3/tasks/7f4c54d6-fcda-4006-9da1-3c6910a18171/",
"reserved_resources_record": [],
"started_at": "2022-07-25T21:32:09.419785Z",
"state": "completed",
"task_group": null,
"worker": "/pulp/api/v3/workers/c609a0d5-45cc-4756-af3c-4b50cd7520b1/"
}
```
It gave me the impression that Pulp was sending telemetry data. It was only when I dug into the code that I saw that it only does this for dev installs. Is there a way to not execute this task if telemetry data is not being sent?
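For reference, the patch above answers this by tying the schedule itself to the posting URL. A condensed sketch of that logic (the helper names, constants, and import path are taken from the patch; the wrapper function name here is illustrative, and the table-existence check and connection handling from the patch are omitted):

```python
# Condensed sketch of the approach in the patch above: keep the daily
# telemetry schedule only for dev installs, and drop it on production
# installs instead of dispatching a no-op task every day.
from datetime import timedelta

from pulpcore.app.util import get_telemetry_posting_url, PRODUCTION_URL


def configure_telemetry(apps):
    url = get_telemetry_posting_url()  # DEV_URL only when a ".dev" plugin version is installed
    TaskSchedule = apps.get_model("core", "TaskSchedule")
    task_name = "pulpcore.app.tasks.telemetry.post_telemetry"
    if url == PRODUCTION_URL:
        # Production installs do not post data yet, so no schedule (and no
        # daily "completed" telemetry task) should exist at all.
        TaskSchedule.objects.filter(task_name=task_name).delete()
    else:
        TaskSchedule.objects.update_or_create(
            name="Post Anonymous Telemetry Periodically",
            defaults={"task_name": task_name, "dispatch_interval": timedelta(days=1)},
        )
```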
| 2022-08-04T11:27:45 |
||
pulp/pulpcore | 3,033 | pulp__pulpcore-3033 | [
"3030"
] | 4aa4af0db680fb4e6f1cb0522fdfab75004899e0 | diff --git a/pulpcore/app/views/orphans.py b/pulpcore/app/views/orphans.py
--- a/pulpcore/app/views/orphans.py
+++ b/pulpcore/app/views/orphans.py
@@ -19,6 +19,6 @@ def delete(self, request, format=None):
"""
Cleans up all the Content and Artifact orphans in the system
"""
- task = dispatch(orphan_cleanup)
+ task = dispatch(orphan_cleanup, exclusive_resources=["/pulp/api/v3/orphans/cleanup/"])
return OperationPostponedResponse(task, request)
diff --git a/pulpcore/app/viewsets/orphans.py b/pulpcore/app/viewsets/orphans.py
--- a/pulpcore/app/viewsets/orphans.py
+++ b/pulpcore/app/viewsets/orphans.py
@@ -26,6 +26,7 @@ def cleanup(self, request):
task = dispatch(
orphan_cleanup,
+ exclusive_resources=["/pulp/api/v3/orphans/cleanup/"],
kwargs={"content_pks": content_pks, "orphan_protection_time": orphan_protection_time},
)
| Remove orphans: Repository matching query does not exist.
**Version**
```
$ rpm -qa | grep tfm-rubygem-pulp
tfm-rubygem-pulp_certguard_client-1.5.0-1.el7.noarch
tfm-rubygem-pulp_ansible_client-0.13.1-1.el7.noarch
tfm-rubygem-pulp_deb_client-2.18.0-1.el7.noarch
tfm-rubygem-pulp_rpm_client-3.17.4-1.el7.noarch
tfm-rubygem-pulp_python_client-3.6.0-1.el7.noarch
tfm-rubygem-pulp_ostree_client-2.0.0-0.1.a1.el7.noarch
tfm-rubygem-pulpcore_client-3.18.5-1.el7.noarch
tfm-rubygem-pulp_container_client-2.10.3-1.el7.noarch
tfm-rubygem-pulp_file_client-1.10.0-1.el7.noarch
$ rpm -qa | grep katello-4
tfm-rubygem-katello-4.5.0-1.el7.noarch
katello-4.5.0-1.el7.noarch
```
**Describe the bug**
Running `foreman-rake katello:delete_orphaned_content RAILS_ENV=production` creates two tasks.
One of the tasks gets stuck in _paused_ with the following error: `Repository matching query does not exist.`
**To Reproduce**
- Run
`foreman-rake katello:delete_orphaned_content RAILS_ENV=production`.
- After a few minutes run
`hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "paused"'`
**Expected behavior**
We expected both tasks to run successfully.
**Additional context**
We had to manually apply this patch: https://github.com/pulp/pulp_rpm/pull/2619 - before that, we had this same issue: https://github.com/pulp/pulp_rpm/issues/2459.
```
$ hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "paused" '
[
{
"ID": "a8aae19c-7c62-4a60-ab12-935687e3723e",
"Action": "Remove orphans ",
"State": "paused",
"Result": "error",
"Started at": "2022-07-21 15:29:02 UTC",
"Ended at": null,
"Duration": "00:02:29.56437",
"Owner": "foreman_admin",
"Task errors": [
"Repository matching query does not exist."
]
}
]
```
```
$ http_pulp /pulp/api/v3/tasks/e7df82c7-cb38-4fac-a28a-edd8842b3b69/
HTTP/1.1 200 OK
Access-Control-Expose-Headers: Correlation-ID
Allow: GET, PATCH, DELETE, HEAD, OPTIONS
Connection: Keep-Alive
Content-Length: 3335
Content-Type: application/json
Correlation-ID: e1f2e082da6e401196007424799a05b5
Date: Thu, 21 Jul 2022 17:10:58 GMT
Keep-Alive: timeout=15, max=100
Referrer-Policy: same-origin
Server: gunicorn
Vary: Accept,Cookie
Via: 1.1 updates.eurotux.com
X-Content-Type-Options: nosniff
X-Frame-Options: DENY
{
"child_tasks": [],
"created_resources": [],
"error": {
"description": "Repository matching query does not exist.",
"traceback": " File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/tasking/pulpcore_worker.py\", line 410, in _perform_task\n result = func(*args, **kwargs)\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/tasks/orphan.py\", line 66, in orphan_cleanup\n c.delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 746, in delete\n deleted, _rows_count = collector.delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/deletion.py\", line 435, in delete\n signals.post_delete.send(\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 180, in send\n return [\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 181, in <listcomp>\n (receiver, receiver(signal=self, sender=sender, **named))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\", line 340, in cleanup_subrepos\n Variant.objects.filter(repository=subrepo).delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 746, in delete\n deleted, _rows_count = collector.delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/deletion.py\", line 435, in delete\n signals.post_delete.send(\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 180, in send\n return [\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 181, in <listcomp>\n (receiver, receiver(signal=self, sender=sender, **named))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\", line 338, in cleanup_subrepos\n subrepo = instance.repository\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 187, in __get__\n rel_obj = self.get_object(instance)\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 154, in get_object\n return qs.get(self.field.get_reverse_related_filter(instance))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 435, in get\n raise self.model.DoesNotExist(\n"
},
"finished_at": "2022-07-21T15:30:11.075104Z",
"logging_cid": "ac4070d63c764578bb854f97873ba88a",
"name": "pulpcore.app.tasks.orphan.orphan_cleanup",
"parent_task": null,
"progress_reports": [
{
"code": "clean-up.content",
"done": 2000,
"message": "Clean up orphan Content",
"state": "running",
"suffix": null,
"total": 17098
}
],
"pulp_created": "2022-07-21T15:29:58.868921Z",
"pulp_href": "/pulp/api/v3/tasks/e7df82c7-cb38-4fac-a28a-edd8842b3b69/",
"reserved_resources_record": [],
"started_at": "2022-07-21T15:29:59.335110Z",
"state": "failed",
"task_group": null,
"worker": "/pulp/api/v3/workers/6ff07070-3e14-49b3-b508-a0c300683f28/"
}
```
edit: related to https://community.theforeman.org/t/delete-orphaned-content-fails/29494/6
https://bugzilla.redhat.com/show_bug.cgi?id=2115881
| Based on the traceback this seems to be a pulp-rpm issue, transferring.
`
File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\", line 338, in cleanup_subrepos\n subrepo = instance.repository\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 187, in __get__\n rel_obj = self.get_object(instance)\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 154, in get_object\n return qs.get(self.field.get_reverse_related_filter(instance))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 435, in get\n raise self.model.DoesNotExist(\n"
`
You said there are two tasks created and listed one, what's the other task? Is it two orphan cleanup tasks? It could be a race condition
It does not appear that we use exclusive resources to prevent more than one instance of orphan cleanup running simultaneously.
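A minimal sketch of what the patch above changes: both dispatch sites pass the same exclusive resource, so a second orphan cleanup request is queued behind the first instead of racing with it. The resource string is taken from the patch; the import paths and kwargs shown here are indicative, not verbatim pulpcore code:

```python
# Sketch based on the patch above: declaring a shared exclusive resource makes
# concurrent orphan cleanup requests run one after another instead of in parallel.
from pulpcore.plugin.tasking import dispatch
from pulpcore.app.tasks.orphan import orphan_cleanup

task = dispatch(
    orphan_cleanup,
    exclusive_resources=["/pulp/api/v3/orphans/cleanup/"],
    kwargs={"content_pks": None, "orphan_protection_time": None},
)
```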
Hello @dralley,
Yes, it's another orphan cleanup task.
```
$ foreman-rake katello:delete_orphaned_content RAILS_ENV=production -n -v
** Invoke katello:delete_orphaned_content (first_time)
** Invoke dynflow:client (first_time)
** Invoke environment (first_time)
** Execute (dry run) environment
** Execute (dry run) dynflow:client
** Execute (dry run) katello:delete_orphaned_content
$ hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "running" or state = "paused" '
[
{
"ID": "078a72dc-8002-46bd-a988-7383f9604c42",
"Action": "Remove orphans ",
"State": "running",
"Result": "pending",
"Started at": "2022-07-26 16:35:51 UTC",
"Ended at": null,
"Duration": "00:00:09.569835",
"Owner": "foreman_admin",
"Task errors": [
]
},
{
"ID": "9fb6a3ee-5428-4c5e-b42f-22de91545a4d",
"Action": "Remove orphans ",
"State": "running",
"Result": "pending",
"Started at": "2022-07-26 16:35:50 UTC",
"Ended at": null,
"Duration": "00:00:09.974835",
"Owner": "foreman_admin",
"Task errors": [
]
}
]
```
A few minutes later:
```
$ hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "paused" or state = "running" '
[
{
"ID": "9fb6a3ee-5428-4c5e-b42f-22de91545a4d",
"Action": "Remove orphans ",
"State": "paused",
"Result": "error",
"Started at": "2022-07-26 16:35:50 UTC",
"Ended at": null,
"Duration": "00:02:28.169247",
"Owner": "foreman_admin",
"Task errors": [
"Repository matching query does not exist."
]
}
]
```
Apparently the task that was first launched got stuck.
So you're probably right and there's a race condition.
I also tried to run only one task at a time:
```
$ foreman-rake katello:delete_orphaned_content RAILS_ENV=production -v --jobs 1
```
But I still have the same issue.
Any ideas on how to avoid (or debug) this possible race condition? | 2022-08-04T17:03:31 |
|
pulp/pulpcore | 3,034 | pulp__pulpcore-3034 | [
"3030"
] | cc35536a6d606ecda57432a1218efe4c6ec02c24 | diff --git a/pulpcore/app/views/orphans.py b/pulpcore/app/views/orphans.py
--- a/pulpcore/app/views/orphans.py
+++ b/pulpcore/app/views/orphans.py
@@ -19,6 +19,6 @@ def delete(self, request, format=None):
"""
Cleans up all the Content and Artifact orphans in the system
"""
- task = dispatch(orphan_cleanup)
+ task = dispatch(orphan_cleanup, exclusive_resources=["/pulp/api/v3/orphans/cleanup/"])
return OperationPostponedResponse(task, request)
diff --git a/pulpcore/app/viewsets/orphans.py b/pulpcore/app/viewsets/orphans.py
--- a/pulpcore/app/viewsets/orphans.py
+++ b/pulpcore/app/viewsets/orphans.py
@@ -26,6 +26,7 @@ def cleanup(self, request):
task = dispatch(
orphan_cleanup,
+ exclusive_resources=["/pulp/api/v3/orphans/cleanup/"],
kwargs={"content_pks": content_pks, "orphan_protection_time": orphan_protection_time},
)
| Remove orphans: Repository matching query does not exist.
**Version**
```
$ rpm -qa | grep tfm-rubygem-pulp
tfm-rubygem-pulp_certguard_client-1.5.0-1.el7.noarch
tfm-rubygem-pulp_ansible_client-0.13.1-1.el7.noarch
tfm-rubygem-pulp_deb_client-2.18.0-1.el7.noarch
tfm-rubygem-pulp_rpm_client-3.17.4-1.el7.noarch
tfm-rubygem-pulp_python_client-3.6.0-1.el7.noarch
tfm-rubygem-pulp_ostree_client-2.0.0-0.1.a1.el7.noarch
tfm-rubygem-pulpcore_client-3.18.5-1.el7.noarch
tfm-rubygem-pulp_container_client-2.10.3-1.el7.noarch
tfm-rubygem-pulp_file_client-1.10.0-1.el7.noarch
$ rpm -qa | grep katello-4
tfm-rubygem-katello-4.5.0-1.el7.noarch
katello-4.5.0-1.el7.noarch
```
**Describe the bug**
Running `foreman-rake katello:delete_orphaned_content RAILS_ENV=production` creates two tasks.
One of the tasks gets stuck in _paused_ with the following error: `Repository matching query does not exist.`
**To Reproduce**
- Run
`foreman-rake katello:delete_orphaned_content RAILS_ENV=production`.
- After a few minutes run
`hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "paused"'`
**Expected behavior**
We expected both tasks to run successfully.
**Additional context**
We had to manually apply this patch: https://github.com/pulp/pulp_rpm/pull/2619 - before that, we had this same issue: https://github.com/pulp/pulp_rpm/issues/2459.
```
$ hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "paused" '
[
{
"ID": "a8aae19c-7c62-4a60-ab12-935687e3723e",
"Action": "Remove orphans ",
"State": "paused",
"Result": "error",
"Started at": "2022-07-21 15:29:02 UTC",
"Ended at": null,
"Duration": "00:02:29.56437",
"Owner": "foreman_admin",
"Task errors": [
"Repository matching query does not exist."
]
}
]
```
```
$ http_pulp /pulp/api/v3/tasks/e7df82c7-cb38-4fac-a28a-edd8842b3b69/
HTTP/1.1 200 OK
Access-Control-Expose-Headers: Correlation-ID
Allow: GET, PATCH, DELETE, HEAD, OPTIONS
Connection: Keep-Alive
Content-Length: 3335
Content-Type: application/json
Correlation-ID: e1f2e082da6e401196007424799a05b5
Date: Thu, 21 Jul 2022 17:10:58 GMT
Keep-Alive: timeout=15, max=100
Referrer-Policy: same-origin
Server: gunicorn
Vary: Accept,Cookie
Via: 1.1 updates.eurotux.com
X-Content-Type-Options: nosniff
X-Frame-Options: DENY
{
"child_tasks": [],
"created_resources": [],
"error": {
"description": "Repository matching query does not exist.",
"traceback": " File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/tasking/pulpcore_worker.py\", line 410, in _perform_task\n result = func(*args, **kwargs)\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/tasks/orphan.py\", line 66, in orphan_cleanup\n c.delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 746, in delete\n deleted, _rows_count = collector.delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/deletion.py\", line 435, in delete\n signals.post_delete.send(\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 180, in send\n return [\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 181, in <listcomp>\n (receiver, receiver(signal=self, sender=sender, **named))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\", line 340, in cleanup_subrepos\n Variant.objects.filter(repository=subrepo).delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 746, in delete\n deleted, _rows_count = collector.delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/deletion.py\", line 435, in delete\n signals.post_delete.send(\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 180, in send\n return [\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 181, in <listcomp>\n (receiver, receiver(signal=self, sender=sender, **named))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\", line 338, in cleanup_subrepos\n subrepo = instance.repository\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 187, in __get__\n rel_obj = self.get_object(instance)\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 154, in get_object\n return qs.get(self.field.get_reverse_related_filter(instance))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 435, in get\n raise self.model.DoesNotExist(\n"
},
"finished_at": "2022-07-21T15:30:11.075104Z",
"logging_cid": "ac4070d63c764578bb854f97873ba88a",
"name": "pulpcore.app.tasks.orphan.orphan_cleanup",
"parent_task": null,
"progress_reports": [
{
"code": "clean-up.content",
"done": 2000,
"message": "Clean up orphan Content",
"state": "running",
"suffix": null,
"total": 17098
}
],
"pulp_created": "2022-07-21T15:29:58.868921Z",
"pulp_href": "/pulp/api/v3/tasks/e7df82c7-cb38-4fac-a28a-edd8842b3b69/",
"reserved_resources_record": [],
"started_at": "2022-07-21T15:29:59.335110Z",
"state": "failed",
"task_group": null,
"worker": "/pulp/api/v3/workers/6ff07070-3e14-49b3-b508-a0c300683f28/"
}
```
edit: related to https://community.theforeman.org/t/delete-orphaned-content-fails/29494/6
https://bugzilla.redhat.com/show_bug.cgi?id=2115881
| Based on the traceback this seems to be a pulp-rpm issue, transferring.
`
File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\", line 338, in cleanup_subrepos\n subrepo = instance.repository\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 187, in __get__\n rel_obj = self.get_object(instance)\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 154, in get_object\n return qs.get(self.field.get_reverse_related_filter(instance))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 435, in get\n raise self.model.DoesNotExist(\n"
`
You said there are two tasks created and listed one, what's the other task? Is it two orphan cleanup tasks? It could be a race condition
It does not appear that we use exclusive resources to prevent more than one instance of orphan cleanup running simultaneously.
Hello @dralley,
Yes, it's another orphan cleanup task.
```
$ foreman-rake katello:delete_orphaned_content RAILS_ENV=production -n -v
** Invoke katello:delete_orphaned_content (first_time)
** Invoke dynflow:client (first_time)
** Invoke environment (first_time)
** Execute (dry run) environment
** Execute (dry run) dynflow:client
** Execute (dry run) katello:delete_orphaned_content
$ hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "running" or state = "paused" '
[
{
"ID": "078a72dc-8002-46bd-a988-7383f9604c42",
"Action": "Remove orphans ",
"State": "running",
"Result": "pending",
"Started at": "2022-07-26 16:35:51 UTC",
"Ended at": null,
"Duration": "00:00:09.569835",
"Owner": "foreman_admin",
"Task errors": [
]
},
{
"ID": "9fb6a3ee-5428-4c5e-b42f-22de91545a4d",
"Action": "Remove orphans ",
"State": "running",
"Result": "pending",
"Started at": "2022-07-26 16:35:50 UTC",
"Ended at": null,
"Duration": "00:00:09.974835",
"Owner": "foreman_admin",
"Task errors": [
]
}
]
```
A few minutes later:
```
$ hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "paused" or state = "running" '
[
{
"ID": "9fb6a3ee-5428-4c5e-b42f-22de91545a4d",
"Action": "Remove orphans ",
"State": "paused",
"Result": "error",
"Started at": "2022-07-26 16:35:50 UTC",
"Ended at": null,
"Duration": "00:02:28.169247",
"Owner": "foreman_admin",
"Task errors": [
"Repository matching query does not exist."
]
}
]
```
Apparently the task that was first launched got stuck.
So you're probably right and there's a race condition.
I also tried to run only one task at a time:
```
$ foreman-rake katello:delete_orphaned_content RAILS_ENV=production -v --jobs 1
```
But I still have the same issue.
Any ideas on how to avoid (or debug) this possible race condition? | 2022-08-04T17:03:44 |
|
pulp/pulpcore | 3,035 | pulp__pulpcore-3035 | [
"3030"
] | d2e7ac18053b5105ac83433c9ed07ad3c292cb89 | diff --git a/pulpcore/app/views/orphans.py b/pulpcore/app/views/orphans.py
--- a/pulpcore/app/views/orphans.py
+++ b/pulpcore/app/views/orphans.py
@@ -19,6 +19,6 @@ def delete(self, request, format=None):
"""
Cleans up all the Content and Artifact orphans in the system
"""
- task = dispatch(orphan_cleanup)
+ task = dispatch(orphan_cleanup, exclusive_resources=["/pulp/api/v3/orphans/cleanup/"])
return OperationPostponedResponse(task, request)
diff --git a/pulpcore/app/viewsets/orphans.py b/pulpcore/app/viewsets/orphans.py
--- a/pulpcore/app/viewsets/orphans.py
+++ b/pulpcore/app/viewsets/orphans.py
@@ -29,6 +29,7 @@ def cleanup(self, request):
task = dispatch(
orphan_cleanup,
+ exclusive_resources=["/pulp/api/v3/orphans/cleanup/"],
kwargs={"content_pks": content_pks, "orphan_protection_time": orphan_protection_time},
)
| Remove orphans: Repository matching query does not exist.
**Version**
```
$ rpm -qa | grep tfm-rubygem-pulp
tfm-rubygem-pulp_certguard_client-1.5.0-1.el7.noarch
tfm-rubygem-pulp_ansible_client-0.13.1-1.el7.noarch
tfm-rubygem-pulp_deb_client-2.18.0-1.el7.noarch
tfm-rubygem-pulp_rpm_client-3.17.4-1.el7.noarch
tfm-rubygem-pulp_python_client-3.6.0-1.el7.noarch
tfm-rubygem-pulp_ostree_client-2.0.0-0.1.a1.el7.noarch
tfm-rubygem-pulpcore_client-3.18.5-1.el7.noarch
tfm-rubygem-pulp_container_client-2.10.3-1.el7.noarch
tfm-rubygem-pulp_file_client-1.10.0-1.el7.noarch
$ rpm -qa | grep katello-4
tfm-rubygem-katello-4.5.0-1.el7.noarch
katello-4.5.0-1.el7.noarch
```
**Describe the bug**
Running `foreman-rake katello:delete_orphaned_content RAILS_ENV=production` creates two tasks.
One of the tasks gets stuck in _paused_ with the following error: `Repository matching query does not exist.`
**To Reproduce**
- Run
`foreman-rake katello:delete_orphaned_content RAILS_ENV=production`.
- After a few minutes run
`hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "paused"'`
**Expected behavior**
We expected both tasks to run successfully.
**Additional context**
We had to manually apply this patch: https://github.com/pulp/pulp_rpm/pull/2619 - before that, we had this same issue: https://github.com/pulp/pulp_rpm/issues/2459.
```
$ hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "paused" '
[
{
"ID": "a8aae19c-7c62-4a60-ab12-935687e3723e",
"Action": "Remove orphans ",
"State": "paused",
"Result": "error",
"Started at": "2022-07-21 15:29:02 UTC",
"Ended at": null,
"Duration": "00:02:29.56437",
"Owner": "foreman_admin",
"Task errors": [
"Repository matching query does not exist."
]
}
]
```
```
$ http_pulp /pulp/api/v3/tasks/e7df82c7-cb38-4fac-a28a-edd8842b3b69/
HTTP/1.1 200 OK
Access-Control-Expose-Headers: Correlation-ID
Allow: GET, PATCH, DELETE, HEAD, OPTIONS
Connection: Keep-Alive
Content-Length: 3335
Content-Type: application/json
Correlation-ID: e1f2e082da6e401196007424799a05b5
Date: Thu, 21 Jul 2022 17:10:58 GMT
Keep-Alive: timeout=15, max=100
Referrer-Policy: same-origin
Server: gunicorn
Vary: Accept,Cookie
Via: 1.1 updates.eurotux.com
X-Content-Type-Options: nosniff
X-Frame-Options: DENY
{
"child_tasks": [],
"created_resources": [],
"error": {
"description": "Repository matching query does not exist.",
"traceback": " File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/tasking/pulpcore_worker.py\", line 410, in _perform_task\n result = func(*args, **kwargs)\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/tasks/orphan.py\", line 66, in orphan_cleanup\n c.delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 746, in delete\n deleted, _rows_count = collector.delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/deletion.py\", line 435, in delete\n signals.post_delete.send(\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 180, in send\n return [\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 181, in <listcomp>\n (receiver, receiver(signal=self, sender=sender, **named))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\", line 340, in cleanup_subrepos\n Variant.objects.filter(repository=subrepo).delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 746, in delete\n deleted, _rows_count = collector.delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/deletion.py\", line 435, in delete\n signals.post_delete.send(\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 180, in send\n return [\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 181, in <listcomp>\n (receiver, receiver(signal=self, sender=sender, **named))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\", line 338, in cleanup_subrepos\n subrepo = instance.repository\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 187, in __get__\n rel_obj = self.get_object(instance)\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 154, in get_object\n return qs.get(self.field.get_reverse_related_filter(instance))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 435, in get\n raise self.model.DoesNotExist(\n"
},
"finished_at": "2022-07-21T15:30:11.075104Z",
"logging_cid": "ac4070d63c764578bb854f97873ba88a",
"name": "pulpcore.app.tasks.orphan.orphan_cleanup",
"parent_task": null,
"progress_reports": [
{
"code": "clean-up.content",
"done": 2000,
"message": "Clean up orphan Content",
"state": "running",
"suffix": null,
"total": 17098
}
],
"pulp_created": "2022-07-21T15:29:58.868921Z",
"pulp_href": "/pulp/api/v3/tasks/e7df82c7-cb38-4fac-a28a-edd8842b3b69/",
"reserved_resources_record": [],
"started_at": "2022-07-21T15:29:59.335110Z",
"state": "failed",
"task_group": null,
"worker": "/pulp/api/v3/workers/6ff07070-3e14-49b3-b508-a0c300683f28/"
}
```
edit: related to https://community.theforeman.org/t/delete-orphaned-content-fails/29494/6
https://bugzilla.redhat.com/show_bug.cgi?id=2115881
| Based on the traceback this seems to be a pulp-rpm issue, transferring.
`
File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\", line 338, in cleanup_subrepos\n subrepo = instance.repository\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 187, in __get__\n rel_obj = self.get_object(instance)\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 154, in get_object\n return qs.get(self.field.get_reverse_related_filter(instance))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 435, in get\n raise self.model.DoesNotExist(\n"
`
You said there are two tasks created and listed one, what's the other task? Is it two orphan cleanup tasks? It could be a race condition
It does not appear that we use exclusive resources to prevent more than one instance of orphan cleanup running simultaneously.
Hello @dralley,
Yes, it's another orphan cleanup task.
```
$ foreman-rake katello:delete_orphaned_content RAILS_ENV=production -n -v
** Invoke katello:delete_orphaned_content (first_time)
** Invoke dynflow:client (first_time)
** Invoke environment (first_time)
** Execute (dry run) environment
** Execute (dry run) dynflow:client
** Execute (dry run) katello:delete_orphaned_content
$ hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "running" or state = "paused" '
[
{
"ID": "078a72dc-8002-46bd-a988-7383f9604c42",
"Action": "Remove orphans ",
"State": "running",
"Result": "pending",
"Started at": "2022-07-26 16:35:51 UTC",
"Ended at": null,
"Duration": "00:00:09.569835",
"Owner": "foreman_admin",
"Task errors": [
]
},
{
"ID": "9fb6a3ee-5428-4c5e-b42f-22de91545a4d",
"Action": "Remove orphans ",
"State": "running",
"Result": "pending",
"Started at": "2022-07-26 16:35:50 UTC",
"Ended at": null,
"Duration": "00:00:09.974835",
"Owner": "foreman_admin",
"Task errors": [
]
}
]
```
A few minutes later:
```
$ hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "paused" or state = "running" '
[
{
"ID": "9fb6a3ee-5428-4c5e-b42f-22de91545a4d",
"Action": "Remove orphans ",
"State": "paused",
"Result": "error",
"Started at": "2022-07-26 16:35:50 UTC",
"Ended at": null,
"Duration": "00:02:28.169247",
"Owner": "foreman_admin",
"Task errors": [
"Repository matching query does not exist."
]
}
]
```
Apparently the task that was first launched got stuck.
So you're probably right and there's a race condition.
I also tried to run only one task at a time:
```
$ foreman-rake katello:delete_orphaned_content RAILS_ENV=production -v --jobs 1
```
But I still have the same issue.
Any ideas on how to avoid (or debug) this possible race condition? | 2022-08-04T17:03:58 |
|
pulp/pulpcore | 3,037 | pulp__pulpcore-3037 | [
"3030"
] | d3446789667c8f5bf6d7f230f4d8fbd24e6eecf1 | diff --git a/pulpcore/app/views/orphans.py b/pulpcore/app/views/orphans.py
--- a/pulpcore/app/views/orphans.py
+++ b/pulpcore/app/views/orphans.py
@@ -24,7 +24,5 @@ def delete(self, request, format=None):
"The `DELETE /pulp/api/v3/orphans/` call is deprecated. Use"
"`POST /pulp/api/v3/orphans/cleanup/` instead."
)
-
- task = dispatch(orphan_cleanup)
-
+ task = dispatch(orphan_cleanup, exclusive_resources=["/pulp/api/v3/orphans/cleanup/"])
return OperationPostponedResponse(task, request)
diff --git a/pulpcore/app/viewsets/orphans.py b/pulpcore/app/viewsets/orphans.py
--- a/pulpcore/app/viewsets/orphans.py
+++ b/pulpcore/app/viewsets/orphans.py
@@ -26,6 +26,7 @@ def cleanup(self, request):
task = dispatch(
orphan_cleanup,
+ exclusive_resources=["/pulp/api/v3/orphans/cleanup/"],
kwargs={"content_pks": content_pks, "orphan_protection_time": orphan_protection_time},
)
| Remove orphans: Repository matching query does not exist.
**Version**
```
$ rpm -qa | grep tfm-rubygem-pulp
tfm-rubygem-pulp_certguard_client-1.5.0-1.el7.noarch
tfm-rubygem-pulp_ansible_client-0.13.1-1.el7.noarch
tfm-rubygem-pulp_deb_client-2.18.0-1.el7.noarch
tfm-rubygem-pulp_rpm_client-3.17.4-1.el7.noarch
tfm-rubygem-pulp_python_client-3.6.0-1.el7.noarch
tfm-rubygem-pulp_ostree_client-2.0.0-0.1.a1.el7.noarch
tfm-rubygem-pulpcore_client-3.18.5-1.el7.noarch
tfm-rubygem-pulp_container_client-2.10.3-1.el7.noarch
tfm-rubygem-pulp_file_client-1.10.0-1.el7.noarch
$ rpm -qa | grep katello-4
tfm-rubygem-katello-4.5.0-1.el7.noarch
katello-4.5.0-1.el7.noarch
```
**Describe the bug**
Running `foreman-rake katello:delete_orphaned_content RAILS_ENV=production` creates two tasks.
One of the tasks gets stuck in _paused_ with the following error: `Repository matching query does not exist.`
**To Reproduce**
- Run
`foreman-rake katello:delete_orphaned_content RAILS_ENV=production`.
- After a few minutes run
`hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "paused"'`
**Expected behavior**
We expected both tasks to run successfully.
**Additional context**
We had to manually apply this patch: https://github.com/pulp/pulp_rpm/pull/2619 - before that, we had this same issue: https://github.com/pulp/pulp_rpm/issues/2459.
```
$ hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "paused" '
[
{
"ID": "a8aae19c-7c62-4a60-ab12-935687e3723e",
"Action": "Remove orphans ",
"State": "paused",
"Result": "error",
"Started at": "2022-07-21 15:29:02 UTC",
"Ended at": null,
"Duration": "00:02:29.56437",
"Owner": "foreman_admin",
"Task errors": [
"Repository matching query does not exist."
]
}
]
```
```
$ http_pulp /pulp/api/v3/tasks/e7df82c7-cb38-4fac-a28a-edd8842b3b69/
HTTP/1.1 200 OK
Access-Control-Expose-Headers: Correlation-ID
Allow: GET, PATCH, DELETE, HEAD, OPTIONS
Connection: Keep-Alive
Content-Length: 3335
Content-Type: application/json
Correlation-ID: e1f2e082da6e401196007424799a05b5
Date: Thu, 21 Jul 2022 17:10:58 GMT
Keep-Alive: timeout=15, max=100
Referrer-Policy: same-origin
Server: gunicorn
Vary: Accept,Cookie
Via: 1.1 updates.eurotux.com
X-Content-Type-Options: nosniff
X-Frame-Options: DENY
{
"child_tasks": [],
"created_resources": [],
"error": {
"description": "Repository matching query does not exist.",
"traceback": " File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/tasking/pulpcore_worker.py\", line 410, in _perform_task\n result = func(*args, **kwargs)\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/tasks/orphan.py\", line 66, in orphan_cleanup\n c.delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 746, in delete\n deleted, _rows_count = collector.delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/deletion.py\", line 435, in delete\n signals.post_delete.send(\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 180, in send\n return [\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 181, in <listcomp>\n (receiver, receiver(signal=self, sender=sender, **named))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\", line 340, in cleanup_subrepos\n Variant.objects.filter(repository=subrepo).delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 746, in delete\n deleted, _rows_count = collector.delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/deletion.py\", line 435, in delete\n signals.post_delete.send(\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 180, in send\n return [\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 181, in <listcomp>\n (receiver, receiver(signal=self, sender=sender, **named))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\", line 338, in cleanup_subrepos\n subrepo = instance.repository\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 187, in __get__\n rel_obj = self.get_object(instance)\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 154, in get_object\n return qs.get(self.field.get_reverse_related_filter(instance))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 435, in get\n raise self.model.DoesNotExist(\n"
},
"finished_at": "2022-07-21T15:30:11.075104Z",
"logging_cid": "ac4070d63c764578bb854f97873ba88a",
"name": "pulpcore.app.tasks.orphan.orphan_cleanup",
"parent_task": null,
"progress_reports": [
{
"code": "clean-up.content",
"done": 2000,
"message": "Clean up orphan Content",
"state": "running",
"suffix": null,
"total": 17098
}
],
"pulp_created": "2022-07-21T15:29:58.868921Z",
"pulp_href": "/pulp/api/v3/tasks/e7df82c7-cb38-4fac-a28a-edd8842b3b69/",
"reserved_resources_record": [],
"started_at": "2022-07-21T15:29:59.335110Z",
"state": "failed",
"task_group": null,
"worker": "/pulp/api/v3/workers/6ff07070-3e14-49b3-b508-a0c300683f28/"
}
```
edit: related to https://community.theforeman.org/t/delete-orphaned-content-fails/29494/6
https://bugzilla.redhat.com/show_bug.cgi?id=2115881
| Based on the traceback this seems to be a pulp-rpm issue, transferring.
`
File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\", line 338, in cleanup_subrepos\n subrepo = instance.repository\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 187, in __get__\n rel_obj = self.get_object(instance)\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 154, in get_object\n return qs.get(self.field.get_reverse_related_filter(instance))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 435, in get\n raise self.model.DoesNotExist(\n"
`
You said there are two tasks created and listed one, what's the other task? Is it two orphan cleanup tasks? It could be a race condition
It does not appear that we use exclusive resources to prevent more than one instance of orphan cleanup running simultaneously.
Hello @dralley,
Yes, it's another orphan cleanup task.
```
$ foreman-rake katello:delete_orphaned_content RAILS_ENV=production -n -v
** Invoke katello:delete_orphaned_content (first_time)
** Invoke dynflow:client (first_time)
** Invoke environment (first_time)
** Execute (dry run) environment
** Execute (dry run) dynflow:client
** Execute (dry run) katello:delete_orphaned_content
$ hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "running" or state = "paused" '
[
{
"ID": "078a72dc-8002-46bd-a988-7383f9604c42",
"Action": "Remove orphans ",
"State": "running",
"Result": "pending",
"Started at": "2022-07-26 16:35:51 UTC",
"Ended at": null,
"Duration": "00:00:09.569835",
"Owner": "foreman_admin",
"Task errors": [
]
},
{
"ID": "9fb6a3ee-5428-4c5e-b42f-22de91545a4d",
"Action": "Remove orphans ",
"State": "running",
"Result": "pending",
"Started at": "2022-07-26 16:35:50 UTC",
"Ended at": null,
"Duration": "00:00:09.974835",
"Owner": "foreman_admin",
"Task errors": [
]
}
]
```
A few minutes later:
```
$ hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "paused" or state = "running" '
[
{
"ID": "9fb6a3ee-5428-4c5e-b42f-22de91545a4d",
"Action": "Remove orphans ",
"State": "paused",
"Result": "error",
"Started at": "2022-07-26 16:35:50 UTC",
"Ended at": null,
"Duration": "00:02:28.169247",
"Owner": "foreman_admin",
"Task errors": [
"Repository matching query does not exist."
]
}
]
```
Apparently the task that was first launched got stuck.
So you're probably right and there's a race condition.
I also tried to run only one task at a time:
```
$ foreman-rake katello:delete_orphaned_content RAILS_ENV=production -v --jobs 1
```
But I still have the same issue.
Any ideas on how to avoid (or debug) this possible race condition? | 2022-08-04T17:28:00 |
|
pulp/pulpcore | 3,039 | pulp__pulpcore-3039 | [
"3030"
] | 1bac8e9eabd51f18f0d5c100aa81390c7c615183 | diff --git a/pulpcore/app/views/orphans.py b/pulpcore/app/views/orphans.py
--- a/pulpcore/app/views/orphans.py
+++ b/pulpcore/app/views/orphans.py
@@ -25,6 +25,6 @@ def delete(self, request, format=None):
"`POST /pulp/api/v3/orphans/cleanup/` instead."
)
- task = dispatch(orphan_cleanup)
+ task = dispatch(orphan_cleanup, exclusive_resources=["/pulp/api/v3/orphans/cleanup/"])
return OperationPostponedResponse(task, request)
diff --git a/pulpcore/app/viewsets/orphans.py b/pulpcore/app/viewsets/orphans.py
--- a/pulpcore/app/viewsets/orphans.py
+++ b/pulpcore/app/viewsets/orphans.py
@@ -26,6 +26,7 @@ def cleanup(self, request):
task = dispatch(
orphan_cleanup,
+ exclusive_resources=["/pulp/api/v3/orphans/cleanup/"],
kwargs={"content_pks": content_pks, "orphan_protection_time": orphan_protection_time},
)
| Remove orphans: Repository matching query does not exist.
**Version**
```
$ rpm -qa | grep tfm-rubygem-pulp
tfm-rubygem-pulp_certguard_client-1.5.0-1.el7.noarch
tfm-rubygem-pulp_ansible_client-0.13.1-1.el7.noarch
tfm-rubygem-pulp_deb_client-2.18.0-1.el7.noarch
tfm-rubygem-pulp_rpm_client-3.17.4-1.el7.noarch
tfm-rubygem-pulp_python_client-3.6.0-1.el7.noarch
tfm-rubygem-pulp_ostree_client-2.0.0-0.1.a1.el7.noarch
tfm-rubygem-pulpcore_client-3.18.5-1.el7.noarch
tfm-rubygem-pulp_container_client-2.10.3-1.el7.noarch
tfm-rubygem-pulp_file_client-1.10.0-1.el7.noarch
$ rpm -qa | grep katello-4
tfm-rubygem-katello-4.5.0-1.el7.noarch
katello-4.5.0-1.el7.noarch
```
**Describe the bug**
Running `foreman-rake katello:delete_orphaned_content RAILS_ENV=production` creates two tasks.
One of the tasks gets stuck in _paused_ with the following error: `Repository matching query does not exist.`
**To Reproduce**
- Run
`foreman-rake katello:delete_orphaned_content RAILS_ENV=production`.
- After a few minutes run
`hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "paused"'`
**Expected behavior**
We expected both tasks to run successfully.
**Additional context**
We had to manually apply this patch: https://github.com/pulp/pulp_rpm/pull/2619 - before that, we had this same issue: https://github.com/pulp/pulp_rpm/issues/2459.
```
$ hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "paused" '
[
{
"ID": "a8aae19c-7c62-4a60-ab12-935687e3723e",
"Action": "Remove orphans ",
"State": "paused",
"Result": "error",
"Started at": "2022-07-21 15:29:02 UTC",
"Ended at": null,
"Duration": "00:02:29.56437",
"Owner": "foreman_admin",
"Task errors": [
"Repository matching query does not exist."
]
}
]
```
```
$ http_pulp /pulp/api/v3/tasks/e7df82c7-cb38-4fac-a28a-edd8842b3b69/
HTTP/1.1 200 OK
Access-Control-Expose-Headers: Correlation-ID
Allow: GET, PATCH, DELETE, HEAD, OPTIONS
Connection: Keep-Alive
Content-Length: 3335
Content-Type: application/json
Correlation-ID: e1f2e082da6e401196007424799a05b5
Date: Thu, 21 Jul 2022 17:10:58 GMT
Keep-Alive: timeout=15, max=100
Referrer-Policy: same-origin
Server: gunicorn
Vary: Accept,Cookie
Via: 1.1 updates.eurotux.com
X-Content-Type-Options: nosniff
X-Frame-Options: DENY
{
"child_tasks": [],
"created_resources": [],
"error": {
"description": "Repository matching query does not exist.",
"traceback": " File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/tasking/pulpcore_worker.py\", line 410, in _perform_task\n result = func(*args, **kwargs)\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/tasks/orphan.py\", line 66, in orphan_cleanup\n c.delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 746, in delete\n deleted, _rows_count = collector.delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/deletion.py\", line 435, in delete\n signals.post_delete.send(\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 180, in send\n return [\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 181, in <listcomp>\n (receiver, receiver(signal=self, sender=sender, **named))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\", line 340, in cleanup_subrepos\n Variant.objects.filter(repository=subrepo).delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 746, in delete\n deleted, _rows_count = collector.delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/deletion.py\", line 435, in delete\n signals.post_delete.send(\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 180, in send\n return [\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 181, in <listcomp>\n (receiver, receiver(signal=self, sender=sender, **named))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\", line 338, in cleanup_subrepos\n subrepo = instance.repository\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 187, in __get__\n rel_obj = self.get_object(instance)\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 154, in get_object\n return qs.get(self.field.get_reverse_related_filter(instance))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 435, in get\n raise self.model.DoesNotExist(\n"
},
"finished_at": "2022-07-21T15:30:11.075104Z",
"logging_cid": "ac4070d63c764578bb854f97873ba88a",
"name": "pulpcore.app.tasks.orphan.orphan_cleanup",
"parent_task": null,
"progress_reports": [
{
"code": "clean-up.content",
"done": 2000,
"message": "Clean up orphan Content",
"state": "running",
"suffix": null,
"total": 17098
}
],
"pulp_created": "2022-07-21T15:29:58.868921Z",
"pulp_href": "/pulp/api/v3/tasks/e7df82c7-cb38-4fac-a28a-edd8842b3b69/",
"reserved_resources_record": [],
"started_at": "2022-07-21T15:29:59.335110Z",
"state": "failed",
"task_group": null,
"worker": "/pulp/api/v3/workers/6ff07070-3e14-49b3-b508-a0c300683f28/"
}
```
edit: related to https://community.theforeman.org/t/delete-orphaned-content-fails/29494/6
https://bugzilla.redhat.com/show_bug.cgi?id=2115881
| Based on the traceback this seems to be a pulp-rpm issue, transferring.
`
File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\", line 338, in cleanup_subrepos\n subrepo = instance.repository\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 187, in __get__\n rel_obj = self.get_object(instance)\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 154, in get_object\n return qs.get(self.field.get_reverse_related_filter(instance))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 435, in get\n raise self.model.DoesNotExist(\n"
`
You said there are two tasks created and listed one, what's the other task? Is it two orphan cleanup tasks? It could be a race condition
It does not appear that we use exclusive resources to prevent more than one instance of orphan cleanup running simultaneously.
Hello @dralley,
Yes, it's another orphan cleanup task.
```
$ foreman-rake katello:delete_orphaned_content RAILS_ENV=production -n -v
** Invoke katello:delete_orphaned_content (first_time)
** Invoke dynflow:client (first_time)
** Invoke environment (first_time)
** Execute (dry run) environment
** Execute (dry run) dynflow:client
** Execute (dry run) katello:delete_orphaned_content
$ hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "running" or state = "paused" '
[
{
"ID": "078a72dc-8002-46bd-a988-7383f9604c42",
"Action": "Remove orphans ",
"State": "running",
"Result": "pending",
"Started at": "2022-07-26 16:35:51 UTC",
"Ended at": null,
"Duration": "00:00:09.569835",
"Owner": "foreman_admin",
"Task errors": [
]
},
{
"ID": "9fb6a3ee-5428-4c5e-b42f-22de91545a4d",
"Action": "Remove orphans ",
"State": "running",
"Result": "pending",
"Started at": "2022-07-26 16:35:50 UTC",
"Ended at": null,
"Duration": "00:00:09.974835",
"Owner": "foreman_admin",
"Task errors": [
]
}
]
```
A few minutes later:
```
$ hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "paused" or state = "running" '
[
{
"ID": "9fb6a3ee-5428-4c5e-b42f-22de91545a4d",
"Action": "Remove orphans ",
"State": "paused",
"Result": "error",
"Started at": "2022-07-26 16:35:50 UTC",
"Ended at": null,
"Duration": "00:02:28.169247",
"Owner": "foreman_admin",
"Task errors": [
"Repository matching query does not exist."
]
}
]
```
Apparently the task that was first launched got stuck.
So you're probably right and there's a race condition.
I also tried to run only one task at a time:
```
$ foreman-rake katello:delete_orphaned_content RAILS_ENV=production -v --jobs 1
```
But I still have the same issue.
Any ideas on how to avoid (or debug) this possible race condition? | 2022-08-05T10:35:39 |
|
pulp/pulpcore | 3,062 | pulp__pulpcore-3062 | [
"3063"
] | d128d92d2a35ccb889e51d6852e3cb02f6ed5b82 | diff --git a/pulpcore/app/tasks/base.py b/pulpcore/app/tasks/base.py
--- a/pulpcore/app/tasks/base.py
+++ b/pulpcore/app/tasks/base.py
@@ -35,8 +35,7 @@ def general_create(app_label, serializer_name, *args, **kwargs):
serializer_class = get_plugin_config(app_label).named_serializers[serializer_name]
serializer = serializer_class(data=data, context=context)
serializer.is_valid(raise_exception=True)
- serializer.save()
- instance = serializer_class.Meta.model.objects.get(pk=serializer.instance.pk).cast()
+ instance = serializer.save().cast()
resource = CreatedResource(content_object=instance)
resource.save()
| general_create performs one unnecessary db lookup
`Serializer.save()` already returns the instance we want. No need to fetch it again from the db.
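For illustration, a standalone sketch of the DRF behavior this relies on; the `Pet` model, `PetSerializer`, and `myapp` here are hypothetical names, not pulpcore code:

```python
# Hypothetical model/serializer, only to show that ModelSerializer.save()
# already hands back the saved instance, so a follow-up objects.get(pk=...)
# round trip is redundant.
from rest_framework import serializers

from myapp.models import Pet  # hypothetical Django model with a "name" field


class PetSerializer(serializers.ModelSerializer):
    class Meta:
        model = Pet
        fields = ["name"]


serializer = PetSerializer(data={"name": "rex"})
serializer.is_valid(raise_exception=True)
instance = serializer.save()            # returns the created Pet
assert instance is serializer.instance  # no extra Pet.objects.get(...) needed
```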
| 2022-08-11T07:46:05 |
||
pulp/pulpcore | 3,076 | pulp__pulpcore-3076 | [
"3075"
] | 4740c551f51685916048b6fa6b1c499ba0e9a200 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -123,14 +123,17 @@ def _import_file(fpath, resource_class, retry=False):
curr_attempt=curr_attempt,
)
)
-
- # Last attempt, we raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
- a_result = resource.import_data(data, raise_errors=True)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(f"FATAL import-failure importing {fpath}")
- raise
+ else:
+ break
+ else:
+ # The while condition is not fulfilled, so we proceed to the last attempt,
+ # we raise an exception on any problem. This will either succeed, or log a
+ # fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(f"FATAL import-failure importing {fpath}")
+ raise
else:
a_result = resource.import_data(data, raise_errors=True)
yield a_result
| The same content is imported multiple times in a row
Exported resources are imported `MAX_ATTEMPTS` times in a row, which is unnecessary:
https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/importer.py#L107-L133
Introducing a new `yield`ing else branch would help to resolve this problem.
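For context, a standalone sketch of the loop shape used by the fix above: the retries `break` out on success, and the final strict attempt only runs in the `while ... else` branch once all retries were exhausted. The `do_import` callable, its `raise_errors` flag, and the `MAX_ATTEMPTS` value are illustrative stand-ins, not the actual importer code:

```python
# Illustrative retry loop: `break` on success skips the `else` block entirely,
# so a cleanly importable resource is imported once, not MAX_ATTEMPTS times.
MAX_ATTEMPTS = 3


def import_with_retries(do_import):
    attempt = 0
    while attempt < MAX_ATTEMPTS - 1:
        attempt += 1
        result = do_import(raise_errors=False)
        if result.has_errors():
            continue  # transient failure, retry
        break  # success, no further attempts needed
    else:
        # Runs only when the loop above exhausted its attempts without a break.
        result = do_import(raise_errors=True)
    return result
```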
| 2022-08-15T14:08:27 |
||
pulp/pulpcore | 3,083 | pulp__pulpcore-3083 | [
"3081"
] | e76e23be391c1c3604f1c62063f11513d96e837d | diff --git a/pulpcore/app/serializers/content.py b/pulpcore/app/serializers/content.py
--- a/pulpcore/app/serializers/content.py
+++ b/pulpcore/app/serializers/content.py
@@ -47,22 +47,37 @@ def __init__(self, *args, **kwargs):
@transaction.atomic
def create(self, validated_data):
"""
- Create the content and associate it with its Artifact.
+ Create the content and associate it with its Artifact, or retrieve the existing content.
Args:
validated_data (dict): Data to save to the database
"""
- artifact = validated_data.pop("artifact")
- if "relative_path" not in self.fields or self.fields["relative_path"].write_only:
- relative_path = validated_data.pop("relative_path")
+ content = self.retrieve(validated_data)
+
+ if content is not None:
+ content.touch()
else:
- relative_path = validated_data.get("relative_path")
- content = self.Meta.model.objects.create(**validated_data)
- models.ContentArtifact.objects.create(
- artifact=artifact, content=content, relative_path=relative_path
- )
+ artifact = validated_data.pop("artifact")
+ if "relative_path" not in self.fields or self.fields["relative_path"].write_only:
+ relative_path = validated_data.pop("relative_path")
+ else:
+ relative_path = validated_data.get("relative_path")
+ content = self.Meta.model.objects.create(**validated_data)
+ models.ContentArtifact.objects.create(
+ artifact=artifact, content=content, relative_path=relative_path
+ )
+
return content
+ def retrieve(self, validated_data):
+ """
+ Retrieve existing content unit if it exists, else return None.
+
+ This method is plugin-specific and implementing it for a specific content type
+ allows for uploading already existing content units of that type.
+ """
+ return None
+
class Meta:
model = models.Content
fields = BaseContentSerializer.Meta.fields + ("artifact", "relative_path")
| Reuse existing artifact and content units in case the user provided proof that they have the bits
On content upload, pulp should not reject the request for any uniqueness constraint violation, but realize that the user was able to provide the bits and therefore grant use of the existing records. That means not throwing an exception, returning the href of the existing object, and possibly also adding it to a repository if one was specified.
This is a generalization of https://github.com/pulp/pulp_file/issues/774
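A minimal sketch of how a plugin could opt in, by implementing the `retrieve()` hook the patch above adds to the single-artifact content serializer; `MyContent`, its `digest` field, the `myplugin` module, and the chosen lookup key are purely illustrative, not an existing plugin:

```python
# Hypothetical plugin serializer: when the uploaded bits match an existing
# unit, return it so the base create() reuses (and touches) it instead of
# failing on the uniqueness constraint; returning None keeps the default
# "create a new unit" behavior.
from pulpcore.plugin.serializers import SingleArtifactContentSerializer

from myplugin.models import MyContent  # hypothetical content model


class MyContentSerializer(SingleArtifactContentSerializer):
    def retrieve(self, validated_data):
        return MyContent.objects.filter(
            relative_path=validated_data["relative_path"],
            digest=validated_data["artifact"].sha256,
        ).first()

    class Meta(SingleArtifactContentSerializer.Meta):
        model = MyContent
```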
| 2022-08-16T10:59:27 |
||
pulp/pulpcore | 3,084 | pulp__pulpcore-3084 | [
"3080"
] | bc001ea6acbce15f41ca4976e6c9ce7c82fc016e | diff --git a/pulpcore/app/apps.py b/pulpcore/app/apps.py
--- a/pulpcore/app/apps.py
+++ b/pulpcore/app/apps.py
@@ -245,15 +245,19 @@ def _populate_access_policies(sender, apps, verbosity, **kwargs):
)
)
if not created and not db_access_policy.customized:
+ dirty = False
for key, value in access_policy.items():
- setattr(db_access_policy, key, value)
- db_access_policy.save()
- if verbosity >= 1:
- print(
- "Access policy for {viewset_name} updated.".format(
- viewset_name=viewset_name
+ if getattr(db_access_policy, key, None) != value:
+ setattr(db_access_policy, key, value)
+ dirty = True
+ if dirty:
+ db_access_policy.save()
+ if verbosity >= 1:
+ print(
+ "Access policy for {viewset_name} updated.".format(
+ viewset_name=viewset_name
+ )
)
- )
def _populate_system_id(sender, apps, verbosity, **kwargs):
diff --git a/pulpcore/app/migrations/0050_namespace_access_policies.py b/pulpcore/app/migrations/0050_namespace_access_policies.py
--- a/pulpcore/app/migrations/0050_namespace_access_policies.py
+++ b/pulpcore/app/migrations/0050_namespace_access_policies.py
@@ -12,7 +12,7 @@ def namespace_access_policies_up(apps, schema_editor):
def namespace_access_policies_down(apps, schema_editor):
AccessPolicy = apps.get_model('core', 'AccessPolicy')
- task_policy = AccessPolicy.objects.get(viewset_name="tasks").delete()
+ task_policy = AccessPolicy.objects.get(viewset_name="tasks")
task_policy.viewset_name = "TaskViewSet"
task_policy.save()
diff --git a/pulpcore/app/viewsets/access_policy.py b/pulpcore/app/viewsets/access_policy.py
--- a/pulpcore/app/viewsets/access_policy.py
+++ b/pulpcore/app/viewsets/access_policy.py
@@ -51,7 +51,10 @@ def reset(self, request, pk=None):
for plugin_config in pulp_plugin_configs():
for viewset_batch in plugin_config.named_viewsets.values():
for viewset in viewset_batch:
- if get_view_urlpattern(viewset) == access_policy.viewset_name:
+ if (
+ hasattr(viewset, "DEFAULT_ACCESS_POLICY")
+ and get_view_urlpattern(viewset) == access_policy.viewset_name
+ ):
default_access_policy = viewset.DEFAULT_ACCESS_POLICY
access_policy.statements = default_access_policy["statements"]
access_policy.creation_hooks = default_access_policy.get(
| pulp rpm content upload fails with "add_for_object_creator not registered" error
**Version**
```
"versions": [
{
"component": "core",
"version": "3.20.0",
"package": "pulpcore"
},
{
"component": "rpm",
"version": "3.17.9",
"package": "pulp-rpm"
}
],
```
**Describe the bug**
When trying to upload new rpm content, the upload fails with the following error:
```
# pulp rpm content upload --file /etc/pulp/scrape/splunk/splunkforwarder-9.0.0.1-9e907cedecb1-linux-2.6-x86_64.rpm --relative-path splunkforwarder-9.0.0.1-9e907cedecb1-linux-2.6-x86_64.rpm
Uploading file /etc/pulp/scrape/splunk/splunkforwarder-9.0.0.1-9e907cedecb1-linux-2.6-x86_64.rpm
.............................................Upload complete. Creating artifact.
Error: {"detail":"Creation hook 'add_for_object_creator' was not registered for this view set."}
```
**To Reproduce**
Install pulpcore with the pulp_rpm plugin and try to upload RPM content
**Expected behavior**
No error is emitted
**Additional context**
We did upgrade from pulp 3.17.3 to 3.20.0. It seems that the mentioned `add_for_object_creator` was removed in https://github.com/pulp/pulpcore/commit/4d50a746a30ba95ffab2dd3fbbc8d72df2c50c48 but maybe some migration scripts must be adjusted to ensure a smooth upgrade after the removal of that `add_for_object_creator` hook?
| Can you verify that it works once you called `pulp access-policy reset --viewset-name content/file/packages` (not entirely sure about the viewset name...)?
Long story short, it unfortunately didn't help. More details below. Btw, I see the problematic `add_for_object_creator` still referenced here; could that be related to the problem? https://github.com/pulp/pulpcore/blob/aefd552c6fc2ec0f281e01681b8e9da96b9f9556/pulpcore/app/migrations/0042_rbac_for_tasks.py#L32
**Details:**
I listed all policies (via `pulp access-policy list`) and spotted that the `tasks` policy mentioned the `add_for_object_creator` function:
```
{
"pulp_href": "/pulp/api/v3/access_policies/8be638d0-e6e3-4158-8bd9-29a009467a28/",
"pulp_created": "2021-08-19T13:14:17.036741Z",
"permissions_assignment": [
{
"function": "add_for_object_creator",
"parameters": null,
"permissions": [
"core.view_task",
"core.change_task",
"core.delete_task"
]
}
],
"creation_hooks": [
{
"function": "add_for_object_creator",
"parameters": null,
"permissions": [
"core.view_task",
"core.change_task",
"core.delete_task"
]
}
],
"statements": [
{
"action": [
"list"
],
"effect": "allow",
"principal": "authenticated"
},
{
"action": [
"retrieve"
],
"effect": "allow",
"condition": "has_model_or_obj_perms:core.view_task",
"principal": "authenticated"
},
{
"action": [
"destroy"
],
"effect": "allow",
"condition": "has_model_or_obj_perms:core.delete_task",
"principal": "authenticated"
},
{
"action": [
"update",
"partial_update"
],
"effect": "allow",
"condition": "has_model_or_obj_perms:core.change_task",
"principal": "authenticated"
}
],
"viewset_name": "tasks",
"customized": true,
"queryset_scoping": null
}
```
After `pulp access-policy reset --viewset-name tasks` :
```
{
"pulp_href": "/pulp/api/v3/access_policies/8be638d0-e6e3-4158-8bd9-29a009467a28/",
"pulp_created": "2021-08-19T13:14:17.036741Z",
"permissions_assignment": [
{
"function": "add_roles_for_object_creator",
"parameters": {
"roles": "core.task_owner"
}
}
],
"creation_hooks": [
{
"function": "add_roles_for_object_creator",
"parameters": {
"roles": "core.task_owner"
}
}
],
"statements": [
{
"action": [
"list"
],
"principal": "authenticated",
"effect": "allow"
},
{
"action": [
"retrieve",
"my_permissions"
],
"principal": "authenticated",
"effect": "allow",
"condition": "has_model_or_obj_perms:core.view_task"
},
{
"action": [
"destroy"
],
"principal": "authenticated",
"effect": "allow",
"condition": "has_model_or_obj_perms:core.delete_task"
},
{
"action": [
"update",
"partial_update"
],
"principal": "authenticated",
"effect": "allow",
"condition": "has_model_or_obj_perms:core.change_task"
},
{
"action": [
"purge"
],
"principal": "authenticated",
"effect": "allow"
},
{
"action": [
"list_roles",
"add_role",
"remove_role"
],
"principal": "authenticated",
"effect": "allow",
"condition": "has_model_or_obj_perms:core.manage_roles_task"
}
],
"viewset_name": "tasks",
"customized": false,
"queryset_scoping": {
"function": "scope_queryset"
}
}
```
However it seems it still didn't help:
```
# pulp rpm content upload --file /etc/pulp/scrape/splunk/splunkforwarder-9.0.0.1-9e907cedecb1-linux-2.6-x86_64.rpm --relative-path splunkforwarder-9.0.0.1-9e907cedecb1-linux-2.6-x86_64.rpm
Uploading file /etc/pulp/scrape/splunk/splunkforwarder-9.0.0.1-9e907cedecb1-linux-2.6-x86_64.rpm
.............................................Upload complete. Creating artifact.
Error: {"detail":"Creation hook 'add_for_object_creator' was not registered for this view set."}
(pulp3)
```
Btw I spotted following log entries in system logs, not sure if it is related but it also mentions `add_for_object_creator` issues.
```
Aug 16 12:33:19 pulp3-dev01 gunicorn[503584]: pulp [f3165800d91440a5abe3fd6e411565c3]: 127.0.0.1 - admin [16/Aug/2022:10:33:19 +0000] "PUT /pulp/api/v3/uploads/20083ccd-b93f-41b3-89d9-b971326aaf9a/ HTTP/1.1" 200 135 "-" "Pulp-CLI/0.14.0"
Aug 16 12:33:19 pulp3-dev01 gunicorn[503584]: pulp [7d236724f2e74ed389c0405d74c184a7]: django.request:ERROR: Internal Server Error: /pulp/api/v3/uploads/20083ccd-b93f-41b3-89d9-b971326aaf9a/commit/
Aug 16 12:33:19 pulp3-dev01 gunicorn[503584]: pulp [7d236724f2e74ed389c0405d74c184a7]: 127.0.0.1 - admin [16/Aug/2022:10:33:19 +0000] "POST /pulp/api/v3/uploads/20083ccd-b93f-41b3-89d9-b971326aaf9a/commit/ HTTP/1.1" 500 89 "-" "Pulp-CLI/0.14.0"
Aug 16 12:33:21 pulp3-dev01 pulpcore-worker[503628]: pulp [None]: pulpcore.tasking.tasks:WARNING: Dispatching scheduled task pulpcore.app.tasks.telemetry.post_telemetry failed. Creation hook 'add_for_object_creator' was not registered for this view set.
Aug 16 12:33:21 pulp3-dev01 pulpcore-worker[503645]: pulp [None]: pulpcore.tasking.tasks:WARNING: Dispatching scheduled task pulpcore.app.tasks.telemetry.post_telemetry failed. Creation hook 'add_for_object_creator' was not registered for this view set.
Aug 16 12:33:22 pulp3-dev01 gunicorn[503584]: pulp [53dbc6f538454d23b6c84737a56dd3a3]: 127.0.0.1 - admin [16/Aug/2022:10:33:22 +0000] "DELETE /pulp/api/v3/uploads/20083ccd-b93f-41b3-89d9-b971326aaf9a/ HTTP/1.1" 204 0 "-" "Pulp-CLI/0.14.0"
Aug 16 12:33:31 pulp3-dev01 pulpcore-worker[503628]: pulp [None]: pulpcore.tasking.tasks:WARNING: Dispatching scheduled task pulpcore.app.tasks.telemetry.post_telemetry failed. Creation hook 'add_for_object_creator' was not registered for this view set.
Aug 16 12:33:31 pulp3-dev01 pulpcore-worker[503645]: pulp [None]: pulpcore.tasking.tasks:WARNING: Dispatching scheduled task pulpcore.app.tasks.telemetry.post_telemetry failed. Creation hook 'add_for_object_creator' was not registered for this view set.
Aug 16 12:33:41 pulp3-dev01 pulpcore-worker[503628]: pulp [None]: pulpcore.tasking.tasks:WARNING: Dispatching scheduled task pulpcore.app.tasks.telemetry.post_telemetry failed. Creation hook 'add_for_object_creator' was not registered for this view set.
Aug 16 12:33:41 pulp3-dev01 pulpcore-worker[503645]: pulp [None]: pulpcore.tasking.tasks:WARNING: Dispatching scheduled task pulpcore.app.tasks.telemetry.post_telemetry failed. Creation hook 'add_for_object_creator' was not registered for this view set.
Aug 16 12:33:51 pulp3-dev01 pulpcore-worker[503628]: pulp [None]: pulpcore.tasking.tasks:WARNING: Dispatching scheduled task pulpcore.app.tasks.telemetry.post_telemetry failed. Creation hook 'add_for_object_creator' was not registered for this view set.
Aug 16 12:33:51 pulp3-dev01 pulpcore-worker[503645]: pulp [None]: pulpcore.tasking.tasks:WARNING: Dispatching scheduled task pulpcore.app.tasks.telemetry.post_telemetry failed. Creation hook 'add_for_object_creator' was not registered for this view set.
Aug 16 12:34:01 pulp3-dev01 pulpcore-worker[503628]: pulp [None]: pulpcore.tasking.tasks:WARNING: Dispatching scheduled task pulpcore.app.tasks.telemetry.post_telemetry failed. Creation hook 'add_for_object_creator' was not registered for this view set.
```
Can you restart the services after changing / resetting the access policy?
Thanks for the hint and after the restart (`systemctl restart pulpcore-api.service pulpcore-worker@*.service pulpcore-content.service`), everything seems to work like a charm. Thank you!
Thank you so much for finding this!
One last question (and I think I know the answer): Did you run `pulpcore-manager migrate` after upgrading?
It seems that the post-migrate hook may have failed to update that access policy.
Yep, I tried to run `pulpcore-manager migrate` multiple times but it didn't help, so I opened this bug report :)
I recorded the solution here:
https://discourse.pulpproject.org/t/operations-fail-with-creation-hook-add-for-object-creator-was-not-registered-for-this-view-set/565 | 2022-08-16T12:53:10 |
|
pulp/pulpcore | 3,090 | pulp__pulpcore-3090 | [
"2445"
] | 534e8b84b79b4e43d47d73a31e9f1c8e53da0610 | diff --git a/pulpcore/plugin/stages/api.py b/pulpcore/plugin/stages/api.py
--- a/pulpcore/plugin/stages/api.py
+++ b/pulpcore/plugin/stages/api.py
@@ -212,7 +212,7 @@ async def run(self):
history.add(stage)
if i < len(stages) - 1:
if settings.PROFILE_STAGES_API:
- out_q = ProfilingQueue.make_and_record_queue(stages[i + 1], i + 1, maxsize)
+ out_q = await ProfilingQueue.make_and_record_queue(stages[i + 1], i + 1, maxsize)
else:
out_q = asyncio.Queue(maxsize=maxsize)
else:
diff --git a/pulpcore/plugin/stages/profiler.py b/pulpcore/plugin/stages/profiler.py
--- a/pulpcore/plugin/stages/profiler.py
+++ b/pulpcore/plugin/stages/profiler.py
@@ -1,3 +1,4 @@
+from asgiref.sync import sync_to_async
from asyncio import Queue
import pathlib
import time
@@ -66,15 +67,22 @@ def put_nowait(self, item):
except KeyError:
pass
else:
- service_time = now - item.extra_data["last_get_time"]
- sql = (
- "INSERT INTO traffic (uuid, waiting_time, service_time) VALUES ("
- "'{uuid}','{waiting_time}','{service_time}')"
- )
- formatted_sql = sql.format(
- uuid=self.stage_uuid, waiting_time=last_waiting_time, service_time=service_time
- )
- CONN.cursor().execute(formatted_sql)
+ last_get_time = item.extra_data["last_get_time"]
+ # the extra_data dictionary might be initialized from within the plugin as
+ # 'defaultdict' returning empty lists by default; with this if statement,
+ # we prevent errors like "(unsupported operand type(s) for -: 'float' and 'list')"
+ if last_get_time or last_get_time == 0:
+ service_time = now - last_get_time
+ sql = (
+ "INSERT INTO traffic (uuid, waiting_time, service_time) VALUES ("
+ "'{uuid}','{waiting_time}','{service_time}')"
+ )
+ formatted_sql = sql.format(
+ uuid=self.stage_uuid,
+ waiting_time=last_waiting_time,
+ service_time=service_time,
+ )
+ CONN.cursor().execute(formatted_sql)
interarrival_time = now - self.last_arrival_time
sql = (
@@ -92,7 +100,7 @@ def put_nowait(self, item):
return super().put_nowait(item)
@staticmethod
- def make_and_record_queue(stage, num, maxsize):
+ async def make_and_record_queue(stage, num, maxsize):
"""
Create a ProfileQueue that is associated with the stage it feeds and record it in sqlite3.
@@ -105,7 +113,7 @@ def make_and_record_queue(stage, num, maxsize):
ProfilingQueue: The configured ProfilingQueue that was also recorded in the db.
"""
if CONN is None:
- create_profile_db_and_connection()
+ await create_profile_db_and_connection()
stage_id = uuid.uuid4()
stage_name = ".".join([stage.__class__.__module__, stage.__class__.__name__])
sql = "INSERT INTO stages (uuid, name, num) VALUES ('{uuid}','{stage}','{num}')"
@@ -116,7 +124,7 @@ def make_and_record_queue(stage, num, maxsize):
return in_q
-def create_profile_db_and_connection():
+async def create_profile_db_and_connection():
"""
Create a profile db from this tasks UUID and a sqlite3 connection to that databases.
@@ -139,7 +147,7 @@ def create_profile_db_and_connection():
"""
debug_data_dir = "/var/lib/pulp/debug/"
pathlib.Path(debug_data_dir).mkdir(parents=True, exist_ok=True)
- current_task = Task.current()
+ current_task = await sync_to_async(Task.current)()
if current_task:
db_path = debug_data_dir + str(current_task.pk)
else:
| When profiling pipeline stages, an error is raised
Based on https://docs.pulpproject.org/pulpcore/en/latest/plugins/api-reference/profiling.html?highlight=profiling, I have enabled profiling by setting `PROFILE_STAGES_API = True`. When I executed the sync pipeline, the following error was raised:
```
"description": "You cannot call this from an async context - use a thread or sync_to_async.",
"traceback": " File \"/home/vagrant/devel/pulpcore/pulpcore/tasking/pulpcore_worker.py\", line 442, in _perform_task\n result = func(*args, **kwargs)\n File \"/home/vagrant/devel/pulp_ostree/pulp_ostree/app/tasks/synchronizing.py\", line 71, in synchronize\n return dv.create()\n File \"/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/declarative_version.py\", line 161, in create\n loop.run_until_complete(pipeline)\n File \"/usr/lib64/python3.10/asyncio/base_events.py\", line 641, in run_until_complete\n return future.result()\n File \"/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/api.py\", line 215, in create_pipeline\n out_q = ProfilingQueue.make_and_record_queue(stages[i + 1], i + 1, maxsize)\n File \"/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/profiler.py\", line 108, in make_and_record_queue\n create_profile_db_and_connection()\n File \"/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/profiler.py\", line 142, in create_profile_db_and_connection\n current_task = Task.current()\n File \"/home/vagrant/devel/pulpcore/pulpcore/app/models/task.py\", line 230, in current\n task = Task.objects.get(pk=task_id)\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/manager.py\", line 85, in manager_method\n return getattr(self.get_queryset(), name)(*args, **kwargs)\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py\", line 431, in get\n num = len(clone)\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py\", line 262, in __len__\n self._fetch_all()\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py\", line 1324, in _fetch_all\n self._result_cache = list(self._iterable_class(self))\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py\", line 51, in __iter__\n results = compiler.execute_sql(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/sql/compiler.py\", line 1173, in execute_sql\n cursor = self.connection.cursor()\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/django/utils/asyncio.py\", line 31, in inner\n raise SynchronousOnlyOperation(message)\n"
```
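The root cause is synchronous ORM access from inside the event loop; the remedy, which the changes suggested below apply, is asgiref's `sync_to_async`. A minimal illustration of the pattern around the `Task.current()` call from the traceback:
```
from asgiref.sync import sync_to_async

from pulpcore.app.models import Task


async def profiled_setup():
    # Calling Task.current() directly here raises SynchronousOnlyOperation,
    # because it issues a blocking database query from async code.
    current_task = await sync_to_async(Task.current)()
    return current_task
```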
| The following changes helped me to resolve the error:
```diff
diff --git a/pulpcore/plugin/stages/api.py b/pulpcore/plugin/stages/api.py
index 0fd2a3ab9..444e087ef 100644
--- a/pulpcore/plugin/stages/api.py
+++ b/pulpcore/plugin/stages/api.py
@@ -212,7 +212,7 @@ async def create_pipeline(stages, maxsize=1):
history.add(stage)
if i < len(stages) - 1:
if settings.PROFILE_STAGES_API:
- out_q = ProfilingQueue.make_and_record_queue(stages[i + 1], i + 1, maxsize)
+ out_q = await ProfilingQueue.make_and_record_queue(stages[i + 1], i + 1, maxsize)
else:
out_q = asyncio.Queue(maxsize=maxsize)
else:
diff --git a/pulpcore/plugin/stages/profiler.py b/pulpcore/plugin/stages/profiler.py
index de42ba63f..2f854013d 100644
--- a/pulpcore/plugin/stages/profiler.py
+++ b/pulpcore/plugin/stages/profiler.py
@@ -1,3 +1,5 @@
+from asgiref.sync import sync_to_async
+
from asyncio import Queue
import pathlib
import time
@@ -92,7 +94,7 @@ class ProfilingQueue(Queue):
return super().put_nowait(item)
@staticmethod
- def make_and_record_queue(stage, num, maxsize):
+ async def make_and_record_queue(stage, num, maxsize):
"""
Create a ProfileQueue that is associated with the stage it feeds and record it in sqlite3.
@@ -105,7 +107,7 @@ class ProfilingQueue(Queue):
ProfilingQueue: The configured ProfilingQueue that was also recorded in the db.
"""
if CONN is None:
- create_profile_db_and_connection()
+ await create_profile_db_and_connection()
stage_id = uuid.uuid4()
stage_name = ".".join([stage.__class__.__module__, stage.__class__.__name__])
sql = "INSERT INTO stages (uuid, name, num) VALUES ('{uuid}','{stage}','{num}')"
@@ -116,7 +118,7 @@ class ProfilingQueue(Queue):
return in_q
-def create_profile_db_and_connection():
+async def create_profile_db_and_connection():
"""
Create a profile db from this tasks UUID and a sqlite3 connection to that databases.
@@ -139,7 +141,7 @@ def create_profile_db_and_connection():
"""
debug_data_dir = "/var/lib/pulp/debug/"
pathlib.Path(debug_data_dir).mkdir(parents=True, exist_ok=True)
- current_task = Task.current()
+ current_task = await sync_to_async(Task.current)()
if current_task:
db_path = debug_data_dir + str(current_task.pk)
else:
``` | 2022-08-18T08:30:09 |
|
pulp/pulpcore | 3,092 | pulp__pulpcore-3092 | [
"3091"
] | 534e8b84b79b4e43d47d73a31e9f1c8e53da0610 | diff --git a/pulpcore/app/migrations/0094_protect_repository_content.py b/pulpcore/app/migrations/0094_protect_repository_content.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/app/migrations/0094_protect_repository_content.py
@@ -0,0 +1,29 @@
+# Generated by Django 3.2.15 on 2022-08-18 08:10
+
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('core', '0093_add_info_field_repositoryversion'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='repositorycontent',
+ name='content',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='version_memberships', to='core.content'),
+ ),
+ migrations.AlterField(
+ model_name='repositorycontent',
+ name='version_added',
+ field=models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, related_name='added_memberships', to='core.repositoryversion'),
+ ),
+ migrations.AlterField(
+ model_name='repositorycontent',
+ name='version_removed',
+ field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.RESTRICT, related_name='removed_memberships', to='core.repositoryversion'),
+ ),
+ ]
diff --git a/pulpcore/app/models/repository.py b/pulpcore/app/models/repository.py
--- a/pulpcore/app/models/repository.py
+++ b/pulpcore/app/models/repository.py
@@ -521,15 +521,21 @@ class RepositoryContent(BaseModel):
Content.
"""
+ # Content can only be removed once it's no longer referenced by any repository
content = models.ForeignKey(
- "Content", on_delete=models.CASCADE, related_name="version_memberships"
+ "Content", on_delete=models.PROTECT, related_name="version_memberships"
)
repository = models.ForeignKey(Repository, on_delete=models.CASCADE)
+ # version_added and version_removed need to be properly handled in _squash before the version
+ # can be deleted
version_added = models.ForeignKey(
- "RepositoryVersion", related_name="added_memberships", on_delete=models.CASCADE
+ "RepositoryVersion", related_name="added_memberships", on_delete=models.RESTRICT
)
version_removed = models.ForeignKey(
- "RepositoryVersion", null=True, related_name="removed_memberships", on_delete=models.CASCADE
+ "RepositoryVersion",
+ null=True,
+ related_name="removed_memberships",
+ on_delete=models.RESTRICT,
)
class Meta:
| Add database layer protection to RepositoryContent
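For context, the migration switches the foreign keys from `CASCADE` to `PROTECT`/`RESTRICT`. A standalone Django sketch of what those handlers do (illustrative models, not pulpcore's actual definitions):
```
from django.db import models


class Content(models.Model):
    pass


class RepositoryVersion(models.Model):
    pass


class Membership(models.Model):
    # PROTECT: deleting a Content row that is still referenced raises
    # django.db.models.ProtectedError instead of silently cascading.
    content = models.ForeignKey(Content, on_delete=models.PROTECT)
    # RESTRICT: also blocks the delete (RestrictedError), but permits it when
    # the referencing row is removed in the same operation via a CASCADE path.
    version_added = models.ForeignKey(RepositoryVersion, on_delete=models.RESTRICT)
```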
| 2022-08-18T09:53:24 |
||
pulp/pulpcore | 3,093 | pulp__pulpcore-3093 | [
"2811"
] | 39e56c85fee53623c7e9e658848a7c6a7ff339c3 | diff --git a/pulpcore/app/mime_types.py b/pulpcore/app/mime_types.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/app/mime_types.py
@@ -0,0 +1,197 @@
+import os
+
+# The mapping was retrieved from the following sources:
+# 1. https://docs.python.org/3/library/mimetypes.html#mimetypes.types_map
+# 2. https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types
+TYPES_MAP = {
+ ".3g2": "audio/3gpp2",
+ ".3gp": "audio/3gpp",
+ ".3gpp": "audio/3gpp",
+ ".3gpp2": "audio/3gpp2",
+ ".7z": "application/x-7z-compressed",
+ ".a": "application/octet-stream",
+ ".aac": "audio/aac",
+ ".abw": "application/x-abiword",
+ ".adts": "audio/aac",
+ ".ai": "application/postscript",
+ ".aif": "audio/x-aiff",
+ ".aifc": "audio/x-aiff",
+ ".aiff": "audio/x-aiff",
+ ".arc": "application/x-freearc",
+ ".ass": "audio/aac",
+ ".au": "audio/basic",
+ ".avif": "image/avif",
+ ".azw": "application/vnd.amazon.ebook",
+ ".bat": "text/plain",
+ ".bcpio": "application/x-bcpio",
+ ".bin": "application/octet-stream",
+ ".bmp": "image/x-ms-bmp",
+ ".bz": "application/x-bzip",
+ ".bz2": "application/x-bzip2",
+ ".c": "text/plain",
+ ".cda": "application/x-cdf",
+ ".cdf": "application/x-netcdf",
+ ".cpio": "application/x-cpio",
+ ".csh": "application/x-csh",
+ ".css": "text/css",
+ ".csv": "text/csv",
+ ".dll": "application/octet-stream",
+ ".doc": "application/msword",
+ ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ ".dot": "application/msword",
+ ".dvi": "application/x-dvi",
+ ".eml": "message/rfc822",
+ ".eot": "application/vnd.ms-fontobject",
+ ".eps": "application/postscript",
+ ".epub": "application/epub+zip",
+ ".etx": "text/x-setext",
+ ".exe": "application/octet-stream",
+ ".gif": "image/gif",
+ ".gtar": "application/x-gtar",
+ ".gz": "application/gzip",
+ ".gzip": "application/gzip",
+ ".h": "text/plain",
+ ".h5": "application/x-hdf5",
+ ".hdf": "application/x-hdf",
+ ".heic": "image/heic",
+ ".heif": "image/heif",
+ ".htm": "text/html",
+ ".html": "text/html",
+ ".ico": "image/vnd.microsoft.icon",
+ ".ics": "text/calendar",
+ ".ief": "image/ief",
+ ".jar": "application/java-archive",
+ ".jpe": "image/jpeg",
+ ".jpeg": "image/jpeg",
+ ".jpg": "image/jpeg",
+ ".js": "application/javascript",
+ ".json": "application/json",
+ ".jsonld": "application/ld+json",
+ ".ksh": "text/plain",
+ ".latex": "application/x-latex",
+ ".loas": "audio/aac",
+ ".m1v": "video/mpeg",
+ ".m3u": "application/vnd.apple.mpegurl",
+ ".m3u8": "application/vnd.apple.mpegurl",
+ ".man": "application/x-troff-man",
+ ".me": "application/x-troff-me",
+ ".mht": "message/rfc822",
+ ".mhtml": "message/rfc822",
+ ".mid": "audio/x-midi",
+ ".midi": "audio/x-midi",
+ ".mif": "application/x-mif",
+ ".mjs": "application/javascript",
+ ".mov": "video/quicktime",
+ ".movie": "video/x-sgi-movie",
+ ".mp2": "audio/mpeg",
+ ".mp3": "audio/mpeg",
+ ".mp4": "video/mp4",
+ ".mpa": "video/mpeg",
+ ".mpe": "video/mpeg",
+ ".mpeg": "video/mpeg",
+ ".mpg": "video/mpeg",
+ ".mpkg": "application/vnd.apple.installer+xml",
+ ".ms": "application/x-troff-ms",
+ ".nc": "application/x-netcdf",
+ ".nws": "message/rfc822",
+ ".o": "application/octet-stream",
+ ".obj": "application/octet-stream",
+ ".oda": "application/oda",
+ ".odp": "application/vnd.oasis.opendocument.presentation",
+ ".ods": "application/vnd.oasis.opendocument.spreadsheet",
+ ".odt": "application/vnd.oasis.opendocument.text",
+ ".oga": "audio/ogg",
+ ".ogv": "video/ogg",
+ ".ogx": "application/ogg",
+ ".opus": "audio/opus",
+ ".otf": "font/otf",
+ ".p12": "application/x-pkcs12",
+ ".p7c": "application/pkcs7-mime",
+ ".pbm": "image/x-portable-bitmap",
+ ".pdf": "application/pdf",
+ ".pfx": "application/x-pkcs12",
+ ".pgm": "image/x-portable-graymap",
+ ".php": "application/x-httpd-php",
+ ".pl": "text/plain",
+ ".png": "image/png",
+ ".pnm": "image/x-portable-anymap",
+ ".pot": "application/vnd.ms-powerpoint",
+ ".ppa": "application/vnd.ms-powerpoint",
+ ".ppm": "image/x-portable-pixmap",
+ ".pps": "application/vnd.ms-powerpoint",
+ ".ppt": "application/vnd.ms-powerpoint",
+ ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
+ ".ps": "application/postscript",
+ ".pwz": "application/vnd.ms-powerpoint",
+ ".py": "text/x-python",
+ ".pyc": "application/x-python-code",
+ ".pyo": "application/x-python-code",
+ ".qt": "video/quicktime",
+ ".ra": "audio/x-pn-realaudio",
+ ".ram": "application/x-pn-realaudio",
+ ".rar": "application/vnd.rar",
+ ".ras": "image/x-cmu-raster",
+ ".rdf": "application/xml",
+ ".rgb": "image/x-rgb",
+ ".roff": "application/x-troff",
+ ".rtf": "application/rtf",
+ ".rtx": "text/richtext",
+ ".sgm": "text/x-sgml",
+ ".sgml": "text/x-sgml",
+ ".sh": "application/x-sh",
+ ".shar": "application/x-shar",
+ ".snd": "audio/basic",
+ ".so": "application/octet-stream",
+ ".src": "application/x-wais-source",
+ ".sv4cpio": "application/x-sv4cpio",
+ ".sv4crc": "application/x-sv4crc",
+ ".svg": "image/svg+xml",
+ ".swf": "application/x-shockwave-flash",
+ ".t": "application/x-troff",
+ ".tar": "application/x-tar",
+ ".tcl": "application/x-tcl",
+ ".tex": "application/x-tex",
+ ".texi": "application/x-texinfo",
+ ".texinfo": "application/x-texinfo",
+ ".tif": "image/tiff",
+ ".tiff": "image/tiff",
+ ".tr": "application/x-troff",
+ ".ts": "video/mp2t",
+ ".tsv": "text/tab-separated-values",
+ ".ttf": "font/ttf",
+ ".txt": "text/plain",
+ ".ustar": "application/x-ustar",
+ ".vcf": "text/x-vcard",
+ ".vsd": "application/vnd.visio",
+ ".wasm": "application/wasm",
+ ".wav": "audio/x-wav",
+ ".weba": "audio/webm",
+ ".webm": "video/webm",
+ ".webmanifest": "application/manifest+json",
+ ".webp": "image/webp",
+ ".wiz": "application/msword",
+ ".woff": "font/woff",
+ ".woff2": "font/woff2",
+ ".wsdl": "application/xml",
+ ".xbm": "image/x-xbitmap",
+ ".xhtml": "application/xhtml+xml",
+ ".xlb": "application/vnd.ms-excel",
+ ".xls": "application/vnd.ms-excel",
+ ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+ ".xml": "text/xml",
+ ".xpdl": "application/xml",
+ ".xpm": "image/x-xpixmap",
+ ".xsl": "application/xml",
+ ".xul": "application/vnd.mozilla.xul+xml",
+ ".xwd": "image/x-xwindowdump",
+ ".xz": "application/x-xz",
+ ".zip": "application/zip",
+ ".zst": "application/zstd",
+ ".zstd": "application/zstd",
+}
+
+
+def get_type(url):
+ """Return the mime-type of a file specified by its URL."""
+ _, ext = os.path.splitext(url)
+ return TYPES_MAP.get(ext.lower())
diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -1,6 +1,5 @@
import asyncio
import logging
-import mimetypes
import os
import re
from gettext import gettext as _
@@ -44,6 +43,8 @@
Remote,
RemoteArtifact,
)
+from pulpcore.app import mime_types # noqa: E402: module level not at top of file
+
from pulpcore.exceptions import UnsupportedDigestValidationError # noqa: E402
from jinja2 import Template # noqa: E402: module level not at top of file
@@ -334,7 +335,7 @@ def response_headers(path):
Returns:
headers (dict): A dictionary of response headers.
"""
- content_type, encoding = mimetypes.guess_type(path)
+ content_type = mime_types.get_type(path)
headers = {}
if content_type:
headers["Content-Type"] = content_type
Wrong Content-Type header returned for some files via the content app
**Version**
All versions
**Describe the bug**
.xml.gz files being served by the content app have a `Content-Type` header of `text/xml` instead of `application/gzip`.
**To Reproduce**
Sync and publish any RPM repo, use HTTPie to request one of the files, observe the headers
**Expected behavior**
`Content-Type` should be `application/gzip`, the type of the outer file rather than that of the decompressed contents.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2064092
| This is caused by an invalid value returned from `mimetypes.guess_type` (https://docs.python.org/3/library/mimetypes.html#mimetypes.guess_type).
Seems like the `mimetypes` module fetches types from the system (https://stackoverflow.com/a/40540381/3907906).
Should the mimetype not be determined by the content being served? Or maybe the published artifact. Guessing the mimetype from the artifact is probably not a good idea. Also when on cloud storage, it means that we need to load the artifact into pulp for guessing. That is even worse.
There is also a good discussion about the similar problem of serving `.xml.gz` files in pulp2: https://pulp.plan.io/issues/1781#note-24. Adjacent comments reference the PR that resolves the issue.
Their workflow however does not work in our use case. In addition to that, I cannot simply add a new mime type to the existing database of types like this: `mimetypes.add_type("application/gzip", ".xml.gz")`. The type is still not correctly detected and it returns `('text/xml', 'gzip')` all the time.
Loading artifacts from cloud storage is also a no-go for me.
Actually, it looks like we are guessing the mime-type just by means of the filename requested. So it is not a data from cloud issue. But still we are guessing where the content object could tell us the right answer.
> Should the mimetype not be determined by the content being served?
Currently, the type is determined by a file extension: https://github.com/pulp/pulpcore/blob/01557ca70f0863ec006977fb9bca6f8af8285dd6/pulpcore/content/handler.py#L337
| 2022-08-18T14:15:50 |
|
pulp/pulpcore | 3,102 | pulp__pulpcore-3102 | [
"3015"
] | e0f9ef465f9b64272d62108370200609d343efa6 | diff --git a/pulpcore/app/apps.py b/pulpcore/app/apps.py
--- a/pulpcore/app/apps.py
+++ b/pulpcore/app/apps.py
@@ -212,14 +212,11 @@ def ready(self):
super().ready()
from . import checks # noqa
+ _configure_telemetry(self.apps)
+
post_migrate.connect(
_populate_system_id, sender=self, dispatch_uid="populate_system_id_identifier"
)
- post_migrate.connect(
- _populate_telemetry_periodic_task,
- sender=self,
- dispatch_uid="populate_telemetry_periodic_task_identifier",
- )
def _populate_access_policies(sender, apps, verbosity, **kwargs):
@@ -265,14 +262,24 @@ def _populate_system_id(sender, apps, verbosity, **kwargs):
SystemID().save()
-def _populate_telemetry_periodic_task(sender, apps, **kwargs):
- TaskSchedule = apps.get_model("core", "TaskSchedule")
- task_name = "pulpcore.app.tasks.telemetry.post_telemetry"
- dispatch_interval = timedelta(days=1)
- name = "Post Anonymous Telemetry Periodically"
- TaskSchedule.objects.update_or_create(
- name=name, defaults={"task_name": task_name, "dispatch_interval": dispatch_interval}
- )
+def _configure_telemetry(apps):
+ from django.db import connection
+ from pulpcore.app.util import get_telemetry_posting_url, PRODUCTION_URL
+
+ if "core_taskschedule" in connection.introspection.table_names():
+ url = get_telemetry_posting_url()
+ TaskSchedule = apps.get_model("core", "TaskSchedule")
+ task_name = "pulpcore.app.tasks.telemetry.post_telemetry"
+ dispatch_interval = timedelta(days=1)
+ name = "Post Anonymous Telemetry Periodically"
+ # Initially only dev systems receive posted data.
+ if url == PRODUCTION_URL:
+ TaskSchedule.objects.filter(task_name=task_name).delete()
+ else:
+ TaskSchedule.objects.update_or_create(
+ name=name, defaults={"task_name": task_name, "dispatch_interval": dispatch_interval}
+ )
+ connection.close()
def _populate_roles(sender, apps, verbosity, **kwargs):
diff --git a/pulpcore/app/tasks/telemetry.py b/pulpcore/app/tasks/telemetry.py
--- a/pulpcore/app/tasks/telemetry.py
+++ b/pulpcore/app/tasks/telemetry.py
@@ -7,6 +7,7 @@
from asgiref.sync import sync_to_async
from pulpcore.app.apps import pulp_plugin_configs
+from pulpcore.app.util import get_telemetry_posting_url
from pulpcore.app.models import SystemID
from pulpcore.app.models.status import ContentAppStatus
from pulpcore.app.models.task import Worker
@@ -15,10 +16,6 @@
logger = logging.getLogger(__name__)
-PRODUCTION_URL = "https://analytics-pulpproject-org.pulpproject.workers.dev/"
-DEV_URL = "https://dev-analytics-pulpproject-org.pulpproject.workers.dev/"
-
-
async def _num_hosts(qs):
hosts = set()
items = await sync_to_async(list)(qs.all())
@@ -67,20 +64,8 @@ async def _system_id():
return {"system_id": str(system_id_entry.pk)}
-def _get_posting_url():
- for app in pulp_plugin_configs():
- if ".dev" in app.version:
- return DEV_URL
-
- return PRODUCTION_URL
-
-
async def post_telemetry():
- url = _get_posting_url()
-
- if url == PRODUCTION_URL:
- return # Initially only dev systems receive posted data. If we got here, bail.
-
+ url = get_telemetry_posting_url()
data = {}
awaitables = (
diff --git a/pulpcore/app/util.py b/pulpcore/app/util.py
--- a/pulpcore/app/util.py
+++ b/pulpcore/app/util.py
@@ -16,6 +16,9 @@
# a little cache so viewset_for_model doesn't have iterate over every app every time
_model_viewset_cache = {}
+PRODUCTION_URL = "https://analytics-pulpproject-org.pulpproject.workers.dev/"
+DEV_URL = "https://dev-analytics-pulpproject-org.pulpproject.workers.dev/"
+
def get_url(model):
"""
@@ -204,3 +207,11 @@ def verify_signature(filepath, public_key, detached_data=None):
raise InvalidSignatureError(
f"The file '{filepath}' does not contain a valid signature."
)
+
+
+def get_telemetry_posting_url():
+ for app in pulp_plugin_configs():
+ if ".dev" in app.version:
+ return DEV_URL
+
+ return PRODUCTION_URL
| Telemetry task gives false impression that telemetry data is being sent
I see in the logs that a telemetry task is being executed regularly (every day):
```
{
"child_tasks": [],
"created_resources": [],
"error": null,
"finished_at": "2022-07-25T21:32:09.435330Z",
"logging_cid": "",
"name": "pulpcore.app.tasks.telemetry.post_telemetry",
"parent_task": null,
"progress_reports": [],
"pulp_created": "2022-07-25T21:32:09.383372Z",
"pulp_href": "/pulp/api/v3/tasks/7f4c54d6-fcda-4006-9da1-3c6910a18171/",
"reserved_resources_record": [],
"started_at": "2022-07-25T21:32:09.419785Z",
"state": "completed",
"task_group": null,
"worker": "/pulp/api/v3/workers/c609a0d5-45cc-4756-af3c-4b50cd7520b1/"
}
```
It gave me the impression that Pulp was sending telemetry data. It was only when I dug into the code that I saw that it only does this for dev installs. Is there a way to not execute this task if telemetry data is not being sent?
| 2022-08-18T19:05:05 |
||
pulp/pulpcore | 3,112 | pulp__pulpcore-3112 | [
"3111"
] | bc001ea6acbce15f41ca4976e6c9ce7c82fc016e | diff --git a/pulpcore/plugin/stages/content_stages.py b/pulpcore/plugin/stages/content_stages.py
--- a/pulpcore/plugin/stages/content_stages.py
+++ b/pulpcore/plugin/stages/content_stages.py
@@ -165,23 +165,26 @@ def process_batch():
# on select-for-update. So, we select-for-update, in pulp_id order, the
# rows we're about to update as one db-call, and then do the update in a
# second.
+ #
+ # NOTE: select-for-update requires being in an atomic-transaction. We are
+ # **already in an atomic transaction** at this point as a result of the
+ # "with transaction.atomic():", above.
ids = [k.pulp_id for k in to_update_ca_bulk]
- with transaction.atomic():
- # "len()" forces the QA to be evaluated. Using exist() or count() won't
- # work for us - Django is smart enough to either not-order, or even
- # not-emit, a select-for-update in these cases.
- #
- # To maximize performance, we make sure to only ask for pulp_ids, and
- # avoid instantiating a python-object for the affected CAs by using
- # values_list()
- len(
- ContentArtifact.objects.filter(pulp_id__in=ids)
- .only("pulp_id")
- .order_by("pulp_id")
- .select_for_update()
- .values_list()
- )
- ContentArtifact.objects.bulk_update(to_update_ca_bulk, ["artifact"])
+ # "len()" forces the QuerySet to be evaluated. Using exist() or count() won't
+ # work for us - Django is smart enough to either not-order, or even
+ # not-emit, a select-for-update in these cases.
+ #
+ # To maximize performance, we make sure to only ask for pulp_ids, and
+ # avoid instantiating a python-object for the affected CAs by using
+ # values_list()
+ subq = (
+ ContentArtifact.objects.filter(pulp_id__in=ids)
+ .only("pulp_id")
+ .order_by("pulp_id")
+ .select_for_update()
+ )
+ len(subq.values_list())
+ ContentArtifact.objects.bulk_update(to_update_ca_bulk, ["artifact"])
# To avoid a similar deadlock issue when calling get_or_create, we sort the
# "new" CAs to make sure inserts happen in a defined order. Since we can't
| bulk_update() can still deadlock in content_stages
**Version**
pulpcore/3.14.16 (but problem exists in main)
**Describe the bug**
Syncing multiple repositories with high content overlap under high concurrency continues to hit occasional deadlocks.
Traceback and postgres log for the particular failure:
```
2022-08-10 15:51:29 EDT ERROR: deadlock detected
2022-08-10 15:51:29 EDT DETAIL: Process 55740 waits for ShareLock on transaction 61803; blocked by process 55746.
Process 55746 waits for ShareLock on transaction 61805; blocked by process 55740.
Process 55740: SELECT ....
Process 55746: COMMIT
2022-08-10 15:51:29 EDT HINT: See server log for query details.
2022-08-10 15:51:29 EDT CONTEXT: while locking tuple (209,51) in relation "core_contentartifact"
```
```
pulpcore-worker-5[54158]: pulp [88649f1c-3393-4693-aab7-d73b62eeda62]: pulpcore.tasking.pulpcore_worker:INFO: Task 407bb67b-65d0-4d65-b9c8-b1aa1f2c87fd failed (deadlock detected
pulpcore-worker-5[54158]: DETAIL: Process 55740 waits for ShareLock on transaction 61803; blocked by process 55746.
pulpcore-worker-5[54158]: Process 55746 waits for ShareLock on transaction 61805; blocked by process 55740.
pulpcore-worker-5[54158]: HINT: See server log for query details.
pulpcore-worker-5[54158]: CONTEXT: while locking tuple (209,51) in relation "core_contentartifact"
pulpcore-worker-5[54158]: )
pulpcore-worker-5[54158]: pulp [88649f1c-3393-4693-aab7-d73b62eeda62]: pulpcore.tasking.pulpcore_worker:INFO: File "/usr/lib/python3.6/site-packages/pulpcore/tasking/pulpcore_worker.py", line 342, in _perform_task
pulpcore-worker-5[54158]: result = func(*args, **kwargs)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/pulp_rpm/app/tasks/synchronizing.py", line 494, in synchronize
pulpcore-worker-5[54158]: version = dv.create()
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/pulpcore/plugin/stages/declarative_version.py", line 151, in create
pulpcore-worker-5[54158]: loop.run_until_complete(pipeline)
pulpcore-worker-5[54158]: File "/usr/lib64/python3.6/asyncio/base_events.py", line 484, in run_until_complete
pulpcore-worker-5[54158]: return future.result()
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/pulpcore/plugin/stages/api.py", line 225, in create_pipeline
pulpcore-worker-5[54158]: await asyncio.gather(*futures)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/pulpcore/plugin/stages/api.py", line 43, in __call__
pulpcore-worker-5[54158]: await self.run()
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/pulpcore/plugin/stages/content_stages.py", line 178, in run
pulpcore-worker-5[54158]: .order_by("pulp_id")
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/models/query.py", line 256, in __len__
pulpcore-worker-5[54158]: self._fetch_all()
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/models/query.py", line 1242, in _fetch_all
pulpcore-worker-5[54158]: self._result_cache = list(self._iterable_class(self))
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/models/query.py", line 144, in __iter__
pulpcore-worker-5[54158]: return compiler.results_iter(tuple_expected=True, chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/models/sql/compiler.py", line 1094, in results_iter
pulpcore-worker-5[54158]: results = self.execute_sql(MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/models/sql/compiler.py", line 1142, in execute_sql
pulpcore-worker-5[54158]: cursor.execute(sql, params)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/backends/utils.py", line 67, in execute
pulpcore-worker-5[54158]: return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/backends/utils.py", line 76, in _execute_with_wrappers
pulpcore-worker-5[54158]: return executor(sql, params, many, context)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/backends/utils.py", line 84, in _execute
pulpcore-worker-5[54158]: return self.cursor.execute(sql, params)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/utils.py", line 89, in __exit__
pulpcore-worker-5[54158]: raise dj_exc_value.with_traceback(traceback) from exc_value
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/backends/utils.py", line 84, in _execute
pulpcore-worker-5[54158]: return self.cursor.execute(sql, params)
```
**To Reproduce**
See the QE test-setup from https://bugzilla.redhat.com/show_bug.cgi?id=2062526. We have not been able to force the problem with a synthetic test case.
**Expected behavior**
All the repositories should sync all their content without any of the sync-processes being failed due to detected deadlocks.
| https://bugzilla.redhat.com/show_bug.cgi?id=2082209 | 2022-08-19T18:51:26 |
|
pulp/pulpcore | 3,116 | pulp__pulpcore-3116 | [
"3115"
] | f85381e36fdbd05426ec46d40035e787c39dd05d | diff --git a/pulpcore/app/settings.py b/pulpcore/app/settings.py
--- a/pulpcore/app/settings.py
+++ b/pulpcore/app/settings.py
@@ -289,6 +289,8 @@
TASK_DIAGNOSTICS = False
+TELEMETRY = True
+
# HERE STARTS DYNACONF EXTENSION LOAD (Keep at the very bottom of settings.py)
# Read more at https://dynaconf.readthedocs.io/en/latest/guides/django.html
from dynaconf import DjangoDynaconf, Validator # noqa
diff --git a/pulpcore/app/tasks/telemetry.py b/pulpcore/app/tasks/telemetry.py
--- a/pulpcore/app/tasks/telemetry.py
+++ b/pulpcore/app/tasks/telemetry.py
@@ -9,7 +9,6 @@
from google.protobuf.json_format import MessageToJson
from pulpcore.app.apps import pulp_plugin_configs
-from pulpcore.app.util import get_telemetry_posting_url
from pulpcore.app.models import SystemID
from pulpcore.app.models.status import ContentAppStatus
from pulpcore.app.models.task import Worker
@@ -19,6 +18,18 @@
logger = logging.getLogger(__name__)
+PRODUCTION_URL = "https://analytics.pulpproject.org/"
+DEV_URL = "https://dev.analytics.pulpproject.org/"
+
+
+def get_telemetry_posting_url():
+ for app in pulp_plugin_configs():
+ if ".dev" in app.version:
+ return DEV_URL
+
+ return PRODUCTION_URL
+
+
async def _num_hosts(qs):
hosts = set()
items = await sync_to_async(list)(qs.all())
diff --git a/pulpcore/app/util.py b/pulpcore/app/util.py
--- a/pulpcore/app/util.py
+++ b/pulpcore/app/util.py
@@ -17,12 +17,9 @@
from pulpcore.app import models
from pulpcore.exceptions.validation import InvalidSignatureError
-# a little cache so viewset_for_model doesn't have iterate over every app every time
+# a little cache so viewset_for_model doesn't have to iterate over every app every time
_model_viewset_cache = {}
-PRODUCTION_URL = "https://analytics.pulpproject.org/"
-DEV_URL = "https://dev.analytics.pulpproject.org/"
-
def get_url(model):
"""
@@ -249,23 +246,13 @@ def gpg_verify(public_keys, signature, detached_data=None):
return verified
-def get_telemetry_posting_url():
- for app in pulp_plugin_configs():
- if ".dev" in app.version:
- return DEV_URL
-
- return PRODUCTION_URL
-
-
def configure_telemetry():
- url = get_telemetry_posting_url()
task_name = "pulpcore.app.tasks.telemetry.post_telemetry"
dispatch_interval = timedelta(days=1)
name = "Post Anonymous Telemetry Periodically"
- # Initially only dev systems send data.
- if url == PRODUCTION_URL:
- models.TaskSchedule.objects.filter(task_name=task_name).delete()
- else:
+ if settings.TELEMETRY:
models.TaskSchedule.objects.update_or_create(
name=name, defaults={"task_name": task_name, "dispatch_interval": dispatch_interval}
)
+ else:
+ models.TaskSchedule.objects.filter(task_name=task_name).delete()
| Adds anonymous telemetry posting along with TELEMETRY setting to disable it
By default Pulp systems will post anonymous data to https://analytics.pulpproject.org/ to help with project decision making and growth. Background and discussion on this feature can be found here: https://discourse.pulpproject.org/t/proposal-telemetry/259
This change should do a few things:
* Introduce a new setting named `TELEMETRY` which, when `True`, submits telemetry and does not otherwise.
* Clearly identify via a release note that this has been added.
* Clearly identify to users how they can disable it (a minimal override is sketched below).
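A minimal sketch of how a deployment could opt out once the setting exists; the settings-file path is the usual one for Pulp installs and should be treated as illustrative:
```
# /etc/pulp/settings.py
TELEMETRY = False  # opt out of posting anonymous telemetry
```
Because Pulp's settings are loaded through Dynaconf, the same override should also be expressible as an environment variable, presumably `PULP_TELEMETRY=false` given the standard `PULP_` prefix.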
| 2022-08-22T20:08:55 |
||
pulp/pulpcore | 3,120 | pulp__pulpcore-3120 | [
"3113"
] | 253265fa66e1e55bbe5984511a5d882b6486aaa1 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -265,20 +265,22 @@ def import_repository_version(importer_pk, destination_repo_pk, source_repo_name
with open(os.path.join(temp_dir, mapping_path), "r") as mapping_file:
mapping = json.load(mapping_file)
+ content_count = 0
if mapping:
# use the content mapping to map content to repos
for repo_name, content_ids in mapping.items():
repo = _destination_repo(importer, repo_name)
content = Content.objects.filter(upstream_id__in=content_ids)
+ content_count += content.count()
with repo.new_version() as new_version:
new_version.set_content(content)
else:
# just map all the content to our destination repo
content = Content.objects.filter(pk__in=resulting_content_ids)
+ content_count += content.count()
with dest_repo.new_version() as new_version:
new_version.set_content(content)
- content_count = content.count()
pb.total = content_count
pb.done = content_count
pb.state = TASK_STATES.COMPLETED
| The import procedure incorrectly counts the number of imported content
https://github.com/pulp/pulpcore/blob/90142202f0ecc2e47dafbc2c517691fdc60b0eef/pulpcore/app/tasks/importer.py#L269-L278
We overwrite the content_count variable on every iteration of the loop. We should instead accumulate the number of imported content units and then record the total on the progress report.
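Reduced to its essence, the difference is overwrite versus accumulate (a standalone illustration, not the importer code itself):
```
batches = [["a", "b"], ["c"], ["d", "e", "f"]]

content_count = 0
for batch in batches:
    content_count = len(batch)   # buggy: ends at 3, the size of the last batch

content_count = 0
for batch in batches:
    content_count += len(batch)  # fixed: ends at 6, the real total
```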
| 2022-08-23T09:08:02 |
||
pulp/pulpcore | 3,126 | pulp__pulpcore-3126 | [
"3122"
] | bc001ea6acbce15f41ca4976e6c9ce7c82fc016e | diff --git a/pulpcore/app/apps.py b/pulpcore/app/apps.py
--- a/pulpcore/app/apps.py
+++ b/pulpcore/app/apps.py
@@ -1,7 +1,6 @@
from collections import defaultdict
from gettext import gettext as _
from importlib import import_module
-from datetime import timedelta
from django import apps
from django.core.exceptions import ImproperlyConfigured
@@ -212,8 +211,6 @@ def ready(self):
super().ready()
from . import checks # noqa
- _configure_telemetry(self.apps)
-
post_migrate.connect(
_populate_system_id, sender=self, dispatch_uid="populate_system_id_identifier"
)
@@ -262,26 +259,6 @@ def _populate_system_id(sender, apps, verbosity, **kwargs):
SystemID().save()
-def _configure_telemetry(apps):
- from django.db import connection
- from pulpcore.app.util import get_telemetry_posting_url, PRODUCTION_URL
-
- if "core_taskschedule" in connection.introspection.table_names():
- url = get_telemetry_posting_url()
- TaskSchedule = apps.get_model("core", "TaskSchedule")
- task_name = "pulpcore.app.tasks.telemetry.post_telemetry"
- dispatch_interval = timedelta(days=1)
- name = "Post Anonymous Telemetry Periodically"
- # Initially only dev systems receive posted data.
- if url == PRODUCTION_URL:
- TaskSchedule.objects.filter(task_name=task_name).delete()
- else:
- TaskSchedule.objects.update_or_create(
- name=name, defaults={"task_name": task_name, "dispatch_interval": dispatch_interval}
- )
- connection.close()
-
-
def _populate_roles(sender, apps, verbosity, **kwargs):
role_prefix = f"{sender.label}."
# collect all plugin defined roles
diff --git a/pulpcore/app/util.py b/pulpcore/app/util.py
--- a/pulpcore/app/util.py
+++ b/pulpcore/app/util.py
@@ -3,6 +3,7 @@
import tempfile
from contextlib import ExitStack
+from datetime import timedelta
import gnupg
from django.conf import settings
@@ -254,3 +255,17 @@ def get_telemetry_posting_url():
return DEV_URL
return PRODUCTION_URL
+
+
+def configure_telemetry():
+ url = get_telemetry_posting_url()
+ task_name = "pulpcore.app.tasks.telemetry.post_telemetry"
+ dispatch_interval = timedelta(days=1)
+ name = "Post Anonymous Telemetry Periodically"
+ # Initially only dev systems send data.
+ if url == PRODUCTION_URL:
+ models.TaskSchedule.objects.filter(task_name=task_name).delete()
+ else:
+ models.TaskSchedule.objects.update_or_create(
+ name=name, defaults={"task_name": task_name, "dispatch_interval": dispatch_interval}
+ )
diff --git a/pulpcore/tasking/pulpcore_worker.py b/pulpcore/tasking/pulpcore_worker.py
--- a/pulpcore/tasking/pulpcore_worker.py
+++ b/pulpcore/tasking/pulpcore_worker.py
@@ -33,6 +33,7 @@
from pulpcore.app.models import Worker, Task # noqa: E402: module level not at top of file
+from pulpcore.app.util import configure_telemetry # noqa: E402: module level not at top of file
from pulpcore.app.role_util import ( # noqa: E402: module level not at top of file
get_users_with_perms,
)
@@ -58,6 +59,10 @@
TASK_SCHEDULING_LOCK = 42
+def startup_hook():
+ configure_telemetry()
+
+
class PGAdvisoryLock:
"""
A context manager that will hold a postgres advisory lock non-blocking.
@@ -132,6 +137,8 @@ def __init__(self):
os.set_blocking(sentinel_w, False)
signal.set_wakeup_fd(sentinel_w)
+ startup_hook()
+
def _signal_handler(self, thesignal, frame):
# Reset signal handlers to default
# If you kill the process a second time it's not graceful anymore.
| evr="" is sent to the database on creating a Package
**Version**
current main branches of everything.
**Describe the bug**
It looks like the evr field is translated into the insert query even if it was not set from the validated_data. Also, it seems it should be populated by a database trigger and never be saved directly.
```
[('INSERT INTO "rpm_package" ("content_ptr_id", "name", "epoch", "version", "release", "arch", "evr", "pkgId", "checksum_type", "summary", "description", "url", "changelogs", "files", "requires", "provides", "conflicts", "obsoletes", "suggests", "enhances", "recommends", "supplements", "location_base", "location_href", "rpm_buildhost", "rpm_group", "rpm_license", "rpm_packager", "rpm_sourcerpm", "rpm_vendor", "rpm_header_start", "rpm_header_end", "size_archive", "size_installed", "size_package", "time_build", "time_file", "is_modular") VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)', (UUID('ff1f1dd3-d4bf-4104-a2ea-7baed11dc6e8'), 'lemon', '0', '0', '1', 'noarch', '', '847df5d46b4d8ab2a7af5bfc0ccfb4681786a2b40b0f3a2b9a56bbed8dfeeee6', 'sha256', 'A lemon', '', 'http://lem.on/Fruits/', '[]', '[]', '[]', '[["citrussy", null, null, null, null, false], ["fruity", null, null, null, null, false], ["lemon", null, null, null, null, false], ["lemon", "EQ", "0", "0", "1", false]]', '[]', '[]', '[]', '[]', '[]', '[]', '', 'lemon-0-1.noarch.rpm', '68aa50d573f8', 'Fruits', 'No Age Limitation', 'Mr. Bartender', 'lemon-0-1.src.rpm', 'Lem:ON!', 4504, 5924, 124, 0, 6040, 1657826273, 1661253000, False))]
```
**To Reproduce**
run the pulp-cli test for rpm content
```
pytest -m pulp_rpm -k content
```
**Expected behavior**
test should pass
**Additional context**
Add any other context about the problem here. Please provide links to any previous discussions via Discourse or Bugzilla.
| This turned out to be a pulpcore issue.
In pulpcore we start doing orm calls before all apps are ready. This results in django-readonly-fields doing its ninja-move to swap out the SQL Compiler before django computes it and caches it in https://github.com/django/django/blob/main/django/db/backends/base/operations.py#L361 .
Lesson learned: Do not touch the orm before everything is ready.
Plan to resolve: Move the installation of the telemetry schedule into a new worker-startup hook. | 2022-08-24T10:24:46 |
|
pulp/pulpcore | 3,127 | pulp__pulpcore-3127 | [
"2530"
] | bc001ea6acbce15f41ca4976e6c9ce7c82fc016e | diff --git a/pulpcore/app/viewsets/__init__.py b/pulpcore/app/viewsets/__init__.py
--- a/pulpcore/app/viewsets/__init__.py
+++ b/pulpcore/app/viewsets/__init__.py
@@ -61,6 +61,7 @@
ReadOnlyRepositoryViewSet,
RemoteFilter,
RemoteViewSet,
+ ListRemoteViewSet,
RepositoryViewSet,
RepositoryVersionViewSet,
ListRepositoryVersionViewSet,
diff --git a/pulpcore/app/viewsets/repository.py b/pulpcore/app/viewsets/repository.py
--- a/pulpcore/app/viewsets/repository.py
+++ b/pulpcore/app/viewsets/repository.py
@@ -236,6 +236,29 @@ class Meta:
fields = {"name": NAME_FILTER_OPTIONS, "pulp_last_updated": DATETIME_FILTER_OPTIONS}
+class ListRemoteViewSet(NamedModelViewSet, mixins.ListModelMixin):
+ endpoint_name = "remotes"
+ queryset = Remote.objects.all()
+ serializer_class = RemoteSerializer
+ filterset_class = RemoteFilter
+
+ DEFAULT_ACCESS_POLICY = {
+ "statements": [
+ {
+ "action": ["list"],
+ "principal": "authenticated",
+ "effect": "allow",
+ },
+ ],
+ "queryset_scoping": {"function": "scope_queryset"},
+ }
+
+ @classmethod
+ def routable(cls):
+ """Do not hide from the routers."""
+ return True
+
+
class RemoteViewSet(
NamedModelViewSet,
mixins.CreateModelMixin,
| As a user, I can list all Remotes
**Is your feature request related to a problem? Please describe.**
It would be nice to have one endpoint to list all Remote objects in Pulp. Currently there are endpoints for listing the detail types, but not the master model.
**Describe the solution you'd like**
All remotes listed at `/pulp/api/v3/remotes/` similar to the distributions being listed at `/pulp/api/v3/distributions/`.
| I would hope that we can move away from having separate `/remotes/` entirely and replace it with a `/remote_repositories/` endpoint as outlined here https://pulp.plan.io/issues/6353#note-26
This is basically the same as this issue https://github.com/pulp/pulpcore/issues/2529 - again, the endpoint exists, it's just that a user doesn't have the permission, and I believe there is no role that provides this permission.
We cannot do proper scoping by the permission to view on these generic endpoints. That is why we plainly reject non-admin access.
I'm confused, I thought we did allow non-admin users to list these endpoints. It should use the permissions from each plugin to create an aggregate list of remotes you can see.
You are right, the repository list endpoint has queryset scoping. I forgot about that one, and just remembered that it was complicated.
I think queryset scoping can be applied to all NamedModelViewSet and can be achieved in a similar way as in here https://github.com/pulp/pulpcore/blob/main/pulpcore/app/viewsets/repository.py#L68 https://github.com/pulp/pulpcore/blob/main/pulpcore/app/viewsets/publication.py#L353
Given our earlier discussion on the issue I believe this work can be picked up @MichalPysik
@mdellweg @gerrod3 Agreed?
Yes, a default access policy with queryset-scoping should be added to the Remote view to allow for non-admin users to list all their remotes.
I think the viewset providing the generic remote list is actually routed by accident. It also allows creating untyped remotes. I think we have a bug to be fixed first.
|
pulp/pulpcore | 3,128 | pulp__pulpcore-3128 | [
"3122"
] | 53eb8eb215cf1911ebc6deabb0465d5a6743e9f2 | diff --git a/pulpcore/app/apps.py b/pulpcore/app/apps.py
--- a/pulpcore/app/apps.py
+++ b/pulpcore/app/apps.py
@@ -1,7 +1,6 @@
from collections import defaultdict
from gettext import gettext as _
from importlib import import_module
-from datetime import timedelta
from django import apps
from django.core.exceptions import ImproperlyConfigured
@@ -212,8 +211,6 @@ def ready(self):
super().ready()
from . import checks # noqa
- _configure_telemetry(self.apps)
-
post_migrate.connect(
_populate_system_id, sender=self, dispatch_uid="populate_system_id_identifier"
)
@@ -262,26 +259,6 @@ def _populate_system_id(sender, apps, verbosity, **kwargs):
SystemID().save()
-def _configure_telemetry(apps):
- from django.db import connection
- from pulpcore.app.util import get_telemetry_posting_url, PRODUCTION_URL
-
- if "core_taskschedule" in connection.introspection.table_names():
- url = get_telemetry_posting_url()
- TaskSchedule = apps.get_model("core", "TaskSchedule")
- task_name = "pulpcore.app.tasks.telemetry.post_telemetry"
- dispatch_interval = timedelta(days=1)
- name = "Post Anonymous Telemetry Periodically"
- # Initially only dev systems receive posted data.
- if url == PRODUCTION_URL:
- TaskSchedule.objects.filter(task_name=task_name).delete()
- else:
- TaskSchedule.objects.update_or_create(
- name=name, defaults={"task_name": task_name, "dispatch_interval": dispatch_interval}
- )
- connection.close()
-
-
def _populate_roles(sender, apps, verbosity, **kwargs):
role_prefix = f"{sender.label}."
# collect all plugin defined roles
diff --git a/pulpcore/app/util.py b/pulpcore/app/util.py
--- a/pulpcore/app/util.py
+++ b/pulpcore/app/util.py
@@ -1,6 +1,7 @@
import os
import tempfile
+from datetime import timedelta
import gnupg
from django.conf import settings
@@ -215,3 +216,17 @@ def get_telemetry_posting_url():
return DEV_URL
return PRODUCTION_URL
+
+
+def configure_telemetry():
+ url = get_telemetry_posting_url()
+ task_name = "pulpcore.app.tasks.telemetry.post_telemetry"
+ dispatch_interval = timedelta(days=1)
+ name = "Post Anonymous Telemetry Periodically"
+ # Initially only dev systems send data.
+ if url == PRODUCTION_URL:
+ models.TaskSchedule.objects.filter(task_name=task_name).delete()
+ else:
+ models.TaskSchedule.objects.update_or_create(
+ name=name, defaults={"task_name": task_name, "dispatch_interval": dispatch_interval}
+ )
diff --git a/pulpcore/tasking/pulpcore_worker.py b/pulpcore/tasking/pulpcore_worker.py
--- a/pulpcore/tasking/pulpcore_worker.py
+++ b/pulpcore/tasking/pulpcore_worker.py
@@ -33,6 +33,7 @@
from pulpcore.app.models import Worker, Task # noqa: E402: module level not at top of file
+from pulpcore.app.util import configure_telemetry # noqa: E402: module level not at top of file
from pulpcore.app.role_util import ( # noqa: E402: module level not at top of file
get_users_with_perms,
)
@@ -58,6 +59,10 @@
TASK_SCHEDULING_LOCK = 42
+def startup_hook():
+ configure_telemetry()
+
+
class PGAdvisoryLock:
"""
A context manager that will hold a postgres advisory lock non-blocking.
@@ -132,6 +137,8 @@ def __init__(self):
os.set_blocking(sentinel_w, False)
signal.set_wakeup_fd(sentinel_w)
+ startup_hook()
+
def _signal_handler(self, thesignal, frame):
# Reset signal handlers to default
# If you kill the process a second time it's not graceful anymore.
| evr="" is sent to the database on creating a Package
**Version**
current main branches of everything.
**Describe the bug**
It looks like the evr field is translated into the insert query even if it was not set from the validated_data. Also it seems it should be populated by a database trigger and never be saved directly.
```
[('INSERT INTO "rpm_package" ("content_ptr_id", "name", "epoch", "version", "release", "arch", "evr", "pkgId", "checksum_type", "summary", "description", "url", "changelogs", "files", "requires", "provides", "conflicts", "obsoletes", "suggests", "enhances", "recommends", "supplements", "location_base", "location_href", "rpm_buildhost", "rpm_group", "rpm_license", "rpm_packager", "rpm_sourcerpm", "rpm_vendor", "rpm_header_start", "rpm_header_end", "size_archive", "size_installed", "size_package", "time_build", "time_file", "is_modular") VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)', (UUID('ff1f1dd3-d4bf-4104-a2ea-7baed11dc6e8'), 'lemon', '0', '0', '1', 'noarch', '', '847df5d46b4d8ab2a7af5bfc0ccfb4681786a2b40b0f3a2b9a56bbed8dfeeee6', 'sha256', 'A lemon', '', 'http://lem.on/Fruits/', '[]', '[]', '[]', '[["citrussy", null, null, null, null, false], ["fruity", null, null, null, null, false], ["lemon", null, null, null, null, false], ["lemon", "EQ", "0", "0", "1", false]]', '[]', '[]', '[]', '[]', '[]', '[]', '', 'lemon-0-1.noarch.rpm', '68aa50d573f8', 'Fruits', 'No Age Limitation', 'Mr. Bartender', 'lemon-0-1.src.rpm', 'Lem:ON!', 4504, 5924, 124, 0, 6040, 1657826273, 1661253000, False))]
```
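For orientation, the intent described above — `evr` computed by a database trigger and never written by the ORM — is usually declared roughly like this. The field types and the exact `ReadonlyMeta` API are recalled from the django-readonly-field package and should be treated as an assumption, not a quote of the real pulp_rpm model:
```
from django.db import models


class Package(models.Model):
    # Illustrative subset of the real model's fields.
    name = models.TextField()
    epoch = models.TextField()
    version = models.TextField()
    release = models.TextField()
    # Kept in sync from epoch/version/release by a database trigger;
    # the ORM should never include it in INSERT or UPDATE statements.
    evr = models.TextField()

    class ReadonlyMeta:
        # django-readonly-field (assumed API) strips these columns from the
        # generated SQL -- unless the SQL compiler was already cached before
        # the swap took effect, which is the bug shown in the INSERT above.
        readonly = ["evr"]
```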
**To Reproduce**
run the pulp-cli test for rpm content
```
pytest -m pulp_rpm -k content
```
**Expected behavior**
test should pass
| This turned out to be a pulpcore issue.
In pulpcore we start doing ORM calls before all apps are ready. This defeats django-readonly-fields' ninja-move of swapping out the SQL Compiler, because Django has already computed and cached it in https://github.com/django/django/blob/main/django/db/backends/base/operations.py#L361 by then.
Lesson learned: Do not touch the ORM before everything is ready.
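For readers unfamiliar with that spot, the linked line boils down to roughly the following; this is a simplified, from-memory excerpt for illustration, not the verbatim Django source:
```
from importlib import import_module


class DatabaseOperations:
    """Abridged sketch of django/db/backends/base/operations.py."""

    compiler_module = "django.db.models.sql.compiler"

    def __init__(self):
        self._cache = None

    def compiler(self, compiler_name):
        # The first ORM query to run lands here and caches the compiler module.
        # If the readonly-field swap has not taken effect yet (because its app
        # was not ready), the stock compilers are cached for good, and read-only
        # columns such as evr end up being written in INSERT statements.
        if self._cache is None:
            self._cache = import_module(self.compiler_module)
        return getattr(self._cache, compiler_name)
```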
Plan to resolve: Move the installation of the telemetry schedule into a new worker-startup hook. | 2022-08-24T13:12:17 |
|
pulp/pulpcore | 3,131 | pulp__pulpcore-3131 | [
"3111"
] | c2ea3635e1d65930788931225d5b4c8f7cb5591b | diff --git a/pulpcore/plugin/stages/content_stages.py b/pulpcore/plugin/stages/content_stages.py
--- a/pulpcore/plugin/stages/content_stages.py
+++ b/pulpcore/plugin/stages/content_stages.py
@@ -165,23 +165,26 @@ def process_batch():
# on select-for-update. So, we select-for-update, in pulp_id order, the
# rows we're about to update as one db-call, and then do the update in a
# second.
+ #
+ # NOTE: select-for-update requires being in an atomic-transaction. We are
+ # **already in an atomic transaction** at this point as a result of the
+ # "with transaction.atomic():", above.
ids = [k.pulp_id for k in to_update_ca_bulk]
- with transaction.atomic():
- # "len()" forces the QA to be evaluated. Using exist() or count() won't
- # work for us - Django is smart enough to either not-order, or even
- # not-emit, a select-for-update in these cases.
- #
- # To maximize performance, we make sure to only ask for pulp_ids, and
- # avoid instantiating a python-object for the affected CAs by using
- # values_list()
- len(
- ContentArtifact.objects.filter(pulp_id__in=ids)
- .only("pulp_id")
- .order_by("pulp_id")
- .select_for_update()
- .values_list()
- )
- ContentArtifact.objects.bulk_update(to_update_ca_bulk, ["artifact"])
+ # "len()" forces the QuerySet to be evaluated. Using exist() or count() won't
+ # work for us - Django is smart enough to either not-order, or even
+ # not-emit, a select-for-update in these cases.
+ #
+ # To maximize performance, we make sure to only ask for pulp_ids, and
+ # avoid instantiating a python-object for the affected CAs by using
+ # values_list()
+ subq = (
+ ContentArtifact.objects.filter(pulp_id__in=ids)
+ .only("pulp_id")
+ .order_by("pulp_id")
+ .select_for_update()
+ )
+ len(subq.values_list())
+ ContentArtifact.objects.bulk_update(to_update_ca_bulk, ["artifact"])
# To avoid a similar deadlock issue when calling get_or_create, we sort the
# "new" CAs to make sure inserts happen in a defined order. Since we can't
| bulk_update() can still deadlock in content_stages
**Version**
pulpcore/3.14.16 (but problem exists in main)
**Describe the bug**
Syncing multiple repositories with high content overlap under high concurrency continues to hit occasional deadlocks.
Traceback and postgres log for the particular failure:
```
2022-08-10 15:51:29 EDT ERROR: deadlock detected
2022-08-10 15:51:29 EDT DETAIL: Process 55740 waits for ShareLock on transaction 61803; blocked by process 55746.
Process 55746 waits for ShareLock on transaction 61805; blocked by process 55740.
Process 55740: SELECT ....
Process 55746: COMMIT
2022-08-10 15:51:29 EDT HINT: See server log for query details.
2022-08-10 15:51:29 EDT CONTEXT: while locking tuple (209,51) in relation "core_contentartifact"
```
```
pulpcore-worker-5[54158]: pulp [88649f1c-3393-4693-aab7-d73b62eeda62]: pulpcore.tasking.pulpcore_worker:INFO: Task 407bb67b-65d0-4d65-b9c8-b1aa1f2c87fd failed (deadlock detected
pulpcore-worker-5[54158]: DETAIL: Process 55740 waits for ShareLock on transaction 61803; blocked by process 55746.
pulpcore-worker-5[54158]: Process 55746 waits for ShareLock on transaction 61805; blocked by process 55740.
pulpcore-worker-5[54158]: HINT: See server log for query details.
pulpcore-worker-5[54158]: CONTEXT: while locking tuple (209,51) in relation "core_contentartifact"
pulpcore-worker-5[54158]: )
pulpcore-worker-5[54158]: pulp [88649f1c-3393-4693-aab7-d73b62eeda62]: pulpcore.tasking.pulpcore_worker:INFO: File "/usr/lib/python3.6/site-packages/pulpcore/tasking/pulpcore_worker.py", line 342, in _perform_task
pulpcore-worker-5[54158]: result = func(*args, **kwargs)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/pulp_rpm/app/tasks/synchronizing.py", line 494, in synchronize
pulpcore-worker-5[54158]: version = dv.create()
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/pulpcore/plugin/stages/declarative_version.py", line 151, in create
pulpcore-worker-5[54158]: loop.run_until_complete(pipeline)
pulpcore-worker-5[54158]: File "/usr/lib64/python3.6/asyncio/base_events.py", line 484, in run_until_complete
pulpcore-worker-5[54158]: return future.result()
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/pulpcore/plugin/stages/api.py", line 225, in create_pipeline
pulpcore-worker-5[54158]: await asyncio.gather(*futures)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/pulpcore/plugin/stages/api.py", line 43, in __call__
pulpcore-worker-5[54158]: await self.run()
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/pulpcore/plugin/stages/content_stages.py", line 178, in run
pulpcore-worker-5[54158]: .order_by("pulp_id")
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/models/query.py", line 256, in __len__
pulpcore-worker-5[54158]: self._fetch_all()
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/models/query.py", line 1242, in _fetch_all
pulpcore-worker-5[54158]: self._result_cache = list(self._iterable_class(self))
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/models/query.py", line 144, in __iter__
pulpcore-worker-5[54158]: return compiler.results_iter(tuple_expected=True, chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/models/sql/compiler.py", line 1094, in results_iter
pulpcore-worker-5[54158]: results = self.execute_sql(MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/models/sql/compiler.py", line 1142, in execute_sql
pulpcore-worker-5[54158]: cursor.execute(sql, params)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/backends/utils.py", line 67, in execute
pulpcore-worker-5[54158]: return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/backends/utils.py", line 76, in _execute_with_wrappers
pulpcore-worker-5[54158]: return executor(sql, params, many, context)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/backends/utils.py", line 84, in _execute
pulpcore-worker-5[54158]: return self.cursor.execute(sql, params)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/utils.py", line 89, in __exit__
pulpcore-worker-5[54158]: raise dj_exc_value.with_traceback(traceback) from exc_value
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/backends/utils.py", line 84, in _execute
pulpcore-worker-5[54158]: return self.cursor.execute(sql, params)
```
**To Reproduce**
See the QE test-setup from https://bugzilla.redhat.com/show_bug.cgi?id=2062526. We have not been able to force the problem with a synthetic test case.
**Expected behavior**
All the repositories should sync all their content without any of the sync-processes being failed due to detected deadlocks.
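As a standalone illustration of the fix in the patch above: take the row locks for the overlapping ContentArtifact rows in one deterministic (pulp_id) order inside the same transaction, then perform the bulk_update. The function below is a sketch along those lines, not the actual pulpcore code:
```
from django.db import transaction

from pulpcore.plugin.models import ContentArtifact


def deadlock_safe_bulk_update(content_artifacts):
    ids = [ca.pulp_id for ca in content_artifacts]
    with transaction.atomic():
        # Evaluating the queryset acquires the row locks in pulp_id order, so
        # two workers updating overlapping content always lock rows in the same
        # order and can no longer deadlock each other.
        list(
            ContentArtifact.objects.filter(pulp_id__in=ids)
            .only("pulp_id")
            .order_by("pulp_id")
            .select_for_update()
            .values_list("pulp_id", flat=True)
        )
        ContentArtifact.objects.bulk_update(content_artifacts, ["artifact"])
```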
| https://bugzilla.redhat.com/show_bug.cgi?id=2082209 | 2022-08-24T16:20:51 |
|
pulp/pulpcore | 3,132 | pulp__pulpcore-3132 | [
"3111"
] | 649cb43321706bebff3cbcfbd6d861bde81fd4a7 | diff --git a/pulpcore/plugin/stages/content_stages.py b/pulpcore/plugin/stages/content_stages.py
--- a/pulpcore/plugin/stages/content_stages.py
+++ b/pulpcore/plugin/stages/content_stages.py
@@ -165,23 +165,26 @@ def process_batch():
# on select-for-update. So, we select-for-update, in pulp_id order, the
# rows we're about to update as one db-call, and then do the update in a
# second.
+ #
+ # NOTE: select-for-update requires being in an atomic-transaction. We are
+ # **already in an atomic transaction** at this point as a result of the
+ # "with transaction.atomic():", above.
ids = [k.pulp_id for k in to_update_ca_bulk]
- with transaction.atomic():
- # "len()" forces the QA to be evaluated. Using exist() or count() won't
- # work for us - Django is smart enough to either not-order, or even
- # not-emit, a select-for-update in these cases.
- #
- # To maximize performance, we make sure to only ask for pulp_ids, and
- # avoid instantiating a python-object for the affected CAs by using
- # values_list()
- len(
- ContentArtifact.objects.filter(pulp_id__in=ids)
- .only("pulp_id")
- .order_by("pulp_id")
- .select_for_update()
- .values_list()
- )
- ContentArtifact.objects.bulk_update(to_update_ca_bulk, ["artifact"])
+ # "len()" forces the QuerySet to be evaluated. Using exist() or count() won't
+ # work for us - Django is smart enough to either not-order, or even
+ # not-emit, a select-for-update in these cases.
+ #
+ # To maximize performance, we make sure to only ask for pulp_ids, and
+ # avoid instantiating a python-object for the affected CAs by using
+ # values_list()
+ subq = (
+ ContentArtifact.objects.filter(pulp_id__in=ids)
+ .only("pulp_id")
+ .order_by("pulp_id")
+ .select_for_update()
+ )
+ len(subq.values_list())
+ ContentArtifact.objects.bulk_update(to_update_ca_bulk, ["artifact"])
# To avoid a similar deadlock issue when calling get_or_create, we sort the
# "new" CAs to make sure inserts happen in a defined order. Since we can't
| bulk_update() can still deadlock in content_stages
**Version**
pulpcore/3.14.16 (but problem exists in main)
**Describe the bug**
Syncing multiple repositories with high content overlap under high concurrency continues to hit occasional deadlocks.
Traceback and postgres log for the particular failure:
```
2022-08-10 15:51:29 EDT ERROR: deadlock detected
2022-08-10 15:51:29 EDT DETAIL: Process 55740 waits for ShareLock on transaction 61803; blocked by process 55746.
Process 55746 waits for ShareLock on transaction 61805; blocked by process 55740.
Process 55740: SELECT ....
Process 55746: COMMIT
2022-08-10 15:51:29 EDT HINT: See server log for query details.
2022-08-10 15:51:29 EDT CONTEXT: while locking tuple (209,51) in relation "core_contentartifact"
```
```
pulpcore-worker-5[54158]: pulp [88649f1c-3393-4693-aab7-d73b62eeda62]: pulpcore.tasking.pulpcore_worker:INFO: Task 407bb67b-65d0-4d65-b9c8-b1aa1f2c87fd failed (deadlock detected
pulpcore-worker-5[54158]: DETAIL: Process 55740 waits for ShareLock on transaction 61803; blocked by process 55746.
pulpcore-worker-5[54158]: Process 55746 waits for ShareLock on transaction 61805; blocked by process 55740.
pulpcore-worker-5[54158]: HINT: See server log for query details.
pulpcore-worker-5[54158]: CONTEXT: while locking tuple (209,51) in relation "core_contentartifact"
pulpcore-worker-5[54158]: )
pulpcore-worker-5[54158]: pulp [88649f1c-3393-4693-aab7-d73b62eeda62]: pulpcore.tasking.pulpcore_worker:INFO: File "/usr/lib/python3.6/site-packages/pulpcore/tasking/pulpcore_worker.py", line 342, in _perform_task
pulpcore-worker-5[54158]: result = func(*args, **kwargs)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/pulp_rpm/app/tasks/synchronizing.py", line 494, in synchronize
pulpcore-worker-5[54158]: version = dv.create()
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/pulpcore/plugin/stages/declarative_version.py", line 151, in create
pulpcore-worker-5[54158]: loop.run_until_complete(pipeline)
pulpcore-worker-5[54158]: File "/usr/lib64/python3.6/asyncio/base_events.py", line 484, in run_until_complete
pulpcore-worker-5[54158]: return future.result()
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/pulpcore/plugin/stages/api.py", line 225, in create_pipeline
pulpcore-worker-5[54158]: await asyncio.gather(*futures)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/pulpcore/plugin/stages/api.py", line 43, in __call__
pulpcore-worker-5[54158]: await self.run()
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/pulpcore/plugin/stages/content_stages.py", line 178, in run
pulpcore-worker-5[54158]: .order_by("pulp_id")
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/models/query.py", line 256, in __len__
pulpcore-worker-5[54158]: self._fetch_all()
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/models/query.py", line 1242, in _fetch_all
pulpcore-worker-5[54158]: self._result_cache = list(self._iterable_class(self))
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/models/query.py", line 144, in __iter__
pulpcore-worker-5[54158]: return compiler.results_iter(tuple_expected=True, chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/models/sql/compiler.py", line 1094, in results_iter
pulpcore-worker-5[54158]: results = self.execute_sql(MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/models/sql/compiler.py", line 1142, in execute_sql
pulpcore-worker-5[54158]: cursor.execute(sql, params)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/backends/utils.py", line 67, in execute
pulpcore-worker-5[54158]: return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/backends/utils.py", line 76, in _execute_with_wrappers
pulpcore-worker-5[54158]: return executor(sql, params, many, context)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/backends/utils.py", line 84, in _execute
pulpcore-worker-5[54158]: return self.cursor.execute(sql, params)
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/utils.py", line 89, in __exit__
pulpcore-worker-5[54158]: raise dj_exc_value.with_traceback(traceback) from exc_value
pulpcore-worker-5[54158]: File "/usr/lib/python3.6/site-packages/django/db/backends/utils.py", line 84, in _execute
pulpcore-worker-5[54158]: return self.cursor.execute(sql, params)
```
**To Reproduce**
See the QE test-setup from https://bugzilla.redhat.com/show_bug.cgi?id=2062526. We have not been able to force the problem with a synthetic test case.
**Expected behavior**
All the repositories should sync all their content without any of the sync-processes being failed due to detected deadlocks.
| https://bugzilla.redhat.com/show_bug.cgi?id=2082209 | 2022-08-24T16:21:07 |