repo (stringclasses, 856 values) | pull_number (int64, 3 to 127k) | instance_id (stringlengths, 12 to 58) | issue_numbers (sequencelengths, 1 to 5) | base_commit (stringlengths, 40) | patch (stringlengths, 67 to 1.54M) | test_patch (stringlengths, 0 to 107M) | problem_statement (stringlengths, 3 to 307k) | hints_text (stringlengths, 0 to 908k) | created_at (timestamp[s]) |
---|---|---|---|---|---|---|---|---|---|
pulp/pulpcore | 3,850 | pulp__pulpcore-3850 | [
"3786"
] | 2e614cec5ab112623ca5e90790635d5cac6bd05b | diff --git a/pulpcore/app/views/repair.py b/pulpcore/app/views/repair.py
--- a/pulpcore/app/views/repair.py
+++ b/pulpcore/app/views/repair.py
@@ -1,4 +1,5 @@
from drf_spectacular.utils import extend_schema
+from django.conf import settings
from rest_framework.views import APIView
from pulpcore.app.response import OperationPostponedResponse
@@ -26,6 +27,9 @@ def post(self, request):
verify_checksums = serializer.validated_data["verify_checksums"]
- task = dispatch(repair_all_artifacts, args=[verify_checksums])
+ uri = "/api/v3/repair/"
+ if settings.DOMAIN_ENABLED:
+ uri = f"/{request.pulp_domain.name}{uri}"
+ task = dispatch(repair_all_artifacts, exclusive_resources=[uri], args=[verify_checksums])
return OperationPostponedResponse(task, request)
diff --git a/pulpcore/app/viewsets/reclaim.py b/pulpcore/app/viewsets/reclaim.py
--- a/pulpcore/app/viewsets/reclaim.py
+++ b/pulpcore/app/viewsets/reclaim.py
@@ -1,4 +1,5 @@
from drf_spectacular.utils import extend_schema
+from django.conf import settings
from rest_framework.viewsets import ViewSet
from pulpcore.app.response import OperationPostponedResponse
@@ -35,8 +36,17 @@ def reclaim(self, request):
repos.append(rv.repository)
keeplist_rv_pks.append(rv.pk)
+ if repos:
+ exclusive_resources = None
+ else:
+ uri = "/api/v3/repositories/reclaim_space/"
+ if settings.DOMAIN_ENABLED:
+ uri = f"/{request.pulp_domain.name}{uri}"
+ exclusive_resources = [uri]
+
task = dispatch(
reclaim_space,
+ exclusive_resources=exclusive_resources,
shared_resources=repos,
kwargs={
"repo_pks": reclaim_repo_pks,
| Global tasks like repair or reclaim should probably not run in parallel
**Version**
Please provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.
**Describe the bug**
Global tasks that involve all repos like repair or reclaim should run one at a time, like orphan cleanup https://github.com/pulp/pulpcore/blob/main/pulpcore/app/viewsets/orphans.py#L29
**To Reproduce**
Steps to reproduce the behavior:
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
Add any other context about the problem here. Please provide links to any previous discussions via Discourse or Bugzilla.
| 2023-05-17T14:41:24 |
||
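The patch above serializes the global repair task by dispatching it with an `exclusive_resources` entry that every invocation shares (the repair URI, optionally prefixed with the domain name). A minimal, self-contained sketch of the locking rule this relies on follows; the data structures and function are illustrative only, not the actual pulpcore task scheduler.
```
# Hypothetical model of the resource-locking rule: a task may start only if
# its exclusive resources are not already claimed (exclusively or shared) by
# a running task. Illustrative names, not pulpcore internals.

running = []  # list of (exclusive, shared) resource sets held by running tasks


def can_start(exclusive, shared=frozenset()):
    """Return True if a task with these resource claims may start now."""
    for held_exclusive, held_shared in running:
        # An exclusive claim conflicts with any prior claim on the same resource.
        if exclusive & (held_exclusive | held_shared):
            return False
        # A shared claim only conflicts with a prior *exclusive* claim.
        if shared & held_exclusive:
            return False
    return True


# Two repair tasks both claim "/api/v3/repair/" exclusively, so the second
# one waits instead of running in parallel.
running.append(({"/api/v3/repair/"}, frozenset()))
print(can_start({"/api/v3/repair/"}))          # False -> queued behind the first
print(can_start(set(), {"some-repository"}))   # True  -> unrelated task still runs
```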
pulp/pulpcore | 3,851 | pulp__pulpcore-3851 | [
"1947"
] | ed0cd8480063dd4f9c11f876d7fa2b932ae06637 | diff --git a/pulpcore/app/management/commands/handle-artifact-checksums.py b/pulpcore/app/management/commands/handle-artifact-checksums.py
--- a/pulpcore/app/management/commands/handle-artifact-checksums.py
+++ b/pulpcore/app/management/commands/handle-artifact-checksums.py
@@ -2,6 +2,8 @@
from gettext import gettext as _
+from aiohttp.client_exceptions import ClientResponseError
+
from django.conf import settings
from django.core.management import BaseCommand, CommandError
from django.db.models import Q, Sum
@@ -122,15 +124,23 @@ def _download_artifact(self, artifact, checksum, file_path):
if remote.policy == "immediate":
self.stdout.write(_("Restoring missing file {}").format(file_path))
downloader = remote.get_downloader(ra)
- dl_result = downloader.fetch()
- # FIXME in case url is not available anymore this will break
- if dl_result.artifact_attributes["sha256"] == artifact.sha256:
- with open(dl_result.path, "rb") as src:
- filename = artifact.file.name
- artifact.file.save(filename, src, save=False)
- setattr(artifact, checksum, dl_result.artifact_attributes[checksum])
- restored = True
- break
+ try:
+ dl_result = downloader.fetch()
+ except ClientResponseError as e:
+ self.stdout.write(
+ _("Redownload failed from '{}': {}.").format(ra.url, str(e))
+ )
+ else:
+ if dl_result.artifact_attributes["sha256"] == artifact.sha256:
+ with open(dl_result.path, "rb") as src:
+ filename = artifact.file.name
+ artifact.file.delete(save=False)
+ artifact.file.save(filename, src, save=False)
+ setattr(artifact, checksum, dl_result.artifact_attributes[checksum])
+ restored = True
+ break
+ self.stdout.write(_("Deleting unreparable file {}".format(file_path)))
+ artifact.file.delete(save=False)
else:
break
return restored
diff --git a/pulpcore/app/tasks/repository.py b/pulpcore/app/tasks/repository.py
--- a/pulpcore/app/tasks/repository.py
+++ b/pulpcore/app/tasks/repository.py
@@ -4,6 +4,7 @@
import asyncio
import hashlib
+from aiohttp.client_exceptions import ClientResponseError
from asgiref.sync import sync_to_async
from django.db import transaction
from rest_framework.serializers import ValidationError
@@ -66,22 +67,32 @@ async def _repair_ca(content_artifact, repaired=None):
log.warning(
"Artifact {} is unrepairable - no remote source".format(content_artifact.artifact)
)
+ log.warning(
+ "Deleting file for the unreparable artifact {}".format(content_artifact.artifact)
+ )
+ await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
async for remote_artifact in remote_artifacts:
detail_remote = await sync_to_async(remote_artifact.remote.cast)()
downloader = detail_remote.get_downloader(remote_artifact)
- dl_result = await downloader.run()
- if dl_result.artifact_attributes["sha256"] == content_artifact.artifact.sha256:
- with open(dl_result.path, "rb") as src:
- filename = content_artifact.artifact.file.name
- await sync_to_async(content_artifact.artifact.file.delete)(save=False)
- await sync_to_async(content_artifact.artifact.file.save)(filename, src, save=False)
- if repaired is not None:
- await repaired.aincrement()
- return True
- log.warn(_("Redownload failed from {}.").format(remote_artifact.url))
-
+ try:
+ dl_result = await downloader.run()
+ except ClientResponseError as e:
+ log.warn(_("Redownload failed from '{}': {}.").format(remote_artifact.url, str(e)))
+ else:
+ if dl_result.artifact_attributes["sha256"] == content_artifact.artifact.sha256:
+ with open(dl_result.path, "rb") as src:
+ filename = content_artifact.artifact.file.name
+ await sync_to_async(content_artifact.artifact.file.delete)(save=False)
+ await sync_to_async(content_artifact.artifact.file.save)(
+ filename, src, save=False
+ )
+ if repaired is not None:
+ await repaired.aincrement()
+ return True
+ log.warning("Deleting file for the unreparable artifact {}".format(content_artifact.artifact))
+ await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
| Repair feature should remove unrepairable corrupted file
Author: @ipanova ([email protected])
Redmine Issue: 7835, https://pulp.plan.io/issues/7835
---
In case repair feature cannot repair a corrupted file because RemoteArtifact is missing or in case url is not longer valid, it should remove from the fs the corrupted file
```
(06:27:25 PM) ipanova: dalley: maybe before returning False we could just remove the unrepaired corrupted file from the FS https://github.com/pulp/pulpcore/pull/991/files#diff-03bf88cd6f4c5051a7b1478ba05ea63287e7bb4c29feb00413b80ba35d22ccf2R73
(06:28:05 PM) dalley: +1
```
| From: @ipanova ([email protected])
Date: 2020-11-23T17:37:12Z
---
This should also be cleaned up in the django populate-missing-checksums command since it is using some parts of the repair feature
From: @dralley (dalley)
Date: 2020-11-24T16:14:35Z
---
We should consider if we can/should do anything else in case it is a repository metadata artifact.
This issue has been marked 'stale' due to lack of recent activity. If there is no further activity, the issue will be closed in another 30 days. Thank you for your contribution! | 2023-05-17T15:04:15 |
|
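The change wraps the re-download in a try/except so an HTTP error (aiohttp's `ClientResponseError`) no longer aborts the command, and deletes the corrupted file when no source can repair it. Below is a simplified, synchronous sketch of that control flow; the helper name is hypothetical, `download_attempts` is assumed to be a list of callables returning a downloaded file path, and a plain `OSError` stands in for the aiohttp exception.
```
# Simplified sketch of the repaired control flow (hypothetical helper; the
# real logic lives in handle-artifact-checksums and the repair task).
import hashlib
import os


def repair_file(path, expected_sha256, download_attempts):
    """Try each download source; delete the corrupted file if none can repair it."""
    for fetch in download_attempts:
        try:
            candidate = fetch()            # may raise on HTTP errors (e.g. a 404)
        except OSError as exc:             # stand-in for aiohttp ClientResponseError
            print(f"Redownload failed: {exc}")
            continue
        with open(candidate, "rb") as fp:
            digest = hashlib.sha256(fp.read()).hexdigest()
        if digest == expected_sha256:
            os.replace(candidate, path)    # restore the verified copy
            return True
    # No source could repair it: remove the corrupted file rather than keep it.
    if os.path.exists(path):
        os.remove(path)
    return False
```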
pulp/pulpcore | 3,857 | pulp__pulpcore-3857 | [
"3786"
] | 7502a4d36d3be7209f85ebe48fc83c1615b60448 | diff --git a/pulpcore/app/views/repair.py b/pulpcore/app/views/repair.py
--- a/pulpcore/app/views/repair.py
+++ b/pulpcore/app/views/repair.py
@@ -1,4 +1,5 @@
from drf_spectacular.utils import extend_schema
+from django.conf import settings
from rest_framework.views import APIView
from pulpcore.app.response import OperationPostponedResponse
@@ -26,6 +27,9 @@ def post(self, request):
verify_checksums = serializer.validated_data["verify_checksums"]
- task = dispatch(repair_all_artifacts, args=[verify_checksums])
+ uri = "/api/v3/repair/"
+ if settings.DOMAIN_ENABLED:
+ uri = f"/{request.pulp_domain.name}{uri}"
+ task = dispatch(repair_all_artifacts, exclusive_resources=[uri], args=[verify_checksums])
return OperationPostponedResponse(task, request)
diff --git a/pulpcore/app/viewsets/reclaim.py b/pulpcore/app/viewsets/reclaim.py
--- a/pulpcore/app/viewsets/reclaim.py
+++ b/pulpcore/app/viewsets/reclaim.py
@@ -1,4 +1,5 @@
from drf_spectacular.utils import extend_schema
+from django.conf import settings
from rest_framework.viewsets import ViewSet
from pulpcore.app.response import OperationPostponedResponse
@@ -35,8 +36,17 @@ def reclaim(self, request):
repos.append(rv.repository)
keeplist_rv_pks.append(rv.pk)
+ if repos:
+ exclusive_resources = None
+ else:
+ uri = "/api/v3/repositories/reclaim_space/"
+ if settings.DOMAIN_ENABLED:
+ uri = f"/{request.pulp_domain.name}{uri}"
+ exclusive_resources = [uri]
+
task = dispatch(
reclaim_space,
+ exclusive_resources=exclusive_resources,
shared_resources=repos,
kwargs={
"repo_pks": reclaim_repo_pks,
| Global tasks like repair or reclaim should probably not run in parallel
**Version**
Please provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.
**Describe the bug**
Global tasks that involve all repos like repair or reclaim should run one at a time, like orphan cleanup https://github.com/pulp/pulpcore/blob/main/pulpcore/app/viewsets/orphans.py#L29
**To Reproduce**
Steps to reproduce the behavior:
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
Add any other context about the problem here. Please provide links to any previous discussions via Discourse or Bugzilla.
| 2023-05-18T01:03:03 |
||
pulp/pulpcore | 3,875 | pulp__pulpcore-3875 | [
"3876"
] | 0a50a368b5090135076b608c61db01514350d46d | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -289,6 +289,7 @@ def _match_distribution(cls, path):
.select_related(
"repository",
"repository_version",
+ "repository_version__repository",
"publication",
"remote",
"pulp_domain",
| The repository of a repository version should be pre-selected when looking up the distribution
https://github.com/pulp/pulpcore/blob/0a50a368b5090135076b608c61db01514350d46d/pulpcore/content/handler.py#L291
| 2023-05-24T14:16:23 |
||
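The one-line fix adds the nested `repository_version__repository` hop to `select_related()`, so reading `distribution.repository_version.repository` no longer triggers an extra query per request. The standalone sketch below uses hypothetical stand-in models (not the pulpcore models) to make the query-count difference visible; it assumes only Django itself is installed.
```
# Standalone sketch with stand-in models showing why the extra hop matters.
import django
from django.conf import settings

settings.configure(
    DEBUG=True,  # required so connection.queries records executed SQL
    DATABASES={"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}},
    INSTALLED_APPS=[],
)
django.setup()

from django.db import connection, models  # noqa: E402


class Repository(models.Model):
    class Meta:
        app_label = "demo"


class RepositoryVersion(models.Model):
    repository = models.ForeignKey(Repository, on_delete=models.CASCADE)

    class Meta:
        app_label = "demo"


class Distribution(models.Model):
    repository_version = models.ForeignKey(RepositoryVersion, on_delete=models.CASCADE)

    class Meta:
        app_label = "demo"


with connection.schema_editor() as editor:
    for model in (Repository, RepositoryVersion, Distribution):
        editor.create_model(model)

version = RepositoryVersion.objects.create(repository=Repository.objects.create())
Distribution.objects.create(repository_version=version)

for hops in ("repository_version", "repository_version__repository"):
    connection.queries_log.clear()
    distro = Distribution.objects.select_related(hops).get()
    _ = distro.repository_version.repository  # lazy-loads unless pre-selected
    print(hops, "->", len(connection.queries), "queries")  # 2 vs. 1
```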
pulp/pulpcore | 3,877 | pulp__pulpcore-3877 | [
"3876"
] | 21e5d9eb2947877129c4cac5fc42fd0785a145ca | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -289,6 +289,7 @@ def _match_distribution(cls, path):
.select_related(
"repository",
"repository_version",
+ "repository_version__repository",
"publication",
"remote",
"pulp_domain",
| The repository of a repository version should be pre-selected when looking up the distribution
https://github.com/pulp/pulpcore/blob/0a50a368b5090135076b608c61db01514350d46d/pulpcore/content/handler.py#L291
| 2023-05-25T10:34:49 |
||
pulp/pulpcore | 3,878 | pulp__pulpcore-3878 | [
"3876"
] | 286824c963df13b62fd33a144e3e1e78bf5007e2 | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -289,6 +289,7 @@ def _match_distribution(cls, path):
.select_related(
"repository",
"repository_version",
+ "repository_version__repository",
"publication",
"remote",
"pulp_domain",
| The repository of a repository version should be pre-selected when looking up the distribution
https://github.com/pulp/pulpcore/blob/0a50a368b5090135076b608c61db01514350d46d/pulpcore/content/handler.py#L291
| 2023-05-25T10:35:02 |
||
pulp/pulpcore | 3,879 | pulp__pulpcore-3879 | [
"1947"
] | a377119ab923e0c942a56a18660db6d96aad3d5b | diff --git a/pulpcore/app/management/commands/handle-artifact-checksums.py b/pulpcore/app/management/commands/handle-artifact-checksums.py
--- a/pulpcore/app/management/commands/handle-artifact-checksums.py
+++ b/pulpcore/app/management/commands/handle-artifact-checksums.py
@@ -2,6 +2,8 @@
from gettext import gettext as _
+from aiohttp.client_exceptions import ClientResponseError
+
from django.conf import settings
from django.core.management import BaseCommand, CommandError
from django.db.models import Q, Sum
@@ -124,15 +126,23 @@ def _download_artifact(self, artifact, checksum, file_path):
if remote.policy == "immediate":
self.stdout.write(_("Restoring missing file {}").format(file_path))
downloader = remote.get_downloader(ra)
- dl_result = downloader.fetch()
- # FIXME in case url is not available anymore this will break
- if dl_result.artifact_attributes["sha256"] == artifact.sha256:
- with open(dl_result.path, "rb") as src:
- filename = artifact.file.name
- artifact.file.save(filename, src, save=False)
- setattr(artifact, checksum, dl_result.artifact_attributes[checksum])
- restored = True
- break
+ try:
+ dl_result = downloader.fetch()
+ except ClientResponseError as e:
+ self.stdout.write(
+ _("Redownload failed from '{}': {}.").format(ra.url, str(e))
+ )
+ else:
+ if dl_result.artifact_attributes["sha256"] == artifact.sha256:
+ with open(dl_result.path, "rb") as src:
+ filename = artifact.file.name
+ artifact.file.delete(save=False)
+ artifact.file.save(filename, src, save=False)
+ setattr(artifact, checksum, dl_result.artifact_attributes[checksum])
+ restored = True
+ break
+ self.stdout.write(_("Deleting unreparable file {}".format(file_path)))
+ artifact.file.delete(save=False)
else:
break
return restored
diff --git a/pulpcore/app/tasks/repository.py b/pulpcore/app/tasks/repository.py
--- a/pulpcore/app/tasks/repository.py
+++ b/pulpcore/app/tasks/repository.py
@@ -4,6 +4,7 @@
import asyncio
import hashlib
+from aiohttp.client_exceptions import ClientResponseError
from asgiref.sync import sync_to_async
from django.core.files.storage import default_storage
from django.db import transaction
@@ -69,22 +70,32 @@ async def _repair_ca(content_artifact, repaired=None):
log.warn(
_("Artifact {} is unrepairable - no remote source".format(content_artifact.artifact))
)
+ log.warning(
+ "Deleting file for the unreparable artifact {}".format(content_artifact.artifact)
+ )
+ await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
async for remote_artifact in remote_artifacts:
detail_remote = await sync_to_async(remote_artifact.remote.cast)()
downloader = detail_remote.get_downloader(remote_artifact)
- dl_result = await downloader.run()
- if dl_result.artifact_attributes["sha256"] == content_artifact.artifact.sha256:
- with open(dl_result.path, "rb") as src:
- filename = content_artifact.artifact.file.name
- await sync_to_async(content_artifact.artifact.file.delete)(save=False)
- await sync_to_async(content_artifact.artifact.file.save)(filename, src, save=False)
- if repaired is not None:
- await repaired.aincrement()
- return True
- log.warn(_("Redownload failed from {}.").format(remote_artifact.url))
-
+ try:
+ dl_result = await downloader.run()
+ except ClientResponseError as e:
+ log.warn(_("Redownload failed from '{}': {}.").format(remote_artifact.url, str(e)))
+ else:
+ if dl_result.artifact_attributes["sha256"] == content_artifact.artifact.sha256:
+ with open(dl_result.path, "rb") as src:
+ filename = content_artifact.artifact.file.name
+ await sync_to_async(content_artifact.artifact.file.delete)(save=False)
+ await sync_to_async(content_artifact.artifact.file.save)(
+ filename, src, save=False
+ )
+ if repaired is not None:
+ await repaired.aincrement()
+ return True
+ log.warning("Deleting file for the unreparable artifact {}".format(content_artifact.artifact))
+ await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
| Repair feature should remove unrepairable corrupted file
Author: @ipanova ([email protected])
Redmine Issue: 7835, https://pulp.plan.io/issues/7835
---
In case repair feature cannot repair a corrupted file because RemoteArtifact is missing or in case url is not longer valid, it should remove from the fs the corrupted file
```
(06:27:25 PM) ipanova: dalley: maybe before returning False we could just remove the unrepaired corrupted file from the FS https://github.com/pulp/pulpcore/pull/991/files#diff-03bf88cd6f4c5051a7b1478ba05ea63287e7bb4c29feb00413b80ba35d22ccf2R73
(06:28:05 PM) dalley: +1
```
| From: @ipanova ([email protected])
Date: 2020-11-23T17:37:12Z
---
This should also be cleaned up in the django populate-missing-checksums command since it is using some parts of the repair feature
From: @dralley (dalley)
Date: 2020-11-24T16:14:35Z
---
We should consider if we can/should do anything else in case it is a repository metadata artifact.
This issue has been marked 'stale' due to lack of recent activity. If there is no further activity, the issue will be closed in another 30 days. Thank you for your contribution! | 2023-05-25T12:46:07 |
|
pulp/pulpcore | 3,880 | pulp__pulpcore-3880 | [
"1947"
] | f80c4ecaae4d28771058ef4734bbe767ab6b9c0a | diff --git a/pulpcore/app/management/commands/handle-artifact-checksums.py b/pulpcore/app/management/commands/handle-artifact-checksums.py
--- a/pulpcore/app/management/commands/handle-artifact-checksums.py
+++ b/pulpcore/app/management/commands/handle-artifact-checksums.py
@@ -2,6 +2,8 @@
from gettext import gettext as _
+from aiohttp.client_exceptions import ClientResponseError
+
from django.conf import settings
from django.core.management import BaseCommand, CommandError
from django.db.models import Q, Sum
@@ -122,15 +124,23 @@ def _download_artifact(self, artifact, checksum, file_path):
if remote.policy == "immediate":
self.stdout.write(_("Restoring missing file {}").format(file_path))
downloader = remote.get_downloader(ra)
- dl_result = downloader.fetch()
- # FIXME in case url is not available anymore this will break
- if dl_result.artifact_attributes["sha256"] == artifact.sha256:
- with open(dl_result.path, "rb") as src:
- filename = artifact.file.name
- artifact.file.save(filename, src, save=False)
- setattr(artifact, checksum, dl_result.artifact_attributes[checksum])
- restored = True
- break
+ try:
+ dl_result = downloader.fetch()
+ except ClientResponseError as e:
+ self.stdout.write(
+ _("Redownload failed from '{}': {}.").format(ra.url, str(e))
+ )
+ else:
+ if dl_result.artifact_attributes["sha256"] == artifact.sha256:
+ with open(dl_result.path, "rb") as src:
+ filename = artifact.file.name
+ artifact.file.delete(save=False)
+ artifact.file.save(filename, src, save=False)
+ setattr(artifact, checksum, dl_result.artifact_attributes[checksum])
+ restored = True
+ break
+ self.stdout.write(_("Deleting unreparable file {}".format(file_path)))
+ artifact.file.delete(save=False)
else:
break
return restored
diff --git a/pulpcore/app/tasks/repository.py b/pulpcore/app/tasks/repository.py
--- a/pulpcore/app/tasks/repository.py
+++ b/pulpcore/app/tasks/repository.py
@@ -4,6 +4,7 @@
import asyncio
import hashlib
+from aiohttp.client_exceptions import ClientResponseError
from asgiref.sync import sync_to_async
from django.db import transaction
from rest_framework.serializers import ValidationError
@@ -69,22 +70,32 @@ async def _repair_ca(content_artifact, repaired=None):
log.warning(
"Artifact {} is unrepairable - no remote source".format(content_artifact.artifact)
)
+ log.warning(
+ "Deleting file for the unreparable artifact {}".format(content_artifact.artifact)
+ )
+ await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
async for remote_artifact in remote_artifacts:
detail_remote = await sync_to_async(remote_artifact.remote.cast)()
downloader = detail_remote.get_downloader(remote_artifact)
- dl_result = await downloader.run()
- if dl_result.artifact_attributes["sha256"] == content_artifact.artifact.sha256:
- with open(dl_result.path, "rb") as src:
- filename = content_artifact.artifact.file.name
- await sync_to_async(content_artifact.artifact.file.delete)(save=False)
- await sync_to_async(content_artifact.artifact.file.save)(filename, src, save=False)
- if repaired is not None:
- await repaired.aincrement()
- return True
- log.warn(_("Redownload failed from {}.").format(remote_artifact.url))
-
+ try:
+ dl_result = await downloader.run()
+ except ClientResponseError as e:
+ log.warn(_("Redownload failed from '{}': {}.").format(remote_artifact.url, str(e)))
+ else:
+ if dl_result.artifact_attributes["sha256"] == content_artifact.artifact.sha256:
+ with open(dl_result.path, "rb") as src:
+ filename = content_artifact.artifact.file.name
+ await sync_to_async(content_artifact.artifact.file.delete)(save=False)
+ await sync_to_async(content_artifact.artifact.file.save)(
+ filename, src, save=False
+ )
+ if repaired is not None:
+ await repaired.aincrement()
+ return True
+ log.warning("Deleting file for the unreparable artifact {}".format(content_artifact.artifact))
+ await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
| Repair feature should remove unrepairable corrupted file
Author: @ipanova ([email protected])
Redmine Issue: 7835, https://pulp.plan.io/issues/7835
---
In case repair feature cannot repair a corrupted file because RemoteArtifact is missing or in case url is not longer valid, it should remove from the fs the corrupted file
```
(06:27:25 PM) ipanova: dalley: maybe before returning False we could just remove the unrepaired corrupted file from the FS https://github.com/pulp/pulpcore/pull/991/files#diff-03bf88cd6f4c5051a7b1478ba05ea63287e7bb4c29feb00413b80ba35d22ccf2R73
(06:28:05 PM) dalley: +1
```
| From: @ipanova ([email protected])
Date: 2020-11-23T17:37:12Z
---
This should also be cleaned up in the django populate-missing-checksums command since it is using some parts of the repair feature
From: @dralley (dalley)
Date: 2020-11-24T16:14:35Z
---
We should consider if we can/should do anything else in case it is a repository metadata artifact.
This issue has been marked 'stale' due to lack of recent activity. If there is no further activity, the issue will be closed in another 30 days. Thank you for your contribution! | 2023-05-25T12:47:52 |
|
pulp/pulpcore | 3,881 | pulp__pulpcore-3881 | [
"1947"
] | 6a46bd507a9563623c002b33f728cbe42bbcc515 | diff --git a/pulpcore/app/management/commands/handle-artifact-checksums.py b/pulpcore/app/management/commands/handle-artifact-checksums.py
--- a/pulpcore/app/management/commands/handle-artifact-checksums.py
+++ b/pulpcore/app/management/commands/handle-artifact-checksums.py
@@ -2,6 +2,8 @@
from gettext import gettext as _
+from aiohttp.client_exceptions import ClientResponseError
+
from django.conf import settings
from django.core.management import BaseCommand, CommandError
from django.db.models import Q, Sum
@@ -122,15 +124,23 @@ def _download_artifact(self, artifact, checksum, file_path):
if remote.policy == "immediate":
self.stdout.write(_("Restoring missing file {}").format(file_path))
downloader = remote.get_downloader(ra)
- dl_result = downloader.fetch()
- # FIXME in case url is not available anymore this will break
- if dl_result.artifact_attributes["sha256"] == artifact.sha256:
- with open(dl_result.path, "rb") as src:
- filename = artifact.file.name
- artifact.file.save(filename, src, save=False)
- setattr(artifact, checksum, dl_result.artifact_attributes[checksum])
- restored = True
- break
+ try:
+ dl_result = downloader.fetch()
+ except ClientResponseError as e:
+ self.stdout.write(
+ _("Redownload failed from '{}': {}.").format(ra.url, str(e))
+ )
+ else:
+ if dl_result.artifact_attributes["sha256"] == artifact.sha256:
+ with open(dl_result.path, "rb") as src:
+ filename = artifact.file.name
+ artifact.file.delete(save=False)
+ artifact.file.save(filename, src, save=False)
+ setattr(artifact, checksum, dl_result.artifact_attributes[checksum])
+ restored = True
+ break
+ self.stdout.write(_("Deleting unreparable file {}".format(file_path)))
+ artifact.file.delete(save=False)
else:
break
return restored
diff --git a/pulpcore/app/tasks/repository.py b/pulpcore/app/tasks/repository.py
--- a/pulpcore/app/tasks/repository.py
+++ b/pulpcore/app/tasks/repository.py
@@ -4,6 +4,7 @@
import asyncio
import hashlib
+from aiohttp.client_exceptions import ClientResponseError
from asgiref.sync import sync_to_async
from django.core.files.storage import default_storage
from django.db import transaction
@@ -69,22 +70,32 @@ async def _repair_ca(content_artifact, repaired=None):
log.warning(
"Artifact {} is unrepairable - no remote source".format(content_artifact.artifact)
)
+ log.warning(
+ "Deleting file for the unreparable artifact {}".format(content_artifact.artifact)
+ )
+ await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
async for remote_artifact in remote_artifacts:
detail_remote = await sync_to_async(remote_artifact.remote.cast)()
downloader = detail_remote.get_downloader(remote_artifact)
- dl_result = await downloader.run()
- if dl_result.artifact_attributes["sha256"] == content_artifact.artifact.sha256:
- with open(dl_result.path, "rb") as src:
- filename = content_artifact.artifact.file.name
- await sync_to_async(content_artifact.artifact.file.delete)(save=False)
- await sync_to_async(content_artifact.artifact.file.save)(filename, src, save=False)
- if repaired is not None:
- await repaired.aincrement()
- return True
- log.warn(_("Redownload failed from {}.").format(remote_artifact.url))
-
+ try:
+ dl_result = await downloader.run()
+ except ClientResponseError as e:
+ log.warn(_("Redownload failed from '{}': {}.").format(remote_artifact.url, str(e)))
+ else:
+ if dl_result.artifact_attributes["sha256"] == content_artifact.artifact.sha256:
+ with open(dl_result.path, "rb") as src:
+ filename = content_artifact.artifact.file.name
+ await sync_to_async(content_artifact.artifact.file.delete)(save=False)
+ await sync_to_async(content_artifact.artifact.file.save)(
+ filename, src, save=False
+ )
+ if repaired is not None:
+ await repaired.aincrement()
+ return True
+ log.warning("Deleting file for the unreparable artifact {}".format(content_artifact.artifact))
+ await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
| Repair feature should remove unrepairable corrupted file
Author: @ipanova ([email protected])
Redmine Issue: 7835, https://pulp.plan.io/issues/7835
---
In case repair feature cannot repair a corrupted file because RemoteArtifact is missing or in case url is not longer valid, it should remove from the fs the corrupted file
```
(06:27:25 PM) ipanova: dalley: maybe before returning False we could just remove the unrepaired corrupted file from the FS https://github.com/pulp/pulpcore/pull/991/files#diff-03bf88cd6f4c5051a7b1478ba05ea63287e7bb4c29feb00413b80ba35d22ccf2R73
(06:28:05 PM) dalley: +1
```
| From: @ipanova ([email protected])
Date: 2020-11-23T17:37:12Z
---
This should also be cleaned up in the django populate-missing-checksums command since it is using some parts of the repair feature
From: @dralley (dalley)
Date: 2020-11-24T16:14:35Z
---
We should consider if we can/should do anything else in case it is a repository metadata artifact.
This issue has been marked 'stale' due to lack of recent activity. If there is no further activity, the issue will be closed in another 30 days. Thank you for your contribution! | 2023-05-25T12:48:06 |
|
pulp/pulpcore | 3,882 | pulp__pulpcore-3882 | [
"1947"
] | c8db17ca210193116045d7845fbc06394fce7390 | diff --git a/pulpcore/app/management/commands/handle-artifact-checksums.py b/pulpcore/app/management/commands/handle-artifact-checksums.py
--- a/pulpcore/app/management/commands/handle-artifact-checksums.py
+++ b/pulpcore/app/management/commands/handle-artifact-checksums.py
@@ -2,6 +2,8 @@
from gettext import gettext as _
+from aiohttp.client_exceptions import ClientResponseError
+
from django.conf import settings
from django.core.management import BaseCommand, CommandError
from django.db.models import Q, Sum
@@ -122,15 +124,23 @@ def _download_artifact(self, artifact, checksum, file_path):
if remote.policy == "immediate":
self.stdout.write(_("Restoring missing file {}").format(file_path))
downloader = remote.get_downloader(ra)
- dl_result = downloader.fetch()
- # FIXME in case url is not available anymore this will break
- if dl_result.artifact_attributes["sha256"] == artifact.sha256:
- with open(dl_result.path, "rb") as src:
- filename = artifact.file.name
- artifact.file.save(filename, src, save=False)
- setattr(artifact, checksum, dl_result.artifact_attributes[checksum])
- restored = True
- break
+ try:
+ dl_result = downloader.fetch()
+ except ClientResponseError as e:
+ self.stdout.write(
+ _("Redownload failed from '{}': {}.").format(ra.url, str(e))
+ )
+ else:
+ if dl_result.artifact_attributes["sha256"] == artifact.sha256:
+ with open(dl_result.path, "rb") as src:
+ filename = artifact.file.name
+ artifact.file.delete(save=False)
+ artifact.file.save(filename, src, save=False)
+ setattr(artifact, checksum, dl_result.artifact_attributes[checksum])
+ restored = True
+ break
+ self.stdout.write(_("Deleting unreparable file {}".format(file_path)))
+ artifact.file.delete(save=False)
else:
break
return restored
diff --git a/pulpcore/app/tasks/repository.py b/pulpcore/app/tasks/repository.py
--- a/pulpcore/app/tasks/repository.py
+++ b/pulpcore/app/tasks/repository.py
@@ -4,6 +4,7 @@
import asyncio
import hashlib
+from aiohttp.client_exceptions import ClientResponseError
from asgiref.sync import sync_to_async
from django.core.files.storage import default_storage
from django.db import transaction
@@ -69,22 +70,32 @@ async def _repair_ca(content_artifact, repaired=None):
log.warning(
"Artifact {} is unrepairable - no remote source".format(content_artifact.artifact)
)
+ log.warning(
+ "Deleting file for the unreparable artifact {}".format(content_artifact.artifact)
+ )
+ await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
async for remote_artifact in remote_artifacts:
detail_remote = await sync_to_async(remote_artifact.remote.cast)()
downloader = detail_remote.get_downloader(remote_artifact)
- dl_result = await downloader.run()
- if dl_result.artifact_attributes["sha256"] == content_artifact.artifact.sha256:
- with open(dl_result.path, "rb") as src:
- filename = content_artifact.artifact.file.name
- await sync_to_async(content_artifact.artifact.file.delete)(save=False)
- await sync_to_async(content_artifact.artifact.file.save)(filename, src, save=False)
- if repaired is not None:
- await repaired.aincrement()
- return True
- log.warn(_("Redownload failed from {}.").format(remote_artifact.url))
-
+ try:
+ dl_result = await downloader.run()
+ except ClientResponseError as e:
+ log.warn(_("Redownload failed from '{}': {}.").format(remote_artifact.url, str(e)))
+ else:
+ if dl_result.artifact_attributes["sha256"] == content_artifact.artifact.sha256:
+ with open(dl_result.path, "rb") as src:
+ filename = content_artifact.artifact.file.name
+ await sync_to_async(content_artifact.artifact.file.delete)(save=False)
+ await sync_to_async(content_artifact.artifact.file.save)(
+ filename, src, save=False
+ )
+ if repaired is not None:
+ await repaired.aincrement()
+ return True
+ log.warning("Deleting file for the unreparable artifact {}".format(content_artifact.artifact))
+ await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
| Repair feature should remove unrepairable corrupted file
Author: @ipanova ([email protected])
Redmine Issue: 7835, https://pulp.plan.io/issues/7835
---
In case repair feature cannot repair a corrupted file because RemoteArtifact is missing or in case url is not longer valid, it should remove from the fs the corrupted file
```
(06:27:25 PM) ipanova: dalley: maybe before returning False we could just remove the unrepaired corrupted file from the FS https://github.com/pulp/pulpcore/pull/991/files#diff-03bf88cd6f4c5051a7b1478ba05ea63287e7bb4c29feb00413b80ba35d22ccf2R73
(06:28:05 PM) dalley: +1
```
| From: @ipanova ([email protected])
Date: 2020-11-23T17:37:12Z
---
This should also be cleaned up in the django populate-missing-checksums command since it is using some parts of the repair feature
From: @dralley (dalley)
Date: 2020-11-24T16:14:35Z
---
We should consider if we can/should do anything else in case it is a repository metadata artifact.
This issue has been marked 'stale' due to lack of recent activity. If there is no further activity, the issue will be closed in another 30 days. Thank you for your contribution! | 2023-05-25T12:48:39 |
|
pulp/pulpcore | 3,884 | pulp__pulpcore-3884 | [
"3869"
] | 08868e9ff6c2871ebe47e829857fb2476e1b2e99 | diff --git a/pulpcore/app/monkeypatch.py b/pulpcore/app/monkeypatch.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/app/monkeypatch.py
@@ -0,0 +1,66 @@
+import sys
+
+# This is a monkeypatch for https://github.com/pulp/pulpcore/issues/3869
+if sys.version_info.major == 3 and sys.version_info.minor < 12:
+ # Code copied from the Python 3.12 standard library
+ # We modify the default gzip compression level for writing streams from
+ # 9 to 1, attempting to vendor the minimum amount of code.
+ # -------------------------------------------------------------------
+ # tarfile.py
+ # -------------------------------------------------------------------
+ # Copyright (C) 2002 Lars Gustaebel <[email protected]>
+ # All rights reserved.
+ #
+ # Permission is hereby granted, free of charge, to any person
+ # obtaining a copy of this software and associated documentation
+ # files (the "Software"), to deal in the Software without
+ # restriction, including without limitation the rights to use,
+ # copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the
+ # Software is furnished to do so, subject to the following
+ # conditions:
+ #
+ # The above copyright notice and this permission notice shall be
+ # included in all copies or substantial portions of the Software.
+ #
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ # OTHER DEALINGS IN THE SOFTWARE.
+
+ import tarfile
+ from tarfile import NUL
+ import struct
+ import os
+ import time
+
+ class _Stream(tarfile._Stream):
+ """Class that serves as an adapter between TarFile and
+ a stream-like object. The stream-like object only
+ needs to have a read() or write() method and is accessed
+ blockwise. Use of gzip or bzip2 compression is possible.
+ A stream-like object could be for example: sys.stdin,
+ sys.stdout, a socket, a tape device etc.
+
+ _Stream is intended to be used only internally.
+ """
+
+ def _init_write_gz(self):
+ """Initialize for writing with gzip compression."""
+ self.cmp = self.zlib.compressobj(
+ 1, self.zlib.DEFLATED, -self.zlib.MAX_WBITS, self.zlib.DEF_MEM_LEVEL, 0
+ )
+ timestamp = struct.pack("<L", int(time.time()))
+ self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
+ if self.name.endswith(".gz"):
+ self.name = self.name[:-3]
+ # Honor "directory components removed" from RFC1952
+ self.name = os.path.basename(self.name)
+ # RFC1952 says we must use ISO-8859-1 for the FNAME field.
+ self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
+
+ tarfile._Stream = _Stream
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -5,7 +5,6 @@
import os.path
import subprocess
import tarfile
-
from distutils.util import strtobool
from gettext import gettext as _
from glob import glob
| Exports are bottlenecked by gzip compression which cannot be disabled
**Version**
Any
**Describe the bug**
>
> We identified a severe bottleneck in the way hammer exports work and found the code in upstream Pulp.
>
> Line 406 of https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/export.py
>
> "with tarfile.open(tarfile_fp, "w|gz", fileobj=split_process.stdin)"
>
> The tarfile Python module creates tar files using the gizp Python module.
>
> Data compression for the gzip Python module is provided by the zlib Python module.
>
> The zlib Python module calls the zlib library.
>
> If defaults are used the whole way through this series of events, the result is a single-threaded pulp process doing compression of a tarball containing a massive content library. This bottleneck can make large hammer exports take several days.
>
> Modifying the lines that tell the tarfile.open function to NOT use compression ( change "w|gz" to "w" ) dramatically speeds up the hammer export. In our testing it reduced the time from days to just hours. The drawback is the file size was significantly larger, but the trade-off is worthwhile given we have tight timeframes and plentiful disk capacity.
>
> Can this bottleneck be addressed with multi-threaded gzip compression?
>
> and/or
>
> Can a hammer command line option for no compression be implemented?
>
> Run a hammer export and monitor Pulp processes. One process will run at 100% CPU. Modify the abovementioned Python script to NOT use gzip compression, and an uncompressed tarball will be created instead much quicker and with multiple Pulp processes.
>
>
> Steps to Reproduce:
> 1. Run a "hammer export". Monitor the Pulp process CPU usage and time taken to complete export.
> 2. Change the abovementioned Python code in Pulp.
> 3. Run a "hammer export" again and note performance improvement.
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2188504
| I don't know that disabling compression entirely is a good idea. However, the default compression level used is level 9, the most computationally expensive (slow) one. Given level 9 is (roughly) 6x slower than level 1, but only compresses about 20% better, this is probably a poor tradeoff.
We should look at using levels 1-3 instead. | 2023-05-25T15:28:46 |
|
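The monkeypatch above vendors the 3.12 `_Stream` gzip writer solely to change the hard-coded compression level for `"w|gz"` streams from 9 to 1, trading some archive size for a large speedup. A small, self-contained measurement of that tradeoff using `zlib` directly is shown below; the payload is synthetic and purely illustrative, but the same approach can be used to check the "roughly 6x slower for about 20% better compression" figure quoted in the hints.
```
# Compare zlib level 1 and level 9 (tarfile's former default for "w|gz")
# on a synthetic payload mixing incompressible and highly compressible data.
import os
import time
import zlib

payload = os.urandom(512 * 1024) + b"A" * (8 * 1024 * 1024)

for level in (1, 9):
    start = time.perf_counter()
    compressed = zlib.compress(payload, level)
    elapsed = time.perf_counter() - start
    print(f"level {level}: {len(compressed) / len(payload):.2%} of original "
          f"in {elapsed:.3f}s")
```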
pulp/pulpcore | 3,886 | pulp__pulpcore-3886 | [
"3737"
] | a44a1f1ce761e46d4bcb5f3f40fa466081ae8683 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -335,6 +335,18 @@ def validate_toc(toc_filename):
raise ValidationError(_("Missing 'files' or 'meta' keys in table-of-contents!"))
base_dir = os.path.dirname(toc_filename)
+
+ # Regardless of what the TOC says, it's possible for a previous import to have
+ # failed after successfully creating the combined file. If the TOC specifies multiple
+ # chunks, but the "expected result" exists, ignore the chunk-list and process as if
+ # it's all there ever was.
+ top_level_file = os.path.join(base_dir, the_toc["meta"]["file"])
+ if len(the_toc["files"]) > 1 and os.path.isfile(top_level_file):
+ the_toc["files"] = {the_toc["meta"]["file"]: the_toc["meta"]["global_hash"]}
+
+ # At this point, we either have the original chunks, or we're validating the
+ # full-file as a single chunk. Validate the hash(es).
+
# Points at chunks that exist?
missing_files = []
for f in sorted(the_toc["files"].keys()):
@@ -369,17 +381,7 @@ def validate_toc(toc_filename):
return the_toc
- def validate_and_assemble(toc_filename):
- """Validate checksums of, and reassemble, chunks in table-of-contents file."""
- the_toc = validate_toc(toc_filename)
- toc_dir = os.path.dirname(toc_filename)
- result_file = os.path.join(toc_dir, the_toc["meta"]["file"])
-
- # if we have only one entry in "files", it must be the full .tar.gz - return it
- if len(the_toc["files"]) == 1:
- return os.path.join(toc_dir, list(the_toc["files"].keys())[0])
-
- # We have multiple chunks.
+ def reassemble(the_toc, toc_dir, result_file):
# reassemble into one file 'next to' the toc and return the resulting full-path
chunk_size = int(the_toc["meta"]["chunk_size"])
offset = 0
@@ -432,6 +434,20 @@ def validate_and_assemble(toc_filename):
# Let the rest of the import process do its thing on the new combined-file.
return result_file
+ def validate_and_assemble(toc_filename):
+ """Validate checksums of, and reassemble, chunks in table-of-contents file."""
+ the_toc = validate_toc(toc_filename)
+ toc_dir = os.path.dirname(toc_filename)
+ result_file = os.path.join(toc_dir, the_toc["meta"]["file"])
+
+ # if we have only one entry in "files", it must be the full .tar.gz.
+ # Return the filename from the meta-section.
+ if len(the_toc["files"]) == 1:
+ return result_file
+
+ # We have multiple chunks. Reassemble them and return the result.
+ return reassemble(the_toc, toc_dir, result_file)
+
if toc:
log.info(_("Validating TOC {}.").format(toc))
path = validate_and_assemble(toc)
| Can't rerun a failed content-import task if it was exported using chunks
**Version**
All?
**Describe the bug**
When importing content that was exported in chunks, the importer process concatenate the chunks into a single file in order to import.
If that import task fails for some reason, after the chunks were already combined into a single file, the user can't simply re-run the same command to retry the import. Satellite will complain that the chunks are missing.
Checking the directory, we can see that the chunks are gone and only the master file is present (together with metadata.json and TOC file).
**To Reproduce**
Steps to Reproduce:
1. Export something using chunks
2. Start the import on another satellite. Monitor the data directory until all the chunk files are gone and only the main file is present. At this moment, kill the pulpcore-worker which is processing the import. Wait until the task returns with error.
3. Repeat the same import command. Pulp will error out with an error like this:
~~~
Feb 27 16:38:56 reproducer-import pulpcore-worker-3[156191]: pulp [6c91f855-9959-43b1-864f-925393ae025a]: pulpcore.tasking.pulpcore_worker:INFO: Task ddf45eb1-adea-480e-9114-748baa7bd7bf failed ([ErrorDetail(string="Missing import-chunks named in table-of-contents: ['export-fc4273a4-6320-4fd5-98c9-6adfe9461781-20230227_2132.tar.gz.0000', 'export-fc4273a4-6320-4fd5-98c9-6adfe9461781-20230227_2132.tar.gz.0001', 'export-fc4273a4-6320-4fd5-98c9-6adfe9461781-20230227_2132.tar.gz.0002'].", code='invalid')])
~~~
Actual results:
Re-running same import fails, complaining about chunks missing. User needs to modify the TOC file manually OR split the master file in chunks again OR copy the files again in order to run the import.
**Expected behavior**
Pulp could be smart enough to identify either all the chunks are present OR the global file file. If the checksum matches, move forward with the import.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2173757
| 2023-05-26T19:19:41 |
||
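The fix makes TOC validation tolerant of a previous failed run: if the chunk list names several files but the assembled archive already exists next to the TOC, the archive is validated against `meta["global_hash"]` and used directly instead of demanding the (already consumed) chunks. A self-contained sketch of that fallback follows; the helper name is hypothetical and sha256 is assumed as the digest algorithm.
```
# Sketch of the fallback: prefer the already-assembled archive when the
# chunks named in the table of contents are gone.
import hashlib
import json
import os


def resolve_import_file(toc_path):
    with open(toc_path) as fp:
        toc = json.load(fp)
    base_dir = os.path.dirname(toc_path)
    combined = os.path.join(base_dir, toc["meta"]["file"])

    if len(toc["files"]) > 1 and os.path.isfile(combined):
        # Chunks were already reassembled by an earlier (failed) import run.
        expected = {toc["meta"]["file"]: toc["meta"]["global_hash"]}
    else:
        expected = toc["files"]

    for name, digest in expected.items():
        with open(os.path.join(base_dir, name), "rb") as fp:
            if hashlib.sha256(fp.read()).hexdigest() != digest:
                raise ValueError(f"checksum mismatch for {name}")
    return combined
```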
pulp/pulpcore | 3,893 | pulp__pulpcore-3893 | [
"3897"
] | 1bf3972b6c9eb30b54866b00e3a7efa26a0a4f39 | diff --git a/pulpcore/app/models/publication.py b/pulpcore/app/models/publication.py
--- a/pulpcore/app/models/publication.py
+++ b/pulpcore/app/models/publication.py
@@ -574,6 +574,17 @@ def content_handler_list_directory(self, rel_path):
"""
return set()
+ def content_headers_for(self, path):
+ """
+ Opportunity for Distribution to specify response-headers for a specific path
+
+ Args:
+ path (str): The path being requested
+ Returns:
+ Empty dictionary, or a dictionary with HTTP Response header/value pairs.
+ """
+ return {}
+
@hook(BEFORE_DELETE)
@hook(
AFTER_UPDATE,
diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -1,5 +1,6 @@
import asyncio
import logging
+from multidict import CIMultiDict
import os
import re
from gettext import gettext as _
@@ -345,20 +346,29 @@ def _permit(request, distribution):
return True
@staticmethod
- def response_headers(path):
+ def response_headers(path, distribution=None):
"""
Get the Content-Type and Encoding-Type headers for the requested `path`.
Args:
path (str): The relative path that was requested.
-
+ distribution(Distribution) : Distribution detail that might want to add headers for path
Returns:
headers (dict): A dictionary of response headers.
"""
+ # headers are case-insensitive
+ headers = CIMultiDict({})
+
+ # Determine a content-type from mime_types and set.
+ # Note: plugin-Distribution can override this.
content_type = mime_types.get_type(path)
- headers = {}
if content_type:
headers["Content-Type"] = content_type
+
+ # Let plugin-Distribution set headers for this path if it wants.
+ if distribution:
+ headers.update(distribution.content_headers_for(path))
+
return headers
@staticmethod
@@ -537,7 +547,7 @@ async def _match_and_stream(self, path, request):
if content_handler_result is not None:
return content_handler_result
- headers = self.response_headers(rel_path)
+ headers = self.response_headers(rel_path, distro)
repository = distro.repository
publication = distro.publication
@@ -569,7 +579,7 @@ async def _match_and_stream(self, path, request):
await publication.published_artifact.aget(relative_path=index_path)
rel_path = index_path
- headers = self.response_headers(rel_path)
+ headers = self.response_headers(rel_path, distro)
except ObjectDoesNotExist:
dir_list, dates, sizes = await self.list_directory(None, publication, rel_path)
dir_list.update(
| Make it possible for plugins to specify headers for content-app responses.
See https://github.com/pulp/pulp_rpm/issues/2947 for the need that drives this.
| 2023-05-31T17:34:23 |
||
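With the new `content_headers_for()` hook, a plugin's detail Distribution can attach response headers for particular paths served by the content app. The sketch below is a hypothetical plugin-side override: only the hook's signature comes from the patch above, while the class, its `TYPE` label, and the `repomd.xml`/`Cache-Control` example are illustrative assumptions (it needs a full plugin app to run).
```
from pulpcore.plugin.models import Distribution


class ExampleDistribution(Distribution):
    TYPE = "example"  # plugin distributions normally declare a type label

    def content_headers_for(self, path):
        # e.g. keep clients from caching frequently regenerated metadata
        if path.endswith("repomd.xml"):
            return {"Cache-Control": "no-cache"}
        return {}
```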
pulp/pulpcore | 3,899 | pulp__pulpcore-3899 | [
"3737"
] | a3924808a83edac55596c21450700e36acffd6db | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -328,6 +328,18 @@ def validate_toc(toc_filename):
raise ValidationError(_("Missing 'files' or 'meta' keys in table-of-contents!"))
base_dir = os.path.dirname(toc_filename)
+
+ # Regardless of what the TOC says, it's possible for a previous import to have
+ # failed after successfully creating the combined file. If the TOC specifies multiple
+ # chunks, but the "expected result" exists, ignore the chunk-list and process as if
+ # it's all there ever was.
+ top_level_file = os.path.join(base_dir, the_toc["meta"]["file"])
+ if len(the_toc["files"]) > 1 and os.path.isfile(top_level_file):
+ the_toc["files"] = {the_toc["meta"]["file"]: the_toc["meta"]["global_hash"]}
+
+ # At this point, we either have the original chunks, or we're validating the
+ # full-file as a single chunk. Validate the hash(es).
+
# Points at chunks that exist?
missing_files = []
for f in sorted(the_toc["files"].keys()):
@@ -362,17 +374,7 @@ def validate_toc(toc_filename):
return the_toc
- def validate_and_assemble(toc_filename):
- """Validate checksums of, and reassemble, chunks in table-of-contents file."""
- the_toc = validate_toc(toc_filename)
- toc_dir = os.path.dirname(toc_filename)
- result_file = os.path.join(toc_dir, the_toc["meta"]["file"])
-
- # if we have only one entry in "files", it must be the full .tar.gz - return it
- if len(the_toc["files"]) == 1:
- return os.path.join(toc_dir, list(the_toc["files"].keys())[0])
-
- # We have multiple chunks.
+ def reassemble(the_toc, toc_dir, result_file):
# reassemble into one file 'next to' the toc and return the resulting full-path
chunk_size = int(the_toc["meta"]["chunk_size"])
offset = 0
@@ -425,6 +427,20 @@ def validate_and_assemble(toc_filename):
# Let the rest of the import process do its thing on the new combined-file.
return result_file
+ def validate_and_assemble(toc_filename):
+ """Validate checksums of, and reassemble, chunks in table-of-contents file."""
+ the_toc = validate_toc(toc_filename)
+ toc_dir = os.path.dirname(toc_filename)
+ result_file = os.path.join(toc_dir, the_toc["meta"]["file"])
+
+ # if we have only one entry in "files", it must be the full .tar.gz.
+ # Return the filename from the meta-section.
+ if len(the_toc["files"]) == 1:
+ return result_file
+
+ # We have multiple chunks. Reassemble them and return the result.
+ return reassemble(the_toc, toc_dir, result_file)
+
if toc:
log.info(_("Validating TOC {}.").format(toc))
path = validate_and_assemble(toc)
| Can't rerun a failed content-import task if it was exported using chunks
**Version**
All?
**Describe the bug**
When importing content that was exported in chunks, the importer process concatenate the chunks into a single file in order to import.
If that import task fails for some reason, after the chunks were already combined into a single file, the user can't simply re-run the same command to retry the import. Satellite will complain that the chunks are missing.
Checking the directory, we can see that the chunks are gone and only the master file is present (together with metadata.json and TOC file).
**To Reproduce**
Steps to Reproduce:
1. Export something using chunks
2. Start the import on another satellite. Monitor the data directory until all the chunk files are gone and only the main file is present. At this moment, kill the pulpcore-worker which is processing the import. Wait until the task returns with error.
3. Repeat the same import command. Pulp will error out with an error like this:
~~~
Feb 27 16:38:56 reproducer-import pulpcore-worker-3[156191]: pulp [6c91f855-9959-43b1-864f-925393ae025a]: pulpcore.tasking.pulpcore_worker:INFO: Task ddf45eb1-adea-480e-9114-748baa7bd7bf failed ([ErrorDetail(string="Missing import-chunks named in table-of-contents: ['export-fc4273a4-6320-4fd5-98c9-6adfe9461781-20230227_2132.tar.gz.0000', 'export-fc4273a4-6320-4fd5-98c9-6adfe9461781-20230227_2132.tar.gz.0001', 'export-fc4273a4-6320-4fd5-98c9-6adfe9461781-20230227_2132.tar.gz.0002'].", code='invalid')])
~~~
Actual results:
Re-running same import fails, complaining about chunks missing. User needs to modify the TOC file manually OR split the master file in chunks again OR copy the files again in order to run the import.
**Expected behavior**
Pulp could be smart enough to identify either all the chunks are present OR the global file file. If the checksum matches, move forward with the import.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2173757
| 2023-06-05T07:59:39 |
||
pulp/pulpcore | 3,900 | pulp__pulpcore-3900 | [
"3737"
] | 0c4fe85054214ebfe08e114029c429c08e19d3a9 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -335,6 +335,18 @@ def validate_toc(toc_filename):
raise ValidationError(_("Missing 'files' or 'meta' keys in table-of-contents!"))
base_dir = os.path.dirname(toc_filename)
+
+ # Regardless of what the TOC says, it's possible for a previous import to have
+ # failed after successfully creating the combined file. If the TOC specifies multiple
+ # chunks, but the "expected result" exists, ignore the chunk-list and process as if
+ # it's all there ever was.
+ top_level_file = os.path.join(base_dir, the_toc["meta"]["file"])
+ if len(the_toc["files"]) > 1 and os.path.isfile(top_level_file):
+ the_toc["files"] = {the_toc["meta"]["file"]: the_toc["meta"]["global_hash"]}
+
+ # At this point, we either have the original chunks, or we're validating the
+ # full-file as a single chunk. Validate the hash(es).
+
# Points at chunks that exist?
missing_files = []
for f in sorted(the_toc["files"].keys()):
@@ -369,17 +381,7 @@ def validate_toc(toc_filename):
return the_toc
- def validate_and_assemble(toc_filename):
- """Validate checksums of, and reassemble, chunks in table-of-contents file."""
- the_toc = validate_toc(toc_filename)
- toc_dir = os.path.dirname(toc_filename)
- result_file = os.path.join(toc_dir, the_toc["meta"]["file"])
-
- # if we have only one entry in "files", it must be the full .tar.gz - return it
- if len(the_toc["files"]) == 1:
- return os.path.join(toc_dir, list(the_toc["files"].keys())[0])
-
- # We have multiple chunks.
+ def reassemble(the_toc, toc_dir, result_file):
# reassemble into one file 'next to' the toc and return the resulting full-path
chunk_size = int(the_toc["meta"]["chunk_size"])
offset = 0
@@ -432,6 +434,20 @@ def validate_and_assemble(toc_filename):
# Let the rest of the import process do its thing on the new combined-file.
return result_file
+ def validate_and_assemble(toc_filename):
+ """Validate checksums of, and reassemble, chunks in table-of-contents file."""
+ the_toc = validate_toc(toc_filename)
+ toc_dir = os.path.dirname(toc_filename)
+ result_file = os.path.join(toc_dir, the_toc["meta"]["file"])
+
+ # if we have only one entry in "files", it must be the full .tar.gz.
+ # Return the filename from the meta-section.
+ if len(the_toc["files"]) == 1:
+ return result_file
+
+ # We have multiple chunks. Reassemble them and return the result.
+ return reassemble(the_toc, toc_dir, result_file)
+
if toc:
log.info(_("Validating TOC {}.").format(toc))
path = validate_and_assemble(toc)
| Can't rerun a failed content-import task if it was exported using chunks
**Version**
All?
**Describe the bug**
When importing content that was exported in chunks, the importer process concatenate the chunks into a single file in order to import.
If that import task fails for some reason, after the chunks were already combined into a single file, the user can't simply re-run the same command to retry the import. Satellite will complain that the chunks are missing.
Checking the directory, we can see that the chunks are gone and only the master file is present (together with metadata.json and TOC file).
**To Reproduce**
Steps to Reproduce:
1. Export something using chunks
2. Start the import on another satellite. Monitor the data directory until all the chunk files are gone and only the main file is present. At this moment, kill the pulpcore-worker which is processing the import. Wait until the task returns with error.
3. Repeat the same import command. Pulp will error out with an error like this:
~~~
Feb 27 16:38:56 reproducer-import pulpcore-worker-3[156191]: pulp [6c91f855-9959-43b1-864f-925393ae025a]: pulpcore.tasking.pulpcore_worker:INFO: Task ddf45eb1-adea-480e-9114-748baa7bd7bf failed ([ErrorDetail(string="Missing import-chunks named in table-of-contents: ['export-fc4273a4-6320-4fd5-98c9-6adfe9461781-20230227_2132.tar.gz.0000', 'export-fc4273a4-6320-4fd5-98c9-6adfe9461781-20230227_2132.tar.gz.0001', 'export-fc4273a4-6320-4fd5-98c9-6adfe9461781-20230227_2132.tar.gz.0002'].", code='invalid')])
~~~
Actual results:
Re-running same import fails, complaining about chunks missing. User needs to modify the TOC file manually OR split the master file in chunks again OR copy the files again in order to run the import.
**Expected behavior**
Pulp could be smart enough to identify either all the chunks are present OR the global file file. If the checksum matches, move forward with the import.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2173757
| 2023-06-05T07:59:54 |
||
pulp/pulpcore | 3,901 | pulp__pulpcore-3901 | [
"3737"
] | 461527320a9a98fa3fb0cd052748972ce2606eb8 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -335,6 +335,18 @@ def validate_toc(toc_filename):
raise ValidationError(_("Missing 'files' or 'meta' keys in table-of-contents!"))
base_dir = os.path.dirname(toc_filename)
+
+ # Regardless of what the TOC says, it's possible for a previous import to have
+ # failed after successfully creating the combined file. If the TOC specifies multiple
+ # chunks, but the "expected result" exists, ignore the chunk-list and process as if
+ # it's all there ever was.
+ top_level_file = os.path.join(base_dir, the_toc["meta"]["file"])
+ if len(the_toc["files"]) > 1 and os.path.isfile(top_level_file):
+ the_toc["files"] = {the_toc["meta"]["file"]: the_toc["meta"]["global_hash"]}
+
+ # At this point, we either have the original chunks, or we're validating the
+ # full-file as a single chunk. Validate the hash(es).
+
# Points at chunks that exist?
missing_files = []
for f in sorted(the_toc["files"].keys()):
@@ -369,17 +381,7 @@ def validate_toc(toc_filename):
return the_toc
- def validate_and_assemble(toc_filename):
- """Validate checksums of, and reassemble, chunks in table-of-contents file."""
- the_toc = validate_toc(toc_filename)
- toc_dir = os.path.dirname(toc_filename)
- result_file = os.path.join(toc_dir, the_toc["meta"]["file"])
-
- # if we have only one entry in "files", it must be the full .tar.gz - return it
- if len(the_toc["files"]) == 1:
- return os.path.join(toc_dir, list(the_toc["files"].keys())[0])
-
- # We have multiple chunks.
+ def reassemble(the_toc, toc_dir, result_file):
# reassemble into one file 'next to' the toc and return the resulting full-path
chunk_size = int(the_toc["meta"]["chunk_size"])
offset = 0
@@ -432,6 +434,20 @@ def validate_and_assemble(toc_filename):
# Let the rest of the import process do its thing on the new combined-file.
return result_file
+ def validate_and_assemble(toc_filename):
+ """Validate checksums of, and reassemble, chunks in table-of-contents file."""
+ the_toc = validate_toc(toc_filename)
+ toc_dir = os.path.dirname(toc_filename)
+ result_file = os.path.join(toc_dir, the_toc["meta"]["file"])
+
+ # if we have only one entry in "files", it must be the full .tar.gz.
+ # Return the filename from the meta-section.
+ if len(the_toc["files"]) == 1:
+ return result_file
+
+ # We have multiple chunks. Reassemble them and return the result.
+ return reassemble(the_toc, toc_dir, result_file)
+
if toc:
log.info(_("Validating TOC {}.").format(toc))
path = validate_and_assemble(toc)
| Can't rerun a failed content-import task if it was exported using chunks
**Version**
All?
**Describe the bug**
When importing content that was exported in chunks, the importer process concatenates the chunks into a single file in order to import them.
If that import task fails for some reason, after the chunks were already combined into a single file, the user can't simply re-run the same command to retry the import. Satellite will complain that the chunks are missing.
Checking the directory, we can see that the chunks are gone and only the master file is present (together with metadata.json and TOC file).
**To Reproduce**
Steps to Reproduce:
1. Export something using chunks
2. Start the import on another satellite. Monitor the data directory until all the chunk files are gone and only the main file is present. At this moment, kill the pulpcore-worker which is processing the import. Wait until the task returns with error.
3. Repeat the same import command. Pulp will error out with an error like this:
~~~
Feb 27 16:38:56 reproducer-import pulpcore-worker-3[156191]: pulp [6c91f855-9959-43b1-864f-925393ae025a]: pulpcore.tasking.pulpcore_worker:INFO: Task ddf45eb1-adea-480e-9114-748baa7bd7bf failed ([ErrorDetail(string="Missing import-chunks named in table-of-contents: ['export-fc4273a4-6320-4fd5-98c9-6adfe9461781-20230227_2132.tar.gz.0000', 'export-fc4273a4-6320-4fd5-98c9-6adfe9461781-20230227_2132.tar.gz.0001', 'export-fc4273a4-6320-4fd5-98c9-6adfe9461781-20230227_2132.tar.gz.0002'].", code='invalid')])
~~~
Actual results:
Re-running the same import fails, complaining about missing chunks. The user needs to modify the TOC file manually, split the master file into chunks again, or copy the files again in order to run the import.
**Expected behavior**
Pulp could be smart enough to detect whether all the chunks are present or only the combined global file. If the checksum matches, it should move forward with the import.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2173757
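A hypothetical manual workaround (essentially what the patch above now does automatically) is to rewrite the TOC so that its `files` map points at the already-combined file with the recorded global hash; the file name below is made up:
```python
import json

toc_path = "export-1234-toc.json"  # illustrative path

with open(toc_path) as fp:
    toc = json.load(fp)

# Point the chunk list at the single combined file instead of the missing chunks.
toc["files"] = {toc["meta"]["file"]: toc["meta"]["global_hash"]}

with open(toc_path, "w") as fp:
    json.dump(toc, fp)
```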
| 2023-06-05T08:00:07 |
||
pulp/pulpcore | 3,902 | pulp__pulpcore-3902 | [
"3737"
] | c723281c9d94c3ace17c8e537068e35a66b67927 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -335,6 +335,18 @@ def validate_toc(toc_filename):
raise ValidationError(_("Missing 'files' or 'meta' keys in table-of-contents!"))
base_dir = os.path.dirname(toc_filename)
+
+ # Regardless of what the TOC says, it's possible for a previous import to have
+ # failed after successfully creating the combined file. If the TOC specifies multiple
+ # chunks, but the "expected result" exists, ignore the chunk-list and process as if
+ # it's all there ever was.
+ top_level_file = os.path.join(base_dir, the_toc["meta"]["file"])
+ if len(the_toc["files"]) > 1 and os.path.isfile(top_level_file):
+ the_toc["files"] = {the_toc["meta"]["file"]: the_toc["meta"]["global_hash"]}
+
+ # At this point, we either have the original chunks, or we're validating the
+ # full-file as a single chunk. Validate the hash(es).
+
# Points at chunks that exist?
missing_files = []
for f in sorted(the_toc["files"].keys()):
@@ -369,17 +381,7 @@ def validate_toc(toc_filename):
return the_toc
- def validate_and_assemble(toc_filename):
- """Validate checksums of, and reassemble, chunks in table-of-contents file."""
- the_toc = validate_toc(toc_filename)
- toc_dir = os.path.dirname(toc_filename)
- result_file = os.path.join(toc_dir, the_toc["meta"]["file"])
-
- # if we have only one entry in "files", it must be the full .tar.gz - return it
- if len(the_toc["files"]) == 1:
- return os.path.join(toc_dir, list(the_toc["files"].keys())[0])
-
- # We have multiple chunks.
+ def reassemble(the_toc, toc_dir, result_file):
# reassemble into one file 'next to' the toc and return the resulting full-path
chunk_size = int(the_toc["meta"]["chunk_size"])
offset = 0
@@ -432,6 +434,20 @@ def validate_and_assemble(toc_filename):
# Let the rest of the import process do its thing on the new combined-file.
return result_file
+ def validate_and_assemble(toc_filename):
+ """Validate checksums of, and reassemble, chunks in table-of-contents file."""
+ the_toc = validate_toc(toc_filename)
+ toc_dir = os.path.dirname(toc_filename)
+ result_file = os.path.join(toc_dir, the_toc["meta"]["file"])
+
+ # if we have only one entry in "files", it must be the full .tar.gz.
+ # Return the filename from the meta-section.
+ if len(the_toc["files"]) == 1:
+ return result_file
+
+ # We have multiple chunks. Reassemble them and return the result.
+ return reassemble(the_toc, toc_dir, result_file)
+
if toc:
log.info(_("Validating TOC {}.").format(toc))
path = validate_and_assemble(toc)
| Can't rerun a failed content-import task if it was exported using chunks
**Version**
All?
**Describe the bug**
When importing content that was exported in chunks, the importer process concatenates the chunks into a single file in order to import them.
If that import task fails for some reason, after the chunks were already combined into a single file, the user can't simply re-run the same command to retry the import. Satellite will complain that the chunks are missing.
Checking the directory, we can see that the chunks are gone and only the master file is present (together with metadata.json and TOC file).
**To Reproduce**
Steps to Reproduce:
1. Export something using chunks
2. Start the import on another satellite. Monitor the data directory until all the chunk files are gone and only the main file is present. At this moment, kill the pulpcore-worker which is processing the import. Wait until the task returns with error.
3. Repeat the same import command. Pulp will error out with an error like this:
~~~
Feb 27 16:38:56 reproducer-import pulpcore-worker-3[156191]: pulp [6c91f855-9959-43b1-864f-925393ae025a]: pulpcore.tasking.pulpcore_worker:INFO: Task ddf45eb1-adea-480e-9114-748baa7bd7bf failed ([ErrorDetail(string="Missing import-chunks named in table-of-contents: ['export-fc4273a4-6320-4fd5-98c9-6adfe9461781-20230227_2132.tar.gz.0000', 'export-fc4273a4-6320-4fd5-98c9-6adfe9461781-20230227_2132.tar.gz.0001', 'export-fc4273a4-6320-4fd5-98c9-6adfe9461781-20230227_2132.tar.gz.0002'].", code='invalid')])
~~~
Actual results:
Re-running the same import fails, complaining about missing chunks. The user needs to modify the TOC file manually, split the master file into chunks again, or copy the files again in order to run the import.
**Expected behavior**
Pulp could be smart enough to detect whether all the chunks are present or only the combined global file. If the checksum matches, it should move forward with the import.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2173757
| 2023-06-05T08:19:03 |
||
pulp/pulpcore | 3,903 | pulp__pulpcore-3903 | [
"3737"
] | 898023018031a609b5f1d9df1c9dd03a78fa673b | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -335,6 +335,18 @@ def validate_toc(toc_filename):
raise ValidationError(_("Missing 'files' or 'meta' keys in table-of-contents!"))
base_dir = os.path.dirname(toc_filename)
+
+ # Regardless of what the TOC says, it's possible for a previous import to have
+ # failed after successfully creating the combined file. If the TOC specifies multiple
+ # chunks, but the "expected result" exists, ignore the chunk-list and process as if
+ # it's all there ever was.
+ top_level_file = os.path.join(base_dir, the_toc["meta"]["file"])
+ if len(the_toc["files"]) > 1 and os.path.isfile(top_level_file):
+ the_toc["files"] = {the_toc["meta"]["file"]: the_toc["meta"]["global_hash"]}
+
+ # At this point, we either have the original chunks, or we're validating the
+ # full-file as a single chunk. Validate the hash(es).
+
# Points at chunks that exist?
missing_files = []
for f in sorted(the_toc["files"].keys()):
@@ -369,17 +381,7 @@ def validate_toc(toc_filename):
return the_toc
- def validate_and_assemble(toc_filename):
- """Validate checksums of, and reassemble, chunks in table-of-contents file."""
- the_toc = validate_toc(toc_filename)
- toc_dir = os.path.dirname(toc_filename)
- result_file = os.path.join(toc_dir, the_toc["meta"]["file"])
-
- # if we have only one entry in "files", it must be the full .tar.gz - return it
- if len(the_toc["files"]) == 1:
- return os.path.join(toc_dir, list(the_toc["files"].keys())[0])
-
- # We have multiple chunks.
+ def reassemble(the_toc, toc_dir, result_file):
# reassemble into one file 'next to' the toc and return the resulting full-path
chunk_size = int(the_toc["meta"]["chunk_size"])
offset = 0
@@ -432,6 +434,20 @@ def validate_and_assemble(toc_filename):
# Let the rest of the import process do its thing on the new combined-file.
return result_file
+ def validate_and_assemble(toc_filename):
+ """Validate checksums of, and reassemble, chunks in table-of-contents file."""
+ the_toc = validate_toc(toc_filename)
+ toc_dir = os.path.dirname(toc_filename)
+ result_file = os.path.join(toc_dir, the_toc["meta"]["file"])
+
+ # if we have only one entry in "files", it must be the full .tar.gz.
+ # Return the filename from the meta-section.
+ if len(the_toc["files"]) == 1:
+ return result_file
+
+ # We have multiple chunks. Reassemble them and return the result.
+ return reassemble(the_toc, toc_dir, result_file)
+
if toc:
log.info(_("Validating TOC {}.").format(toc))
path = validate_and_assemble(toc)
| Can't rerun a failed content-import task if it was exported using chunks
**Version**
All?
**Describe the bug**
When importing content that was exported in chunks, the importer process concatenates the chunks into a single file in order to import them.
If that import task fails for some reason, after the chunks were already combined into a single file, the user can't simply re-run the same command to retry the import. Satellite will complain that the chunks are missing.
Checking the directory, we can see that the chunks are gone and only the master file is present (together with metadata.json and TOC file).
**To Reproduce**
Steps to Reproduce:
1. Export something using chunks
2. Start the import on another satellite. Monitor the data directory until all the chunk files are gone and only the main file is present. At this moment, kill the pulpcore-worker which is processing the import. Wait until the task returns with error.
3. Repeat the same import command. Pulp will error out with an error like this:
~~~
Feb 27 16:38:56 reproducer-import pulpcore-worker-3[156191]: pulp [6c91f855-9959-43b1-864f-925393ae025a]: pulpcore.tasking.pulpcore_worker:INFO: Task ddf45eb1-adea-480e-9114-748baa7bd7bf failed ([ErrorDetail(string="Missing import-chunks named in table-of-contents: ['export-fc4273a4-6320-4fd5-98c9-6adfe9461781-20230227_2132.tar.gz.0000', 'export-fc4273a4-6320-4fd5-98c9-6adfe9461781-20230227_2132.tar.gz.0001', 'export-fc4273a4-6320-4fd5-98c9-6adfe9461781-20230227_2132.tar.gz.0002'].", code='invalid')])
~~~
Actual results:
Re-running the same import fails, complaining about missing chunks. The user needs to modify the TOC file manually, split the master file into chunks again, or copy the files again in order to run the import.
**Expected behavior**
Pulp could be smart enough to detect whether all the chunks are present or only the combined global file. If the checksum matches, it should move forward with the import.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2173757
| 2023-06-05T08:20:33 |
||
pulp/pulpcore | 3,904 | pulp__pulpcore-3904 | [
"3898"
] | 596a72e4a12541304dcb8e23f7e1d3c5d8a58390 | diff --git a/pulpcore/app/models/base.py b/pulpcore/app/models/base.py
--- a/pulpcore/app/models/base.py
+++ b/pulpcore/app/models/base.py
@@ -197,18 +197,7 @@ def master(self):
def __str__(self):
# similar to Model's __str__, but type-aware
- cast = self.cast()
- if cast is self:
- return super().__str__()
-
- try:
- return "<{} (pulp_type={}): {}>".format(
- self._meta.object_name, cast.pulp_type, cast.name
- )
- except AttributeError:
- return "<{} (pulp_type={}): pk={}>".format(
- self._meta.object_name, cast.pulp_type, cast.pk
- )
+ return "<{} (pulp_type={}): pk={}>".format(self._meta.object_name, self.pulp_type, self.pk)
# Add properties to model _meta info to support master/detail models
| An import of resources fails because of an unsaved casted object
Reproducible in pulp_container:
> src/pulp_container/pulp_container/tests/functional/api/test_pulpimportexport.py::test_import_export_create_repositories
After merging the commit https://github.com/pulp/pulpcore/commit/977a04dbe32d8004629713ffe3121702662bc640, we started to experience runtime errors when importing repositories which were not created in advance.
```
pulp [0c7ff191-3103-4829-b68b-e88c40ed1d7e]: pulpcore.app.tasks.importer:INFO: ...Importing resource BlobResource.
pulp [0c7ff191-3103-4829-b68b-e88c40ed1d7e]: pulpcore.app.tasks.importer:INFO: Importing file ./tmpkyf64nd0/repository-18fa8187-5f8f-4731-a486-b997a495ddcc_1/pulp_container.app.modelresource.ManifestResource.json.
pulp [0c7ff191-3103-4829-b68b-e88c40ed1d7e]: pulpcore.app.tasks.importer:INFO: ...Importing resource ManifestResource.
pulp [0c7ff191-3103-4829-b68b-e88c40ed1d7e]: pulpcore.app.tasks.importer:INFO: ...1 import-errors encountered importing ./tmpkyf64nd0/repository-18fa8187-5f8f-4731-a486-b997a495ddcc_1/pulp_container.app.modelresource.ManifestResource.json, attempt 2, retrying
pulp [0c7ff191-3103-4829-b68b-e88c40ed1d7e]: pulpcore.app.tasks.importer:INFO: ...1 import-errors encountered importing ./tmpkyf64nd0/repository-18fa8187-5f8f-4731-a486-b997a495ddcc_1/pulp_container.app.modelresource.ManifestResource.json, attempt 3, retrying
pulp [0c7ff191-3103-4829-b68b-e88c40ed1d7e]: pulpcore.app.tasks.importer:ERROR: FATAL import-failure importing ./tmpkyf64nd0/repository-18fa8187-5f8f-4731-a486-b997a495ddcc_1/pulp_container.app.modelresource.ManifestResource.json
pulp [0c7ff191-3103-4829-b68b-e88c40ed1d7e]: pulpcore.tasking.tasks:INFO: Task 01887a16-9b4b-7745-a946-19e15dcd84f6 failed (None)
pulp [0c7ff191-3103-4829-b68b-e88c40ed1d7e]: pulpcore.tasking.tasks:INFO: File "/usr/local/lib/python3.8/site-packages/pulpcore/tasking/tasks.py", line 66, in _execute_task
result = func(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/pulpcore/app/tasks/importer.py", line 236, in import_repository_version
for a_result in _import_file(os.path.join(rv_path, filename), res_class, retry=True):
File "/usr/local/lib/python3.8/site-packages/pulpcore/app/tasks/importer.py", line 138, in _import_file
a_result = resource.import_data(data, raise_errors=True)
File "/usr/local/lib/python3.8/site-packages/import_export/resources.py", line 813, in import_data
result = self.import_data_inner(
File "/usr/local/lib/python3.8/site-packages/import_export/resources.py", line 882, in import_data_inner
raise row_result.errors[-1].error
File "/usr/local/lib/python3.8/site-packages/import_export/resources.py", line 722, in import_row
diff = self.get_diff_class()(self, original, new)
File "/usr/local/lib/python3.8/site-packages/import_export/resources.py", line 237, in __init__
self.left = self._export_resource_fields(resource, instance)
File "/usr/local/lib/python3.8/site-packages/import_export/resources.py", line 258, in _export_resource_fields
return [resource.export_field(f, instance) if instance else "" for f in resource.get_user_visible_fields()]
File "/usr/local/lib/python3.8/site-packages/import_export/resources.py", line 258, in <listcomp>
return [resource.export_field(f, instance) if instance else "" for f in resource.get_user_visible_fields()]
File "/usr/local/lib/python3.8/site-packages/import_export/resources.py", line 931, in export_field
return field.export(obj)
File "/usr/local/lib/python3.8/site-packages/import_export/fields.py", line 134, in export
value = self.get_value(obj)
File "/usr/local/lib/python3.8/site-packages/import_export/fields.py", line 96, in get_value
value = getattr(value, attr, None)
File "/usr/local/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py", line 617, in __get__
return self.related_manager_cls(instance)
File "/usr/local/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py", line [1023](https://github.com/pulp/pulp_container/actions/runs/5151407496/jobs/9276547218#step:18:1024), in __init__
'"%r" needs to have a value for field "%s" before '
File "/usr/local/lib/python3.8/site-packages/pulpcore/app/models/base.py", line 65, in __repr__
return str(self)
File "/usr/local/lib/python3.8/site-packages/pulpcore/app/models/base.py", line 200, in __str__
cast = self.cast()
File "/usr/local/lib/python3.8/site-packages/pulpcore/app/models/base.py", line 169, in cast
result = self._pulp_model_map[self.pulp_type].objects.get(pk=self.pk)
```
Trapped logging:
```
def cast(self):
"""Return the "Detail" model instance of this master-detail object.
If this is already an instance of its detail type, it will return itself.
"""
if self.pulp_type == self.get_pulp_type():
return self
try:
result = self._pulp_model_map[self.pulp_type].objects.get(pk=self.pk)
except Exception as e:
raise RuntimeError("eeeeeeeeeeeeeeee: " + str(e) + f": {self.pulp_type}, {self.pk}, {self._pulp_model_map} \n {self.get_pulp_type()}")
pulp [c5a37660-a5f1-453e-ba96-a4ce6d01b4d9]: pulpcore.tasking.tasks:INFO: Task 01887c8b-78ce-77b4-921f-4d293330124c failed (eeeeeeeeeeeeeeee: None: None, None, {'core.content': <class 'pulpcore.app.models.content.Content'>, 'core.publishedmetadata': <class 'pulpcore.app.models.publication.PublishedMetadata'>, 'container.blob': <class 'pulp_container.app.models.Blob'>, 'container.manifest': <class 'pulp_container.app.models.Manifest'>, 'container.tag': <class 'pulp_container.app.models.Tag'>, 'container.signature': <class 'pulp_container.app.models.ManifestSignature'>}
container.manifest)
```
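The failure mode reduces to a tiny standalone sketch (no Django involved; every name below is made up): an exception raised while `%r` is being formatted replaces the original `ValueError`, which is why the reported error ends at `cast()` instead of at the import itself.
```python
class UnsavedContent:
    pulp_type = None  # not filled in yet, like content that has not been saved

    def __repr__(self):
        return str(self)

    def __str__(self):
        # stand-in for cast(): a lookup keyed on pulp_type, which is still None
        return {"container.manifest": "Manifest"}[self.pulp_type]

try:
    raise ValueError('"%r" needs to have a value for its pk first' % UnsavedContent())
except KeyError as masked:
    print("the ValueError never surfaces; we only see:", repr(masked))
```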
| The problem is that the content is not yet present in the database, and the import fails on this `ValueError` because the `%r` placeholder invokes the `__str__` method, which calls `cast()` while `self.pulp_type` is still `None`:
```
# file: /usr/local/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py
self.related_val = self.source_field.get_foreign_related_value(instance)
if None in self.related_val:
#++ raise IOError(f"{self.model} ++: {self.query_field_name} :++ {self.pk_field_names}") -----------> (<class 'pulp_container.app.models.Blob'> ++: container_manifest :++ {'manifest': 'content_ptr'})
raise ValueError(
'"%r" needs to have a value for field "%s" before '
"this many-to-many relationship can be used."
% (instance, self.pk_field_names[self.source_field_name])
)
```
EDIT: we can try to create manifest-blob pairs without direct object references but using "PK placeholders". Not sure if it is worth the effort if everything fails just on the `__str__` method which invokes `cast()` that does not handle a situation where model instances are not saved. Moving to pulpcore.
We should not expect that the instance exists (when importing related content) and act based on that accordingly.
https://github.com/pulp/pulpcore/blob/456ebe447d686b77fadcfb9ee57462f692df8226/pulpcore/app/models/base.py#L198
#### self.pulp_type = None; self.get_pulp_type == container.manifest
https://github.com/pulp/pulpcore/blob/456ebe447d686b77fadcfb9ee57462f692df8226/pulpcore/app/models/base.py#L167
I suggest modifying `MasterModel.__str__`, e.g., like this:
```diff
def __str__(self):
# similar to Model's __str__, but type-aware
+ if self.pulp_type is None:
+ return super().__str__()
+
cast = self.cast()
if cast is self:
return super().__str__()
try:
return "<{} (pulp_type={}): {}>".format(
self._meta.object_name, cast.pulp_type, cast.name
)
except AttributeError:
return "<{} (pulp_type={}): pk={}>".format(
self._meta.object_name, cast.pulp_type, cast.pk
)
```
I'd say it is an error to call cast in `__str__` at all. I would want to reduce it to:
```python
def __str__(self):
# similar to Model's __str__, but type-aware
return "<{} (pulp_type={}): pk={}>".format(
self._meta.object_name, self.pulp_type, self.pk
)
```
And subclasses are free to overwrite it, so it looks pretty after casting. (Assuming that the name of a subclass makes for a good key was also a big mistake.)
Why does export/import call str on models in the first place?
Well, the Django internals for the related-objects lookup contain code that may raise an exception embedding the instance's `__repr__`. The exception can be raised when importing content that does not exist yet in the database: https://github.com/pulp/pulpcore/issues/3898#issuecomment-1575611847 | 2023-06-05T08:47:06 |
|
pulp/pulpcore | 3,906 | pulp__pulpcore-3906 | [
"3898"
] | 04851b4ecee60eb03973c15d87883e186e501af6 | diff --git a/pulpcore/app/models/base.py b/pulpcore/app/models/base.py
--- a/pulpcore/app/models/base.py
+++ b/pulpcore/app/models/base.py
@@ -197,18 +197,7 @@ def master(self):
def __str__(self):
# similar to Model's __str__, but type-aware
- cast = self.cast()
- if cast is self:
- return super().__str__()
-
- try:
- return "<{} (pulp_type={}): {}>".format(
- self._meta.object_name, cast.pulp_type, cast.name
- )
- except AttributeError:
- return "<{} (pulp_type={}): pk={}>".format(
- self._meta.object_name, cast.pulp_type, cast.pk
- )
+ return "<{} (pulp_type={}): pk={}>".format(self._meta.object_name, self.pulp_type, self.pk)
# Add properties to model _meta info to support master/detail models
| An import of resources fails because of an unsaved casted object
Reproducible in pulp_container:
> src/pulp_container/pulp_container/tests/functional/api/test_pulpimportexport.py::test_import_export_create_repositories
After merging the commit https://github.com/pulp/pulpcore/commit/977a04dbe32d8004629713ffe3121702662bc640, we started to experience runtime errors when importing repositories which were not created in advance.
```
pulp [0c7ff191-3103-4829-b68b-e88c40ed1d7e]: pulpcore.app.tasks.importer:INFO: ...Importing resource BlobResource.
pulp [0c7ff191-3103-4829-b68b-e88c40ed1d7e]: pulpcore.app.tasks.importer:INFO: Importing file ./tmpkyf64nd0/repository-18fa8187-5f8f-4731-a486-b997a495ddcc_1/pulp_container.app.modelresource.ManifestResource.json.
pulp [0c7ff191-3103-4829-b68b-e88c40ed1d7e]: pulpcore.app.tasks.importer:INFO: ...Importing resource ManifestResource.
pulp [0c7ff191-3103-4829-b68b-e88c40ed1d7e]: pulpcore.app.tasks.importer:INFO: ...1 import-errors encountered importing ./tmpkyf64nd0/repository-18fa8187-5f8f-4731-a486-b997a495ddcc_1/pulp_container.app.modelresource.ManifestResource.json, attempt 2, retrying
pulp [0c7ff191-3103-4829-b68b-e88c40ed1d7e]: pulpcore.app.tasks.importer:INFO: ...1 import-errors encountered importing ./tmpkyf64nd0/repository-18fa8187-5f8f-4731-a486-b997a495ddcc_1/pulp_container.app.modelresource.ManifestResource.json, attempt 3, retrying
pulp [0c7ff191-3103-4829-b68b-e88c40ed1d7e]: pulpcore.app.tasks.importer:ERROR: FATAL import-failure importing ./tmpkyf64nd0/repository-18fa8187-5f8f-4731-a486-b997a495ddcc_1/pulp_container.app.modelresource.ManifestResource.json
pulp [0c7ff191-3103-4829-b68b-e88c40ed1d7e]: pulpcore.tasking.tasks:INFO: Task 01887a16-9b4b-7745-a946-19e15dcd84f6 failed (None)
pulp [0c7ff191-3103-4829-b68b-e88c40ed1d7e]: pulpcore.tasking.tasks:INFO: File "/usr/local/lib/python3.8/site-packages/pulpcore/tasking/tasks.py", line 66, in _execute_task
result = func(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/pulpcore/app/tasks/importer.py", line 236, in import_repository_version
for a_result in _import_file(os.path.join(rv_path, filename), res_class, retry=True):
File "/usr/local/lib/python3.8/site-packages/pulpcore/app/tasks/importer.py", line 138, in _import_file
a_result = resource.import_data(data, raise_errors=True)
File "/usr/local/lib/python3.8/site-packages/import_export/resources.py", line 813, in import_data
result = self.import_data_inner(
File "/usr/local/lib/python3.8/site-packages/import_export/resources.py", line 882, in import_data_inner
raise row_result.errors[-1].error
File "/usr/local/lib/python3.8/site-packages/import_export/resources.py", line 722, in import_row
diff = self.get_diff_class()(self, original, new)
File "/usr/local/lib/python3.8/site-packages/import_export/resources.py", line 237, in __init__
self.left = self._export_resource_fields(resource, instance)
File "/usr/local/lib/python3.8/site-packages/import_export/resources.py", line 258, in _export_resource_fields
return [resource.export_field(f, instance) if instance else "" for f in resource.get_user_visible_fields()]
File "/usr/local/lib/python3.8/site-packages/import_export/resources.py", line 258, in <listcomp>
return [resource.export_field(f, instance) if instance else "" for f in resource.get_user_visible_fields()]
File "/usr/local/lib/python3.8/site-packages/import_export/resources.py", line 931, in export_field
return field.export(obj)
File "/usr/local/lib/python3.8/site-packages/import_export/fields.py", line 134, in export
value = self.get_value(obj)
File "/usr/local/lib/python3.8/site-packages/import_export/fields.py", line 96, in get_value
value = getattr(value, attr, None)
File "/usr/local/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py", line 617, in __get__
return self.related_manager_cls(instance)
File "/usr/local/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py", line [1023](https://github.com/pulp/pulp_container/actions/runs/5151407496/jobs/9276547218#step:18:1024), in __init__
'"%r" needs to have a value for field "%s" before '
File "/usr/local/lib/python3.8/site-packages/pulpcore/app/models/base.py", line 65, in __repr__
return str(self)
File "/usr/local/lib/python3.8/site-packages/pulpcore/app/models/base.py", line 200, in __str__
cast = self.cast()
File "/usr/local/lib/python3.8/site-packages/pulpcore/app/models/base.py", line 169, in cast
result = self._pulp_model_map[self.pulp_type].objects.get(pk=self.pk)
```
Trapped logging:
```
def cast(self):
"""Return the "Detail" model instance of this master-detail object.
If this is already an instance of its detail type, it will return itself.
"""
if self.pulp_type == self.get_pulp_type():
return self
try:
result = self._pulp_model_map[self.pulp_type].objects.get(pk=self.pk)
except Exception as e:
raise RuntimeError("eeeeeeeeeeeeeeee: " + str(e) + f": {self.pulp_type}, {self.pk}, {self._pulp_model_map} \n {self.get_pulp_type()}")
pulp [c5a37660-a5f1-453e-ba96-a4ce6d01b4d9]: pulpcore.tasking.tasks:INFO: Task 01887c8b-78ce-77b4-921f-4d293330124c failed (eeeeeeeeeeeeeeee: None: None, None, {'core.content': <class 'pulpcore.app.models.content.Content'>, 'core.publishedmetadata': <class 'pulpcore.app.models.publication.PublishedMetadata'>, 'container.blob': <class 'pulp_container.app.models.Blob'>, 'container.manifest': <class 'pulp_container.app.models.Manifest'>, 'container.tag': <class 'pulp_container.app.models.Tag'>, 'container.signature': <class 'pulp_container.app.models.ManifestSignature'>}
container.manifest)
```
| The problem is that the content is not yet present in the database, and the import fails on this `ValueError` because the `%r` placeholder invokes the `__str__` method, which calls `cast()` while `self.pulp_type` is still `None`:
```
# file: /usr/local/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py
self.related_val = self.source_field.get_foreign_related_value(instance)
if None in self.related_val:
#++ raise IOError(f"{self.model} ++: {self.query_field_name} :++ {self.pk_field_names}") -----------> (<class 'pulp_container.app.models.Blob'> ++: container_manifest :++ {'manifest': 'content_ptr'})
raise ValueError(
'"%r" needs to have a value for field "%s" before '
"this many-to-many relationship can be used."
% (instance, self.pk_field_names[self.source_field_name])
)
```
EDIT: we can try to create manifest-blob pairs without direct object references but using "PK placeholders". Not sure if it is worth the effort if everything fails just on the `__str__` method which invokes `cast()` that does not handle a situation where model instances are not saved. Moving to pulpcore.
We should not expect that the instance exists (when importing related content) and act based on that accordingly.
https://github.com/pulp/pulpcore/blob/456ebe447d686b77fadcfb9ee57462f692df8226/pulpcore/app/models/base.py#L198
#### self.pulp_type = None; self.get_pulp_type == container.manifest
https://github.com/pulp/pulpcore/blob/456ebe447d686b77fadcfb9ee57462f692df8226/pulpcore/app/models/base.py#L167
I suggest modifying `MasterModel.__str__`, e.g., like this:
```diff
def __str__(self):
# similar to Model's __str__, but type-aware
+ if self.pulp_type is None:
+ return super().__str__()
+
cast = self.cast()
if cast is self:
return super().__str__()
try:
return "<{} (pulp_type={}): {}>".format(
self._meta.object_name, cast.pulp_type, cast.name
)
except AttributeError:
return "<{} (pulp_type={}): pk={}>".format(
self._meta.object_name, cast.pulp_type, cast.pk
)
```
I'd say it is an error to call cast in `__str__` at all. I would want to reduce it to:
```python
def __str__(self):
# similar to Model's __str__, but type-aware
return "<{} (pulp_type={}): pk={}>".format(
self._meta.object_name, self.pulp_type, self.pk
)
```
And subclasses are free to overwrite it, so it looks pretty after casting. (Assuming that the name of a subclass makes for a good key was also a big mistake.)
Why does export/import call str on models in the first place?
Well, the Django internals for the related-objects lookup contain code that may raise an exception embedding the instance's `__repr__`. The exception can be raised when importing content that does not exist yet in the database: https://github.com/pulp/pulpcore/issues/3898#issuecomment-1575611847 | 2023-06-05T10:27:16 |
|
pulp/pulpcore | 3,917 | pulp__pulpcore-3917 | [
"3869"
] | e8632b3b5336b13f90806c5a0ec2d1efacb315ff | diff --git a/pulpcore/app/monkeypatch.py b/pulpcore/app/monkeypatch.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/app/monkeypatch.py
@@ -0,0 +1,66 @@
+import sys
+
+# This is a monkeypatch for https://github.com/pulp/pulpcore/issues/3869
+if sys.version_info.major == 3 and sys.version_info.minor < 12:
+ # Code copied from the Python 3.12 standard library
+ # We modify the default gzip compression level for writing streams from
+ # 9 to 1, attempting to vendor the minimum amount of code.
+ # -------------------------------------------------------------------
+ # tarfile.py
+ # -------------------------------------------------------------------
+ # Copyright (C) 2002 Lars Gustaebel <[email protected]>
+ # All rights reserved.
+ #
+ # Permission is hereby granted, free of charge, to any person
+ # obtaining a copy of this software and associated documentation
+ # files (the "Software"), to deal in the Software without
+ # restriction, including without limitation the rights to use,
+ # copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the
+ # Software is furnished to do so, subject to the following
+ # conditions:
+ #
+ # The above copyright notice and this permission notice shall be
+ # included in all copies or substantial portions of the Software.
+ #
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ # OTHER DEALINGS IN THE SOFTWARE.
+
+ import tarfile
+ from tarfile import NUL
+ import struct
+ import os
+ import time
+
+ class _Stream(tarfile._Stream):
+ """Class that serves as an adapter between TarFile and
+ a stream-like object. The stream-like object only
+ needs to have a read() or write() method and is accessed
+ blockwise. Use of gzip or bzip2 compression is possible.
+ A stream-like object could be for example: sys.stdin,
+ sys.stdout, a socket, a tape device etc.
+
+ _Stream is intended to be used only internally.
+ """
+
+ def _init_write_gz(self):
+ """Initialize for writing with gzip compression."""
+ self.cmp = self.zlib.compressobj(
+ 1, self.zlib.DEFLATED, -self.zlib.MAX_WBITS, self.zlib.DEF_MEM_LEVEL, 0
+ )
+ timestamp = struct.pack("<L", int(time.time()))
+ self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
+ if self.name.endswith(".gz"):
+ self.name = self.name[:-3]
+ # Honor "directory components removed" from RFC1952
+ self.name = os.path.basename(self.name)
+ # RFC1952 says we must use ISO-8859-1 for the FNAME field.
+ self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
+
+ tarfile._Stream = _Stream
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -5,7 +5,6 @@
import os.path
import subprocess
import tarfile
-
from distutils.util import strtobool
from gettext import gettext as _
from glob import glob
| Exports are bottlenecked by gzip compression which cannot be disabled
**Version**
Any
**Describe the bug**
>
> We identified a severe bottleneck in the way hammer exports work and found the code in upstream Pulp.
>
> Line 406 of https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/export.py
>
> "with tarfile.open(tarfile_fp, "w|gz", fileobj=split_process.stdin)"
>
> The tarfile Python module creates tar files using the gzip Python module.
>
> Data compression for the gzip Python module is provided by the zlib Python module.
>
> The zlib Python module calls the zlib library.
>
> If defaults are used the whole way through this series of events, the result is a single-threaded Pulp process doing the compression of a tarball containing a massive content library. This bottleneck can make large hammer exports take several days.
>
> Modifying the lines that tell the tarfile.open function to NOT use compression (change "w|gz" to "w") dramatically speeds up the hammer export. In our testing it reduced the time from days to just hours. The drawback is that the file size was significantly larger, but the trade-off is worthwhile given we have tight timeframes and plentiful disk capacity.
>
> Can this bottleneck be addressed with multi-threaded gzip compression?
>
> and/or
>
> Can a hammer command line option for no compression be implemented?
>
> Run a hammer export and monitor Pulp processes. One process will run at 100% CPU. Modify the abovementioned Python script to NOT use gzip compression, and an uncompressed tarball will be created much more quickly instead, with multiple Pulp processes.
>
>
> Steps to Reproduce:
> 1. Run a "hammer export". Monitor the Pulp process CPU usage and time taken to complete export.
> 2. Change the abovementioned Python code in Pulp.
> 3. Run a "hammer export" again and note performance improvement.
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2188504
| I don't know that disabling compression entirely is a good idea. However, the default compression level used is level 9, the most computationally expensive (slow) one. Given level 9 is (roughly) 6x slower than level 1, but only compresses about 20% better, this is probably a poor tradeoff.
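A rough, self-contained benchmark (not Pulp code; the payload below is made up) makes that tradeoff easy to measure — the raw-deflate setup is similar to what tarfile's gzip stream uses:
```python
import time
import zlib

payload = b"example repository metadata line\n" * 200_000  # a few MB of compressible bytes

for level in (1, 6, 9):
    compressor = zlib.compressobj(level, zlib.DEFLATED, -zlib.MAX_WBITS)
    start = time.perf_counter()
    compressed = compressor.compress(payload) + compressor.flush()
    elapsed = time.perf_counter() - start
    print(f"level {level}: {len(compressed):>9} bytes in {elapsed:.3f}s")
```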
We should look at using levels 1-3 instead. | 2023-06-13T11:10:47 |
|
pulp/pulpcore | 3,918 | pulp__pulpcore-3918 | [
"3869"
] | 049215284caad2d5a690fcaa44957e004750d746 | diff --git a/pulpcore/app/monkeypatch.py b/pulpcore/app/monkeypatch.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/app/monkeypatch.py
@@ -0,0 +1,66 @@
+import sys
+
+# This is a monkeypatch for https://github.com/pulp/pulpcore/issues/3869
+if sys.version_info.major == 3 and sys.version_info.minor < 12:
+ # Code copied from the Python 3.12 standard library
+ # We modify the default gzip compression level for writing streams from
+ # 9 to 1, attempting to vendor the minimum amount of code.
+ # -------------------------------------------------------------------
+ # tarfile.py
+ # -------------------------------------------------------------------
+ # Copyright (C) 2002 Lars Gustaebel <[email protected]>
+ # All rights reserved.
+ #
+ # Permission is hereby granted, free of charge, to any person
+ # obtaining a copy of this software and associated documentation
+ # files (the "Software"), to deal in the Software without
+ # restriction, including without limitation the rights to use,
+ # copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the
+ # Software is furnished to do so, subject to the following
+ # conditions:
+ #
+ # The above copyright notice and this permission notice shall be
+ # included in all copies or substantial portions of the Software.
+ #
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ # OTHER DEALINGS IN THE SOFTWARE.
+
+ import tarfile
+ from tarfile import NUL
+ import struct
+ import os
+ import time
+
+ class _Stream(tarfile._Stream):
+ """Class that serves as an adapter between TarFile and
+ a stream-like object. The stream-like object only
+ needs to have a read() or write() method and is accessed
+ blockwise. Use of gzip or bzip2 compression is possible.
+ A stream-like object could be for example: sys.stdin,
+ sys.stdout, a socket, a tape device etc.
+
+ _Stream is intended to be used only internally.
+ """
+
+ def _init_write_gz(self):
+ """Initialize for writing with gzip compression."""
+ self.cmp = self.zlib.compressobj(
+ 1, self.zlib.DEFLATED, -self.zlib.MAX_WBITS, self.zlib.DEF_MEM_LEVEL, 0
+ )
+ timestamp = struct.pack("<L", int(time.time()))
+ self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
+ if self.name.endswith(".gz"):
+ self.name = self.name[:-3]
+ # Honor "directory components removed" from RFC1952
+ self.name = os.path.basename(self.name)
+ # RFC1952 says we must use ISO-8859-1 for the FNAME field.
+ self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
+
+ tarfile._Stream = _Stream
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -5,7 +5,6 @@
import os.path
import subprocess
import tarfile
-
from distutils.util import strtobool
from gettext import gettext as _
from glob import glob
| Exports are bottlenecked by gzip compression which cannot be disabled
**Version**
Any
**Describe the bug**
>
> We identified a severe bottleneck in the way hammer exports work and found the code in upstream Pulp.
>
> Line 406 of https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/export.py
>
> "with tarfile.open(tarfile_fp, "w|gz", fileobj=split_process.stdin)"
>
> The tarfile Python module creates tar files using the gzip Python module.
>
> Data compression for the gzip Python module is provided by the zlib Python module.
>
> The zlib Python module calls the zlib library.
>
> If defaults are used the whole way through this series of events, the result is a single-threaded Pulp process doing the compression of a tarball containing a massive content library. This bottleneck can make large hammer exports take several days.
>
> Modifying the lines that tell the tarfile.open function to NOT use compression (change "w|gz" to "w") dramatically speeds up the hammer export. In our testing it reduced the time from days to just hours. The drawback is that the file size was significantly larger, but the trade-off is worthwhile given we have tight timeframes and plentiful disk capacity.
>
> Can this bottleneck be addressed with multi-threaded gzip compression?
>
> and/or
>
> Can a hammer command line option for no compression be implemented?
>
> Run a hammer export and monitor Pulp processes. One process will run at 100% CPU. Modify the abovementioned Python script to NOT use gzip compression, and an uncompressed tarball will be created much more quickly instead, with multiple Pulp processes.
>
>
> Steps to Reproduce:
> 1. Run a "hammer export". Monitor the Pulp process CPU usage and time taken to complete export.
> 2. Change the abovementioned Python code in Pulp.
> 3. Run a "hammer export" again and note performance improvement.
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2188504
| I don't know that disabling compression entirely is a good idea. However, the default compression level used is level 9, the most computationally expensive (slow) one. Given level 9 is (roughly) 6x slower than level 1, but only compresses about 20% better, this is probably a poor tradeoff.
We should look at using levels 1-3 instead. | 2023-06-13T11:11:01 |
|
pulp/pulpcore | 3,919 | pulp__pulpcore-3919 | [
"3869"
] | 171000ea5184afb0d858539f23c0954ffa2e410b | diff --git a/pulpcore/app/monkeypatch.py b/pulpcore/app/monkeypatch.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/app/monkeypatch.py
@@ -0,0 +1,66 @@
+import sys
+
+# This is a monkeypatch for https://github.com/pulp/pulpcore/issues/3869
+if sys.version_info.major == 3 and sys.version_info.minor < 12:
+ # Code copied from the Python 3.12 standard library
+ # We modify the default gzip compression level for writing streams from
+ # 9 to 1, attempting to vendor the minimum amount of code.
+ # -------------------------------------------------------------------
+ # tarfile.py
+ # -------------------------------------------------------------------
+ # Copyright (C) 2002 Lars Gustaebel <[email protected]>
+ # All rights reserved.
+ #
+ # Permission is hereby granted, free of charge, to any person
+ # obtaining a copy of this software and associated documentation
+ # files (the "Software"), to deal in the Software without
+ # restriction, including without limitation the rights to use,
+ # copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the
+ # Software is furnished to do so, subject to the following
+ # conditions:
+ #
+ # The above copyright notice and this permission notice shall be
+ # included in all copies or substantial portions of the Software.
+ #
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ # OTHER DEALINGS IN THE SOFTWARE.
+
+ import tarfile
+ from tarfile import NUL
+ import struct
+ import os
+ import time
+
+ class _Stream(tarfile._Stream):
+ """Class that serves as an adapter between TarFile and
+ a stream-like object. The stream-like object only
+ needs to have a read() or write() method and is accessed
+ blockwise. Use of gzip or bzip2 compression is possible.
+ A stream-like object could be for example: sys.stdin,
+ sys.stdout, a socket, a tape device etc.
+
+ _Stream is intended to be used only internally.
+ """
+
+ def _init_write_gz(self):
+ """Initialize for writing with gzip compression."""
+ self.cmp = self.zlib.compressobj(
+ 1, self.zlib.DEFLATED, -self.zlib.MAX_WBITS, self.zlib.DEF_MEM_LEVEL, 0
+ )
+ timestamp = struct.pack("<L", int(time.time()))
+ self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
+ if self.name.endswith(".gz"):
+ self.name = self.name[:-3]
+ # Honor "directory components removed" from RFC1952
+ self.name = os.path.basename(self.name)
+ # RFC1952 says we must use ISO-8859-1 for the FNAME field.
+ self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
+
+ tarfile._Stream = _Stream
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -5,7 +5,6 @@
import os.path
import subprocess
import tarfile
-
from distutils.util import strtobool
from gettext import gettext as _
from glob import glob
| Exports are bottlenecked by gzip compression which cannot be disabled
**Version**
Any
**Describe the bug**
>
> We identified a severe bottleneck in the way hammer exports work and found the code in upstream Pulp.
>
> Line 406 of https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/export.py
>
> "with tarfile.open(tarfile_fp, "w|gz", fileobj=split_process.stdin)"
>
> The tarfile Python module creates tar files using the gzip Python module.
>
> Data compression for the gzip Python module is provided by the zlib Python module.
>
> The zlib Python module calls the zlib library.
>
> If defaults are used the whole way through this series of events, the result is a single-threaded Pulp process doing the compression of a tarball containing a massive content library. This bottleneck can make large hammer exports take several days.
>
> Modifying the lines that tell the tarfile.open function to NOT use compression (change "w|gz" to "w") dramatically speeds up the hammer export. In our testing it reduced the time from days to just hours. The drawback is that the file size was significantly larger, but the trade-off is worthwhile given we have tight timeframes and plentiful disk capacity.
>
> Can this bottleneck be addressed with multi-threaded gzip compression?
>
> and/or
>
> Can a hammer command line option for no compression be implemented?
>
> Run a hammer export and monitor Pulp processes. One process will run at 100% CPU. Modify the abovementioned Python script to NOT use gzip compression, and an uncompressed tarball will be created much more quickly instead, with multiple Pulp processes.
>
>
> Steps to Reproduce:
> 1. Run a "hammer export". Monitor the Pulp process CPU usage and time taken to complete export.
> 2. Change the abovementioned Python code in Pulp.
> 3. Run a "hammer export" again and note performance improvement.
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2188504
| I don't know that disabling compression entirely is a good idea. However, the default compression level used is level 9, the most computationally expensive (slow) one. Given level 9 is (roughly) 6x slower than level 1, but only compresses about 20% better, this is probably a poor tradeoff.
We should look at using levels 1-3 instead. | 2023-06-13T11:11:14 |
|
pulp/pulpcore | 3,920 | pulp__pulpcore-3920 | [
"3869"
] | 12dbf6bd7de6d3793d4e40f03e5eddd3b4cf56dc | diff --git a/pulpcore/app/monkeypatch.py b/pulpcore/app/monkeypatch.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/app/monkeypatch.py
@@ -0,0 +1,66 @@
+import sys
+
+# This is a monkeypatch for https://github.com/pulp/pulpcore/issues/3869
+if sys.version_info.major == 3 and sys.version_info.minor < 12:
+ # Code copied from the Python 3.12 standard library
+ # We modify the default gzip compression level for writing streams from
+ # 9 to 1, attempting to vendor the minimum amount of code.
+ # -------------------------------------------------------------------
+ # tarfile.py
+ # -------------------------------------------------------------------
+ # Copyright (C) 2002 Lars Gustaebel <[email protected]>
+ # All rights reserved.
+ #
+ # Permission is hereby granted, free of charge, to any person
+ # obtaining a copy of this software and associated documentation
+ # files (the "Software"), to deal in the Software without
+ # restriction, including without limitation the rights to use,
+ # copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the
+ # Software is furnished to do so, subject to the following
+ # conditions:
+ #
+ # The above copyright notice and this permission notice shall be
+ # included in all copies or substantial portions of the Software.
+ #
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ # OTHER DEALINGS IN THE SOFTWARE.
+
+ import tarfile
+ from tarfile import NUL
+ import struct
+ import os
+ import time
+
+ class _Stream(tarfile._Stream):
+ """Class that serves as an adapter between TarFile and
+ a stream-like object. The stream-like object only
+ needs to have a read() or write() method and is accessed
+ blockwise. Use of gzip or bzip2 compression is possible.
+ A stream-like object could be for example: sys.stdin,
+ sys.stdout, a socket, a tape device etc.
+
+ _Stream is intended to be used only internally.
+ """
+
+ def _init_write_gz(self):
+ """Initialize for writing with gzip compression."""
+ self.cmp = self.zlib.compressobj(
+ 1, self.zlib.DEFLATED, -self.zlib.MAX_WBITS, self.zlib.DEF_MEM_LEVEL, 0
+ )
+ timestamp = struct.pack("<L", int(time.time()))
+ self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
+ if self.name.endswith(".gz"):
+ self.name = self.name[:-3]
+ # Honor "directory components removed" from RFC1952
+ self.name = os.path.basename(self.name)
+ # RFC1952 says we must use ISO-8859-1 for the FNAME field.
+ self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
+
+ tarfile._Stream = _Stream
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -5,7 +5,6 @@
import os.path
import subprocess
import tarfile
-
from distutils.util import strtobool
from gettext import gettext as _
from glob import glob
| Exports are bottlenecked by gzip compression which cannot be disabled
**Version**
Any
**Describe the bug**
>
> We identified a severe bottleneck in the way hammer exports work and found the code in upstream Pulp.
>
> Line 406 of https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/export.py
>
> "with tarfile.open(tarfile_fp, "w|gz", fileobj=split_process.stdin)"
>
> The tarfile Python module creates tar files using the gzip Python module.
>
> Data compression for the gzip Python module is provided by the zlib Python module.
>
> The zlib Python module calls the zlib library.
>
> If defaults are used the whole way through this series of events, the result is a single-threaded Pulp process doing the compression of a tarball containing a massive content library. This bottleneck can make large hammer exports take several days.
>
> Modifying the lines that tell the tarfile.open function to NOT use compression (change "w|gz" to "w") dramatically speeds up the hammer export. In our testing it reduced the time from days to just hours. The drawback is that the file size was significantly larger, but the trade-off is worthwhile given we have tight timeframes and plentiful disk capacity.
>
> Can this bottleneck be addressed with multi-threaded gzip compression?
>
> and/or
>
> Can a hammer command line option for no compression be implemented?
>
> Run a hammer export and monitor Pulp processes. One process will run at 100% CPU. Modify the abovementioned Python script to NOT use gzip compression, and an uncompressed tarball will be created much more quickly instead, with multiple Pulp processes.
>
>
> Steps to Reproduce:
> 1. Run a "hammer export". Monitor the Pulp process CPU usage and time taken to complete export.
> 2. Change the abovementioned Python code in Pulp.
> 3. Run a "hammer export" again and note performance improvement.
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2188504
| I don't know that disabling compression entirely is a good idea. However, the default compression level used is level 9, the most computationally expensive (slow) one. Given level 9 is (roughly) 6x slower than level 1, but only compresses about 20% better, this is probably a poor tradeoff.
We should look at using levels 1-3 instead. | 2023-06-13T11:11:28 |
|
pulp/pulpcore | 3,943 | pulp__pulpcore-3943 | [
"3940"
] | a7e7ce7a48dd344360be104a9abf45bcc141cffb | diff --git a/pulpcore/tasking/pulpcore_worker.py b/pulpcore/tasking/pulpcore_worker.py
--- a/pulpcore/tasking/pulpcore_worker.py
+++ b/pulpcore/tasking/pulpcore_worker.py
@@ -114,14 +114,18 @@ def __init__(self):
startup_hook()
def _signal_handler(self, thesignal, frame):
- # Reset signal handlers to default
- # If you kill the process a second time it's not graceful anymore.
- signal.signal(signal.SIGINT, signal.SIG_DFL)
- signal.signal(signal.SIGTERM, signal.SIG_DFL)
-
- _logger.info(_("Worker %s was requested to shut down."), self.name)
-
- self.task_grace_timeout = TASK_GRACE_INTERVAL
+ if thesignal in (signal.SIGHUP, signal.SIGTERM):
+ _logger.info(_("Worker %s was requested to shut down gracefully."), self.name)
+ self.task_grace_timeout = -1
+ else:
+ # Reset signal handlers to default
+ # If you kill the process a second time it's not graceful anymore.
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ signal.signal(signal.SIGHUP, signal.SIG_DFL)
+
+ _logger.info(_("Worker %s was requested to shut down."), self.name)
+ self.task_grace_timeout = TASK_GRACE_INTERVAL
self.shutdown_requested = True
def _pg_notify_handler(self, notification):
@@ -312,7 +316,7 @@ def supervise_task(self, task):
task_process.start()
while True:
if cancel_state:
- if self.task_grace_timeout > 0:
+ if self.task_grace_timeout != 0:
_logger.info("Wait for canceled task to abort.")
else:
self.task_grace_timeout = TASK_KILL_INTERVAL
@@ -331,13 +335,14 @@ def supervise_task(self, task):
if self.cancel_task:
_logger.info(_("Received signal to cancel current task %s."), task.pk)
cancel_state = TASK_STATES.CANCELED
+ self.cancel_task = False
if task_process.sentinel in r:
if not task_process.is_alive():
break
if self.sentinel in r:
os.read(self.sentinel, 256)
if self.shutdown_requested:
- if self.task_grace_timeout > 0:
+ if self.task_grace_timeout != 0:
_logger.info(
"Worker shutdown requested, waiting for task %s to finish.", task.pk
)
@@ -371,6 +376,7 @@ def run_forever(self):
with WorkerDirectory(self.name):
signal.signal(signal.SIGINT, self._signal_handler)
signal.signal(signal.SIGTERM, self._signal_handler)
+ signal.signal(signal.SIGHUP, self._signal_handler)
# Subscribe to pgsql channels
connection.connection.add_notify_handler(self._pg_notify_handler)
self.cursor.execute("LISTEN pulp_worker_wakeup")
@@ -407,6 +413,7 @@ def child_signal_handler(sig, frame):
# If you kill the process a second time it's not graceful anymore.
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ signal.signal(signal.SIGHUP, signal.SIG_DFL)
signal.signal(signal.SIGUSR1, signal.SIG_DFL)
if sig == signal.SIGUSR1:
@@ -418,6 +425,7 @@ def _perform_task(task_pk, task_working_dir_rel_path):
This must be called as a subprocess, while the parent holds the advisory lock."""
signal.signal(signal.SIGINT, child_signal_handler)
signal.signal(signal.SIGTERM, child_signal_handler)
+ signal.signal(signal.SIGHUP, child_signal_handler)
signal.signal(signal.SIGUSR1, child_signal_handler)
if settings.TASK_DIAGNOSTICS:
# It would be better to have this recording happen in the parent process instead of here
| As a user I can gracefully shut down a pulpcore-worker
A user should be able to stop a `pulpcore-worker` in such a way that allows the worker's current task to run to completion.
| Currently, when you sigint a worker, it will wait for a few heartbeats before killing the current task. This only works for short tasks, obviously, but I leave it here for context.
Is this about sending SIGUSR1 to the worker, or do we need to make this available from the api?
How useful will this be with a long-running task like sync? Is there an expected max time within which the worker would supposedly gracefully shut down? | 2023-06-21T12:20:44 |
|
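The patch above distinguishes a graceful stop (SIGHUP/SIGTERM) from a hard one. As a toy illustration of that split — not Pulp's actual worker code — a handler can record which kind of shutdown was requested and let the main loop decide whether to wait for the current task:

```python
import signal
import time

class ToyWorker:
    """Toy illustration of the graceful/hard shutdown split in the patch above."""

    def __init__(self):
        self.shutdown_requested = False
        self.graceful = False
        # POSIX-only sketch: SIGHUP is not available on Windows.
        signal.signal(signal.SIGTERM, self._signal_handler)
        signal.signal(signal.SIGHUP, self._signal_handler)
        signal.signal(signal.SIGINT, self._signal_handler)

    def _signal_handler(self, sig, frame):
        if sig in (signal.SIGHUP, signal.SIGTERM):
            # Graceful: remember the request and let the current task finish.
            self.graceful = True
        else:
            # Hard: restore the default so a second Ctrl-C kills the process.
            signal.signal(signal.SIGINT, signal.SIG_DFL)
        self.shutdown_requested = True

    def run(self):
        while not self.shutdown_requested:
            time.sleep(1)  # stand-in for select() on task/notification fds
        print("graceful" if self.graceful else "hard", "shutdown requested")

if __name__ == "__main__":
    ToyWorker().run()
```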
pulp/pulpcore | 3,944 | pulp__pulpcore-3944 | [
"3945"
] | 30bfe11243467899cc5947013735b6dcd361afca | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -844,7 +844,7 @@ def _save_artifact(self, download_result, remote_artifact, request=None):
# There is already content saved
content = c_type.objects.get(content.q())
created_artifact_digests = {rp: a.sha256 for rp, a in artifacts.items() if a}
- cas = list(content.contentartifact_set().select_related("artifact"))
+ cas = list(content.contentartifact_set.select_related("artifact"))
found_artifact_digests = {
ca.relative_path: ca.artifact.sha256 for ca in cas if ca.artifact
}
| Pull through requesting existing content produces a bad call to contentartifact_set
**Version**
3.28, as this code was not released earlier.
**Describe the bug**
```
File "/usr/local/lib/python3.8/site-packages/pulpcore/content/handler.py", line 847, in _save_artifact
cas = list(content.contentartifact_set().select_related("artifact"))
TypeError: __call__() missing 1 required keyword-only argument: 'manager'
```
| 2023-06-21T19:55:24 |
||
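The one-character fix above comes down to how Django exposes reverse foreign-key relations. A minimal sketch with hypothetical models (assuming a configured Django project; these are not Pulp's real models):

```python
from django.db import models

class Content(models.Model):
    class Meta:
        app_label = "demo"  # hypothetical app label for the sketch

class ContentArtifact(models.Model):
    content = models.ForeignKey(Content, on_delete=models.CASCADE)
    relative_path = models.TextField()

    class Meta:
        app_label = "demo"

def list_artifacts(content):
    # Correct: the reverse accessor is already a RelatedManager attribute.
    return list(content.contentartifact_set.all())

def list_artifacts_broken(content):
    # Wrong: calling the accessor hits RelatedManager.__call__, which expects
    # a `manager` keyword - exactly the TypeError in the traceback above.
    return list(content.contentartifact_set().all())
```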
pulp/pulpcore | 3,946 | pulp__pulpcore-3946 | [
"3945"
] | b5fd8f11e5ac1ad42324c1a6a44e408dde86fd1a | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -844,7 +844,7 @@ def _save_artifact(self, download_result, remote_artifact, request=None):
# There is already content saved
content = c_type.objects.get(content.q())
created_artifact_digests = {rp: a.sha256 for rp, a in artifacts.items() if a}
- cas = list(content.contentartifact_set().select_related("artifact"))
+ cas = list(content.contentartifact_set.select_related("artifact"))
found_artifact_digests = {
ca.relative_path: ca.artifact.sha256 for ca in cas if ca.artifact
}
| Pull through requesting existing content produces a bad call to contentartifact_set
**Version**
3.28, as this code was not released earlier.
**Describe the bug**
```
File "/usr/local/lib/python3.8/site-packages/pulpcore/content/handler.py", line 847, in _save_artifact
cas = list(content.contentartifact_set().select_related("artifact"))
TypeError: __call__() missing 1 required keyword-only argument: 'manager'
```
| 2023-06-22T11:24:28 |
||
pulp/pulpcore | 3,947 | pulp__pulpcore-3947 | [
"3957"
] | d1de78b4f314d307be2ce69127d628c6ab953370 | diff --git a/pulpcore/app/models/role.py b/pulpcore/app/models/role.py
--- a/pulpcore/app/models/role.py
+++ b/pulpcore/app/models/role.py
@@ -1,4 +1,4 @@
-from django.contrib.auth import get_user_model
+from django.conf import settings
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
@@ -41,7 +41,7 @@ class UserRole(BaseModel):
"""
user = models.ForeignKey(
- get_user_model(), related_name="object_roles", on_delete=models.CASCADE
+ settings.AUTH_USER_MODEL, related_name="object_roles", on_delete=models.CASCADE
)
role = models.ForeignKey(Role, related_name="object_users", on_delete=models.CASCADE)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, null=True)
diff --git a/pulpcore/app/role_util.py b/pulpcore/app/role_util.py
--- a/pulpcore/app/role_util.py
+++ b/pulpcore/app/role_util.py
@@ -1,19 +1,23 @@
from gettext import gettext as _
from collections import defaultdict
+from functools import lru_cache
from django.conf import settings
from django.core.exceptions import BadRequest
from django.db.models import Q, Exists, OuterRef, CharField
from django.db.models.functions import Cast
-from django.contrib.auth import get_user_model
+from django.contrib.auth import get_user_model as django_get_user_model
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from pulpcore.app.models import Group
from pulpcore.app.models.role import GroupRole, Role, UserRole
-User = get_user_model()
+
+@lru_cache(maxsize=1)
+def get_user_model():
+ return django_get_user_model()
def assign_role(rolename, entity, obj=None, domain=None):
@@ -300,6 +304,7 @@ def get_users_with_perms_roles(
include_model_permissions=True,
for_concrete_model=False,
):
+ User = get_user_model()
qs = User.objects.none()
if with_superusers:
qs |= User.objects.filter(is_superuser=True)
@@ -338,6 +343,7 @@ def get_users_with_perms_attached_perms(
include_model_permissions=True,
for_concrete_model=False,
):
+ User = get_user_model()
ctype = ContentType.objects.get_for_model(obj, for_concrete_model=for_concrete_model)
perms = Permission.objects.filter(content_type__pk=ctype.id)
if only_with_perms_in:
@@ -422,6 +428,7 @@ def get_users_with_perms(
include_model_permissions=True,
for_concrete_model=False,
):
+ User = get_user_model()
if attach_perms:
res = defaultdict(set)
if "pulpcore.backends.ObjectRolePermissionBackend" in settings.AUTHENTICATION_BACKENDS:
| get_user_model calls cause circular imports
**Version**
pulpcore 3.27, pulp container 2.16.x
**Describe the bug**
```
Traceback (most recent call last):
File "/venv/bin/django-admin", line 8, in <module>
sys.exit(execute_from_command_line())
File "/venv/lib64/python3.9/site-packages/django/core/management/__init__.py", line 442, in execute_from_command_line
utility.execute()
File "/venv/lib64/python3.9/site-packages/django/core/management/__init__.py", line 416, in execute
django.setup()
File "/venv/lib64/python3.9/site-packages/django/__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "/venv/lib64/python3.9/site-packages/django/apps/registry.py", line 116, in populate
app_config.import_models()
File "/venv/lib64/python3.9/site-packages/django/apps/config.py", line 269, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib64/python3.9/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 850, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/venv/lib64/python3.9/site-packages/pulp_container/app/models.py", line 28, in <module>
from pulpcore.plugin.util import gpg_verify
File "/venv/lib64/python3.9/site-packages/pulpcore/plugin/util.py", line 1, in <module>
from pulpcore.app.role_util import ( # noqa
File "/venv/lib64/python3.9/site-packages/pulpcore/app/role_util.py", line 14, in <module>
from pulpcore.app.models.role import GroupRole, Role, UserRole
File "/venv/lib64/python3.9/site-packages/pulpcore/app/models/role.py", line 30, in <module>
class UserRole(BaseModel):
File "/venv/lib64/python3.9/site-packages/pulpcore/app/models/role.py", line 44, in UserRole
get_user_model(), related_name="object_roles", on_delete=models.CASCADE
File "/venv/lib64/python3.9/site-packages/django/contrib/auth/__init__.py", line 170, in get_user_model
return django_apps.get_model(settings.AUTH_USER_MODEL, require_ready=False)
File "/venv/lib64/python3.9/site-packages/django/apps/registry.py", line 211, in get_model
app_config.import_models()
File "/venv/lib64/python3.9/site-packages/django/apps/config.py", line 269, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib64/python3.9/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "/app/galaxy_ng/app/models/__init__.py", line 17, in <module>
from .container import (
File "/app/galaxy_ng/app/models/container.py", line 11, in <module>
container_models.ContainerDistribution,
AttributeError: partially initialized module 'pulp_container.app.models' has no attribute 'ContainerDistribution' (most likely due to a circular import)
make: *** [Makefile:108: docker/loaddata] Error 1
```
**To Reproduce**
Spin up galaxy_ng with pulpcore updated to 3.27.
**Expected behavior**
No circular import errors.
**Additional context**
| 2023-06-22T14:17:47 |
||
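The patch above is the standard Django remedy for this class of circular import: reference the user model lazily instead of resolving it at import time. A minimal sketch with a hypothetical model (not Pulp's code):

```python
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models

class UserNote(models.Model):
    # The string reference ("app_label.ModelName") is resolved only after the
    # app registry is ready, so importing this module never loads the user model.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    text = models.TextField()

    class Meta:
        app_label = "demo"  # hypothetical app label for the sketch

def superusers():
    # Resolve the model at call time; resolving it at import time is what
    # produced the circular import in the traceback above.
    User = get_user_model()
    return User.objects.filter(is_superuser=True)
```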
pulp/pulpcore | 3,958 | pulp__pulpcore-3958 | [
"3957"
] | 5ead7ec09cf0240b7eb49c0e4605fcca6eecf415 | diff --git a/pulpcore/app/models/role.py b/pulpcore/app/models/role.py
--- a/pulpcore/app/models/role.py
+++ b/pulpcore/app/models/role.py
@@ -1,4 +1,4 @@
-from django.contrib.auth import get_user_model
+from django.conf import settings
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
@@ -41,7 +41,7 @@ class UserRole(BaseModel):
"""
user = models.ForeignKey(
- get_user_model(), related_name="object_roles", on_delete=models.CASCADE
+ settings.AUTH_USER_MODEL, related_name="object_roles", on_delete=models.CASCADE
)
role = models.ForeignKey(Role, related_name="object_users", on_delete=models.CASCADE)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, null=True)
diff --git a/pulpcore/app/role_util.py b/pulpcore/app/role_util.py
--- a/pulpcore/app/role_util.py
+++ b/pulpcore/app/role_util.py
@@ -1,19 +1,23 @@
from gettext import gettext as _
from collections import defaultdict
+from functools import lru_cache
from django.conf import settings
from django.core.exceptions import BadRequest
from django.db.models import Q, Exists, OuterRef, CharField
from django.db.models.functions import Cast
-from django.contrib.auth import get_user_model
+from django.contrib.auth import get_user_model as django_get_user_model
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from pulpcore.app.models import Group
from pulpcore.app.models.role import GroupRole, Role, UserRole
-User = get_user_model()
+
+@lru_cache(maxsize=1)
+def get_user_model():
+ return django_get_user_model()
def assign_role(rolename, entity, obj=None, domain=None):
@@ -300,6 +304,7 @@ def get_users_with_perms_roles(
include_model_permissions=True,
for_concrete_model=False,
):
+ User = get_user_model()
qs = User.objects.none()
if with_superusers:
qs |= User.objects.filter(is_superuser=True)
@@ -338,6 +343,7 @@ def get_users_with_perms_attached_perms(
include_model_permissions=True,
for_concrete_model=False,
):
+ User = get_user_model()
ctype = ContentType.objects.get_for_model(obj, for_concrete_model=for_concrete_model)
perms = Permission.objects.filter(content_type__pk=ctype.id)
if only_with_perms_in:
@@ -422,6 +428,7 @@ def get_users_with_perms(
include_model_permissions=True,
for_concrete_model=False,
):
+ User = get_user_model()
if attach_perms:
res = defaultdict(set)
if "pulpcore.backends.ObjectRolePermissionBackend" in settings.AUTHENTICATION_BACKENDS:
| get_user_model calls cause circular imports
**Version**
pulpcore 3.27, pulp container 2.16.x
**Describe the bug**
```
Traceback (most recent call last):
File "/venv/bin/django-admin", line 8, in <module>
sys.exit(execute_from_command_line())
File "/venv/lib64/python3.9/site-packages/django/core/management/__init__.py", line 442, in execute_from_command_line
utility.execute()
File "/venv/lib64/python3.9/site-packages/django/core/management/__init__.py", line 416, in execute
django.setup()
File "/venv/lib64/python3.9/site-packages/django/__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "/venv/lib64/python3.9/site-packages/django/apps/registry.py", line 116, in populate
app_config.import_models()
File "/venv/lib64/python3.9/site-packages/django/apps/config.py", line 269, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib64/python3.9/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 850, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/venv/lib64/python3.9/site-packages/pulp_container/app/models.py", line 28, in <module>
from pulpcore.plugin.util import gpg_verify
File "/venv/lib64/python3.9/site-packages/pulpcore/plugin/util.py", line 1, in <module>
from pulpcore.app.role_util import ( # noqa
File "/venv/lib64/python3.9/site-packages/pulpcore/app/role_util.py", line 14, in <module>
from pulpcore.app.models.role import GroupRole, Role, UserRole
File "/venv/lib64/python3.9/site-packages/pulpcore/app/models/role.py", line 30, in <module>
class UserRole(BaseModel):
File "/venv/lib64/python3.9/site-packages/pulpcore/app/models/role.py", line 44, in UserRole
get_user_model(), related_name="object_roles", on_delete=models.CASCADE
File "/venv/lib64/python3.9/site-packages/django/contrib/auth/__init__.py", line 170, in get_user_model
return django_apps.get_model(settings.AUTH_USER_MODEL, require_ready=False)
File "/venv/lib64/python3.9/site-packages/django/apps/registry.py", line 211, in get_model
app_config.import_models()
File "/venv/lib64/python3.9/site-packages/django/apps/config.py", line 269, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib64/python3.9/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "/app/galaxy_ng/app/models/__init__.py", line 17, in <module>
from .container import (
File "/app/galaxy_ng/app/models/container.py", line 11, in <module>
container_models.ContainerDistribution,
AttributeError: partially initialized module 'pulp_container.app.models' has no attribute 'ContainerDistribution' (most likely due to a circular import)
make: *** [Makefile:108: docker/loaddata] Error 1
```
**To Reproduce**
Spin up galaxy_ng with pulpcore updated to 3.27.
**Expected behavior**
No circular import errors.
**Additional context**
| 2023-06-26T15:39:03 |
||
pulp/pulpcore | 3,959 | pulp__pulpcore-3959 | [
"3957"
] | 8a6df766855cfd19c2f735f83e3379d483d071d9 | diff --git a/pulpcore/app/models/role.py b/pulpcore/app/models/role.py
--- a/pulpcore/app/models/role.py
+++ b/pulpcore/app/models/role.py
@@ -1,4 +1,4 @@
-from django.contrib.auth import get_user_model
+from django.conf import settings
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
@@ -41,7 +41,7 @@ class UserRole(BaseModel):
"""
user = models.ForeignKey(
- get_user_model(), related_name="object_roles", on_delete=models.CASCADE
+ settings.AUTH_USER_MODEL, related_name="object_roles", on_delete=models.CASCADE
)
role = models.ForeignKey(Role, related_name="object_users", on_delete=models.CASCADE)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, null=True)
diff --git a/pulpcore/app/role_util.py b/pulpcore/app/role_util.py
--- a/pulpcore/app/role_util.py
+++ b/pulpcore/app/role_util.py
@@ -1,19 +1,23 @@
from gettext import gettext as _
from collections import defaultdict
+from functools import lru_cache
from django.conf import settings
from django.core.exceptions import BadRequest
from django.db.models import Q, Exists, OuterRef, CharField
from django.db.models.functions import Cast
-from django.contrib.auth import get_user_model
+from django.contrib.auth import get_user_model as django_get_user_model
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from pulpcore.app.models import Group
from pulpcore.app.models.role import GroupRole, Role, UserRole
-User = get_user_model()
+
+@lru_cache(maxsize=1)
+def get_user_model():
+ return django_get_user_model()
def assign_role(rolename, entity, obj=None, domain=None):
@@ -300,6 +304,7 @@ def get_users_with_perms_roles(
include_model_permissions=True,
for_concrete_model=False,
):
+ User = get_user_model()
qs = User.objects.none()
if with_superusers:
qs |= User.objects.filter(is_superuser=True)
@@ -338,6 +343,7 @@ def get_users_with_perms_attached_perms(
include_model_permissions=True,
for_concrete_model=False,
):
+ User = get_user_model()
ctype = ContentType.objects.get_for_model(obj, for_concrete_model=for_concrete_model)
perms = Permission.objects.filter(content_type__pk=ctype.id)
if only_with_perms_in:
@@ -422,6 +428,7 @@ def get_users_with_perms(
include_model_permissions=True,
for_concrete_model=False,
):
+ User = get_user_model()
if attach_perms:
res = defaultdict(set)
if "pulpcore.backends.ObjectRolePermissionBackend" in settings.AUTHENTICATION_BACKENDS:
| get_user_model calls cause circular imports
**Version**
pulpcore 3.27, pulp container 2.16.x
**Describe the bug**
```
Traceback (most recent call last):
File "/venv/bin/django-admin", line 8, in <module>
sys.exit(execute_from_command_line())
File "/venv/lib64/python3.9/site-packages/django/core/management/__init__.py", line 442, in execute_from_command_line
utility.execute()
File "/venv/lib64/python3.9/site-packages/django/core/management/__init__.py", line 416, in execute
django.setup()
File "/venv/lib64/python3.9/site-packages/django/__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "/venv/lib64/python3.9/site-packages/django/apps/registry.py", line 116, in populate
app_config.import_models()
File "/venv/lib64/python3.9/site-packages/django/apps/config.py", line 269, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib64/python3.9/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 850, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/venv/lib64/python3.9/site-packages/pulp_container/app/models.py", line 28, in <module>
from pulpcore.plugin.util import gpg_verify
File "/venv/lib64/python3.9/site-packages/pulpcore/plugin/util.py", line 1, in <module>
from pulpcore.app.role_util import ( # noqa
File "/venv/lib64/python3.9/site-packages/pulpcore/app/role_util.py", line 14, in <module>
from pulpcore.app.models.role import GroupRole, Role, UserRole
File "/venv/lib64/python3.9/site-packages/pulpcore/app/models/role.py", line 30, in <module>
class UserRole(BaseModel):
File "/venv/lib64/python3.9/site-packages/pulpcore/app/models/role.py", line 44, in UserRole
get_user_model(), related_name="object_roles", on_delete=models.CASCADE
File "/venv/lib64/python3.9/site-packages/django/contrib/auth/__init__.py", line 170, in get_user_model
return django_apps.get_model(settings.AUTH_USER_MODEL, require_ready=False)
File "/venv/lib64/python3.9/site-packages/django/apps/registry.py", line 211, in get_model
app_config.import_models()
File "/venv/lib64/python3.9/site-packages/django/apps/config.py", line 269, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib64/python3.9/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "/app/galaxy_ng/app/models/__init__.py", line 17, in <module>
from .container import (
File "/app/galaxy_ng/app/models/container.py", line 11, in <module>
container_models.ContainerDistribution,
AttributeError: partially initialized module 'pulp_container.app.models' has no attribute 'ContainerDistribution' (most likely due to a circular import)
make: *** [Makefile:108: docker/loaddata] Error 1
```
**To Reproduce**
Spin up galaxy_ng with pulpcore updated to 3.27.
**Expected behavior**
No circular import errors.
**Additional context**
| 2023-06-26T15:39:17 |
||
pulp/pulpcore | 3,960 | pulp__pulpcore-3960 | [
"3951"
] | 1f1b869fb836c89d5900b37aac0b510a79f99ec1 | diff --git a/pulpcore/app/serializers/content.py b/pulpcore/app/serializers/content.py
--- a/pulpcore/app/serializers/content.py
+++ b/pulpcore/app/serializers/content.py
@@ -5,91 +5,127 @@
from rest_framework.validators import UniqueValidator
from pulpcore.app import models
-from pulpcore.app.serializers import base, fields
+from pulpcore.app.serializers import base, fields, DetailRelatedField
from pulpcore.app.util import get_domain
-class BaseContentSerializer(base.ModelSerializer):
+class NoArtifactContentSerializer(base.ModelSerializer):
pulp_href = base.DetailIdentityField(view_name_pattern=r"contents(-.*/.*)-detail")
-
- class Meta:
- model = models.Content
- fields = base.ModelSerializer.Meta.fields
-
-
-class NoArtifactContentSerializer(BaseContentSerializer):
- class Meta:
- model = models.Content
- fields = BaseContentSerializer.Meta.fields
-
-
-class SingleArtifactContentSerializer(BaseContentSerializer):
- artifact = fields.SingleContentArtifactField(
- help_text=_("Artifact file representing the physical content"),
- )
-
- relative_path = serializers.CharField(
- help_text=_("Path where the artifact is located relative to distributions base_path"),
- validators=[fields.relative_path_validator],
+ repository = DetailRelatedField(
+ help_text=_("A URI of a repository the new content unit should be associated with."),
+ required=False,
write_only=True,
+ view_name_pattern=r"repositories(-.*/.*)-detail",
+ queryset=models.Repository.objects.all(),
)
- def __init__(self, *args, **kwargs):
+ def get_artifacts(self, validated_data):
"""
- Initializer for SingleArtifactContentSerializer
+ Extract artifacts from validated_data.
+
+ This function is supposed to extract the information about content artifacts from
+ validated_data and return a dictionary with artifacts and relative paths as keys.
"""
- super().__init__(*args, **kwargs)
+ return {}
- # If the content model has its own database field 'relative_path',
- # we should not mark the field write_only
- if hasattr(self.Meta.model, "relative_path") and "relative_path" in self.fields:
- self.fields["relative_path"].write_only = False
+ def retrieve(self, validated_data):
+ """
+ Retrieve existing content unit if it exists, else return None.
+
+ This method is plugin-specific and implementing it for a specific content type
+ allows for uploading already existing content units of that type.
+ """
+ return None
+
+ def validate(self, data):
+ """Validate that we have an Artifact or can create one."""
+
+ data = super().validate(data)
+ if repository := data.get("repository"):
+ if (
+ self.Meta.model
+ not in repository.get_model_for_pulp_type(repository.pulp_type).CONTENT_TYPES
+ ):
+ raise serializers.ValidationError("Content is not supported by this repository.")
+ return data
def create(self, validated_data):
"""
- Create the content and associate it with its Artifact, or retrieve the existing content.
+ Create the content and associate it with its Artifacts, or retrieve the existing content.
Args:
validated_data (dict): Data to save to the database
"""
- content = self.retrieve(validated_data)
+ repository = validated_data.pop("repository", None)
+ artifacts = self.get_artifacts(validated_data)
+ content = self.retrieve(validated_data)
if content is not None:
content.touch()
else:
- artifact = validated_data.pop("artifact")
- if "relative_path" not in self.fields or self.fields["relative_path"].write_only:
- relative_path = validated_data.pop("relative_path")
- else:
- relative_path = validated_data.get("relative_path")
try:
with transaction.atomic():
content = self.Meta.model.objects.create(**validated_data)
- models.ContentArtifact.objects.create(
- artifact=artifact, content=content, relative_path=relative_path
- )
+ for relative_path, artifact in artifacts.items():
+ models.ContentArtifact.objects.create(
+ artifact=artifact, content=content, relative_path=relative_path
+ )
except IntegrityError:
content = self.retrieve(validated_data)
if content is None:
raise
+ if repository:
+ repository.cast()
+ content_to_add = self.Meta.model.objects.filter(pk=content.pk)
+
+ # create new repo version with uploaded package
+ with repository.new_version() as new_version:
+ new_version.add_content(content_to_add)
+
return content
- def retrieve(self, validated_data):
- """
- Retrieve existing content unit if it exists, else return None.
+ class Meta:
+ model = models.Content
+ fields = base.ModelSerializer.Meta.fields + ("repository",)
- This method is plugin-specific and implementing it for a specific content type
- allows for uploading already existing content units of that type.
+
+class SingleArtifactContentSerializer(NoArtifactContentSerializer):
+ artifact = fields.SingleContentArtifactField(
+ help_text=_("Artifact file representing the physical content"),
+ )
+
+ relative_path = serializers.CharField(
+ help_text=_("Path where the artifact is located relative to distributions base_path"),
+ validators=[fields.relative_path_validator],
+ write_only=True,
+ )
+
+ def __init__(self, *args, **kwargs):
"""
- return None
+ Initializer for SingleArtifactContentSerializer
+ """
+ super().__init__(*args, **kwargs)
+
+ # If the content model has its own database field 'relative_path',
+ # we should not mark the field write_only
+ if hasattr(self.Meta.model, "relative_path") and "relative_path" in self.fields:
+ self.fields["relative_path"].write_only = False
+
+ def get_artifacts(self, validated_data):
+ artifact = validated_data.pop("artifact")
+ if "relative_path" not in self.fields or self.fields["relative_path"].write_only:
+ relative_path = validated_data.pop("relative_path")
+ else:
+ relative_path = validated_data.get("relative_path")
+ return {relative_path: artifact}
class Meta:
model = models.Content
- fields = BaseContentSerializer.Meta.fields + ("artifact", "relative_path")
+ fields = NoArtifactContentSerializer.Meta.fields + ("artifact", "relative_path")
-class MultipleArtifactContentSerializer(BaseContentSerializer):
+class MultipleArtifactContentSerializer(NoArtifactContentSerializer):
artifacts = fields.ContentArtifactsField(
help_text=_(
"A dict mapping relative paths inside the Content to the corresponding"
@@ -98,25 +134,12 @@ class MultipleArtifactContentSerializer(BaseContentSerializer):
),
)
- @transaction.atomic
- def create(self, validated_data):
- """
- Create the content and associate it with all its Artifacts.
-
- Args:
- validated_data (dict): Data to save to the database
- """
- artifacts = validated_data.pop("artifacts")
- content = self.Meta.model.objects.create(**validated_data)
- for relative_path, artifact in artifacts.items():
- models.ContentArtifact.objects.create(
- artifact=artifact, content=content, relative_path=relative_path
- )
- return content
+ def get_artifacts(self, validated_data):
+ return validated_data.pop("artifacts")
class Meta:
model = models.Content
- fields = BaseContentSerializer.Meta.fields + ("artifacts",)
+ fields = NoArtifactContentSerializer.Meta.fields + ("artifacts",)
class ContentChecksumSerializer(serializers.Serializer):
@@ -290,7 +313,7 @@ class SigningServiceSerializer(base.ModelSerializer):
class Meta:
model = models.SigningService
- fields = BaseContentSerializer.Meta.fields + (
+ fields = base.ModelSerializer.Meta.fields + (
"name",
"public_key",
"pubkey_fingerprint",
diff --git a/pulpcore/plugin/serializers/content.py b/pulpcore/plugin/serializers/content.py
--- a/pulpcore/plugin/serializers/content.py
+++ b/pulpcore/plugin/serializers/content.py
@@ -10,9 +10,8 @@
ValidationError,
)
from pulpcore.app.files import PulpTemporaryUploadedFile
-from pulpcore.app.models import Artifact, Repository, Upload, UploadChunk
+from pulpcore.app.models import Artifact, Upload, UploadChunk
from pulpcore.app.serializers import (
- DetailRelatedField,
RelatedField,
ArtifactSerializer,
NoArtifactContentSerializer,
@@ -31,35 +30,9 @@ class UploadSerializerFieldsMixin(Serializer):
required=False,
write_only=True,
)
- repository = DetailRelatedField(
- help_text=_("A URI of a repository the new content unit should be associated with."),
- required=False,
- write_only=True,
- view_name_pattern=r"repositories(-.*/.*)-detail",
- queryset=Repository.objects.all(),
- )
-
- def create(self, validated_data):
- """
- Save a GenericContent unit.
-
- This must be used inside a task that locks on the Artifact and if given, the repository.
- """
-
- repository = validated_data.pop("repository", None)
- content = super().create(validated_data)
-
- if repository:
- repository.cast()
- content_to_add = self.Meta.model.objects.filter(pk=content.pk)
-
- # create new repo version with uploaded package
- with repository.new_version() as new_version:
- new_version.add_content(content_to_add)
- return content
class Meta:
- fields = ("file", "repository")
+ fields = ("file",)
class NoArtifactContentUploadSerializer(UploadSerializerFieldsMixin, NoArtifactContentSerializer):
diff --git a/pulpcore/plugin/viewsets/content.py b/pulpcore/plugin/viewsets/content.py
--- a/pulpcore/plugin/viewsets/content.py
+++ b/pulpcore/plugin/viewsets/content.py
@@ -43,20 +43,19 @@ def create(self, request):
serializer.is_valid(raise_exception=True)
task_payload = {k: v for k, v in request.data.items()}
- file_content = task_payload.pop("file", None)
+ file_content = task_payload.pop("file", None)
temp_file = PulpTemporaryFile.init_and_validate(file_content)
temp_file.save()
- resources = []
- repository = serializer.validated_data.get("repository")
- if repository:
- resources.append(repository)
+ exclusive_resources = [
+ item for item in (serializer.validated_data.get(key) for key in ("repository",)) if item
+ ]
app_label = self.queryset.model._meta.app_label
task = dispatch(
tasks.base.general_create_from_temp_file,
- exclusive_resources=resources,
+ exclusive_resources=exclusive_resources,
args=(app_label, serializer.__class__.__name__, str(temp_file.pk)),
kwargs={"data": task_payload, "context": self.get_deferred_context(request)},
)
@@ -87,8 +86,8 @@ def create(self, request):
app_label = self.queryset.model._meta.app_label
task = dispatch(
tasks.base.general_create,
- args=(app_label, serializer.__class__.__name__),
exclusive_resources=exclusive_resources,
+ args=(app_label, serializer.__class__.__name__),
kwargs={
"data": task_payload,
"context": self.get_deferred_context(request),
| Add the retrieve logic to MultipleArtifactSerializer
| 2023-06-26T17:19:17 |
||
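The refactored create() above follows a common get-or-create-under-concurrency idiom: try to retrieve, create inside a transaction, and fall back to retrieval if a concurrent writer wins the race. A generic sketch of that idiom (hypothetical helper, not the serializer itself):

```python
from django.db import IntegrityError, transaction

def create_or_retrieve(model, **fields):
    obj = model.objects.filter(**fields).first()
    if obj is not None:
        return obj
    try:
        with transaction.atomic():
            return model.objects.create(**fields)
    except IntegrityError:
        # Someone else created it concurrently; fetch the existing row.
        return model.objects.get(**fields)
```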
pulp/pulpcore | 3,974 | pulp__pulpcore-3974 | [
"3407"
] | 2358a4fbbc83729ada0b86d365475b87a2406080 | diff --git a/pulpcore/tasking/pulpcore_worker.py b/pulpcore/tasking/pulpcore_worker.py
--- a/pulpcore/tasking/pulpcore_worker.py
+++ b/pulpcore/tasking/pulpcore_worker.py
@@ -53,7 +53,11 @@
_logger = logging.getLogger(__name__)
random.seed()
+# Number of heartbeats for a task to finish on graceful worker shutdown (approx)
TASK_GRACE_INTERVAL = 3
+# Number of heartbeats between attempts to kill the subprocess (approx)
+TASK_KILL_INTERVAL = 1
+# Number of heartbeats between cleaning up worker processes (approx)
WORKER_CLEANUP_INTERVAL = 100
# Randomly chosen
TASK_SCHEDULING_LOCK = 42
@@ -147,6 +151,7 @@ def _signal_handler(self, thesignal, frame):
_logger.info(_("Worker %s was requested to shut down."), self.name)
+ self.task_grace_timeout = TASK_GRACE_INTERVAL
self.shutdown_requested = True
def shutdown(self):
@@ -163,7 +168,7 @@ def worker_cleanup(self):
def beat(self):
if self.worker.last_heartbeat < timezone.now() - timedelta(seconds=self.heartbeat_period):
self.worker = handle_worker_heartbeat(self.name)
- if self.shutdown_requested:
+ if self.task_grace_timeout > 0:
self.task_grace_timeout -= 1
self.worker_cleanup_countdown -= 1
if self.worker_cleanup_countdown <= 0:
@@ -308,7 +313,6 @@ def supervise_task(self, task):
This function must only be called while holding the lock for that task."""
- self.task_grace_timeout = TASK_GRACE_INTERVAL
task.worker = self.worker
task.save(update_fields=["worker"])
cancel_state = None
@@ -322,11 +326,15 @@ def supervise_task(self, task):
item = connection.connection.notifies.pop(0)
if item.channel == "pulp_worker_cancel" and item.payload == str(task.pk):
_logger.info(_("Received signal to cancel current task %s."), task.pk)
- os.kill(task_process.pid, signal.SIGUSR1)
cancel_state = TASK_STATES.CANCELED
# ignore all other notifications
if cancel_state:
- break
+ if self.task_grace_timeout > 0:
+ _logger.info("Wait for canceled task to abort.")
+ else:
+ self.task_grace_timeout = TASK_KILL_INTERVAL
+ _logger.info("Aborting current task %s due to cancelation.", task.pk)
+ os.kill(task_process.pid, signal.SIGUSR1)
r, w, x = select.select(
[self.sentinel, connection.connection, task_process.sentinel],
@@ -349,10 +357,8 @@ def supervise_task(self, task):
)
else:
_logger.info("Aborting current task %s due to worker shutdown.", task.pk)
- os.kill(task_process.pid, signal.SIGUSR1)
cancel_state = TASK_STATES.FAILED
cancel_reason = "Aborted during worker shutdown."
- break
task_process.join()
if not cancel_state and task_process.exitcode != 0:
_logger.warning(
| diff --git a/pulpcore/app/tasks/test.py b/pulpcore/app/tasks/test.py
--- a/pulpcore/app/tasks/test.py
+++ b/pulpcore/app/tasks/test.py
@@ -1,3 +1,14 @@
+import backoff
+
+
def dummy_task():
"""Dummy task, that can be used in tests."""
pass
+
+
[email protected]_exception(backoff.expo, BaseException)
+def gooey_task(interval):
+ """A sleep task that tries to avoid being killed by ignoring all exceptions."""
+ from time import sleep
+
+ sleep(interval)
| Task cancellation gets stuck
**Version**
Pulpcore 3.16 via Satellite 6.11
**Describe the bug**
If "Reclaim Space" (Actions::Pulp3::CapsuleContent::ReclaimSpace) action is executed and canceled after that task will get stuck in canceling not only in foreman tasks but also in pulp3.
The task was apparently stuck in "canceling" state for more than 6 days.
**To Reproduce**
How reproducible:
Always
Steps to Reproduce:
1. Execute "Reclaim Space" on Satellite
2. Cancel task (Actions::Pulp3::CapsuleContent::ReclaimSpace)
**Expected results:**
Task will get canceled
**Actual results:**
Task is stuck in foreman-tasks and in pulp3
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2143290
| This was independently reproduced on a lab system
Removing triage tag because it was reproduced, adding prio tag because it can block up the tasking system.
As far as I heard, rebooting _all_ workers unblocks the tasking system. I wonder if rebooting just the worker stuck on that task would be sufficient too.
Can we add logging from the worker when the cancel happens?
I haven't been able to recreate this in a just-pulp env yet. What would be *really* useful is the journalctl output from the 60 seconds around the time the cancel was *issued*. Looking at the sys.exit() call that ends a worker that's been canceled, I can imagine a series of Unfortunate Events that would result in the worker process not actually exiting - but I'd think they'd all leave traces of unusual exceptions in the logs.
Investigation continues. | 2023-06-28T19:50:28 |
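The supervision changes above boil down to an escalation policy: give a canceled task a short grace period, then signal the subprocess, and keep escalating if it will not exit (the new gooey_task in the test patch exists precisely to simulate a task that swallows signals). A toy sketch of such a policy — not the real supervision loop, and the intervals are made up:

```python
import os
import signal
import time

GRACE_HEARTBEATS = 3   # let the task wind down on its own first
KILL_HEARTBEATS = 1    # how long to wait after asking it to abort
HEARTBEAT = 1.0        # seconds; arbitrary for the sketch

def escalate_cancel(pid):
    time.sleep(GRACE_HEARTBEATS * HEARTBEAT)
    os.kill(pid, signal.SIGUSR1)            # ask the task subprocess to abort
    time.sleep(KILL_HEARTBEATS * HEARTBEAT)
    try:
        os.kill(pid, signal.SIGKILL)        # this sketch's last resort
    except ProcessLookupError:
        pass                                # it already exited
```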
pulp/pulpcore | 3,975 | pulp__pulpcore-3975 | [
"3407"
] | 93b8efdbf1e7acf79e1147ab947475ac15a502be | diff --git a/pulpcore/tasking/pulpcore_worker.py b/pulpcore/tasking/pulpcore_worker.py
--- a/pulpcore/tasking/pulpcore_worker.py
+++ b/pulpcore/tasking/pulpcore_worker.py
@@ -53,7 +53,11 @@
_logger = logging.getLogger(__name__)
random.seed()
+# Number of heartbeats for a task to finish on graceful worker shutdown (approx)
TASK_GRACE_INTERVAL = 3
+# Number of heartbeats between attempts to kill the subprocess (approx)
+TASK_KILL_INTERVAL = 1
+# Number of heartbeats between cleaning up worker processes (approx)
WORKER_CLEANUP_INTERVAL = 100
# Randomly chosen
TASK_SCHEDULING_LOCK = 42
@@ -147,6 +151,7 @@ def _signal_handler(self, thesignal, frame):
_logger.info(_("Worker %s was requested to shut down."), self.name)
+ self.task_grace_timeout = TASK_GRACE_INTERVAL
self.shutdown_requested = True
def shutdown(self):
@@ -163,7 +168,7 @@ def worker_cleanup(self):
def beat(self):
if self.worker.last_heartbeat < timezone.now() - timedelta(seconds=self.heartbeat_period):
self.worker = handle_worker_heartbeat(self.name)
- if self.shutdown_requested:
+ if self.task_grace_timeout > 0:
self.task_grace_timeout -= 1
self.worker_cleanup_countdown -= 1
if self.worker_cleanup_countdown <= 0:
@@ -300,7 +305,6 @@ def supervise_task(self, task):
This function must only be called while holding the lock for that task."""
- self.task_grace_timeout = TASK_GRACE_INTERVAL
task.worker = self.worker
task.save(update_fields=["worker"])
cancel_state = None
@@ -314,11 +318,15 @@ def supervise_task(self, task):
item = connection.connection.notifies.pop(0)
if item.channel == "pulp_worker_cancel" and item.payload == str(task.pk):
_logger.info(_("Received signal to cancel current task %s."), task.pk)
- os.kill(task_process.pid, signal.SIGUSR1)
cancel_state = TASK_STATES.CANCELED
# ignore all other notifications
if cancel_state:
- break
+ if self.task_grace_timeout > 0:
+ _logger.info("Wait for canceled task to abort.")
+ else:
+ self.task_grace_timeout = TASK_KILL_INTERVAL
+ _logger.info("Aborting current task %s due to cancelation.", task.pk)
+ os.kill(task_process.pid, signal.SIGUSR1)
r, w, x = select.select(
[self.sentinel, connection.connection, task_process.sentinel],
@@ -341,10 +349,8 @@ def supervise_task(self, task):
)
else:
_logger.info("Aborting current task %s due to worker shutdown.", task.pk)
- os.kill(task_process.pid, signal.SIGUSR1)
cancel_state = TASK_STATES.FAILED
cancel_reason = "Aborted during worker shutdown."
- break
task_process.join()
if not cancel_state and task_process.exitcode != 0:
_logger.warning(
| diff --git a/pulpcore/app/tasks/test.py b/pulpcore/app/tasks/test.py
--- a/pulpcore/app/tasks/test.py
+++ b/pulpcore/app/tasks/test.py
@@ -1,3 +1,14 @@
+import backoff
+
+
def dummy_task():
"""Dummy task, that can be used in tests."""
pass
+
+
[email protected]_exception(backoff.expo, BaseException)
+def gooey_task(interval):
+ """A sleep task that tries to avoid being killed by ignoring all exceptions."""
+ from time import sleep
+
+ sleep(interval)
diff --git a/pulpcore/tests/functional/api/test_tasking.py b/pulpcore/tests/functional/api/test_tasking.py
--- a/pulpcore/tests/functional/api/test_tasking.py
+++ b/pulpcore/tests/functional/api/test_tasking.py
@@ -105,7 +105,7 @@ def test_delete_cancel_running_task(dispatch_task, tasks_api_client):
for i in range(10):
task = tasks_api_client.read(task_href)
- if task.state != "running":
+ if task.state == "running":
break
time.sleep(1)
@@ -275,3 +275,23 @@ def test_filter_tasks_using_worker__in_filter(tasks_api_client, dispatch_task, m
assert task1_href in tasks_hrefs
assert task2_href in tasks_hrefs
+
+
+def test_cancel_gooey_task(tasks_api_client, dispatch_task, monitor_task):
+ task_href = dispatch_task("pulpcore.app.tasks.test.gooey_task", (60,))
+ for i in range(10):
+ task = tasks_api_client.read(task_href)
+ if task.state == "running":
+ break
+ time.sleep(1)
+
+ task = tasks_api_client.tasks_cancel(task_href, {"state": "canceled"})
+
+ if task.state == "canceling":
+ for i in range(30):
+ if task.state != "canceling":
+ break
+ time.sleep(1)
+ task = tasks_api_client.read(task_href)
+
+ assert task.state == "canceled"
diff --git a/pulpcore/tests/functional/utils.py b/pulpcore/tests/functional/utils.py
--- a/pulpcore/tests/functional/utils.py
+++ b/pulpcore/tests/functional/utils.py
@@ -21,7 +21,7 @@ class PulpTaskError(Exception):
def __init__(self, task):
"""Provide task info to exception."""
- description = task.to_dict()["error"]["description"]
+ description = task.to_dict()["error"].get("description")
super().__init__(self, f"Pulp task failed ({description})")
self.task = task
| Task cancellation gets stuck
**Version**
Pulpcore 3.16 via Satellite 6.11
**Describe the bug**
If "Reclaim Space" (Actions::Pulp3::CapsuleContent::ReclaimSpace) action is executed and canceled after that task will get stuck in canceling not only in foreman tasks but also in pulp3.
The task was apparently stuck in "canceling" state for more than 6 days.
**To Reproduce**
How reproducible:
Always
Steps to Reproduce:
1. Execute "Reclaim Space" on Satellite
2. Cancel task (Actions::Pulp3::CapsuleContent::ReclaimSpace)
**Expected results:**
Task will get canceled
**Actual results:**
Task is stuck in foreman-tasks and in pulp3
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2143290
| This was independently reproduced on a lab system
Removing triage tag because it was reproduced, adding prio tag because it can block up the tasking system.
As far as I heard, rebooting _all_ workers unblocks the tasking system. I wonder if rebooting just the worker stuck on that task would be sufficient too.
Can we add logging from the worker when the cancel happens?
I haven't been able to recreate this in a just-pulp env yet. What would be *really* useful is the journalctl output from the 60 seconds around the time the cancel was *issued*. Looking at the sys.exit() call that ends a worker that's been canceled, I can imagine a series of Unfortunate Events that would result in the worker process not actually exiting - but I'd think they'd all leave traces of unusual exceptions in the logs.
Investigation continues. | 2023-06-28T20:30:05 |
pulp/pulpcore | 3,976 | pulp__pulpcore-3976 | [
"3941"
] | ee78e0c3c54174b241907a002f697111f1855281 | diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -143,12 +143,17 @@ def _export_publication_to_file_system(
for ca in content_artifacts.select_related("artifact").iterator()
}
+ publication_metadata_paths = set(
+ publication.published_metadata.values_list("relative_path", flat=True)
+ )
for pa in publication.published_artifact.select_related(
"content_artifact", "content_artifact__artifact"
).iterator():
# Artifact isn't guaranteed to be present
if pa.content_artifact.artifact and (
- start_repo_version is None or pa.content_artifact.pk in difference_content_artifacts
+ start_repo_version is None
+ or pa.relative_path in publication_metadata_paths
+ or pa.content_artifact.pk in difference_content_artifacts
):
relative_path_to_artifacts[pa.relative_path] = pa.content_artifact.artifact
| fs_exporter does not include productid certificate on incremental exports
**Version**
3.18.16
**Describe the bug**
Using fs_exporter to export incrementally a repository that has a productid certificate will not copy the productid certificate.
This happens because we calculate the differences between the repository version being exported and start_version. As the productid is always the same, this artifact is not listed as new in the list of differences and is then not included in the export.
We should ensure all artifacts that belong to metadata are exported.
**To Reproduce**
1. Have a repository that contains a productid certificate
2. Generate a complete export of such repository
3. Export an incremental version of this repository using fs_exporter
4. Check that the productid certificate of the repository is missing from the export
**Expected behavior**
All metadata files to be exported, regardless if the export is complete or incremental
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2212523
| Copying over my comment from the PR:
----
I would consider this either a Katello or a documentation bug then. This is the way this feature was described when it was requested / added:
https://github.com/pulp/pulpcore/issues/3413
> Idea is
>
> * User has already exported Rhel - 7 (using the fs exporter) - full export
> * User consumes content from the export
> * Several days later the user now wants the new content for RHEL 7. Instead of exporting 50GB worth of content again the user's export should only consist of rpms that got added after the start version and latest metadata
> * User should be able to **copy the contents of this export over the regular rhel export directory** and use that to sync repodata.
From Pulp's perspective I don't think there is a bug here.
So there were some misunderstandings around the original design. The intention was for the incremental filesystem export to also be syncable on its own, which means it needs to have all the metadata, even if it didn't change, whereas the current incremental implementation only includes artifacts that changed and expects the user to copy it on top of an existing filesystem export.
If copying isn't an option, then we could change the implementation to export metadata as well. Copying Grant's suggestion:
>Maybe somewhere around here https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/export.py#L146-L155 we could look for all PublishedMetadata associated with the passed-in publication, and force-add them to relative_path_to_artifacts - I think that will address the need, without exposing pulpcore to "knowing things about plugins Core Was Not Meant to Know" | 2023-06-28T21:47:06 |
|
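Restating the check the patch above adds as a standalone predicate (names shortened; this is just the logic, not the task code): collect the publication's metadata paths once, then let membership in that set bypass the incremental filter.

```python
def export_filter(publication, changed_ca_pks, start_version):
    # Metadata (e.g. a productid cert) may be byte-identical across versions,
    # so it must be force-included rather than detected as "changed".
    metadata_paths = set(
        publication.published_metadata.values_list("relative_path", flat=True)
    )

    def should_export(published_artifact):
        return (
            start_version is None
            or published_artifact.relative_path in metadata_paths
            or published_artifact.content_artifact_id in changed_ca_pks
        )

    return should_export
```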
pulp/pulpcore | 3,996 | pulp__pulpcore-3996 | [
"3995"
] | 70bab01b2dd382fffafb550e96a93281c4f7f08f | diff --git a/pulpcore/app/viewsets/replica.py b/pulpcore/app/viewsets/replica.py
--- a/pulpcore/app/viewsets/replica.py
+++ b/pulpcore/app/viewsets/replica.py
@@ -33,6 +33,7 @@ class UpstreamPulpViewSet(
summary="Replicate",
description="Trigger an asynchronous repository replication task group. This API is "
"provided as a tech preview.",
+ request=None,
responses={202: AsyncOperationResponseSerializer},
)
@action(detail=True, methods=["post"])
| RESTAPI document fix for Upstream Pulp Replication API
**Version**
Pulp installed through the Python modules.
"core:3.28.0"
"certguard:3.28.0"
"file:3.28.0"
"python:3.28.0"
"rpm:3.28.0"
**Describe the bug**
Why are the attributes of **upstream_pulps_create**/**update** mentioned again in the **upstream_pulps_replicate** document? Are those attributes (base_url, api_root, domain, ...) used when making an API request to "https://PULP-SERVER/pulp/api/v3/upstream_pulps/{object_id}/replicate/"?
**To Reproduce**
None.
**Expected behavior**
A fix is required in the REST API document.
**Additional context**
Create Upstream Pulp API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_create
Upstream Replication API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_replicate
| 2023-07-06T09:52:09 |
||
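The one-line patch above relies on drf-spectacular's extend_schema: passing request=None marks the action as taking no request body, so the serializer fields stop being advertised on the /replicate/ endpoint. A minimal sketch on a hypothetical viewset, not the Pulp one:

```python
from drf_spectacular.utils import extend_schema
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.viewsets import ViewSet

class ThingViewSet(ViewSet):
    @extend_schema(
        summary="Kick off work",
        request=None,                 # no request body documented for this action
        responses={202: None},
    )
    @action(detail=True, methods=["post"])
    def replicate(self, request, pk=None):
        return Response(status=202)
```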
pulp/pulpcore | 4,010 | pulp__pulpcore-4010 | [
"3995"
] | 24ceed09f5f0cbe9c006f147d5cf284f325f0714 | diff --git a/pulpcore/app/viewsets/replica.py b/pulpcore/app/viewsets/replica.py
--- a/pulpcore/app/viewsets/replica.py
+++ b/pulpcore/app/viewsets/replica.py
@@ -33,6 +33,7 @@ class UpstreamPulpViewSet(
summary="Replicate",
description="Trigger an asynchronous repository replication task group. This API is "
"provided as a tech preview.",
+ request=None,
responses={202: AsyncOperationResponseSerializer},
)
@action(detail=True, methods=["post"])
| RESTAPI document fix for Upstream Pulp Replication API
**Version**
Pulp installed through the Python modules.
"core:3.28.0"
"certguard:3.28.0"
"file:3.28.0"
"python:3.28.0"
"rpm:3.28.0"
**Describe the bug**
Why are the attributes of **upstream_pulps_create**/**update** mentioned again in the **upstream_pulps_replicate** document? Are those attributes (base_url, api_root, domain, ...) used when making an API request to "https://PULP-SERVER/pulp/api/v3/upstream_pulps/{object_id}/replicate/"?
**To Reproduce**
None.
**Expected behavior**
A fix is required in the REST API document.
**Additional context**
Create Upstream Pulp API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_create
Upstream Replication API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_replicate
| 2023-07-11T13:31:57 |
||
pulp/pulpcore | 4,011 | pulp__pulpcore-4011 | [
"3995"
] | b98acc03113698a35b3014837af6eec778a2511f | diff --git a/pulpcore/app/viewsets/replica.py b/pulpcore/app/viewsets/replica.py
--- a/pulpcore/app/viewsets/replica.py
+++ b/pulpcore/app/viewsets/replica.py
@@ -33,6 +33,7 @@ class UpstreamPulpViewSet(
summary="Replicate",
description="Trigger an asynchronous repository replication task group. This API is "
"provided as a tech preview.",
+ request=None,
responses={202: AsyncOperationResponseSerializer},
)
@action(detail=True, methods=["post"])
| RESTAPI document fix for Upstream Pulp Replication API
**Version**
Pulp installed through the Python modules.
"core:3.28.0"
"certguard:3.28.0"
"file:3.28.0"
"python:3.28.0"
"rpm:3.28.0"
**Describe the bug**
Why are the attributes of **upstream_pulps_create**/**update** mentioned again in the **upstream_pulps_replicate** document? Are those attributes (base_url, api_root, domain, ...) used when making an API request to "https://PULP-SERVER/pulp/api/v3/upstream_pulps/{object_id}/replicate/"?
**To Reproduce**
None.
**Expected behavior**
A fix is required in the REST API document.
**Additional context**
Create Upstream Pulp API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_create
Upstream Replication API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_replicate
| 2023-07-11T13:32:14 |
||
pulp/pulpcore | 4,016 | pulp__pulpcore-4016 | [
"3941"
] | ea6e0244ed2f94d10e2a32a28603371ce8baec15 | diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -143,12 +143,17 @@ def _export_publication_to_file_system(
for ca in content_artifacts.select_related("artifact").iterator()
}
+ publication_metadata_paths = set(
+ publication.published_metadata.values_list("relative_path", flat=True)
+ )
for pa in publication.published_artifact.select_related(
"content_artifact", "content_artifact__artifact"
).iterator():
# Artifact isn't guaranteed to be present
if pa.content_artifact.artifact and (
- start_repo_version is None or pa.content_artifact.pk in difference_content_artifacts
+ start_repo_version is None
+ or pa.relative_path in publication_metadata_paths
+ or pa.content_artifact.pk in difference_content_artifacts
):
relative_path_to_artifacts[pa.relative_path] = pa.content_artifact.artifact
| fs_exporter does not include productid certificate on incremental exports
**Version**
3.18.16
**Describe the bug**
Using fs_exporter to export incrementally a repository that has a productid certificate will not copy the productid certificate.
This happens because we calculate the differences between the repository version being exported and start_version. As the productid is always the same, this artifact is not listed as new in the list of differences and is then not included in the export.
We should ensure all artifacts that belong to metadata are exported.
**To Reproduce**
1. Have a repository that contains a productid certificate
2. Generate a complete export of such repository
3. Export an incremental version of this repository using fs_exporter
4. Check that the productid certificate of the repository is missing from the export
**Expected behavior**
All metadata files to be exported, regardless if the export is complete or incremental
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2212523
| Copying over my comment from the PR:
----
I would consider this either a Katello or a documentation bug then. This is the way this feature was described when it was requested / added:
https://github.com/pulp/pulpcore/issues/3413
> Idea is
>
> * User has already exported Rhel - 7 (using the fs exporter) - full export
> * User consumes content from the export
> * Several days later the user now wants the new content for RHEL 7. Instead of exporting 50GB worth of content again the user's export should only consist of rpms that got added after the start version and latest metadata
> * User should be able to **copy the contents of this export over the regular rhel export directory** and use that to sync repodata.
From Pulp's perspective I don't think there is a bug here.
So there were some misunderstandings around the original design. The intention was for the incremental filesystem export to also be syncable on its own, which means it needs to have all the metadata, even if it didn't change, whereas the current incremental implementation only includes artifacts that changed and expects the user to copy it on top of an existing filesystem export.
If copying isn't an option, then we could change the implementation to export metadata as well. Copying Grant's suggestion:
>Maybe somewhere around here https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/export.py#L146-L155 we could look for all PublishedMetadata associated with the passed-in publication, and force-add them to relative_path_to_artifacts - I think that will address the need, without exposing pulpcore to "knowing things about plugins Core Was Not Meant to Know" | 2023-07-11T19:23:56 |
|
pulp/pulpcore | 4,017 | pulp__pulpcore-4017 | [
"3941"
] | c3f434ee80c8195a753883345424b7f1fe86209c | diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -143,12 +143,17 @@ def _export_publication_to_file_system(
for ca in content_artifacts.select_related("artifact").iterator()
}
+ publication_metadata_paths = set(
+ publication.published_metadata.values_list("relative_path", flat=True)
+ )
for pa in publication.published_artifact.select_related(
"content_artifact", "content_artifact__artifact"
).iterator():
# Artifact isn't guaranteed to be present
if pa.content_artifact.artifact and (
- start_repo_version is None or pa.content_artifact.pk in difference_content_artifacts
+ start_repo_version is None
+ or pa.relative_path in publication_metadata_paths
+ or pa.content_artifact.pk in difference_content_artifacts
):
relative_path_to_artifacts[pa.relative_path] = pa.content_artifact.artifact
| fs_exporter does not include productid certificate on incremental exports
**Version**
3.18.16
**Describe the bug**
Using fs_exporter to export incrementally a repository that has a productid certificate will not copy the productid certificate.
This happens because we calculate the differences between the repository version being exported and start_version. As the productid is always the same, this artifact is not listed as new in the list of differences and is then not included in the export.
We should ensure all artifacts that belong to metadata are exported.
**To Reproduce**
1. Have a repository that contains a productid certificate
2. Generate a complete export of such repository
3. Export an incremental version of this repository using fs_exporter
4. Check that the productid certificate of the repository is missing from the export
**Expected behavior**
All metadata files should be exported, regardless of whether the export is complete or incremental.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2212523
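For reference, a rough reproduction sketch of an incremental filesystem export via the REST API follows. It is a minimal sketch, assuming the `/exporters/core/filesystem/` endpoint and the `path`, `publication` and `start_repository_version` fields (names recalled from the pulpcore filesystem-exporter API, so verify against your version's API docs); the hrefs, hostname and credentials are placeholders.
```python
# Rough reproduction sketch (assumed endpoint paths/fields; placeholder hrefs and credentials).
import requests

BASE = "https://pulp.example.com"
API = f"{BASE}/pulp/api/v3"
AUTH = ("admin", "password")

# 1. Create a filesystem exporter that writes to a directory on the Pulp server.
exporter = requests.post(
    f"{API}/exporters/core/filesystem/",
    json={"name": "incremental-demo", "path": "/tmp/fs-export"},
    auth=AUTH,
).json()

# 2. Trigger an incremental export: the new publication plus the version the consumer already has.
task = requests.post(
    f"{BASE}{exporter['pulp_href']}exports/",
    json={
        "publication": "<publication_href>",
        "start_repository_version": "<older_repository_version_href>",
    },
    auth=AUTH,
).json()
print(task)  # the response carries a task href to monitor

# 3. Once the task finishes, inspect /tmp/fs-export on the server: without the patch above,
#    unchanged metadata artifacts (e.g. the productid certificate) are missing from the output.
```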
| Copying over my comment from the PR:
----
I would consider this either a Katello or a documentation bug then. This is the way this feature was described when it was requested / added:
https://github.com/pulp/pulpcore/issues/3413
> Idea is
>
> * User has already exported Rhel - 7 (using the fs exporter) - full export
> * User consumes content from the export
> * Several days later the user now wants the new content for RHEL 7. Instead of exporting 50GB worth of content again the user's export should only consist of rpms that got added after the start version and latest metadata
> * User should be able to **copy the contents of this export over the regular rhel export directory** and use that to sync repodata.
From Pulp's perspective I don't think there is a bug here.
So there was some misunderstanding around the original design. The intention was for the incremental filesystem export to also be syncable on its own, which means it needs to have all the metadata, even if it didn't change, whereas the current incremental implementation only includes artifacts that changed and expects the user to copy it on top of an existing filesystem export.
If copying isn't an option, then we could change the implementation to export metadata as well. Copying Grant's suggestion:
>Maybe somewhere around here https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/export.py#L146-L155 we could look for for all PublishedMetadata associated with the passed-in publication, and force-add them to relative_path_to_artifacts - I think that will address the need, without exposing pulpcore to "knowing things about plugins Core Was Not Meant to Know" | 2023-07-11T19:24:09 |
|
pulp/pulpcore | 4,029 | pulp__pulpcore-4029 | [
"3941"
] | 66fd9d107bb75f0687bdc81e0179f39935b5abea | diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -143,12 +143,17 @@ def _export_publication_to_file_system(
for ca in content_artifacts.select_related("artifact").iterator()
}
+ publication_metadata_paths = set(
+ publication.published_metadata.values_list("relative_path", flat=True)
+ )
for pa in publication.published_artifact.select_related(
"content_artifact", "content_artifact__artifact"
).iterator():
# Artifact isn't guaranteed to be present
if pa.content_artifact.artifact and (
- start_repo_version is None or pa.content_artifact.pk in difference_content_artifacts
+ start_repo_version is None
+ or pa.relative_path in publication_metadata_paths
+ or pa.content_artifact.pk in difference_content_artifacts
):
relative_path_to_artifacts[pa.relative_path] = pa.content_artifact.artifact
| fs_exporter does not include productid certificate on incremental exports
**Version**
3.18.16
**Describe the bug**
Using fs_exporter to incrementally export a repository that has a productid certificate will not copy the productid certificate.
This happens because we calculate the differences between the repository version being exported and start_version. As the productid is always the same, this artifact is not listed as new in the list of differences and is therefore not included in the export.
We should ensure all artifacts that belong to metadata are exported.
**To Reproduce**
1. Have a repository that contains a productid certificate
2. Generate a complete export of such repository
3. Export an incremental version of this repository using fs_exporter
4. Check that the productid certificate of the repository is missing from the export
**Expected behavior**
All metadata files should be exported, regardless of whether the export is complete or incremental.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2212523
| Copying over my comment from the PR:
----
I would consider this either a Katello or a documentation bug then. This is the way this feature was described when it was requested / added:
https://github.com/pulp/pulpcore/issues/3413
> Idea is
>
> * User has already exported Rhel - 7 (using the fs exporter) - full export
> * User consumes content from the export
> * Several days later the user now wants the new content for RHEL 7. Instead of exporting 50GB worth of content again the user's export should only consist of rpms that got added after the start version and latest metadata
> * User should be able to **copy the contents of this export over the regular rhel export directory** and use that to sync repodata.
From Pulp's perspective I don't think there is a bug here.
So there was some misunderstanding around the original design. The intention was for the incremental filesystem export to also be syncable on its own, which means it needs to have all the metadata, even if it didn't change, whereas the current incremental implementation only includes artifacts that changed and expects the user to copy it on top of an existing filesystem export.
If copying isn't an option, then we could change the implementation to export metadata as well. Copying Grant's suggestion:
>Maybe somewhere around here https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/export.py#L146-L155 we could look for for all PublishedMetadata associated with the passed-in publication, and force-add them to relative_path_to_artifacts - I think that will address the need, without exposing pulpcore to "knowing things about plugins Core Was Not Meant to Know" | 2023-07-12T17:24:30 |
|
pulp/pulpcore | 4,031 | pulp__pulpcore-4031 | [
"4019"
] | 3cb7ef4369fd612d17e2be7863845f6cc74c141f | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -1012,6 +1012,13 @@ async def handle_response_headers(headers):
async def handle_data(data):
nonlocal data_size_handled
+ # If we got here, and the response hasn't had "prepare()" called on it, it's due to
+ # some code-path (i.e., FileDownloader) that doesn't know/care about
+ # headers_ready_callback failing to invoke it.
+ # We're not going to do anything more with headers at this point, so it's safe to
+ # "backstop" the prepare() call here, so the write() will be allowed.
+ if not response.prepared:
+ await response.prepare(request)
if range_start or range_stop:
start_byte_pos = 0
end_byte_pos = len(data)
| diff --git a/pulpcore/tests/functional/api/using_plugin/test_content_access.py b/pulpcore/tests/functional/api/using_plugin/test_content_access.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/tests/functional/api/using_plugin/test_content_access.py
@@ -0,0 +1,40 @@
+"""Tests related to content delivery."""
+import pytest
+import uuid
+
+from pulpcore.client.pulp_file import (
+ RepositorySyncURL,
+)
+
+from pulp_file.tests.functional.utils import (
+ download_file,
+)
+
+
[email protected]
+def test_file_remote_on_demand(
+ basic_manifest_path,
+ file_distribution_factory,
+ file_fixtures_root,
+ file_repo_with_auto_publish,
+ file_remote_api_client,
+ file_repository_api_client,
+ gen_object_with_cleanup,
+ monitor_task,
+):
+ # Start with the path to the basic file-fixture, build a file: remote pointing into it
+ file_path = str(file_fixtures_root) + basic_manifest_path
+ kwargs = {
+ "url": f"file://{file_path}",
+ "policy": "on_demand",
+ "name": str(uuid.uuid4()),
+ }
+ remote = gen_object_with_cleanup(file_remote_api_client, kwargs)
+ # Sync from the remote
+ body = RepositorySyncURL(remote=remote.pulp_href)
+ monitor_task(file_repository_api_client.sync(file_repo_with_auto_publish.pulp_href, body).task)
+ repo = file_repository_api_client.read(file_repo_with_auto_publish.pulp_href)
+ # Create a distribution from the publication
+ distribution = file_distribution_factory(repository=repo.pulp_href)
+ # attempt to download_file() a file
+ download_file(f"{distribution.base_url}/1.iso")
| --url file:///foo/bar/PULP_MANIFEST with policy of on_demand or streamed raises unhandled RuntimeError
**Version**
Using the all-in-one pulp container with pulpcore 3.28.1 and pulp-file 1.14.3
**Describe the bug**
With a remote whose --url is file:///foo/bar/ and a policy of either on_demand or streamed, attempting to retrieve a file via wget returns a 500 Internal Server Error, and the logs show an unhandled RuntimeError.
**To Reproduce**
```
pulp file remote create --name foobar --url file:///foo/bar/PULP_MANIFEST --policy on_demand
...
wget http://myserver/pulp/content/foobar/somefile
...
HTTP request sent, awaiting response... 500 Internal Server Error
2023-07-08 01:19:34 ERROR 500: Internal Server Error.
```
**Expected behavior**
The file is successfully downloaded
**Additional context**
I mentioned this issue in IRC and it was confirmed as reproducible
| Reproduced using pulp_rpm. The problem is definitely in pulpcore. I'm moving this issue to the correct component.
RPM Reproducer:
```
# get a small RPM repo to a local-dir on the Pulp instance
$ cd /tmp
$ wget --recursive --no-parent https://fixtures.pulpproject.org/rpm-signed/
# Sync and distribute it
$ pulp rpm remote create --name foo --url file:///tmp/fixtures.pulpproject.org/rpm-signed/ --policy on_demand
$ pulp rpm repository create --name foo --remote foo --autopublish
$ pulp rpm repository sync --name foo
$ pulp rpm distribution create --name foo --repository foo --base-path foo
$ wget http://localhost:5001/pulp/content/foo/Packages/b/bear-4.1-1.noarch.rpm
--2023-07-12 13:52:28-- http://localhost:5001/pulp/content/foo/Packages/b/bear-4.1-1.noarch.rpm
Resolving localhost (localhost)... ::1, 127.0.0.1
Connecting to localhost (localhost)|::1|:5001... failed: Connection refused.
Connecting to localhost (localhost)|127.0.0.1|:5001... connected.
HTTP request sent, awaiting response... 500 Internal Server Error
2023-07-12 13:52:28 ERROR 500: Internal Server Error.
$
```
Same traceback in logs:
```
[pulp] | 127.0.0.1 [12/Jul/2023:13:48:44 +0000] "GET /pulp/content/foo/Packages/b/bear-4.1-1.noarch.rpm HTTP/1.0" 500 315 "http://localhost:5001/pulp/content/foo/Packages/b/" "Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/115.0"
[2023-07-12 13:52:28 +0000] [1366] [ERROR] Error handling request
Traceback (most recent call last):
File "/usr/local/lib64/python3.8/site-packages/aiohttp/web_protocol.py", line 433, in _handle_request
resp = await request_handler(request)
File "/usr/local/lib64/python3.8/site-packages/aiohttp/web_app.py", line 504, in _handle
resp = await handler(request)
File "/usr/local/lib64/python3.8/site-packages/aiohttp/web_middlewares.py", line 117, in impl
return await handler(request)
File "/src/pulpcore/pulpcore/content/authentication.py", line 48, in authenticate
return await handler(request)
File "/src/pulpcore/pulpcore/content/handler.py", line 248, in stream_content
return await self._match_and_stream(path, request)
File "/src/pulpcore/pulpcore/content/handler.py", line 611, in _match_and_stream
return await self._stream_content_artifact(
File "/src/pulpcore/pulpcore/content/handler.py", line 764, in _stream_content_artifact
response = await self._stream_remote_artifact(request, response, remote_artifact)
File "/src/pulpcore/pulpcore/content/handler.py", line 1042, in _stream_remote_artifact
download_result = await downloader.run()
File "/src/pulpcore/pulpcore/download/base.py", line 257, in run
return await self._run(extra_data=extra_data)
File "/src/pulpcore/pulpcore/download/file.py", line 58, in _run
await self.handle_data(chunk)
File "/src/pulpcore/pulpcore/content/handler.py", line 1027, in handle_data
await response.write(data)
File "/usr/local/lib64/python3.8/site-packages/aiohttp/web_response.py", line 512, in write
raise RuntimeError("Cannot call write() before prepare()")
RuntimeError: Cannot call write() before prepare()
``` | 2023-07-12T21:02:36 |
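As background on the traceback above: aiohttp's streaming responses require `prepare()` to be called before the first `write()`, which is the contract the patch backstops for the FileDownloader code path. A minimal standalone sketch of that contract (plain aiohttp, not Pulp code):
```python
# Minimal aiohttp streaming handler illustrating the prepare()-before-write() contract.
from aiohttp import web


async def stream(request):
    response = web.StreamResponse()
    response.headers["Content-Type"] = "application/octet-stream"
    # prepare() sends the headers; calling write() first raises
    # RuntimeError("Cannot call write() before prepare()").
    await response.prepare(request)
    await response.write(b"some bytes")
    await response.write_eof()
    return response


app = web.Application()
app.add_routes([web.get("/stream", stream)])

if __name__ == "__main__":
    web.run_app(app, port=8080)
```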
pulp/pulpcore | 4,052 | pulp__pulpcore-4052 | [
"4051"
] | bbb30b7a5de3cab3ac13e1913facb33f1292e2fe | diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -113,7 +113,6 @@ def _export_publication_to_file_system(
path (str): Path to place the exported data
publication_pk (str): Publication pk
"""
- difference_content_artifacts = []
content_artifacts = ContentArtifact.objects.filter(
pk__in=publication.published_artifact.values_list("content_artifact__pk", flat=True)
)
@@ -121,11 +120,6 @@ def _export_publication_to_file_system(
start_version_content_artifacts = ContentArtifact.objects.filter(
artifact__in=start_repo_version.artifacts
)
- difference_content_artifacts = set(
- content_artifacts.difference(start_version_content_artifacts).values_list(
- "pk", flat=True
- )
- )
if publication.pass_through:
content_artifacts |= ContentArtifact.objects.filter(
@@ -136,11 +130,20 @@ def _export_publication_to_file_system(
# In some cases we may want to disable this validation
_validate_fs_export(content_artifacts)
+ difference_content_artifacts = []
+ if start_repo_version:
+ difference_content_artifacts = set(
+ content_artifacts.difference(start_version_content_artifacts).values_list(
+ "pk", flat=True
+ )
+ )
+
relative_path_to_artifacts = {}
if publication.pass_through:
relative_path_to_artifacts = {
ca.relative_path: ca.artifact
for ca in content_artifacts.select_related("artifact").iterator()
+ if (start_repo_version is None) or (ca.pk in difference_content_artifacts)
}
publication_metadata_paths = set(
| File System Exporter ignores start_repository_version and exports everything for pass-through publications
**Version**
Please provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.
Pulp 3.18 and later
**Describe the bug**
The file system exporter ignores start_repository_version and exports everything for pass-through publications. So FilePublications for example are marked as pass_through publications. The export task in this case seems to ignore the start_repository_version and dumps everything over.
Note: If you give it a repo version and a start_repo_version, it returns the right content. This behavior only occurs when passing a publication and a start_repo_version (which is what Katello uses).
**To Reproduce**
Steps to reproduce the behavior:
- Have a file repo with multiple versions and publications (make sure the versions have some new files)
- Try to export this repo via the file system exporter and provide a publication and start_version
**Expected behavior**
Exported content should have the published_metadata + difference in content i.e. publication.repository_version.content - start_repository_version.content
**Actual**
Everything gets copied.
Bugzilla https://bugzilla.redhat.com/show_bug.cgi?id=2172564
| 2023-07-15T02:04:42 |
||
pulp/pulpcore | 4,061 | pulp__pulpcore-4061 | [
"4051"
] | a64729e31307ae68a2fd733dbafeb6a342b5112a | diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -113,7 +113,6 @@ def _export_publication_to_file_system(
path (str): Path to place the exported data
publication_pk (str): Publication pk
"""
- difference_content_artifacts = []
content_artifacts = ContentArtifact.objects.filter(
pk__in=publication.published_artifact.values_list("content_artifact__pk", flat=True)
)
@@ -121,11 +120,6 @@ def _export_publication_to_file_system(
start_version_content_artifacts = ContentArtifact.objects.filter(
artifact__in=start_repo_version.artifacts
)
- difference_content_artifacts = set(
- content_artifacts.difference(start_version_content_artifacts).values_list(
- "pk", flat=True
- )
- )
if publication.pass_through:
content_artifacts |= ContentArtifact.objects.filter(
@@ -136,11 +130,20 @@ def _export_publication_to_file_system(
# In some cases we may want to disable this validation
_validate_fs_export(content_artifacts)
+ difference_content_artifacts = []
+ if start_repo_version:
+ difference_content_artifacts = set(
+ content_artifacts.difference(start_version_content_artifacts).values_list(
+ "pk", flat=True
+ )
+ )
+
relative_path_to_artifacts = {}
if publication.pass_through:
relative_path_to_artifacts = {
ca.relative_path: ca.artifact
for ca in content_artifacts.select_related("artifact").iterator()
+ if (start_repo_version is None) or (ca.pk in difference_content_artifacts)
}
publication_metadata_paths = set(
| File System Exporter ignores start_repository_version and exports everything for pass-through publications
**Version**
Please provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.
Pulp 3.18 and later
**Describe the bug**
The file system exporter ignores start_repository_version and exports everything for pass-through publications. So FilePublications for example are marked as pass_through publications. The export task in this case seems to ignore the start_repository_version and dumps everything over.
Note: If you give it a repo version and a start_repo_version, it returns the right content. This behavior only occurs when passing a publication and a start_repo_version (which is what Katello uses).
**To Reproduce**
Steps to reproduce the behavior:
- Have a file repo with multiple versions and publications (make sure the versions have some new files)
- Try to export this repo via the file system exporter and provide a publication and start_version
**Expected behavior**
Exported content should have the published_metadata + difference in content i.e. publication.repository_version.content - start_repository_version.content
**Actual**
Everything gets copied.
Bugzilla https://bugzilla.redhat.com/show_bug.cgi?id=2172564
| 2023-07-17T14:54:06 |
||
pulp/pulpcore | 4,062 | pulp__pulpcore-4062 | [
"4051"
] | 2496d04aa3b700979cac547c224a758d9a2410cb | diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -113,7 +113,6 @@ def _export_publication_to_file_system(
path (str): Path to place the exported data
publication_pk (str): Publication pk
"""
- difference_content_artifacts = []
content_artifacts = ContentArtifact.objects.filter(
pk__in=publication.published_artifact.values_list("content_artifact__pk", flat=True)
)
@@ -121,11 +120,6 @@ def _export_publication_to_file_system(
start_version_content_artifacts = ContentArtifact.objects.filter(
artifact__in=start_repo_version.artifacts
)
- difference_content_artifacts = set(
- content_artifacts.difference(start_version_content_artifacts).values_list(
- "pk", flat=True
- )
- )
if publication.pass_through:
content_artifacts |= ContentArtifact.objects.filter(
@@ -136,11 +130,20 @@ def _export_publication_to_file_system(
# In some cases we may want to disable this validation
_validate_fs_export(content_artifacts)
+ difference_content_artifacts = []
+ if start_repo_version:
+ difference_content_artifacts = set(
+ content_artifacts.difference(start_version_content_artifacts).values_list(
+ "pk", flat=True
+ )
+ )
+
relative_path_to_artifacts = {}
if publication.pass_through:
relative_path_to_artifacts = {
ca.relative_path: ca.artifact
for ca in content_artifacts.select_related("artifact").iterator()
+ if (start_repo_version is None) or (ca.pk in difference_content_artifacts)
}
publication_metadata_paths = set(
| File System Exporter ignores start_repository_version and exports everything for pass-through publications
**Version**
Please provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.
Pulp 3.18 and later
**Describe the bug**
The file system exporter ignores start_repository_version and exports everything for pass-through publications. So FilePublications for example are marked as pass_through publications. The export task in this case seems to ignore the start_repository_version and dumps everything over.
Note: If you give it a repo version and a start_repo_version, it returns the right content. This behavior only occurs when passing a publication and a start_repo_version (which is what Katello uses).
**To Reproduce**
Steps to reproduce the behavior:
- Have a file repo with multiple versions and publications (make sure the versions have some new files)
- Try to export this repo via the file system exporter and provide a publication and start_version
**Expected behavior**
Exported content should have the published_metadata + difference in content i.e. publication.repository_version.content - start_repository_version.content
**Actual**
Everything gets copied.
Bugzilla https://bugzilla.redhat.com/show_bug.cgi?id=2172564
| 2023-07-17T14:54:20 |
||
pulp/pulpcore | 4,063 | pulp__pulpcore-4063 | [
"4051"
] | 08f6079ab03ad63700dda568d21f31322d57cf9c | diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -113,7 +113,6 @@ def _export_publication_to_file_system(
path (str): Path to place the exported data
publication_pk (str): Publication pk
"""
- difference_content_artifacts = []
content_artifacts = ContentArtifact.objects.filter(
pk__in=publication.published_artifact.values_list("content_artifact__pk", flat=True)
)
@@ -121,11 +120,6 @@ def _export_publication_to_file_system(
start_version_content_artifacts = ContentArtifact.objects.filter(
artifact__in=start_repo_version.artifacts
)
- difference_content_artifacts = set(
- content_artifacts.difference(start_version_content_artifacts).values_list(
- "pk", flat=True
- )
- )
if publication.pass_through:
content_artifacts |= ContentArtifact.objects.filter(
@@ -136,11 +130,20 @@ def _export_publication_to_file_system(
# In some cases we may want to disable this validation
_validate_fs_export(content_artifacts)
+ difference_content_artifacts = []
+ if start_repo_version:
+ difference_content_artifacts = set(
+ content_artifacts.difference(start_version_content_artifacts).values_list(
+ "pk", flat=True
+ )
+ )
+
relative_path_to_artifacts = {}
if publication.pass_through:
relative_path_to_artifacts = {
ca.relative_path: ca.artifact
for ca in content_artifacts.select_related("artifact").iterator()
+ if (start_repo_version is None) or (ca.pk in difference_content_artifacts)
}
publication_metadata_paths = set(
| File System Exporter ignores start_repository_version and exports everything for pass-through publications
**Version**
Please provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.
Pulp 3.18 and later
**Describe the bug**
The file system exporter ignores start_repository_version and exports everything for pass-through publications. So FilePublications for example are marked as pass_through publications. The export task in this case seems to ignore the start_repository_version and dumps everything over.
Note: If you give it a repo version and a start_repo_version, it returns the right content. This behavior only occurs when passing a publication and a start_repo_version (which is what Katello uses).
**To Reproduce**
Steps to reproduce the behavior:
- Have a file repo with multiple versions and publications (make sure the versions have some new files)
- Try to export this repo via the file system exporter and provide a publication and start_version
**Expected behavior**
Exported content should have the published_metadata + difference in content i.e. publication.repository_version.content - start_repository_version.content
**Actual**
Everything gets copied.
Bugzilla https://bugzilla.redhat.com/show_bug.cgi?id=2172564
| 2023-07-17T14:54:34 |
||
pulp/pulpcore | 4,064 | pulp__pulpcore-4064 | [
"4051"
] | a63a47b8ab535a7a66299d488e59be25b54dd70e | diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -113,7 +113,6 @@ def _export_publication_to_file_system(
path (str): Path to place the exported data
publication_pk (str): Publication pk
"""
- difference_content_artifacts = []
content_artifacts = ContentArtifact.objects.filter(
pk__in=publication.published_artifact.values_list("content_artifact__pk", flat=True)
)
@@ -121,11 +120,6 @@ def _export_publication_to_file_system(
start_version_content_artifacts = ContentArtifact.objects.filter(
artifact__in=start_repo_version.artifacts
)
- difference_content_artifacts = set(
- content_artifacts.difference(start_version_content_artifacts).values_list(
- "pk", flat=True
- )
- )
if publication.pass_through:
content_artifacts |= ContentArtifact.objects.filter(
@@ -136,11 +130,20 @@ def _export_publication_to_file_system(
# In some cases we may want to disable this validation
_validate_fs_export(content_artifacts)
+ difference_content_artifacts = []
+ if start_repo_version:
+ difference_content_artifacts = set(
+ content_artifacts.difference(start_version_content_artifacts).values_list(
+ "pk", flat=True
+ )
+ )
+
relative_path_to_artifacts = {}
if publication.pass_through:
relative_path_to_artifacts = {
ca.relative_path: ca.artifact
for ca in content_artifacts.select_related("artifact").iterator()
+ if (start_repo_version is None) or (ca.pk in difference_content_artifacts)
}
for pa in publication.published_artifact.select_related(
| File System Exporter ignores start_repository_version and exports everything for pass-through publications
**Version**
Please provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.
Pulp 3.18 and later
**Describe the bug**
The file system exporter ignores start_repository_version and exports everything for pass-through publications. So FilePublications for example are marked as pass_through publications. The export task in this case seems to ignore the start_repository_version and dumps everything over.
Note: If you give it a repo version and a start_repo_version, it returns the right content. This behavior only occurs when passing a publication and a start_repo_version (which is what Katello uses).
**To Reproduce**
Steps to reproduce the behavior:
- Have a file repo with multiple versions and publications (make sure the versions have some new files)
- Try to export this repo via the file system exporter and provide a publication and start_version
**Expected behavior**
Exported content should have the published_metadata + difference in content i.e. publication.repository_version.content - start_repository_version.content
**Actual**
Everything gets copied.
Bugzilla https://bugzilla.redhat.com/show_bug.cgi?id=2172564
| 2023-07-17T14:54:50 |
||
pulp/pulpcore | 4,065 | pulp__pulpcore-4065 | [
"4051"
] | 15e05d90dd4693f8b90031a1f3a7147e43111e0b | diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -113,7 +113,6 @@ def _export_publication_to_file_system(
path (str): Path to place the exported data
publication_pk (str): Publication pk
"""
- difference_content_artifacts = []
content_artifacts = ContentArtifact.objects.filter(
pk__in=publication.published_artifact.values_list("content_artifact__pk", flat=True)
)
@@ -121,11 +120,6 @@ def _export_publication_to_file_system(
start_version_content_artifacts = ContentArtifact.objects.filter(
artifact__in=start_repo_version.artifacts
)
- difference_content_artifacts = set(
- content_artifacts.difference(start_version_content_artifacts).values_list(
- "pk", flat=True
- )
- )
if publication.pass_through:
content_artifacts |= ContentArtifact.objects.filter(
@@ -136,11 +130,20 @@ def _export_publication_to_file_system(
# In some cases we may want to disable this validation
_validate_fs_export(content_artifacts)
+ difference_content_artifacts = []
+ if start_repo_version:
+ difference_content_artifacts = set(
+ content_artifacts.difference(start_version_content_artifacts).values_list(
+ "pk", flat=True
+ )
+ )
+
relative_path_to_artifacts = {}
if publication.pass_through:
relative_path_to_artifacts = {
ca.relative_path: ca.artifact
for ca in content_artifacts.select_related("artifact").iterator()
+ if (start_repo_version is None) or (ca.pk in difference_content_artifacts)
}
publication_metadata_paths = set(
| File System Exporter ignores start_repository_version and exports everything for pass-through publications
**Version**
Please provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.
Pulp 3.18 and later
**Describe the bug**
The file system exporter ignores start_repository_version and exports everything for pass-through publications. So FilePublications for example are marked as pass_through publications. The export task in this case seems to ignore the start_repository_version and dumps everything over.
Note: If you give it a repo version and a start_repo_version, it returns the right content. This behavior only occurs when passing a publication and a start_repo_version (which is what Katello uses).
**To Reproduce**
Steps to reproduce the behavior:
- Have a file repo with multiple versions and publications (make sure the versions have some new files)
- Try to export this repo via the file system exporter and provide a publication and start_version
**Expected behavior**
Exported content should have the published_metadata + difference in content i.e. publication.repository_version.content - start_repository_version.content
**Actual**
Everything gets copied.
Bugzilla https://bugzilla.redhat.com/show_bug.cgi?id=2172564
| 2023-07-17T14:55:05 |
||
pulp/pulpcore | 4,090 | pulp__pulpcore-4090 | [
"2250"
] | c6902ff3aaf277658def40341b20064cfe1f43c2 | diff --git a/pulpcore/app/access_policy.py b/pulpcore/app/access_policy.py
--- a/pulpcore/app/access_policy.py
+++ b/pulpcore/app/access_policy.py
@@ -1,4 +1,3 @@
-from functools import lru_cache
from rest_access_policy import AccessPolicy
from rest_framework.exceptions import APIException
@@ -12,7 +11,6 @@ class AccessPolicyFromDB(AccessPolicy):
"""
@staticmethod
- @lru_cache
def get_access_policy(view):
"""
Retrieves the AccessPolicy from the DB or None if it doesn't exist.
| Gunicorn consuming excessive amounts of memory
**Version**
3.16.z
**Describe the bug**
Gunicorn consuming excessive amounts of memory, 3.5-4gb
**To Reproduce**
Unclear
**Expected behavior**
Probably not to have a single gunicorn process use 4gb of memory
**Additional context**
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=2035873
Katello forum discussion: https://community.theforeman.org/t/katello-4-5-foreman-3-3-memory-leak-in-gunicorn/29658/22
| One workaround is to enable process recycling for gunicorn workers. This doesn't get at the root cause though, which I'm curious about. Also how does it grow over time, does it grow slowly or are there specific requests that leave large amounts of memory pinned?
> One workaround is to enable process recycling for gunicorn workers.
We have a user doing exactly this, with some success. See https://bugzilla.redhat.com/show_bug.cgi?id=1994397#c43
copy pasting for transparency so it's in one place
```
To enable gunicorn worker recycling the procedure goes like this:
1. Create a gunicorn.conf.py under /var/lib/pulp containing max_requests and max_requests_jitter values
# cat /var/lib/pulp/gunicorn.conf.py
max_requests = 1000
max_requests_jitter = 50
2. restart the specific service systemctl restart pulpcore-api
3. gunicorn workers will then be automatically recycled every 1000 requests (with a jitter to avoid all workers restarting at the same time) One can reduce the max_requests value to have more aggressive recycling.
```
The installer [passes the gunicorn options along via command line params](https://github.com/pulp/pulp_installer/blob/669dbdde710762c59a6840c1b97cca119d6f6c0a/roles/pulp_content/templates/pulpcore-content.service.j2#L27-L32), so we could use `--max-requests` and `--max-requests-jitter` options like we do today.
To implement this as a default I expected an installer + operator + single container to all make the changes, and pulpcore to document its benefit.
This might not (fully) be a leak, look at what it's doing, it's fetching the full details of RPM packages from the /pulp/api/v3/rpm/packages/ endpoint. So: serializing 100-1000 packages at a time. That might still be a stretch but maybe if it's doing so really inefficiently?
@dralley I agree. My concern (besides the efficiency of that code) is that for subsequent requests the memory isn't released until the recycling occurs.
Added a link to a Katello forum discussion that is ongoing. Some discoveries so far:
* it's definitely the API server rather than anything else
* process recycling does help, but the memory is re-leaked extremely quickly
* it's worse with multiple API server processes running
Trimming the (RPM) changelogs appears to help. So there is likely overlap with https://github.com/pulp/pulpcore/issues/2138
Sounds like it's responsible for not just memory usage issues but also potentially timeouts (there is a BZ for that as well)
Closing in favor of https://github.com/pulp/pulpcore/issues/2138
Katello / Satellite are the main impacted users right now, and they now have multiple workarounds.
Could this issue be reopened? This is not solved yet. We are running pulpcore version 3.21.4
We have some scripts in place for automating some pulp settings. The scripts we are using rely on the Python pulp client bindings, and with these bindings it is not possible to set the fields that the API returns.
I can replicate the issue with the following api commands:
Ram usage (of the virtual machine) before the command: 3.7Gi of 24 Gi
```
# pulp rpm content list --name 'self-build-package' --limit 1000 | jq .[].location_href | wc -l
267
```
Ram usage (of the virtual machine) after executing the command: 6.6Gi of 24Gi
Our pulp virtual machine ran out of memory once when the pulpcore-api service was using 12.7Gi of memory. This was cleared after restarting the service, but that should not be the case.
@maartenbeeckmans How old is this instance, and are you working with RHEL RPMs synced from at least 6 months ago? If so, could you try out the changelog trimming feature and report back on whether it helps?
Also what is your pulp_rpm version? You only mentioned pulpcore.
Versions can be seen below, we are using the rpm's provided by theforeman.
```sh
# pulp status | jq .versions
[
{
"component": "core",
"version": "3.21.4",
"package": "pulpcore"
},
{
"component": "rpm",
"version": "3.18.9",
"package": "pulp-rpm"
},
{
"component": "file",
"version": "1.11.1",
"package": "pulp-file"
},
{
"component": "deb",
"version": "2.20.0",
"package": "pulp_deb"
},
{
"component": "container",
"version": "2.14.3",
"package": "pulp-container"
}
]
```
I've tried a number of different debugging approaches, I know that the issue is all of the RPM changelogs loaded while listing the packages, but I don't know why they're leaking. My best guess is that maybe the django queryset cache is being used on the viewset, but I'm not confident of that.
https://github.com/pulp/pulpcore/issues/2138 would help to work around it as does process recycling
So finally we have an answer to this saga. [This line](https://github.com/pulp/pulpcore/pull/2826/files/bc85beb34796682071e72d5ee44a9425c2b86b75#diff-f3d57ed180a61179704601b8a978567bf8e5c5c54e95458ed8b0b49bbc150f3cR15) was caching the view objects, rather than serving the intended goal of reducing the number of lookups.
Thanks to Hao Yu for finding the root cause (I need find out how!) and also Gerrod for putting in some time investigating once that was identified. | 2023-07-19T19:35:40 |
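To make that root cause concrete, here is a small self-contained sketch (not the actual pulpcore code) of why an `lru_cache` keyed on per-request view instances pins memory: the cache keeps strong references to its arguments, each request passes a brand-new view object, so there are never any hits, yet the most recent entries stay alive together with everything the views reference.
```python
# Sketch of the leak mechanism (illustrative only; FakeView stands in for a DRF viewset instance).
from functools import lru_cache


class FakeView:
    """Stand-in for a per-request view instance holding large serialized results."""

    def __init__(self, request_id):
        self.request_id = request_id
        self.result_cache = ["changelog entry"] * 100_000  # pretend queryset/result cache


@lru_cache  # default maxsize=128: the last 128 FakeView objects stay strongly referenced
def get_access_policy(view):
    return f"policy-for-{type(view).__name__}"


for i in range(500):
    get_access_policy(FakeView(i))  # a new instance every call -> never a cache hit

print(get_access_policy.cache_info())  # hits=0, misses=500, currsize=128 entries still pinned
```
Dropping the decorator, as the patch above does, lets each view (and the data it references) be garbage-collected as soon as the request completes.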
|
pulp/pulpcore | 4,095 | pulp__pulpcore-4095 | [
"2250"
] | c00fb91810d9a626831cfae5fe1b46879d1ef4db | diff --git a/pulpcore/app/access_policy.py b/pulpcore/app/access_policy.py
--- a/pulpcore/app/access_policy.py
+++ b/pulpcore/app/access_policy.py
@@ -1,4 +1,3 @@
-from functools import lru_cache
from rest_access_policy import AccessPolicy
from rest_framework.exceptions import APIException
@@ -12,7 +11,6 @@ class AccessPolicyFromDB(AccessPolicy):
"""
@staticmethod
- @lru_cache
def get_access_policy(view):
"""
Retrieves the AccessPolicy from the DB or None if it doesn't exist.
| Gunicorn consuming excessive amounts of memory
**Version**
3.16.z
**Describe the bug**
Gunicorn consuming excessive amounts of memory, 3.5-4gb
**To Reproduce**
Unclear
**Expected behavior**
Probably not to have a single gunicorn process use 4gb of memory
**Additional context**
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=2035873
Katello forum discussion: https://community.theforeman.org/t/katello-4-5-foreman-3-3-memory-leak-in-gunicorn/29658/22
| One workaround is to enable process recycling for gunicorn workers. This doesn't get at the root cause though, which I'm curious about. Also how does it grow over time, does it grow slowly or are there specific requests that leave large amounts of memory pinned?
> One workaround is to enable process recycling for gunicorn workers.
We have a user doing exactly this, with some success. See https://bugzilla.redhat.com/show_bug.cgi?id=1994397#c43
copy pasting for transparency so it's in one place
```
To enable gunicorn worker recycling the procedure goes like this:
1. Create a gunicorn.conf.py under /var/lib/pulp containing max_requests and max_requests_jitter values
# cat /var/lib/pulp/gunicorn.conf.py
max_requests = 1000
max_requests_jitter = 50
2. restart the specific service systemctl restart pulpcore-api
3. gunicorn workers will then be automatically recycled every 1000 requests (with a jitter to avoid all workers restarting at the same time) One can reduce the max_requests value to have more aggressive recycling.
```
The installer [passes the gunicorn options along via command line params](https://github.com/pulp/pulp_installer/blob/669dbdde710762c59a6840c1b97cca119d6f6c0a/roles/pulp_content/templates/pulpcore-content.service.j2#L27-L32), so we could use `--max-requests` and `--max-requests-jitter` options like we do today.
To implement this as a default I expected an installer + operator + single container to all make the changes, and pulpcore to document its benefit.
This might not (fully) be a leak, look at what it's doing, it's fetching the full details of RPM packages from the /pulp/api/v3/rpm/packages/ endpoint. So: serializing 100-1000 packages at a time. That might still be a stretch but maybe if it's doing so really inefficiently?
@dralley I agree. My concern (besides the efficiency of that code) is that for subsequent requests the memory isn't released until the recycling occurs.
Added a link to a Katello forum discussion that is ongoing. Some discoveries so far:
* it's definitely the API server rather than anything else
* process recycling does help, but the memory is re-leaked extremely quickly
* it's worse with multiple API server processes running
Trimming the (RPM) changelogs appears to help. So there is likely overlap with https://github.com/pulp/pulpcore/issues/2138
Sounds like it's responsible for not just memory usage issues but also potentially timeouts (there is a BZ for that as well)
Closing in favor of https://github.com/pulp/pulpcore/issues/2138
Katello / Satellite are the main impacted users right now, and they now have multiple workarounds.
Could this issue be reopened? This is not solved yet. We are running pulpcore version 3.21.4
We have some scripts in place for automating some pulp settings. The scripts we are using rely on the Python pulp client bindings, and with these bindings it is not possible to set the fields that the API returns.
I can replicate the issue with the following api commands:
Ram usage (of the virtual machine) before the command: 3.7Gi of 24 Gi
```
# pulp rpm content list --name 'self-build-package' --limit 1000 | jq .[].location_href | wc -l
267
```
Ram usage (of the virtual machine) after executing the command: 6.6Gi of 24Gi
Our pulp virtual machine ran out of memory once when the pulpcore-api service was using 12.7Gi of memory. This was cleared after restarting the service, but that should not be the case.
@maartenbeeckmans How old is this instance, and are you working with RHEL RPMs synced from at least 6 months ago? If so, could you try out the changelog trimming feature and report back on whether it helps?
Also what is your pulp_rpm version? You only mentioned pulpcore.
Versions can be seen below, we are using the rpm's provided by theforeman.
```sh
# pulp status | jq .versions
[
{
"component": "core",
"version": "3.21.4",
"package": "pulpcore"
},
{
"component": "rpm",
"version": "3.18.9",
"package": "pulp-rpm"
},
{
"component": "file",
"version": "1.11.1",
"package": "pulp-file"
},
{
"component": "deb",
"version": "2.20.0",
"package": "pulp_deb"
},
{
"component": "container",
"version": "2.14.3",
"package": "pulp-container"
}
]
```
I've tried a number of different debugging approaches, I know that the issue is all of the RPM changelogs loaded while listing the packages, but I don't know why they're leaking. My best guess is that maybe the django queryset cache is being used on the viewset, but I'm not confident of that.
https://github.com/pulp/pulpcore/issues/2138 would help to work around it as does process recycling
So finally we have an answer to this saga. [This line](https://github.com/pulp/pulpcore/pull/2826/files/bc85beb34796682071e72d5ee44a9425c2b86b75#diff-f3d57ed180a61179704601b8a978567bf8e5c5c54e95458ed8b0b49bbc150f3cR15) was caching the view objects, rather than serving the intended goal of reducing the number of lookups.
Thanks to Hao Yu for finding the root cause (I need find out how!) and also Gerrod for putting in some time investigating once that was identified. | 2023-07-20T12:20:23 |
|
pulp/pulpcore | 4,096 | pulp__pulpcore-4096 | [
"2250"
] | 94629de406fd0d849612b6b59ba343b3188a3f1b | diff --git a/pulpcore/app/access_policy.py b/pulpcore/app/access_policy.py
--- a/pulpcore/app/access_policy.py
+++ b/pulpcore/app/access_policy.py
@@ -1,4 +1,3 @@
-from functools import lru_cache
from rest_access_policy import AccessPolicy
from rest_framework.exceptions import APIException
@@ -12,7 +11,6 @@ class AccessPolicyFromDB(AccessPolicy):
"""
@staticmethod
- @lru_cache
def get_access_policy(view):
"""
Retrieves the AccessPolicy from the DB or None if it doesn't exist.
| Gunicorn consuming excessive amounts of memory
**Version**
3.16.z
**Describe the bug**
Gunicorn consuming excessive amounts of memory, 3.5-4gb
**To Reproduce**
Unclear
**Expected behavior**
Probably not to have a single gunicorn process use 4gb of memory
**Additional context**
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=2035873
Katello forum discussion: https://community.theforeman.org/t/katello-4-5-foreman-3-3-memory-leak-in-gunicorn/29658/22
| One workaround is to enable process recycling for gunicorn workers. This doesn't get at the root cause though, which I'm curious about. Also how does it grow over time, does it grow slowly or are there specific requests that leave large amounts of memory pinned?
> One workaround is to enable process recycling for gunicorn workers.
We have a user doing exactly this, with some success. See https://bugzilla.redhat.com/show_bug.cgi?id=1994397#c43
copy pasting for transparency so it's in one place
```
To enable gunicorn worker recycling the procedure goes like this:
1. Create a gunicorn.conf.py under /var/lib/pulp containing max_requests and max_requests_jitter values
# cat /var/lib/pulp/gunicorn.conf.py
max_requests = 1000
max_requests_jitter = 50
2. restart the specific service systemctl restart pulpcore-api
3. gunicorn workers will then be automatically recycled every 1000 requests (with a jitter to avoid all workers restarting at the same time) One can reduce the max_requests value to have more aggressive recycling.
```
The installer [passes the gunicorn options along via command line params](https://github.com/pulp/pulp_installer/blob/669dbdde710762c59a6840c1b97cca119d6f6c0a/roles/pulp_content/templates/pulpcore-content.service.j2#L27-L32), so we could use `--max-requests` and `--max-requests-jitter` options like we do today.
To implement this as a default I expected an installer + operator + single container to all make the changes, and pulpcore to document its benefit.
This might not (fully) be a leak, look at what it's doing, it's fetching the full details of RPM packages from the /pulp/api/v3/rpm/packages/ endpoint. So: serializing 100-1000 packages at a time. That might still be a stretch but maybe if it's doing so really inefficiently?
@dralley I agree. My concern (besides the efficiency of that code) is that for subsequent requests the memory isn't released until the recycling occurs.
Added a link to a Katello forum discussion that is ongoing. Some discoveries so far:
* it's definitely the API server rather than anything else
* process recycling does help, but the memory is re-leaked extremely quickly
* it's worse with multiple API server processes running
Trimming the (RPM) changelogs appears to help. So there is likely overlap with https://github.com/pulp/pulpcore/issues/2138
Sounds like it's responsible for not just memory usage issues but also potentially timeouts (there is a BZ for that as well)
Closing in favor of https://github.com/pulp/pulpcore/issues/2138
Katello / Satellite are the main impacted users right now, and they now have multiple workarounds.
Could this issue be reopened? This is not solved yet. We are running pulpcore version 3.21.4
We have some scripts in place for automating some pulp settings. The scripts we are using rely on the Python pulp client bindings, and with these bindings it is not possible to set the fields that the API returns.
I can replicate the issue with the following api commands:
Ram usage (of the virtual machine) before the command: 3.7Gi of 24 Gi
```
# pulp rpm content list --name 'self-build-package' --limit 1000 | jq .[].location_href | wc -l
267
```
Ram usage (of the virtual machine) after executing the command: 6.6Gi of 24Gi
Our pulp virtual machine ran out of memory once when the pulpcore-api service was using 12.7Gi of memory. This was cleared after restarting the service, but that should not be the case.
@maartenbeeckmans How old is this instance, and are you working with RHEL RPMs synced from at least 6 months ago? If so, could you try out the changelog trimming feature and report back on whether it helps?
Also what is your pulp_rpm version? You only mentioned pulpcore.
Versions can be seen below, we are using the rpm's provided by theforeman.
```sh
# pulp status | jq .versions
[
{
"component": "core",
"version": "3.21.4",
"package": "pulpcore"
},
{
"component": "rpm",
"version": "3.18.9",
"package": "pulp-rpm"
},
{
"component": "file",
"version": "1.11.1",
"package": "pulp-file"
},
{
"component": "deb",
"version": "2.20.0",
"package": "pulp_deb"
},
{
"component": "container",
"version": "2.14.3",
"package": "pulp-container"
}
]
```
I've tried a number of different debugging approaches, I know that the issue is all of the RPM changelogs loaded while listing the packages, but I don't know why they're leaking. My best guess is that maybe the django queryset cache is being used on the viewset, but I'm not confident of that.
https://github.com/pulp/pulpcore/issues/2138 would help to work around it as does process recycling
So finally we have an answer to this saga. [This line](https://github.com/pulp/pulpcore/pull/2826/files/bc85beb34796682071e72d5ee44a9425c2b86b75#diff-f3d57ed180a61179704601b8a978567bf8e5c5c54e95458ed8b0b49bbc150f3cR15) was caching the view objects, rather than serving the intended goal of reducing the number of lookups.
Thanks to Hao Yu for finding the root cause (I need find out how!) and also Gerrod for putting in some time investigating once that was identified. | 2023-07-20T12:20:36 |
|
pulp/pulpcore | 4,097 | pulp__pulpcore-4097 | [
"2250"
] | d9045a4f8f025d2aa794a4affd60f96267840254 | diff --git a/pulpcore/app/access_policy.py b/pulpcore/app/access_policy.py
--- a/pulpcore/app/access_policy.py
+++ b/pulpcore/app/access_policy.py
@@ -1,4 +1,3 @@
-from functools import lru_cache
from rest_access_policy import AccessPolicy
from rest_framework.exceptions import APIException
@@ -12,7 +11,6 @@ class AccessPolicyFromDB(AccessPolicy):
"""
@staticmethod
- @lru_cache
def get_access_policy(view):
"""
Retrieves the AccessPolicy from the DB or None if it doesn't exist.
| Gunicorn consuming excessive amounts of memory
**Version**
3.16.z
**Describe the bug**
Gunicorn consuming excessive amounts of memory, 3.5-4gb
**To Reproduce**
Unclear
**Expected behavior**
Probably not to have a single gunicorn process use 4gb of memory
**Additional context**
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=2035873
Katello forum discussion: https://community.theforeman.org/t/katello-4-5-foreman-3-3-memory-leak-in-gunicorn/29658/22
| One workaround is to enable process recycling for gunicorn workers. This doesn't get at the root cause though, which I'm curious about. Also how does it grow over time, does it grow slowly or are there specific requests that leave large amounts of memory pinned?
> One workaround is to enable process recycling for gunicorn workers.
We have a user doing exactly this, with some success. See https://bugzilla.redhat.com/show_bug.cgi?id=1994397#c43
copy pasting for transparency so it's in one place
```
To enable gunicorn worker recycling the procedure goes like this:
1. Create a gunicorn.conf.py under /var/lib/pulp containing max_requests and max_requests_jitter values
# cat /var/lib/pulp/gunicorn.conf.py
max_requests = 1000
max_requests_jitter = 50
2. restart the specific service systemctl restart pulpcore-api
3. gunicorn workers will then be automatically recycled every 1000 requests (with a jitter to avoid all workers restarting at the same time) One can reduce the max_requests value to have more aggressive recycling.
```
The installer [passes the gunicorn options along via command line params](https://github.com/pulp/pulp_installer/blob/669dbdde710762c59a6840c1b97cca119d6f6c0a/roles/pulp_content/templates/pulpcore-content.service.j2#L27-L32), so we could use `--max-requests` and `--max-requests-jitter` options like we do today.
To implement this as a default I expected an installer + operator + single container to all make the changes, and pulpcore to document its benefit.
This might not (fully) be a leak, look at what it's doing, it's fetching the full details of RPM packages from the /pulp/api/v3/rpm/packages/ endpoint. So: serializing 100-1000 packages at a time. That might still be a stretch but maybe if it's doing so really inefficiently?
@dralley I agree. My concern (besides the efficiency of that code) is that for subsequent requests the memory isn't released until the recycling occurs.
Added a link to a Katello forum discussion that is ongoing. Some discoveries so far:
* it's definitely the API server rather than anything else
* process recycling does help, but the memory is re-leaked extremely quickly
* it's worse with multiple API server processes running
Trimming the (RPM) changelogs appears to help. So there is likely overlap with https://github.com/pulp/pulpcore/issues/2138
Sounds like it's responsible for not just memory usage issues but also potentially timeouts (there is a BZ for that as well)
Closing in favor of https://github.com/pulp/pulpcore/issues/2138
Katello / Satellite are the main impacted users right now, and they now have multiple workarounds.
Could this issue be reopened? This is not solved yet. We are running pulpcore version 3.21.4
We have some scripts in place for automating some pulp settings. The scripts we are using rely on the Python pulp client bindings, and with these bindings it is not possible to set the fields that the API returns.
I can replicate the issue with the following api commands:
Ram usage (of the virtual machine) before the command: 3.7Gi of 24 Gi
```
# pulp rpm content list --name 'self-build-package' --limit 1000 | jq .[].location_href | wc -l
267
```
Ram usage (of the virtual machine) after executing the command: 6.6Gi of 24Gi
Our pulp virtual machine ran out of memory once when the pulpcore-api service was using 12.7Gi of memory. This was cleared after restarting the service, but that should not be the case.
@maartenbeeckmans How old is this instance, and are you working with RHEL RPMs synced from at least 6 months ago? If so, could you try out the changelog trimming feature and report back on whether it helps?
Also what is your pulp_rpm version? You only mentioned pulpcore.
Versions can be seen below, we are using the rpm's provided by theforeman.
```sh
# pulp status | jq .versions
[
{
"component": "core",
"version": "3.21.4",
"package": "pulpcore"
},
{
"component": "rpm",
"version": "3.18.9",
"package": "pulp-rpm"
},
{
"component": "file",
"version": "1.11.1",
"package": "pulp-file"
},
{
"component": "deb",
"version": "2.20.0",
"package": "pulp_deb"
},
{
"component": "container",
"version": "2.14.3",
"package": "pulp-container"
}
]
```
I've tried a number of different debugging approaches, I know that the issue is all of the RPM changelogs loaded while listing the packages, but I don't know why they're leaking. My best guess is that maybe the django queryset cache is being used on the viewset, but I'm not confident of that.
https://github.com/pulp/pulpcore/issues/2138 would help to work around it as does process recycling
So finally we have an answer to this saga. [This line](https://github.com/pulp/pulpcore/pull/2826/files/bc85beb34796682071e72d5ee44a9425c2b86b75#diff-f3d57ed180a61179704601b8a978567bf8e5c5c54e95458ed8b0b49bbc150f3cR15) was caching the view objects, rather than serving the intended goal of reducing the number of lookups.
Thanks to Hao Yu for finding the root cause (I need find out how!) and also Gerrod for putting in some time investigating once that was identified. | 2023-07-20T12:20:50 |
|
pulp/pulpcore | 4,098 | pulp__pulpcore-4098 | [
"2250"
] | deae4951394142927d0e8eda275ea1c7aa60aa38 | diff --git a/pulpcore/app/access_policy.py b/pulpcore/app/access_policy.py
--- a/pulpcore/app/access_policy.py
+++ b/pulpcore/app/access_policy.py
@@ -1,4 +1,3 @@
-from functools import lru_cache
from rest_access_policy import AccessPolicy
from rest_framework.exceptions import APIException
@@ -12,7 +11,6 @@ class AccessPolicyFromDB(AccessPolicy):
"""
@staticmethod
- @lru_cache
def get_access_policy(view):
"""
Retrieves the AccessPolicy from the DB or None if it doesn't exist.
| Gunicorn consuming excessive amounts of memory
**Version**
3.16.z
**Describe the bug**
Gunicorn consuming excessive amounts of memory, 3.5-4gb
**To Reproduce**
Unclear
**Expected behavior**
Probably not to have a single gunicorn process use 4gb of memory
**Additional context**
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=2035873
Katello forum discussion: https://community.theforeman.org/t/katello-4-5-foreman-3-3-memory-leak-in-gunicorn/29658/22
| One workaround is to enable process recycling for gunicorn workers. This doesn't get at the root cause though, which I'm curious about. Also how does it grow over time, does it grow slowly or are there specific requests that leave large amounts of memory pinned?
> One workaround is to enable process recycling for gunicorn workers.
We have a user doing exactly this, with some success. See https://bugzilla.redhat.com/show_bug.cgi?id=1994397#c43
copy pasting for transparency so it's in one place
```
To enable gunicorn worker recycling the procedure goes like this:
1. Create a gunicorn.conf.py under /var/lib/pulp containing max_requests and max_requests_jitter values
# cat /var/lib/pulp/gunicorn.conf.py
max_requests = 1000
max_requests_jitter = 50
2. restart the specific service systemctl restart pulpcore-api
3. gunicorn workers will then be automatically recycled every 1000 requests (with a jitter to avoid all workers restarting at the same time) One can reduce the max_requests value to have more aggressive recycling.
```
The installer [passes the gunicorn options along via command line params](https://github.com/pulp/pulp_installer/blob/669dbdde710762c59a6840c1b97cca119d6f6c0a/roles/pulp_content/templates/pulpcore-content.service.j2#L27-L32), so we could use `--max-requests` and `--max-requests-jitter` options like we do today.
To implement this as a default I expected an installer + operator + single container to all make the changes, and pulpcore to document its benefit.
This might not (fully) be a leak, look at what it's doing, it's fetching the full details of RPM packages from the /pulp/api/v3/rpm/packages/ endpoint. So: serializing 100-1000 packages at a time. That might still be a stretch but maybe if it's doing so really inefficiently?
@dralley I agree. My concern (besides the efficiency of that code) is that for subsequent requests the memory isn't released until the recycling occurs.
Added a link to a Katello forum discussion that is ongoing. Some discoveries so far:
* it's definitely the API server rather than anything else
* process recycling does help, but the memory is re-leaked extremely quickly
* it's worse with multiple API server processes running
Trimming the (RPM) changelogs appears to help. So there is likely overlap with https://github.com/pulp/pulpcore/issues/2138
Sounds like it's responsible for not just memory usage issues but also potentially timeouts (there is a BZ for that as well)
Closing in favor of https://github.com/pulp/pulpcore/issues/2138
Katello / Satellite are the main impacted users right now, and they now have multiple workarounds.
Could this issue be reopened? This is not solved yet. We are running pulpcore version 3.21.4
We have some scripts in place for automating some pulp settings. The scripts that we are using use the python pulp client bindings, and with these bindings it is not possible to limit the fields that the API returns.
I can replicate the issue with the following api commands:
Ram usage (of the virtual machine) before the command: 3.7Gi of 24 Gi
```
# pulp rpm content list --name 'self-build-package' --limit 1000 | jq .[].location_href | wc -l
267
```
Ram usage (of the virtual machine) after executing the command: 6.6Gi of 24Gi
Our pulp virtual machine ran out of memory once when the pulpcore-api service was using 12.7Gi of memory. This was cleared after restarting the service, but that should not be the case.
@maartenbeeckmans How old is this instance, and are you working with RHEL RPMs synced from at least 6 months ago? If so, could you try out the changelog trimming feature and report back on whether it helps?
Also what is your pulp_rpm version? You only mentioned pulpcore.
Versions can be seen below, we are using the rpm's provided by theforeman.
```sh
# pulp status | jq .versions
[
{
"component": "core",
"version": "3.21.4",
"package": "pulpcore"
},
{
"component": "rpm",
"version": "3.18.9",
"package": "pulp-rpm"
},
{
"component": "file",
"version": "1.11.1",
"package": "pulp-file"
},
{
"component": "deb",
"version": "2.20.0",
"package": "pulp_deb"
},
{
"component": "container",
"version": "2.14.3",
"package": "pulp-container"
}
]
```
I've tried a number of different debugging approaches, I know that the issue is all of the RPM changelogs loaded while listing the packages, but I don't know why they're leaking. My best guess is that maybe the django queryset cache is being used on the viewset, but I'm not confident of that.
https://github.com/pulp/pulpcore/issues/2138 would help to work around it as does process recycling
So finally we have an answer to this saga. [This line](https://github.com/pulp/pulpcore/pull/2826/files/bc85beb34796682071e72d5ee44a9425c2b86b75#diff-f3d57ed180a61179704601b8a978567bf8e5c5c54e95458ed8b0b49bbc150f3cR15) was caching the view objects, rather than serving the intended goal of reducing the number of lookups.
Thanks to Hao Yu for finding the root cause (I need find out how!) and also Gerrod for putting in some time investigating once that was identified. | 2023-07-20T12:21:03 |
|
pulp/pulpcore | 4,112 | pulp__pulpcore-4112 | [
"4111"
] | d033d84e9cdecae1798452fb0322cfca2a3a68bf | diff --git a/pulpcore/app/management/commands/handle-artifact-checksums.py b/pulpcore/app/management/commands/handle-artifact-checksums.py
--- a/pulpcore/app/management/commands/handle-artifact-checksums.py
+++ b/pulpcore/app/management/commands/handle-artifact-checksums.py
@@ -2,8 +2,6 @@
from gettext import gettext as _
-from aiohttp.client_exceptions import ClientResponseError
-
from django.conf import settings
from django.core.management import BaseCommand, CommandError
from django.db.models import Q, Sum
@@ -126,7 +124,7 @@ def _download_artifact(self, artifact, checksum, file_path):
downloader = remote.get_downloader(ra)
try:
dl_result = downloader.fetch()
- except ClientResponseError as e:
+ except Exception as e:
self.stdout.write(
_("Redownload failed from '{}': {}.").format(ra.url, str(e))
)
@@ -139,8 +137,9 @@ def _download_artifact(self, artifact, checksum, file_path):
setattr(artifact, checksum, dl_result.artifact_attributes[checksum])
restored = True
break
- self.stdout.write(_("Deleting unreparable file {}".format(file_path)))
- artifact.file.delete(save=False)
+ if not restored:
+ self.stdout.write(_("Deleting unrepairable file {}".format(file_path)))
+ artifact.file.delete(save=False)
else:
break
return restored
diff --git a/pulpcore/app/tasks/repository.py b/pulpcore/app/tasks/repository.py
--- a/pulpcore/app/tasks/repository.py
+++ b/pulpcore/app/tasks/repository.py
@@ -4,7 +4,6 @@
import asyncio
import hashlib
-from aiohttp.client_exceptions import ClientResponseError
from asgiref.sync import sync_to_async
from django.db import transaction
from rest_framework.serializers import ValidationError
@@ -68,7 +67,7 @@ async def _repair_ca(content_artifact, repaired=None):
"Artifact {} is unrepairable - no remote source".format(content_artifact.artifact)
)
log.warning(
- "Deleting file for the unreparable artifact {}".format(content_artifact.artifact)
+ "Deleting file for the unrepairable artifact {}".format(content_artifact.artifact)
)
await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
@@ -78,7 +77,7 @@ async def _repair_ca(content_artifact, repaired=None):
downloader = detail_remote.get_downloader(remote_artifact)
try:
dl_result = await downloader.run()
- except ClientResponseError as e:
+ except Exception as e:
log.warn(_("Redownload failed from '{}': {}.").format(remote_artifact.url, str(e)))
else:
if dl_result.artifact_attributes["sha256"] == content_artifact.artifact.sha256:
@@ -91,7 +90,7 @@ async def _repair_ca(content_artifact, repaired=None):
if repaired is not None:
await repaired.aincrement()
return True
- log.warning("Deleting file for the unreparable artifact {}".format(content_artifact.artifact))
+ log.warning("Deleting file for the unrepairable artifact {}".format(content_artifact.artifact))
await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
| Repair API should be tolerant of any failure during the repair process
**Version**
main and older
**Describe the bug**
We have made the repair API tolerant of a 404 error, but that is not the only error that someone can receive.
If the remote is associated with a proxy and that proxy is limiting connections or is misconfigured in some way, then a TimeoutException would be a common occurrence as well.
If the firewall is not properly configured, that may even result in a "Connection Reset".
No matter what the failure is, the Repair API should not halt, but should continue processing the rest of the units that it has identified as corrupted or missing.
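As an illustration only (not the pulpcore code; `units` and `fetch` are hypothetical stand-ins), the requested behaviour is per-unit fault isolation, where any failure, including a checksum mismatch, is logged and the loop moves on:
```python
import hashlib
import logging

log = logging.getLogger("repair-sketch")

def repair_all(units, fetch):
    """Repair every unit it can; never let one failure abort the whole run."""
    repaired = []
    for unit in units:
        try:
            data = fetch(unit["url"])  # may raise timeout, reset, HTTP error, ...
        except Exception as exc:
            log.warning("Redownload failed from %r: %s", unit["url"], exc)
            continue
        if hashlib.sha256(data).hexdigest() != unit["sha256"]:
            log.warning("Checksum mismatch for %r, skipping", unit["url"])
            continue
        repaired.append(unit["url"])
    return repaired
```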
**To Reproduce**
Steps to reproduce the behavior:
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2173692#c13
https://github.com/pulp/pulpcore/issues/3611 cope with 404
| I would request you to look at the 6.13.3 Failed QA BZ's traceback, which is here: https://bugzilla.redhat.com/show_bug.cgi?id=2218980#c2. We need to consider the "DigestValidationError" error as well in the fix for upstream issue https://github.com/pulp/pulpcore/issues/4111, as this checksum verification is another issue customers are facing. | 2023-07-21T12:25:03 |
|
pulp/pulpcore | 4,113 | pulp__pulpcore-4113 | [
"4111"
] | bb420cf3f81381952d9eb5826395af5cde27b2cc | diff --git a/pulpcore/app/management/commands/handle-artifact-checksums.py b/pulpcore/app/management/commands/handle-artifact-checksums.py
--- a/pulpcore/app/management/commands/handle-artifact-checksums.py
+++ b/pulpcore/app/management/commands/handle-artifact-checksums.py
@@ -2,8 +2,6 @@
from gettext import gettext as _
-from aiohttp.client_exceptions import ClientResponseError
-
from django.conf import settings
from django.core.management import BaseCommand, CommandError
from django.db.models import Q, Sum
@@ -128,7 +126,7 @@ def _download_artifact(self, artifact, checksum, file_path):
downloader = remote.get_downloader(ra)
try:
dl_result = downloader.fetch()
- except ClientResponseError as e:
+ except Exception as e:
self.stdout.write(
_("Redownload failed from '{}': {}.").format(ra.url, str(e))
)
@@ -141,8 +139,9 @@ def _download_artifact(self, artifact, checksum, file_path):
setattr(artifact, checksum, dl_result.artifact_attributes[checksum])
restored = True
break
- self.stdout.write(_("Deleting unreparable file {}".format(file_path)))
- artifact.file.delete(save=False)
+ if not restored:
+ self.stdout.write(_("Deleting unrepairable file {}".format(file_path)))
+ artifact.file.delete(save=False)
else:
break
return restored
diff --git a/pulpcore/app/tasks/repository.py b/pulpcore/app/tasks/repository.py
--- a/pulpcore/app/tasks/repository.py
+++ b/pulpcore/app/tasks/repository.py
@@ -4,7 +4,6 @@
import asyncio
import hashlib
-from aiohttp.client_exceptions import ClientResponseError
from asgiref.sync import sync_to_async
from django.core.files.storage import default_storage
from django.db import transaction
@@ -71,7 +70,7 @@ async def _repair_ca(content_artifact, repaired=None):
_("Artifact {} is unrepairable - no remote source".format(content_artifact.artifact))
)
log.warning(
- "Deleting file for the unreparable artifact {}".format(content_artifact.artifact)
+ "Deleting file for the unrepairable artifact {}".format(content_artifact.artifact)
)
await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
@@ -81,7 +80,7 @@ async def _repair_ca(content_artifact, repaired=None):
downloader = detail_remote.get_downloader(remote_artifact)
try:
dl_result = await downloader.run()
- except ClientResponseError as e:
+ except Exception as e:
log.warn(_("Redownload failed from '{}': {}.").format(remote_artifact.url, str(e)))
else:
if dl_result.artifact_attributes["sha256"] == content_artifact.artifact.sha256:
@@ -94,7 +93,7 @@ async def _repair_ca(content_artifact, repaired=None):
if repaired is not None:
await repaired.aincrement()
return True
- log.warning("Deleting file for the unreparable artifact {}".format(content_artifact.artifact))
+ log.warning("Deleting file for the unrepairable artifact {}".format(content_artifact.artifact))
await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
| Repair API should be tolerant of any failure during the repair process
**Version**
main and older
**Describe the bug**
We have made the repair API tolerant of a 404 error, but that is not the only error that someone can receive.
If the remote is associated with a proxy and that proxy is limiting connections or is misconfigured in some way, then a TimeoutException would be a common occurrence as well.
If the firewall is not properly configured, that may even result in a "Connection Reset".
No matter what the failure is, the Repair API should not halt, but should continue processing the rest of the units that it has identified as corrupted or missing.
**To Reproduce**
Steps to reproduce the behavior:
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2173692#c13
https://github.com/pulp/pulpcore/issues/3611 cope with 404
| I would request you to look at the 6.13.3 Failed QA BZ's traceback, which is here: https://bugzilla.redhat.com/show_bug.cgi?id=2218980#c2. We need to consider the "DigestValidationError" error as well in the fix for upstream issue https://github.com/pulp/pulpcore/issues/4111, as this checksum verification is another issue customers are facing. | 2023-07-21T16:22:47 |
|
pulp/pulpcore | 4,114 | pulp__pulpcore-4114 | [
"4111"
] | e1df7491a504248932197ab248374f8accc8ebc5 | diff --git a/pulpcore/app/management/commands/handle-artifact-checksums.py b/pulpcore/app/management/commands/handle-artifact-checksums.py
--- a/pulpcore/app/management/commands/handle-artifact-checksums.py
+++ b/pulpcore/app/management/commands/handle-artifact-checksums.py
@@ -2,8 +2,6 @@
from gettext import gettext as _
-from aiohttp.client_exceptions import ClientResponseError
-
from django.conf import settings
from django.core.management import BaseCommand, CommandError
from django.db.models import Q, Sum
@@ -126,7 +124,7 @@ def _download_artifact(self, artifact, checksum, file_path):
downloader = remote.get_downloader(ra)
try:
dl_result = downloader.fetch()
- except ClientResponseError as e:
+ except Exception as e:
self.stdout.write(
_("Redownload failed from '{}': {}.").format(ra.url, str(e))
)
@@ -139,8 +137,9 @@ def _download_artifact(self, artifact, checksum, file_path):
setattr(artifact, checksum, dl_result.artifact_attributes[checksum])
restored = True
break
- self.stdout.write(_("Deleting unreparable file {}".format(file_path)))
- artifact.file.delete(save=False)
+ if not restored:
+ self.stdout.write(_("Deleting unrepairable file {}".format(file_path)))
+ artifact.file.delete(save=False)
else:
break
return restored
diff --git a/pulpcore/app/tasks/repository.py b/pulpcore/app/tasks/repository.py
--- a/pulpcore/app/tasks/repository.py
+++ b/pulpcore/app/tasks/repository.py
@@ -4,7 +4,6 @@
import asyncio
import hashlib
-from aiohttp.client_exceptions import ClientResponseError
from asgiref.sync import sync_to_async
from django.core.files.storage import default_storage
from django.db import transaction
@@ -71,7 +70,7 @@ async def _repair_ca(content_artifact, repaired=None):
"Artifact {} is unrepairable - no remote source".format(content_artifact.artifact)
)
log.warning(
- "Deleting file for the unreparable artifact {}".format(content_artifact.artifact)
+ "Deleting file for the unrepairable artifact {}".format(content_artifact.artifact)
)
await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
@@ -81,7 +80,7 @@ async def _repair_ca(content_artifact, repaired=None):
downloader = detail_remote.get_downloader(remote_artifact)
try:
dl_result = await downloader.run()
- except ClientResponseError as e:
+ except Exception as e:
log.warn(_("Redownload failed from '{}': {}.").format(remote_artifact.url, str(e)))
else:
if dl_result.artifact_attributes["sha256"] == content_artifact.artifact.sha256:
@@ -94,7 +93,7 @@ async def _repair_ca(content_artifact, repaired=None):
if repaired is not None:
await repaired.aincrement()
return True
- log.warning("Deleting file for the unreparable artifact {}".format(content_artifact.artifact))
+ log.warning("Deleting file for the unrepairable artifact {}".format(content_artifact.artifact))
await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
| Repair API should be tolerant of any failure during the repair process
**Version**
main and older
**Describe the bug**
We have made the repair API tolerant of a 404 error, but that is not the only error that someone can receive.
If the remote is associated with a proxy and that proxy is limiting connections or is misconfigured in some way, then a TimeoutException would be a common occurrence as well.
If the firewall is not properly configured, that may even result in a "Connection Reset".
No matter what the failure is, the Repair API should not halt, but should continue processing the rest of the units that it has identified as corrupted or missing.
**To Reproduce**
Steps to reproduce the behavior:
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2173692#c13
https://github.com/pulp/pulpcore/issues/3611 cope with 404
| I would request you to look at the 6.13.3 Failed QA BZ's traceback, which is here: https://bugzilla.redhat.com/show_bug.cgi?id=2218980#c2. We need to consider the "DigestValidationError" error as well in the fix for upstream issue https://github.com/pulp/pulpcore/issues/4111, as this checksum verification is another issue customers are facing. | 2023-07-21T16:23:00 |
|
pulp/pulpcore | 4,115 | pulp__pulpcore-4115 | [
"4111"
] | 748d7e522fddaa4a4ce016880da631a70a11bc81 | diff --git a/pulpcore/app/management/commands/handle-artifact-checksums.py b/pulpcore/app/management/commands/handle-artifact-checksums.py
--- a/pulpcore/app/management/commands/handle-artifact-checksums.py
+++ b/pulpcore/app/management/commands/handle-artifact-checksums.py
@@ -2,8 +2,6 @@
from gettext import gettext as _
-from aiohttp.client_exceptions import ClientResponseError
-
from django.conf import settings
from django.core.management import BaseCommand, CommandError
from django.db.models import Q, Sum
@@ -126,7 +124,7 @@ def _download_artifact(self, artifact, checksum, file_path):
downloader = remote.get_downloader(ra)
try:
dl_result = downloader.fetch()
- except ClientResponseError as e:
+ except Exception as e:
self.stdout.write(
_("Redownload failed from '{}': {}.").format(ra.url, str(e))
)
@@ -139,8 +137,9 @@ def _download_artifact(self, artifact, checksum, file_path):
setattr(artifact, checksum, dl_result.artifact_attributes[checksum])
restored = True
break
- self.stdout.write(_("Deleting unreparable file {}".format(file_path)))
- artifact.file.delete(save=False)
+ if not restored:
+ self.stdout.write(_("Deleting unrepairable file {}".format(file_path)))
+ artifact.file.delete(save=False)
else:
break
return restored
diff --git a/pulpcore/app/tasks/repository.py b/pulpcore/app/tasks/repository.py
--- a/pulpcore/app/tasks/repository.py
+++ b/pulpcore/app/tasks/repository.py
@@ -4,7 +4,6 @@
import asyncio
import hashlib
-from aiohttp.client_exceptions import ClientResponseError
from asgiref.sync import sync_to_async
from django.core.files.storage import default_storage
from django.db import transaction
@@ -71,7 +70,7 @@ async def _repair_ca(content_artifact, repaired=None):
"Artifact {} is unrepairable - no remote source".format(content_artifact.artifact)
)
log.warning(
- "Deleting file for the unreparable artifact {}".format(content_artifact.artifact)
+ "Deleting file for the unrepairable artifact {}".format(content_artifact.artifact)
)
await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
@@ -81,7 +80,7 @@ async def _repair_ca(content_artifact, repaired=None):
downloader = detail_remote.get_downloader(remote_artifact)
try:
dl_result = await downloader.run()
- except ClientResponseError as e:
+ except Exception as e:
log.warn(_("Redownload failed from '{}': {}.").format(remote_artifact.url, str(e)))
else:
if dl_result.artifact_attributes["sha256"] == content_artifact.artifact.sha256:
@@ -94,7 +93,7 @@ async def _repair_ca(content_artifact, repaired=None):
if repaired is not None:
await repaired.aincrement()
return True
- log.warning("Deleting file for the unreparable artifact {}".format(content_artifact.artifact))
+ log.warning("Deleting file for the unrepairable artifact {}".format(content_artifact.artifact))
await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
| Repair API should be tolerant of any failure during the repair process
**Version**
main and older
**Describe the bug**
We have made the repair API tolerant of a 404 error, but that is not the only error that someone can receive.
If the remote is associated with a proxy and that proxy is limiting connections or is misconfigured in some way, then a TimeoutException would be a common occurrence as well.
If the firewall is not properly configured, that may even result in a "Connection Reset".
No matter what the failure is, the Repair API should not halt, but should continue processing the rest of the units that it has identified as corrupted or missing.
**To Reproduce**
Steps to reproduce the behavior:
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2173692#c13
https://github.com/pulp/pulpcore/issues/3611 cope with 404
| I would request you to look at the 6.13.3 Failed QA BZ's traceback, which is here: https://bugzilla.redhat.com/show_bug.cgi?id=2218980#c2. We need to consider the "DigestValidationError" error as well in the fix for upstream issue https://github.com/pulp/pulpcore/issues/4111, as this checksum verification is another issue customers are facing. | 2023-07-21T16:23:13 |
|
pulp/pulpcore | 4,116 | pulp__pulpcore-4116 | [
"4111"
] | 4171dc23241031dbc46a25ce01de505fbc4615cc | diff --git a/pulpcore/app/management/commands/handle-artifact-checksums.py b/pulpcore/app/management/commands/handle-artifact-checksums.py
--- a/pulpcore/app/management/commands/handle-artifact-checksums.py
+++ b/pulpcore/app/management/commands/handle-artifact-checksums.py
@@ -2,8 +2,6 @@
from gettext import gettext as _
-from aiohttp.client_exceptions import ClientResponseError
-
from django.conf import settings
from django.core.management import BaseCommand, CommandError
from django.db.models import Q, Sum
@@ -126,7 +124,7 @@ def _download_artifact(self, artifact, checksum, file_path):
downloader = remote.get_downloader(ra)
try:
dl_result = downloader.fetch()
- except ClientResponseError as e:
+ except Exception as e:
self.stdout.write(
_("Redownload failed from '{}': {}.").format(ra.url, str(e))
)
@@ -139,8 +137,9 @@ def _download_artifact(self, artifact, checksum, file_path):
setattr(artifact, checksum, dl_result.artifact_attributes[checksum])
restored = True
break
- self.stdout.write(_("Deleting unreparable file {}".format(file_path)))
- artifact.file.delete(save=False)
+ if not restored:
+ self.stdout.write(_("Deleting unrepairable file {}".format(file_path)))
+ artifact.file.delete(save=False)
else:
break
return restored
diff --git a/pulpcore/app/tasks/repository.py b/pulpcore/app/tasks/repository.py
--- a/pulpcore/app/tasks/repository.py
+++ b/pulpcore/app/tasks/repository.py
@@ -4,7 +4,6 @@
import asyncio
import hashlib
-from aiohttp.client_exceptions import ClientResponseError
from asgiref.sync import sync_to_async
from django.db import transaction
from rest_framework.serializers import ValidationError
@@ -71,7 +70,7 @@ async def _repair_ca(content_artifact, repaired=None):
"Artifact {} is unrepairable - no remote source".format(content_artifact.artifact)
)
log.warning(
- "Deleting file for the unreparable artifact {}".format(content_artifact.artifact)
+ "Deleting file for the unrepairable artifact {}".format(content_artifact.artifact)
)
await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
@@ -81,7 +80,7 @@ async def _repair_ca(content_artifact, repaired=None):
downloader = detail_remote.get_downloader(remote_artifact)
try:
dl_result = await downloader.run()
- except ClientResponseError as e:
+ except Exception as e:
log.warn(_("Redownload failed from '{}': {}.").format(remote_artifact.url, str(e)))
else:
if dl_result.artifact_attributes["sha256"] == content_artifact.artifact.sha256:
@@ -94,7 +93,7 @@ async def _repair_ca(content_artifact, repaired=None):
if repaired is not None:
await repaired.aincrement()
return True
- log.warning("Deleting file for the unreparable artifact {}".format(content_artifact.artifact))
+ log.warning("Deleting file for the unrepairable artifact {}".format(content_artifact.artifact))
await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
| Repair API should be tolerant of any failure during the repair process
**Version**
main and older
**Describe the bug**
We have made the repair API tolerant of a 404 error, but that is not the only error that someone can receive.
If the remote is associated with a proxy and that proxy is limiting connections or is misconfigured in some way, then a TimeoutException would be a common occurrence as well.
If the firewall is not properly configured, that may even result in a "Connection Reset".
No matter what the failure is, the Repair API should not halt, but should continue processing the rest of the units that it has identified as corrupted or missing.
**To Reproduce**
Steps to reproduce the behavior:
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2173692#c13
https://github.com/pulp/pulpcore/issues/3611 cope with 404
| I would request you to look at the 6.13.3 Failed QA BZ's traceback, which is here: https://bugzilla.redhat.com/show_bug.cgi?id=2218980#c2. We need to consider the "DigestValidationError" error as well in the fix for upstream issue https://github.com/pulp/pulpcore/issues/4111, as this checksum verification is another issue customers are facing. | 2023-07-21T16:23:26 |
|
pulp/pulpcore | 4,117 | pulp__pulpcore-4117 | [
"4111"
] | d345b1c2f435616fa8371ed60875e7a885bd1bf8 | diff --git a/pulpcore/app/management/commands/handle-artifact-checksums.py b/pulpcore/app/management/commands/handle-artifact-checksums.py
--- a/pulpcore/app/management/commands/handle-artifact-checksums.py
+++ b/pulpcore/app/management/commands/handle-artifact-checksums.py
@@ -2,8 +2,6 @@
from gettext import gettext as _
-from aiohttp.client_exceptions import ClientResponseError
-
from django.conf import settings
from django.core.management import BaseCommand, CommandError
from django.db.models import Q, Sum
@@ -126,7 +124,7 @@ def _download_artifact(self, artifact, checksum, file_path):
downloader = remote.get_downloader(ra)
try:
dl_result = downloader.fetch()
- except ClientResponseError as e:
+ except Exception as e:
self.stdout.write(
_("Redownload failed from '{}': {}.").format(ra.url, str(e))
)
@@ -139,8 +137,9 @@ def _download_artifact(self, artifact, checksum, file_path):
setattr(artifact, checksum, dl_result.artifact_attributes[checksum])
restored = True
break
- self.stdout.write(_("Deleting unreparable file {}".format(file_path)))
- artifact.file.delete(save=False)
+ if not restored:
+ self.stdout.write(_("Deleting unrepairable file {}".format(file_path)))
+ artifact.file.delete(save=False)
else:
break
return restored
diff --git a/pulpcore/app/tasks/repository.py b/pulpcore/app/tasks/repository.py
--- a/pulpcore/app/tasks/repository.py
+++ b/pulpcore/app/tasks/repository.py
@@ -4,7 +4,6 @@
import asyncio
import hashlib
-from aiohttp.client_exceptions import ClientResponseError
from asgiref.sync import sync_to_async
from django.db import transaction
from rest_framework.serializers import ValidationError
@@ -68,7 +67,7 @@ async def _repair_ca(content_artifact, repaired=None):
"Artifact {} is unrepairable - no remote source".format(content_artifact.artifact)
)
log.warning(
- "Deleting file for the unreparable artifact {}".format(content_artifact.artifact)
+ "Deleting file for the unrepairable artifact {}".format(content_artifact.artifact)
)
await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
@@ -78,7 +77,7 @@ async def _repair_ca(content_artifact, repaired=None):
downloader = detail_remote.get_downloader(remote_artifact)
try:
dl_result = await downloader.run()
- except ClientResponseError as e:
+ except Exception as e:
log.warn(_("Redownload failed from '{}': {}.").format(remote_artifact.url, str(e)))
else:
if dl_result.artifact_attributes["sha256"] == content_artifact.artifact.sha256:
@@ -91,7 +90,7 @@ async def _repair_ca(content_artifact, repaired=None):
if repaired is not None:
await repaired.aincrement()
return True
- log.warning("Deleting file for the unreparable artifact {}".format(content_artifact.artifact))
+ log.warning("Deleting file for the unrepairable artifact {}".format(content_artifact.artifact))
await sync_to_async(content_artifact.artifact.file.delete)(save=False)
return False
| Repair API should be tolerant of any failure during the repair process
**Version**
main and older
**Describe the bug**
We have made the repair API tolerant of a 404 error, but that is not the only error that someone can receive.
If the remote is associated with a proxy and that proxy is limiting connections or is misconfigured in some way, then a TimeoutException would be a common occurrence as well.
If the firewall is not properly configured, that may even result in a "Connection Reset".
No matter what the failure is, the Repair API should not halt, but should continue processing the rest of the units that it has identified as corrupted or missing.
**To Reproduce**
Steps to reproduce the behavior:
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2173692#c13
https://github.com/pulp/pulpcore/issues/3611 cope with 404
| I would request you to look at the 6.13.3 Failed QA BZ's traceback, which is here: https://bugzilla.redhat.com/show_bug.cgi?id=2218980#c2. We need to consider the "DigestValidationError" error as well in the fix for upstream issue https://github.com/pulp/pulpcore/issues/4111, as this checksum verification is another issue customers are facing. | 2023-07-21T16:23:39 |
|
pulp/pulpcore | 4,129 | pulp__pulpcore-4129 | [
"2250"
] | 59e989aca7e80470662778381315e9bd0849094a | diff --git a/pulpcore/app/access_policy.py b/pulpcore/app/access_policy.py
--- a/pulpcore/app/access_policy.py
+++ b/pulpcore/app/access_policy.py
@@ -1,4 +1,3 @@
-from functools import lru_cache
from rest_access_policy import AccessPolicy
from rest_framework.exceptions import APIException
@@ -12,7 +11,6 @@ class AccessPolicyFromDB(AccessPolicy):
"""
@staticmethod
- @lru_cache
def get_access_policy(view):
"""
Retrieves the AccessPolicy from the DB or None if it doesn't exist.
| Gunicorn consuming excessive amounts of memory
**Version**
3.16.z
**Describe the bug**
Gunicorn consuming excessive amounts of memory, 3.5-4gb
**To Reproduce**
Unclear
**Expected behavior**
Probably not to have a single gunicorn process use 4gb of memory
**Additional context**
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=2035873
Katello forum discussion: https://community.theforeman.org/t/katello-4-5-foreman-3-3-memory-leak-in-gunicorn/29658/22
| One workaround is to enable process recycling for gunicorn workers. This doesn't get at the root cause though, which I'm curious about. Also how does it grow over time, does it grow slowly or are there specific requests that leave large amounts of memory pinned?
> One workaround is to enable process recycling for gunicorn workers.
We have a user doing exactly this, with some success. See https://bugzilla.redhat.com/show_bug.cgi?id=1994397#c43
copy pasting for transparency so it's in one place
```
To enable gunicorn worker recycling the procedure goes like this:
1. Create a gunicorn.conf.py under /var/lib/pulp containing max_requests and max_requests_jitter values
# cat /var/lib/pulp/gunicorn.conf.py
max_requests = 1000
max_requests_jitter = 50
2. restart the specific service systemctl restart pulpcore-api
3. gunicorn workers will then be automatically recycled every 1000 requests (with a jitter to avoid all workers restarting at the same time) One can reduce the max_requests value to have more aggressive recycling.
```
The installer [passes the gunicorn options along via command line params](https://github.com/pulp/pulp_installer/blob/669dbdde710762c59a6840c1b97cca119d6f6c0a/roles/pulp_content/templates/pulpcore-content.service.j2#L27-L32), so we could use `--max-requests` and `--max-requests-jitter` options like we do today.
To implement this as a default I expected an installer + operator + single container to all make the changes, and pulpcore to document its benefit.
This might not (fully) be a leak, look at what it's doing, it's fetching the full details of RPM packages from the /pulp/api/v3/rpm/packages/ endpoint. So: serializing 100-1000 packages at a time. That might still be a stretch but maybe if it's doing so really inefficiently?
@dralley I agree. My concern (besides the efficiency of that code) is that for subsequent requests the memory isn't released until the recycling occurs.
Added a link to a Katello forum discussion that is ongoing. Some discoveries so far:
* it's definitely the API server rather than anything else
* process recycling does help, but the memory is re-leaked extremely quickly
* it's worse with multiple API server processes running
Trimming the (RPM) changelogs appears to help. So there is likely overlap with https://github.com/pulp/pulpcore/issues/2138
Sounds like it's responsible for not just memory usage issues but also potentially timeouts (there is a BZ for that as well)
Closing in favor of https://github.com/pulp/pulpcore/issues/2138
Katello / Satellite are the main impacted users right now, and they now have multiple workarounds.
Could this issue be reopened? This is not solved yet. We are running pulpcore version 3.21.4
We have some scripts in place for automating some pulp settings. The scripts that we are using use the python pulp client bindings, and with these bindings it is not possible to limit the fields that the API returns.
I can replicate the issue with the following api commands:
Ram usage (of the virtual machine) before the command: 3.7Gi of 24 Gi
```
# pulp rpm content list --name 'self-build-package' --limit 1000 | jq .[].location_href | wc -l
267
```
Ram usage (of the virtual machine) after executing the command: 6.6Gi of 24Gi
Our pulp virtual machine ran out of memory once when the pulpcore-api service was using 12.7Gi of memory. This was cleared after restarting the service, but that should not be the case.
@maartenbeeckmans How old is this instance, and are you working with RHEL RPMs synced from at least 6 months ago? If so, could you try out the changelog trimming feature and report back on whether it helps?
Also what is your pulp_rpm version? You only mentioned pulpcore.
Versions can be seen below, we are using the rpm's provided by theforeman.
```sh
# pulp status | jq .versions
[
{
"component": "core",
"version": "3.21.4",
"package": "pulpcore"
},
{
"component": "rpm",
"version": "3.18.9",
"package": "pulp-rpm"
},
{
"component": "file",
"version": "1.11.1",
"package": "pulp-file"
},
{
"component": "deb",
"version": "2.20.0",
"package": "pulp_deb"
},
{
"component": "container",
"version": "2.14.3",
"package": "pulp-container"
}
]
```
I've tried a number of different debugging approaches, I know that the issue is all of the RPM changelogs loaded while listing the packages, but I don't know why they're leaking. My best guess is that maybe the django queryset cache is being used on the viewset, but I'm not confident of that.
https://github.com/pulp/pulpcore/issues/2138 would help to work around it as does process recycling
So finally we have an answer to this saga. [This line](https://github.com/pulp/pulpcore/pull/2826/files/bc85beb34796682071e72d5ee44a9425c2b86b75#diff-f3d57ed180a61179704601b8a978567bf8e5c5c54e95458ed8b0b49bbc150f3cR15) was caching the view objects, rather than serving the intended goal of reducing the number of lookups.
Thanks to Hao Yu for finding the root cause (I need to find out how!) and also Gerrod for putting in some time investigating once that was identified. | 2023-07-25T16:49:45 |
|
pulp/pulpcore | 4,130 | pulp__pulpcore-4130 | [
"4107"
] | 35df2cdd22ea64cb470940c032735f7e1508d073 | diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py
--- a/pulpcore/download/base.py
+++ b/pulpcore/download/base.py
@@ -182,8 +182,8 @@ def fetch(self):
Raises:
Exception: Any fatal exception emitted during downloading
"""
- done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))
- return done.pop().result()
+ result = asyncio.get_event_loop().run_until_complete(self.run())
+ return result
def _record_size_and_digests_for_data(self, data):
"""
| BaseDownloader.fetch passes coroutine to asyncio.wait which is forbidden in python 3.11
Python 3.8 deprecated passing coroutines to `asyncio.wait` and Python 3.11 will now [raise an error](https://github.com/python/cpython/blob/a6313d78f21f79ca64dedd38e637509dc530a1b6/Lib/asyncio/tasks.py#L414C13-L414C13). This causes the BaseDownloader.fetch call to fail on Python 3.11 https://github.com/pulp/pulpcore/blob/9dbcc8810f97f53297a933df2e1b74cdc324a8ea/pulpcore/download/base.py#L185 .
Python provides the solution in the error message: "Passing coroutines is forbidden, use tasks explicitly."
I believe this can be fixed by explicitly converting the coroutine to a task using asyncio's `create_task`
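A minimal sketch of the incompatibility and the two ways out, either wrapping the coroutine in a task as suggested above, or dropping `asyncio.wait` entirely since there is only one awaitable (which is what the patch above does):
```python
import asyncio

async def run():
    return 42

loop = asyncio.new_event_loop()
try:
    # On Python 3.11, asyncio.wait([run()]) raises because run() is a bare
    # coroutine. Wrapping it in a task keeps the old structure working:
    done, _ = loop.run_until_complete(asyncio.wait([loop.create_task(run())]))
    assert done.pop().result() == 42

    # With a single awaitable, asyncio.wait adds nothing; awaiting it
    # directly is simpler and works on every supported Python version:
    assert loop.run_until_complete(run()) == 42
finally:
    loop.close()
```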
| Honestly why is it wrapped inside `asyncio.wait` anyway?
Also please read the python docs carefully about when things got added. There was confusion about the availability of create_task vs. ensure_future depending on the python version before.
We need to maintain at least python 3.8 compatibility. | 2023-07-25T19:42:09 |
|
pulp/pulpcore | 4,150 | pulp__pulpcore-4150 | [
"4107"
] | 1bf29ccd6b94c8606191e8d521926132034d1223 | diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py
--- a/pulpcore/download/base.py
+++ b/pulpcore/download/base.py
@@ -187,8 +187,8 @@ def fetch(self):
Raises:
Exception: Any fatal exception emitted during downloading
"""
- done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))
- return done.pop().result()
+ result = asyncio.get_event_loop().run_until_complete(self.run())
+ return result
def _record_size_and_digests_for_data(self, data):
"""
| BaseDownloader.fetch passes coroutine to asyncio.wait which is forbidden in python 3.11
Python 3.8 deprecated passing coroutines to `asyncio.wait` and Python 3.11 will now [raise an error](https://github.com/python/cpython/blob/a6313d78f21f79ca64dedd38e637509dc530a1b6/Lib/asyncio/tasks.py#L414C13-L414C13). This causes the BaseDownloader.fetch call to fail on Python 3.11 https://github.com/pulp/pulpcore/blob/9dbcc8810f97f53297a933df2e1b74cdc324a8ea/pulpcore/download/base.py#L185 .
Python provides the solution in the error message: "Passing coroutines is forbidden, use tasks explicitly."
I believe this can be fixed by explicitly converting the coroutine to a task using asyncio's `create_task`
| Honestly why is it wrapped inside `asyncio.wait` anyway?
Also please read the python docs carefully about when things got added. There was confusion about the availability of create_task vs. ensure_future depending on the python version before.
We need to maintain at least python 3.8 compatibility. | 2023-07-26T16:23:15 |
|
pulp/pulpcore | 4,151 | pulp__pulpcore-4151 | [
"4107"
] | ad581ab23a69521fb4d8f9fec15f91cf62c829f9 | diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py
--- a/pulpcore/download/base.py
+++ b/pulpcore/download/base.py
@@ -194,8 +194,8 @@ def fetch(self):
Raises:
Exception: Any fatal exception emitted during downloading
"""
- done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))
- return done.pop().result()
+ result = asyncio.get_event_loop().run_until_complete(self.run())
+ return result
def _record_size_and_digests_for_data(self, data):
"""
| BaseDownloader.fetch passes coroutine to asyncio.wait which is forbidden in python 3.11
Python 3.8 deprecated passing coroutines to `asyncio.wait` and Python 3.11 will now [raise an error](https://github.com/python/cpython/blob/a6313d78f21f79ca64dedd38e637509dc530a1b6/Lib/asyncio/tasks.py#L414C13-L414C13). This causes the BaseDownloader.fetch call to fail on Python 3.11 https://github.com/pulp/pulpcore/blob/9dbcc8810f97f53297a933df2e1b74cdc324a8ea/pulpcore/download/base.py#L185 .
Python provides the solution in the error message: "Passing coroutines is forbidden, use tasks explicitly."
I believe this can be fixed by explicitly converting the coroutine to a task using asyncio's `create_task`
| Honestly why is it wrapped inside `asyncio.wait` anyway?
Also please read the python docs carefully about when things got added. There was confusion about the availability of create_task vs. ensure_future depending on the python version before.
We need to maintain at least python 3.8 compatibility. | 2023-07-26T16:23:28 |
|
pulp/pulpcore | 4,152 | pulp__pulpcore-4152 | [
"4107"
] | c3d53f870ca0a95e98c0dc83e58d17c862389319 | diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py
--- a/pulpcore/download/base.py
+++ b/pulpcore/download/base.py
@@ -182,8 +182,8 @@ def fetch(self):
Raises:
Exception: Any fatal exception emitted during downloading
"""
- done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))
- return done.pop().result()
+ result = asyncio.get_event_loop().run_until_complete(self.run())
+ return result
def _record_size_and_digests_for_data(self, data):
"""
| BaseDownloader.fetch passes coroutine to asyncio.wait which is forbidden in python 3.11
Python 3.8 deprecated passing coroutines to `asyncio.wait` and Python 3.11 will now [raise an error](https://github.com/python/cpython/blob/a6313d78f21f79ca64dedd38e637509dc530a1b6/Lib/asyncio/tasks.py#L414C13-L414C13). This causes the BaseDownloader.fetch call to fail on Python 3.11 https://github.com/pulp/pulpcore/blob/9dbcc8810f97f53297a933df2e1b74cdc324a8ea/pulpcore/download/base.py#L185 .
Python provides the solution in the error message: "Passing coroutines is forbidden, use tasks explicitly."
I believe this can be fixed by explicitly converting the coroutine to a task using asyncio's `create_task`
| Honestly why is it wrapped inside `asyncio.wait` anyway?
Also please read the python docs carefully about when things got added. There was confusion about the availability of create_task vs. ensure_future depending on the python version before.
We need to maintain at least python 3.8 compatibility. | 2023-07-26T16:23:42 |
|
pulp/pulpcore | 4,153 | pulp__pulpcore-4153 | [
"4107"
] | 3eaefb75c2a24b9768a0674655b35db30dce587a | diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py
--- a/pulpcore/download/base.py
+++ b/pulpcore/download/base.py
@@ -182,8 +182,8 @@ def fetch(self):
Raises:
Exception: Any fatal exception emitted during downloading
"""
- done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))
- return done.pop().result()
+ result = asyncio.get_event_loop().run_until_complete(self.run())
+ return result
def _record_size_and_digests_for_data(self, data):
"""
| BaseDownloader.fetch passes coroutine to asyncio.wait which is forbidden in python 3.11
Python 3.8 deprecated passing coroutines to `asyncio.wait` and Python 3.11 will now [raise an error](https://github.com/python/cpython/blob/a6313d78f21f79ca64dedd38e637509dc530a1b6/Lib/asyncio/tasks.py#L414C13-L414C13). This causes the BaseDownloader.fetch call to fail on Python 3.11 https://github.com/pulp/pulpcore/blob/9dbcc8810f97f53297a933df2e1b74cdc324a8ea/pulpcore/download/base.py#L185 .
Python provides the solution in the error message: "Passing coroutines is forbidden, use tasks explicitly."
I believe this can be fixed by explicitly converting the coroutine to a task using asyncio's `create_task`
| Honestly why is it wrapped inside `asyncio.wait` anyway?
Also please read the python docs carefully about when things got added. There was confusion about the availability of create_task vs. ensure_future depending on the python version before.
We need to maintain at least python 3.8 compatibility. | 2023-07-26T16:23:55 |
|
pulp/pulpcore | 4,154 | pulp__pulpcore-4154 | [
"4107"
] | 8e7283343c799c19f5670a0b9e65b971155d8099 | diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py
--- a/pulpcore/download/base.py
+++ b/pulpcore/download/base.py
@@ -182,8 +182,8 @@ def fetch(self):
Raises:
Exception: Any fatal exception emitted during downloading
"""
- done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))
- return done.pop().result()
+ result = asyncio.get_event_loop().run_until_complete(self.run())
+ return result
def _record_size_and_digests_for_data(self, data):
"""
| BaseDownloader.fetch passes coroutine to asyncio.wait which is forbidden in python 3.11
Python 3.8 deprecated passing coroutines to `asyncio.wait` and Python 3.11 will now [raise an error](https://github.com/python/cpython/blob/a6313d78f21f79ca64dedd38e637509dc530a1b6/Lib/asyncio/tasks.py#L414C13-L414C13). This causes the BaseDownloader.fetch call to fail on Python 3.11 https://github.com/pulp/pulpcore/blob/9dbcc8810f97f53297a933df2e1b74cdc324a8ea/pulpcore/download/base.py#L185 .
Python provides the solution in the error message: "Passing coroutines is forbidden, use tasks explicitly."
I believe this can be fixed by explicitly converting the coroutine to a task using asyncio's `create_task`
| Honestly why is it wrapped inside `asyncio.wait` anyway?
Also please read the python docs carefully about when things got added. There was confusion about the availability of create_task vs. ensure_future depending on the python version before.
We need to maintain at least python 3.8 compatibility. | 2023-07-26T16:24:08 |
|
pulp/pulpcore | 4,155 | pulp__pulpcore-4155 | [
"4107"
] | 04dd07b3ce91a45757634670f24eb7851bafa631 | diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py
--- a/pulpcore/download/base.py
+++ b/pulpcore/download/base.py
@@ -182,8 +182,8 @@ def fetch(self):
Raises:
Exception: Any fatal exception emitted during downloading
"""
- done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))
- return done.pop().result()
+ result = asyncio.get_event_loop().run_until_complete(self.run())
+ return result
def _record_size_and_digests_for_data(self, data):
"""
| BaseDownloader.fetch passes coroutine to asyncio.wait which is forbidden in python 3.11
Python 3.8 deprecated passing coroutines to `asyncio.wait` and Python 3.11 will now [raise an error](https://github.com/python/cpython/blob/a6313d78f21f79ca64dedd38e637509dc530a1b6/Lib/asyncio/tasks.py#L414C13-L414C13). This causes the BaseDownloader.fetch call to fail on Python 3.11 https://github.com/pulp/pulpcore/blob/9dbcc8810f97f53297a933df2e1b74cdc324a8ea/pulpcore/download/base.py#L185 .
Python provides the solution in the error message: "Passing coroutines is forbidden, use tasks explicitly."
I believe this can be fixed by explicitly converting the coroutine to a task using asyncio's `create_task`
| Honestly why is it wrapped inside `asyncio.wait` anyway?
Also please read the python docs carefully about when things got added. There was confusion about the availability of create_task vs. ensure_future depending on the python version before.
We need to maintain at least python 3.8 compatibility. | 2023-07-26T16:24:21 |
|
pulp/pulpcore | 4,156 | pulp__pulpcore-4156 | [
"4107"
] | 8eb9092768de789da7df8d8e3fa87a95b3f1f645 | diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py
--- a/pulpcore/download/base.py
+++ b/pulpcore/download/base.py
@@ -182,8 +182,8 @@ def fetch(self):
Raises:
Exception: Any fatal exception emitted during downloading
"""
- done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))
- return done.pop().result()
+ result = asyncio.get_event_loop().run_until_complete(self.run())
+ return result
def _record_size_and_digests_for_data(self, data):
"""
| BaseDownloader.fetch passes coroutine to asyncio.wait which is forbidden in python 3.11
Python 3.8 deprecated passing coroutines to `asyncio.wait` and Python 3.11 will now [raise an error](https://github.com/python/cpython/blob/a6313d78f21f79ca64dedd38e637509dc530a1b6/Lib/asyncio/tasks.py#L414C13-L414C13). This causes the BaseDownloader.fetch call to fail on Python 3.11 https://github.com/pulp/pulpcore/blob/9dbcc8810f97f53297a933df2e1b74cdc324a8ea/pulpcore/download/base.py#L185 .
Python provides the solution in the error message: "Passing coroutines is forbidden, use tasks explicitly."
I believe this can be fixed by explicitly converting the coroutine to a task using asyncio's `create_task`
| Honestly why is it wrapped inside `asyncio.wait` anyway?
Also please read the python docs carefully about when things got added. There was confusion about the availability of create_task vs. ensure_future depending on the python version before.
We need to maintain at least python 3.8 compatibility. | 2023-07-26T16:24:34 |
|
pulp/pulpcore | 4,160 | pulp__pulpcore-4160 | [
"4068"
] | 4ced14e949d2311627b82380e63d0853247df764 | diff --git a/pulpcore/app/settings.py b/pulpcore/app/settings.py
--- a/pulpcore/app/settings.py
+++ b/pulpcore/app/settings.py
@@ -302,6 +302,10 @@
"from pulpcore.tasking.util import cancel_task",
]
+# What percentage of available-workers will pulpimport use at a time, max
+# By default, use all available workers.
+IMPORT_WORKERS_PERCENT = 100
+
# HERE STARTS DYNACONF EXTENSION LOAD (Keep at the very bottom of settings.py)
# Read more at https://dynaconf.readthedocs.io/en/latest/guides/django.html
from dynaconf import DjangoDynaconf, Validator # noqa
diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -8,6 +8,7 @@
from gettext import gettext as _
from logging import getLogger
+from django.conf import settings
from django.core.files.storage import default_storage
from django.db.models import F
from naya.json import stream_array, tokenize
@@ -35,6 +36,7 @@
RepositoryResource,
)
from pulpcore.constants import TASK_STATES
+from pulpcore.tasking.pulpcore_worker import Worker
from pulpcore.tasking.tasks import dispatch
from pulpcore.plugin.importexport import BaseContentResource
@@ -506,6 +508,18 @@ def safe_extract(tar, path=".", members=None, *, numeric_owner=False):
default_storage.save(base_path, f)
# Now import repositories, in parallel.
+
+ # We want to be able to limit the number of available-workers that import will consume,
+ # so that pulp can continue to work while doing an import. We accomplish this by creating
+ # a reserved-resource string for each repo-import-task based on that repo's index in
+ # the dispatch loop, mod number-of-workers-to-consume.
+ #
+ # By default (setting is not-set), import will continue to use 100% of the available
+ # workers.
+ import_workers_percent = int(settings.get("IMPORT_WORKERS_PERCENT", 100))
+ total_workers = Worker.objects.online_workers().count()
+ import_workers = max(1, int(total_workers * (import_workers_percent / 100.0)))
+
with open(os.path.join(temp_dir, REPO_FILE), "r") as repo_data_file:
data = json.load(repo_data_file)
gpr = GroupProgressReport(
@@ -517,14 +531,16 @@ def safe_extract(tar, path=".", members=None, *, numeric_owner=False):
)
gpr.save()
- for src_repo in data:
+ for index, src_repo in enumerate(data):
+ # Lock the repo we're importing-into
dest_repo_name = _get_destination_repo_name(importer, src_repo["name"])
-
+ # pulpcore-worker limiter
+ worker_rsrc = f"import-worker-{index % import_workers}"
+ exclusive_resources = [worker_rsrc]
try:
dest_repo = Repository.objects.get(name=dest_repo_name)
except Repository.DoesNotExist:
if create_repositories:
- exclusive_resources = []
dest_repo_pk = ""
else:
log.warning(
@@ -534,7 +550,7 @@ def safe_extract(tar, path=".", members=None, *, numeric_owner=False):
)
continue
else:
- exclusive_resources = [dest_repo]
+ exclusive_resources.append(dest_repo)
dest_repo_pk = dest_repo.pk
dispatch(
| PulpImport can overload the task-queue
**Version**
main
**Describe the bug**
PulpImport dispatches a task-per-repository. For large imports, with large repositories, this means that Pulp can't do anything else until the import is finished or nearly so, as all workers are busy importing repositories.
It would be good to teach the import-process to arrange to not use up more than, say, 25% of the available workers.
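As a rough sketch of the mechanism the patch above uses (the values and names here are illustrative), the configured percentage is turned into a small pool of lock names, and each repository-import task takes one of them round-robin, so only that many imports can run at once while the remaining workers stay free:
```python
# Illustrative values only.
total_workers = 8            # e.g. the number of online pulpcore workers
import_workers_percent = 25  # the proposed setting
slots = max(1, total_workers * import_workers_percent // 100)

# Each repo-import task would be dispatched with one of these lock names as an
# exclusive resource; with only `slots` distinct names, at most `slots`
# imports can ever run at the same time.
locks = [f"import-worker-{index % slots}" for index in range(10)]
print(slots, locks)
```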
| Also we could introduce a setting for the import task that would make the % value configurable.
I am adding a prio-list to this issue since this problem impairs some of our customers' environments. | 2023-07-26T20:08:07 |
|
pulp/pulpcore | 4,161 | pulp__pulpcore-4161 | [
"4159"
] | 8e1503dd8274203a44e27b8db5ea80aac6d39917 | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -94,14 +94,14 @@ def export_artifacts(export, artifacts):
Args:
export (django.db.models.PulpExport): export instance that's doing the export
- artifacts (django.db.models.Artifacts): list of artifacts in all repos being exported
+ artifacts (django.db.models.Artifacts): QuerySet of artifacts in all repos being exported
Raises:
ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
"""
data = dict(message="Exporting Artifacts", code="export.artifacts", total=len(artifacts))
with ProgressReport(**data) as pb:
- for artifact in pb.iter(artifacts):
+ for artifact in artifacts.iterator(): # chunk_size= defaults to 2000 at a fetch
dest = artifact.file.name
if settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem":
with tempfile.TemporaryDirectory(dir=".") as temp_dir:
@@ -112,6 +112,7 @@ def export_artifacts(export, artifacts):
export.tarfile.add(temp_file.name, dest)
else:
export.tarfile.add(artifact.file.path, dest)
+ pb.increment()
resource = ArtifactResource()
resource.queryset = artifacts
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -500,7 +500,7 @@ def _do_export(pulp_exporter, tar, the_export):
starting_versions = _get_starting_versions(do_incremental, pulp_exporter, the_export)
vers_match = _version_match(ending_versions, starting_versions)
# Gather up versions and artifacts
- artifacts = []
+ artifacts = None # Will be a QuerySet selecting the Artifacts that need to be exported
for version in ending_versions:
# Check version-content to make sure we're not being asked to export
# an on_demand repo
@@ -509,10 +509,15 @@ def _do_export(pulp_exporter, tar, the_export):
raise RuntimeError(_("Remote artifacts cannot be exported."))
if do_incremental:
- vers_artifacts = version.artifacts.difference(vers_match[version].artifacts).all()
+ vers_artifacts = version.artifacts.difference(vers_match[version].artifacts)
else:
- vers_artifacts = version.artifacts.all()
- artifacts.extend(vers_artifacts)
+ vers_artifacts = version.artifacts
+
+ if artifacts:
+ artifacts.union(vers_artifacts)
+ else:
+ artifacts = vers_artifacts
+
# export plugin-version-info
export_versions(the_export, plugin_version_info)
# Export the top-level entities (artifacts and repositories)
| PulpExport ArtifactResource.json has duplicate entries for overlapping content.
**Version**
main
**Describe the bug**
PulpExport is typically used to export the content of many repositories. It gathers up the unique Artifacts themselves into a single directory. However, the ArtifactResource.json file, which is used to import the database metadata, is just a **list** of artifacts found in all the repositories.
In the presence of exported repositories with duplicate content (e.g., Satellite CVs with many of the same base repositories in them), the resulting Artifact.json file can get very large. The deduplication is handled correctly, but it takes up more (sometimes A LOT more) space and takes a lot more time to import as the import-export engine figures out whether a row is a duplicate or not.
**To Reproduce**
```
pulp file remote create --name many --policy immediate --url https://fixtures.pulpproject.org/file-many/PULP_MANIFEST
for r in {0..100}; do
  pulp file repository create --name "repo-$r" --remote many
  pulp file repository sync --name "repo-$r"
done
pulp exporter pulp create --name many --path /tmp/exports/ --repository (href-of-one-repo) --repository...
pulp export pulp run --exporter many
# extract the Artifact.json from the resulting tar.gz
tar xvf export-01899388-9d86-75d1-8da0-8c2ebf466d87-20230726_1849.tar.gz pulpcore.app.modelresource.ArtifactResource.json
cat pulpcore.app.modelresource.ArtifactResource.json | jq '.[] | .sha256' | sort | uniq -c
```
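The same duplicate count as the `jq | sort | uniq -c` pipeline above, for anyone who prefers to check it in Python (assumes the JSON file has already been extracted into the current directory):
```python
import json
from collections import Counter

with open("pulpcore.app.modelresource.ArtifactResource.json") as fh:
    rows = json.load(fh)

counts = Counter(row["sha256"] for row in rows)
dupes = {digest: n for digest, n in counts.items() if n > 1}
print(f"{len(rows)} rows, {len(counts)} unique sha256s, {len(dupes)} duplicated")
```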
**Expected behavior**
Artifact.json should show 1 entry per sha256.
**Actual behavior**
Artifact.json shows **number-of-repositories** copies of a given sha256.
**Additional context**
To create the exporter with all the repositories, I did something like the following:
```
# get all the repo-hrefs into a file, "hrefs"
pulp file repository list --limit 200 --field pulp_href | jq -r '.[] | .pulp_href' > hrefs
# build the create command with one --repository flag per href and run it as a single command
pulp exporter pulp create --name many --path /tmp/exports/ \
    $(sed 's/^/--repository /' hrefs)
```
There are several end-users being affected by this, even though they may not know it yet. The larger the installation, the more painful this bug becomes.
| 2023-07-26T20:42:43 |
||
pulp/pulpcore | 4,182 | pulp__pulpcore-4182 | [
"3610"
] | 4ced14e949d2311627b82380e63d0853247df764 | diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py
--- a/pulpcore/app/tasks/reclaim_space.py
+++ b/pulpcore/app/tasks/reclaim_space.py
@@ -1,3 +1,7 @@
+from logging import getLogger
+
+from django.db.models.deletion import ProtectedError
+
from pulpcore.app.models import (
Artifact,
Content,
@@ -9,6 +13,8 @@
)
from pulpcore.app.util import get_domain
+log = getLogger(__name__)
+
def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
"""
@@ -76,10 +82,16 @@ def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
counter = 0
interval = 100
for artifact in artifacts_to_delete.iterator():
- # we need to manually call delete() because it cleans up the file on the filesystem
- artifact.delete()
- progress_bar.done += 1
- counter += 1
+ try:
+ # we need to manually call delete() because it cleans up the file on the filesystem
+ artifact.delete()
+ except ProtectedError as e:
+ # Rarely artifact could be shared between to different content units.
+ # Just log and skip the artifact deletion in this case
+ log.info(e)
+ else:
+ progress_bar.done += 1
+ counter += 1
if counter >= interval:
progress_bar.save()
| Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'."
**Version**
3.16, but probably all versions
**Describe the bug**
- Reclaim space for repository fails with the following error.
~~~
Task paused with error: "("Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})"
~~~
**To Reproduce**
See BZ
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2169322
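For context, the discussion below suggests checking up front rather than letting the database reject the delete. A rough sketch of that "check first" idea (an assumption about one possible approach, not the shipped fix, which catches the error instead, as the patch above shows):
```
# Sketch only, assuming a pulpcore environment.
from pulpcore.app.models import ContentArtifact

def safe_to_delete(artifact):
    """True once no ContentArtifact row points at this artifact anymore.

    Deleting an Artifact that is still referenced trips the PROTECT
    constraint on ContentArtifact.artifact -- the error quoted above.
    """
    return not ContentArtifact.objects.filter(artifact=artifact).exists()

# A reclaim loop could then skip shared artifacts up front:
#     if safe_to_delete(artifact):
#         artifact.delete()
```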
| Isn't that the expected behaviour? When an artifact is used by another content unit, reclaim must not delete it.
OK, maybe there is a check missing so that we do not even attempt to delete it in that case.
The issue is that it shouldn't perform invalid actions in the first place, not that the database catches them.
use this repo to reproduce `Red Hat Enterprise Linux 7 Server RPMs x86_64 7Server` | 2023-07-28T16:35:10 |
|
pulp/pulpcore | 4,185 | pulp__pulpcore-4185 | [
"3610"
] | 1b9a85bf1db68c594d7684d3c90d090f8823d24a | diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py
--- a/pulpcore/app/tasks/reclaim_space.py
+++ b/pulpcore/app/tasks/reclaim_space.py
@@ -1,3 +1,7 @@
+from logging import getLogger
+
+from django.db.models.deletion import ProtectedError
+
from pulpcore.app.models import (
Artifact,
Content,
@@ -8,6 +12,8 @@
RepositoryVersion,
)
+log = getLogger(__name__)
+
def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
"""
@@ -74,10 +80,16 @@ def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
counter = 0
interval = 100
for artifact in artifacts_to_delete.iterator():
- # we need to manually call delete() because it cleans up the file on the filesystem
- artifact.delete()
- progress_bar.done += 1
- counter += 1
+ try:
+ # we need to manually call delete() because it cleans up the file on the filesystem
+ artifact.delete()
+ except ProtectedError as e:
+ # Rarely artifact could be shared between to different content units.
+ # Just log and skip the artifact deletion in this case
+ log.info(e)
+ else:
+ progress_bar.done += 1
+ counter += 1
if counter >= interval:
progress_bar.save()
| Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'."
**Version**
3.16, but probably all versions
**Describe the bug**
- Reclaim space for repository fails with the following error.
~~~
Task paused with error: "("Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})"
~~~
**To Reproduce**
See BZ
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2169322
| Isn't that the expected behaviour? When an artifact is used by another content unit, reclaim must not delete it.
OK, maybe there is a check missing so that we do not even attempt to delete it in that case.
The issue is that it shouldn't perform invalid actions in the first place, not that the database catches them.
use this repo to reproduce `Red Hat Enterprise Linux 7 Server RPMs x86_64 7Server` | 2023-07-28T18:01:42 |
|
pulp/pulpcore | 4,186 | pulp__pulpcore-4186 | [
"3610"
] | 0244dfdcbb0c56b029d14422bdce2eaa932f2d3f | diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py
--- a/pulpcore/app/tasks/reclaim_space.py
+++ b/pulpcore/app/tasks/reclaim_space.py
@@ -1,3 +1,7 @@
+from logging import getLogger
+
+from django.db.models.deletion import ProtectedError
+
from pulpcore.app.models import (
Artifact,
Content,
@@ -8,6 +12,8 @@
RepositoryVersion,
)
+log = getLogger(__name__)
+
def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
"""
@@ -74,10 +80,16 @@ def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
counter = 0
interval = 100
for artifact in artifacts_to_delete.iterator():
- # we need to manually call delete() because it cleans up the file on the filesystem
- artifact.delete()
- progress_bar.done += 1
- counter += 1
+ try:
+ # we need to manually call delete() because it cleans up the file on the filesystem
+ artifact.delete()
+ except ProtectedError as e:
+ # Rarely artifact could be shared between to different content units.
+ # Just log and skip the artifact deletion in this case
+ log.info(e)
+ else:
+ progress_bar.done += 1
+ counter += 1
if counter >= interval:
progress_bar.save()
| Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'."
**Version**
3.16, but probably all versions
**Describe the bug**
- Reclaim space for repository fails with the following error.
~~~
Task paused with error: "("Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})"
~~~
**To Reproduce**
See BZ
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2169322
| Isn't that the expected behaviour? When an artifact is used by another content unit, reclaim must not delete it.
OK, maybe there is a check missing so that we do not even attempt to delete it in that case.
The issue is that it shouldn't perform invalid actions in the first place, not that the database catches them.
use this repo to reproduce `Red Hat Enterprise Linux 7 Server RPMs x86_64 7Server` | 2023-07-28T18:01:55 |
|
pulp/pulpcore | 4,187 | pulp__pulpcore-4187 | [
"3610"
] | cd2851549cdc8951cf3ef1b3514ec41b8b05fdd8 | diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py
--- a/pulpcore/app/tasks/reclaim_space.py
+++ b/pulpcore/app/tasks/reclaim_space.py
@@ -1,3 +1,7 @@
+from logging import getLogger
+
+from django.db.models.deletion import ProtectedError
+
from pulpcore.app.models import (
Artifact,
Content,
@@ -8,6 +12,8 @@
RepositoryVersion,
)
+log = getLogger(__name__)
+
def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
"""
@@ -74,10 +80,16 @@ def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
counter = 0
interval = 100
for artifact in artifacts_to_delete.iterator():
- # we need to manually call delete() because it cleans up the file on the filesystem
- artifact.delete()
- progress_bar.done += 1
- counter += 1
+ try:
+ # we need to manually call delete() because it cleans up the file on the filesystem
+ artifact.delete()
+ except ProtectedError as e:
+ # Rarely artifact could be shared between to different content units.
+ # Just log and skip the artifact deletion in this case
+ log.info(e)
+ else:
+ progress_bar.done += 1
+ counter += 1
if counter >= interval:
progress_bar.save()
| Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'."
**Version**
3.16, but probably all versions
**Describe the bug**
- Reclaim space for repository fails with the following error.
~~~
Task paused with error: "("Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})"
~~~
**To Reproduce**
See BZ
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2169322
| Isn't that the expected behaviour? When an artifact is used by another content unit, reclaim must not delete it.
OK, maybe there is a check missing so that we do not even attempt to delete it in that case.
The issue is that it shouldn't perform invalid actions in the first place, not that the database catches them.
use this repo to reproduce `Red Hat Enterprise Linux 7 Server RPMs x86_64 7Server` | 2023-07-28T18:02:08 |
|
pulp/pulpcore | 4,188 | pulp__pulpcore-4188 | [
"3610"
] | e0ad59effe8a411cbe97f0e3ccf270a31e44177a | diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py
--- a/pulpcore/app/tasks/reclaim_space.py
+++ b/pulpcore/app/tasks/reclaim_space.py
@@ -1,3 +1,7 @@
+from logging import getLogger
+
+from django.db.models.deletion import ProtectedError
+
from pulpcore.app.models import (
Artifact,
Content,
@@ -9,6 +13,8 @@
)
from pulpcore.app.util import get_domain
+log = getLogger(__name__)
+
def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
"""
@@ -76,10 +82,16 @@ def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
counter = 0
interval = 100
for artifact in artifacts_to_delete.iterator():
- # we need to manually call delete() because it cleans up the file on the filesystem
- artifact.delete()
- progress_bar.done += 1
- counter += 1
+ try:
+ # we need to manually call delete() because it cleans up the file on the filesystem
+ artifact.delete()
+ except ProtectedError as e:
+ # Rarely artifact could be shared between to different content units.
+ # Just log and skip the artifact deletion in this case
+ log.info(e)
+ else:
+ progress_bar.done += 1
+ counter += 1
if counter >= interval:
progress_bar.save()
| Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'."
**Version**
3.16, but probably all versions
**Describe the bug**
- Reclaim space for repository fails with the following error.
~~~
Task paused with error: "("Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})"
~~~
**To Reproduce**
See BZ
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2169322
| Isn't that the expected behaviour? When an artifact is used by another content unit, reclaim must not delete it.
OK, maybe there is a check missing so that we do not even attempt to delete it in that case.
The issue is that it shouldn't perform invalid actions in the first place, not that the database catches them.
use this repo to reproduce `Red Hat Enterprise Linux 7 Server RPMs x86_64 7Server` | 2023-07-28T18:02:21 |
|
pulp/pulpcore | 4,189 | pulp__pulpcore-4189 | [
"3610"
] | a810cfd86645ac2507c7169725bc67012930340f | diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py
--- a/pulpcore/app/tasks/reclaim_space.py
+++ b/pulpcore/app/tasks/reclaim_space.py
@@ -1,3 +1,7 @@
+from logging import getLogger
+
+from django.db.models.deletion import ProtectedError
+
from pulpcore.app.models import (
Artifact,
Content,
@@ -9,6 +13,8 @@
)
from pulpcore.app.util import get_domain
+log = getLogger(__name__)
+
def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
"""
@@ -76,10 +82,16 @@ def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
counter = 0
interval = 100
for artifact in artifacts_to_delete.iterator():
- # we need to manually call delete() because it cleans up the file on the filesystem
- artifact.delete()
- progress_bar.done += 1
- counter += 1
+ try:
+ # we need to manually call delete() because it cleans up the file on the filesystem
+ artifact.delete()
+ except ProtectedError as e:
+ # Rarely artifact could be shared between to different content units.
+ # Just log and skip the artifact deletion in this case
+ log.info(e)
+ else:
+ progress_bar.done += 1
+ counter += 1
if counter >= interval:
progress_bar.save()
| Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'."
**Version**
3.16, but probably all versions
**Describe the bug**
- Reclaim space for repository fails with the following error.
~~~
Task paused with error: "("Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})"
~~~
**To Reproduce**
See BZ
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2169322
| Isn't that the expected behaviour? When an artifact is used by another content unit, reclaim must not delete it.
OK, maybe there is a check missing so that we do not even attempt to delete it in that case.
The issue is that it shouldn't perform invalid actions in the first place, not that the database catches them.
use this repo to reproduce `Red Hat Enterprise Linux 7 Server RPMs x86_64 7Server` | 2023-07-28T18:02:34 |
|
pulp/pulpcore | 4,190 | pulp__pulpcore-4190 | [
"3610"
] | 587518be2646f8bdd976e3f052445ad19528dad4 | diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py
--- a/pulpcore/app/tasks/reclaim_space.py
+++ b/pulpcore/app/tasks/reclaim_space.py
@@ -1,3 +1,7 @@
+from logging import getLogger
+
+from django.db.models.deletion import ProtectedError
+
from pulpcore.app.models import (
Artifact,
Content,
@@ -9,6 +13,8 @@
)
from pulpcore.app.util import get_domain
+log = getLogger(__name__)
+
def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
"""
@@ -76,10 +82,16 @@ def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
counter = 0
interval = 100
for artifact in artifacts_to_delete.iterator():
- # we need to manually call delete() because it cleans up the file on the filesystem
- artifact.delete()
- progress_bar.done += 1
- counter += 1
+ try:
+ # we need to manually call delete() because it cleans up the file on the filesystem
+ artifact.delete()
+ except ProtectedError as e:
+ # Rarely artifact could be shared between to different content units.
+ # Just log and skip the artifact deletion in this case
+ log.info(e)
+ else:
+ progress_bar.done += 1
+ counter += 1
if counter >= interval:
progress_bar.save()
| Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'."
**Version**
3.16, but probably all versions
**Describe the bug**
- Reclaim space for repository fails with the following error.
~~~
Task paused with error: "("Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})"
~~~
**To Reproduce**
See BZ
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2169322
| Isn't that the expected behaviour? When an artifact is used by another content unit, reclaim must not delete it.
OK, maybe there is a check missing so that we do not even attempt to delete it in that case.
The issue is that it shouldn't perform invalid actions in the first place, not that the database catches them.
use this repo to reproduce `Red Hat Enterprise Linux 7 Server RPMs x86_64 7Server` | 2023-07-28T18:02:47 |
|
pulp/pulpcore | 4,191 | pulp__pulpcore-4191 | [
"4068"
] | 151ca6bc12b988a6e830fc4b87f4d3c09c053d86 | diff --git a/pulpcore/app/settings.py b/pulpcore/app/settings.py
--- a/pulpcore/app/settings.py
+++ b/pulpcore/app/settings.py
@@ -296,6 +296,10 @@
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
+# What percentage of available-workers will pulpimport use at a time, max
+# By default, use all available workers.
+IMPORT_WORKERS_PERCENT = 100
+
# HERE STARTS DYNACONF EXTENSION LOAD (Keep at the very bottom of settings.py)
# Read more at https://dynaconf.readthedocs.io/en/latest/guides/django.html
import dynaconf # noqa
diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -8,6 +8,7 @@
from gettext import gettext as _
from logging import getLogger
+from django.conf import settings
from django.core.files.storage import default_storage
from django.db.models import F
from naya.json import stream_array, tokenize
@@ -28,6 +29,7 @@
Repository,
Task,
TaskGroup,
+ Worker,
)
from pulpcore.app.modelresource import (
ArtifactResource,
@@ -482,6 +484,18 @@ def validate_and_assemble(toc_filename):
default_storage.save(base_path, f)
# Now import repositories, in parallel.
+
+ # We want to be able to limit the number of available-workers that import will consume,
+ # so that pulp can continue to work while doing an import. We accomplish this by creating
+ # a reserved-resource string for each repo-import-task based on that repo's index in
+ # the dispatch loop, mod number-of-workers-to-consume.
+ #
+ # By default (setting is not-set), import will continue to use 100% of the available
+ # workers.
+ import_workers_percent = int(settings.get("IMPORT_WORKERS_PERCENT", 100))
+ total_workers = Worker.objects.online_workers().count()
+ import_workers = max(1, int(total_workers * (import_workers_percent / 100.0)))
+
with open(os.path.join(temp_dir, REPO_FILE), "r") as repo_data_file:
data = json.load(repo_data_file)
gpr = GroupProgressReport(
@@ -493,7 +507,10 @@ def validate_and_assemble(toc_filename):
)
gpr.save()
- for src_repo in data:
+ for index, src_repo in enumerate(data):
+ # pulpcore-worker limiter
+ worker_rsrc = f"import-worker-{index % import_workers}"
+ exclusive_resources = [worker_rsrc]
try:
dest_repo = _destination_repo(importer, src_repo["name"])
except Repository.DoesNotExist:
@@ -503,6 +520,8 @@ def validate_and_assemble(toc_filename):
)
)
continue
+ else:
+ exclusive_resources.append(dest_repo)
dispatch(
import_repository_version,
| PulpImport can overload the task-queue
**Version**
main
**Describe the bug**
PulpImport dispatches a task-per-repository. For large imports, with large repositories, this means that Pulp can't do anything else until the import is finished or nearly so, as all workers are busy importing repositories.
It would be good to teach the import process not to use up more than, say, 25% of the available workers.
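To make the limiting mechanism concrete, here is the arithmetic the patch above uses to turn a percentage into a worker budget (the numbers are illustrative only):
```
# Illustrative numbers: 10 online workers and IMPORT_WORKERS_PERCENT = 25.
total_workers = 10
import_workers_percent = 25
import_workers = max(1, int(total_workers * (import_workers_percent / 100.0)))
assert import_workers == 2  # int() truncates 2.5 down to 2

# Each repo-import task is then dispatched with an exclusive resource named
# "import-worker-<index % import_workers>", so only "import-worker-0" and
# "import-worker-1" exist and at most two imports run at the same time.
```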
| Also we could introduce a setting for the import task that would make the % value configurable.
I am adding this issue to the prio-list since this problem impairs some of our customers' environments. | 2023-07-28T18:13:21 |
|
pulp/pulpcore | 4,192 | pulp__pulpcore-4192 | [
"4068"
] | 159b5deb5dc3df1f7727ab2530d502f7871edf23 | diff --git a/pulpcore/app/settings.py b/pulpcore/app/settings.py
--- a/pulpcore/app/settings.py
+++ b/pulpcore/app/settings.py
@@ -294,6 +294,10 @@
HIDE_GUARDED_DISTRIBUTIONS = False
+# What percentage of available-workers will pulpimport use at a time, max
+# By default, use all available workers.
+IMPORT_WORKERS_PERCENT = 100
+
# HERE STARTS DYNACONF EXTENSION LOAD (Keep at the very bottom of settings.py)
# Read more at https://dynaconf.readthedocs.io/en/latest/guides/django.html
from dynaconf import DjangoDynaconf, Validator # noqa
diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -8,6 +8,7 @@
from gettext import gettext as _
from logging import getLogger
+from django.conf import settings
from django.core.files.storage import default_storage
from django.db.models import F
from naya.json import stream_array, tokenize
@@ -28,6 +29,7 @@
Repository,
Task,
TaskGroup,
+ Worker,
)
from pulpcore.app.modelresource import (
ArtifactResource,
@@ -506,6 +508,18 @@ def safe_extract(tar, path=".", members=None, *, numeric_owner=False):
default_storage.save(base_path, f)
# Now import repositories, in parallel.
+
+ # We want to be able to limit the number of available-workers that import will consume,
+ # so that pulp can continue to work while doing an import. We accomplish this by creating
+ # a reserved-resource string for each repo-import-task based on that repo's index in
+ # the dispatch loop, mod number-of-workers-to-consume.
+ #
+ # By default (setting is not-set), import will continue to use 100% of the available
+ # workers.
+ import_workers_percent = int(settings.get("IMPORT_WORKERS_PERCENT", 100))
+ total_workers = Worker.objects.online_workers().count()
+ import_workers = max(1, int(total_workers * (import_workers_percent / 100.0)))
+
with open(os.path.join(temp_dir, REPO_FILE), "r") as repo_data_file:
data = json.load(repo_data_file)
gpr = GroupProgressReport(
@@ -517,14 +531,16 @@ def safe_extract(tar, path=".", members=None, *, numeric_owner=False):
)
gpr.save()
- for src_repo in data:
+ for index, src_repo in enumerate(data):
+ # Lock the repo we're importing-into
dest_repo_name = _get_destination_repo_name(importer, src_repo["name"])
-
+ # pulpcore-worker limiter
+ worker_rsrc = f"import-worker-{index % import_workers}"
+ exclusive_resources = [worker_rsrc]
try:
dest_repo = Repository.objects.get(name=dest_repo_name)
except Repository.DoesNotExist:
if create_repositories:
- exclusive_resources = []
dest_repo_pk = ""
else:
log.warning(
@@ -534,7 +550,7 @@ def safe_extract(tar, path=".", members=None, *, numeric_owner=False):
)
continue
else:
- exclusive_resources = [dest_repo]
+ exclusive_resources.append(dest_repo)
dest_repo_pk = dest_repo.pk
dispatch(
| PulpImport can overload the task-queue
**Version**
main
**Describe the bug**
PulpImport dispatches a task-per-repository. For large imports, with large repositories, this means that Pulp can't do anything else until the import is finished or nearly so, as all workers are busy importing repositories.
It would be good to teach the import process not to use up more than, say, 25% of the available workers.
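Since the patch above reads the limit from Django settings, a deployment could cap import concurrency with something like the snippet below (the value is only an example; with dynaconf the same setting can usually be supplied as a `PULP_IMPORT_WORKERS_PERCENT` environment variable instead):
```
# /etc/pulp/settings.py -- hypothetical deployment snippet.
# Let PulpImport claim at most a quarter of the online workers.
IMPORT_WORKERS_PERCENT = 25
```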
| Also we could introduce a setting for the import task that would make the % value configurable.
I am adding this issue to the prio-list since this problem impairs some of our customers' environments. | 2023-07-28T18:16:25 |
|
pulp/pulpcore | 4,193 | pulp__pulpcore-4193 | [
"4068"
] | d378c6c877ed1df501c9734769535924005b502f | diff --git a/pulpcore/app/settings.py b/pulpcore/app/settings.py
--- a/pulpcore/app/settings.py
+++ b/pulpcore/app/settings.py
@@ -295,6 +295,10 @@
DOMAIN_ENABLED = False
+# What percentage of available-workers will pulpimport use at a time, max
+# By default, use all available workers.
+IMPORT_WORKERS_PERCENT = 100
+
# HERE STARTS DYNACONF EXTENSION LOAD (Keep at the very bottom of settings.py)
# Read more at https://dynaconf.readthedocs.io/en/latest/guides/django.html
from dynaconf import DjangoDynaconf, Validator # noqa
diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -8,6 +8,7 @@
from gettext import gettext as _
from logging import getLogger
+from django.conf import settings
from django.core.files.storage import default_storage
from django.db.models import F
from naya.json import stream_array, tokenize
@@ -28,6 +29,7 @@
Repository,
Task,
TaskGroup,
+ Worker,
)
from pulpcore.app.modelresource import (
ArtifactResource,
@@ -490,6 +492,18 @@ def safe_extract(tar, path=".", members=None, *, numeric_owner=False):
default_storage.save(base_path, f)
# Now import repositories, in parallel.
+
+ # We want to be able to limit the number of available-workers that import will consume,
+ # so that pulp can continue to work while doing an import. We accomplish this by creating
+ # a reserved-resource string for each repo-import-task based on that repo's index in
+ # the dispatch loop, mod number-of-workers-to-consume.
+ #
+ # By default (setting is not-set), import will continue to use 100% of the available
+ # workers.
+ import_workers_percent = int(settings.get("IMPORT_WORKERS_PERCENT", 100))
+ total_workers = Worker.objects.online_workers().count()
+ import_workers = max(1, int(total_workers * (import_workers_percent / 100.0)))
+
with open(os.path.join(temp_dir, REPO_FILE), "r") as repo_data_file:
data = json.load(repo_data_file)
gpr = GroupProgressReport(
@@ -501,14 +515,16 @@ def safe_extract(tar, path=".", members=None, *, numeric_owner=False):
)
gpr.save()
- for src_repo in data:
+ for index, src_repo in enumerate(data):
+ # Lock the repo we're importing-into
dest_repo_name = _get_destination_repo_name(importer, src_repo["name"])
-
+ # pulpcore-worker limiter
+ worker_rsrc = f"import-worker-{index % import_workers}"
+ exclusive_resources = [worker_rsrc]
try:
dest_repo = Repository.objects.get(name=dest_repo_name)
except Repository.DoesNotExist:
if create_repositories:
- exclusive_resources = []
dest_repo_pk = ""
else:
log.warning(
@@ -518,7 +534,7 @@ def safe_extract(tar, path=".", members=None, *, numeric_owner=False):
)
continue
else:
- exclusive_resources = [dest_repo]
+ exclusive_resources.append(dest_repo)
dest_repo_pk = dest_repo.pk
dispatch(
| PulpImport can overload the task-queue
**Version**
main
**Describe the bug**
PulpImport dispatches a task-per-repository. For large imports, with large repositories, this means that Pulp can't do anything else until the import is finished or nearly so, as all workers are busy importing repositories.
It would be good to teach the import process not to use up more than, say, 25% of the available workers.
| Also we could introduce a setting for the import task that would make the % value configurable.
I am adding this issue to the prio-list since this problem impairs some of our customers' environments. | 2023-07-28T18:20:16 |
|
pulp/pulpcore | 4,196 | pulp__pulpcore-4196 | [
"4068"
] | 9de0f119e4f39ad43104ee9219d8dfb6dbf15289 | diff --git a/pulpcore/app/settings.py b/pulpcore/app/settings.py
--- a/pulpcore/app/settings.py
+++ b/pulpcore/app/settings.py
@@ -291,6 +291,10 @@
TELEMETRY = True
+# What percentage of available-workers will pulpimport use at a time, max
+# By default, use all available workers.
+IMPORT_WORKERS_PERCENT = 100
+
# HERE STARTS DYNACONF EXTENSION LOAD (Keep at the very bottom of settings.py)
# Read more at https://dynaconf.readthedocs.io/en/latest/guides/django.html
from dynaconf import DjangoDynaconf, Validator # noqa
diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -8,6 +8,7 @@
from gettext import gettext as _
from logging import getLogger
+from django.conf import settings
from django.core.files.storage import default_storage
from django.db.models import F
from naya.json import stream_array, tokenize
@@ -28,6 +29,7 @@
Repository,
Task,
TaskGroup,
+ Worker,
)
from pulpcore.app.modelresource import (
ArtifactResource,
@@ -489,6 +491,18 @@ def validate_and_assemble(toc_filename):
default_storage.save(base_path, f)
# Now import repositories, in parallel.
+
+ # We want to be able to limit the number of available-workers that import will consume,
+ # so that pulp can continue to work while doing an import. We accomplish this by creating
+ # a reserved-resource string for each repo-import-task based on that repo's index in
+ # the dispatch loop, mod number-of-workers-to-consume.
+ #
+ # By default (setting is not-set), import will continue to use 100% of the available
+ # workers.
+ import_workers_percent = int(settings.get("IMPORT_WORKERS_PERCENT", 100))
+ total_workers = Worker.objects.online_workers().count()
+ import_workers = max(1, int(total_workers * (import_workers_percent / 100.0)))
+
with open(os.path.join(temp_dir, REPO_FILE), "r") as repo_data_file:
data = json.load(repo_data_file)
gpr = GroupProgressReport(
@@ -500,14 +514,16 @@ def validate_and_assemble(toc_filename):
)
gpr.save()
- for src_repo in data:
+ for index, src_repo in enumerate(data):
+ # Lock the repo we're importing-into
dest_repo_name = _get_destination_repo_name(importer, src_repo["name"])
-
+ # pulpcore-worker limiter
+ worker_rsrc = f"import-worker-{index % import_workers}"
+ exclusive_resources = [worker_rsrc]
try:
dest_repo = Repository.objects.get(name=dest_repo_name)
except Repository.DoesNotExist:
if create_repositories:
- exclusive_resources = []
dest_repo_pk = ""
else:
log.warning(
@@ -517,7 +533,7 @@ def validate_and_assemble(toc_filename):
)
continue
else:
- exclusive_resources = [dest_repo]
+ exclusive_resources.append(dest_repo)
dest_repo_pk = dest_repo.pk
dispatch(
| PulpImport can overload the task-queue
**Version**
main
**Describe the bug**
PulpImport dispatches a task-per-repository. For large imports, with large repositories, this means that Pulp can't do anything else until the import is finished or nearly so, as all workers are busy importing repositories.
It would be good to teach the import process not to use up more than, say, 25% of the available workers.
| Also we could introduce a setting for the import task that would make the % value configurable.
I am adding this issue to the prio-list since this problem impairs some of our customers' environments. | 2023-07-28T19:32:57 |
|
pulp/pulpcore | 4,197 | pulp__pulpcore-4197 | [
"4159"
] | 711b2133a1d77b74035d3ca2890eacc12cb7a8de | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -94,14 +94,14 @@ def export_artifacts(export, artifacts):
Args:
export (django.db.models.PulpExport): export instance that's doing the export
- artifacts (django.db.models.Artifacts): list of artifacts in all repos being exported
+ artifacts (django.db.models.Artifacts): QuerySet of artifacts in all repos being exported
Raises:
ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
"""
data = dict(message="Exporting Artifacts", code="export.artifacts", total=len(artifacts))
with ProgressReport(**data) as pb:
- for artifact in pb.iter(artifacts):
+ for artifact in artifacts.iterator(): # chunk_size= defaults to 2000 at a fetch
dest = artifact.file.name
if settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem":
with tempfile.TemporaryDirectory(dir=".") as temp_dir:
@@ -112,6 +112,7 @@ def export_artifacts(export, artifacts):
export.tarfile.add(temp_file.name, dest)
else:
export.tarfile.add(artifact.file.path, dest)
+ pb.increment()
resource = ArtifactResource()
resource.queryset = artifacts
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -500,7 +500,7 @@ def _do_export(pulp_exporter, tar, the_export):
starting_versions = _get_starting_versions(do_incremental, pulp_exporter, the_export)
vers_match = _version_match(ending_versions, starting_versions)
# Gather up versions and artifacts
- artifacts = []
+ artifacts = None # Will be a QuerySet selecting the Artifacts that need to be exported
for version in ending_versions:
# Check version-content to make sure we're not being asked to export
# an on_demand repo
@@ -509,10 +509,15 @@ def _do_export(pulp_exporter, tar, the_export):
RuntimeError(_("Remote artifacts cannot be exported."))
if do_incremental:
- vers_artifacts = version.artifacts.difference(vers_match[version].artifacts).all()
+ vers_artifacts = version.artifacts.difference(vers_match[version].artifacts)
else:
- vers_artifacts = version.artifacts.all()
- artifacts.extend(vers_artifacts)
+ vers_artifacts = version.artifacts
+
+ if artifacts:
+ artifacts.union(vers_artifacts)
+ else:
+ artifacts = vers_artifacts
+
# export plugin-version-info
export_versions(the_export, plugin_version_info)
# Export the top-level entities (artifacts and repositories)
| PulpExport ArtifactResource.json has duplicate entries for overlapping content.
**Version**
main
**Describe the bug**
PulpExport is typically used to export the content of many repositories. It gathers up the unique Artifacts themselves into a single directory. However, the ArtifactResource.json file, which is used to import the database metadata, is just a **list** of artifacts found in all the repositories.
In the presence of exported repositories with duplicate content (e.g., Satellite CVs with many of the same base repositories in them), the resulting Artifact.json file can get very large. The deduplication is handled correctly, but it takes up more (sometimes A LOT more) space and takes a lot more time to import as the import-export engine figures out whether a row is a duplicate or not.
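One Django detail worth keeping in mind when merging per-version artifact QuerySets (the approach taken in the patch above): `QuerySet.union()` returns a new queryset rather than modifying the one it is called on, so the return value has to be kept. A small, self-contained illustration (assuming a pulpcore shell; the size filters are arbitrary):
```
from pulpcore.app.models import Artifact

qs_a = Artifact.objects.filter(size__lt=1024)
qs_b = Artifact.objects.filter(size__gte=1024)

# union() builds a new queryset (SQL UNION, which also drops duplicate rows)
# and leaves qs_a untouched, so the combined result must be assigned.
combined = qs_a.union(qs_b)
```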
**To Reproduce**
```
pulp file remote create --name many --policy immediate --url https://fixtures.pulpproject.org/file-many/PULP_MANIFEST
for r in {0..100}; do
  pulp file repository create --name "repo-$r" --remote many
  pulp file repository sync --name "repo-$r"
done
pulp exporter pulp create --name many --path /tmp/exports/ --repository (href-of-one-repo) --repository...
pulp export pulp run --exporter many
# extract the Artifact.json from the resulting tar.gz
tar xvf export-01899388-9d86-75d1-8da0-8c2ebf466d87-20230726_1849.tar.gz pulpcore.app.modelresource.ArtifactResource.json
cat pulpcore.app.modelresource.ArtifactResource.json | jq '.[] | .sha256' | sort | uniq -c
```
**Expected behavior**
Artifact.json should show 1 entry per sha256.
**Actual behavior**
Artifact.json shows **number-of-repositories** copies of a given sha256.
**Additional context**
To create the exporter with all the repositories, I did something like the following:
```
# get all the repo-hrefs into a file, "hrefs"
pulp file repository list --limit 200 --field pulp_href | jq -r '.[] | .pulp_href' > hrefs
# Edit the file to start each line with ` --repository ` and end it with a line-continuation backslash (`\`)
# Add this as the first line
pulp exporter pulp create --name many --path /tmp/exports/ \
# Now run as a single command
source ./hrefs
```
There are several end-users being affected by this, even though they may not know it yet. The larger the installation, the more painful this bug becomes.
| 2023-07-29T15:06:29 |
||
pulp/pulpcore | 4,198 | pulp__pulpcore-4198 | [
"4159"
] | 3f96ae15a37b359a774dfad8f62bf1d516249869 | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -94,14 +94,14 @@ def export_artifacts(export, artifacts):
Args:
export (django.db.models.PulpExport): export instance that's doing the export
- artifacts (django.db.models.Artifacts): list of artifacts in all repos being exported
+ artifacts (django.db.models.Artifacts): QuerySet of artifacts in all repos being exported
Raises:
ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
"""
data = dict(message="Exporting Artifacts", code="export.artifacts", total=len(artifacts))
with ProgressReport(**data) as pb:
- for artifact in pb.iter(artifacts):
+ for artifact in artifacts.iterator(): # chunk_size= defaults to 2000 at a fetch
dest = artifact.file.name
if settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem":
with tempfile.TemporaryDirectory(dir=".") as temp_dir:
@@ -112,6 +112,7 @@ def export_artifacts(export, artifacts):
export.tarfile.add(temp_file.name, dest)
else:
export.tarfile.add(artifact.file.path, dest)
+ pb.increment()
resource = ArtifactResource()
resource.queryset = artifacts
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -500,7 +500,7 @@ def _do_export(pulp_exporter, tar, the_export):
starting_versions = _get_starting_versions(do_incremental, pulp_exporter, the_export)
vers_match = _version_match(ending_versions, starting_versions)
# Gather up versions and artifacts
- artifacts = []
+ artifacts = None # Will be a QuerySet selecting the Artifacts that need to be exported
for version in ending_versions:
# Check version-content to make sure we're not being asked to export
# an on_demand repo
@@ -509,10 +509,15 @@ def _do_export(pulp_exporter, tar, the_export):
raise RuntimeError(_("Remote artifacts cannot be exported."))
if do_incremental:
- vers_artifacts = version.artifacts.difference(vers_match[version].artifacts).all()
+ vers_artifacts = version.artifacts.difference(vers_match[version].artifacts)
else:
- vers_artifacts = version.artifacts.all()
- artifacts.extend(vers_artifacts)
+ vers_artifacts = version.artifacts
+
+ if artifacts:
+ artifacts.union(vers_artifacts)
+ else:
+ artifacts = vers_artifacts
+
# export plugin-version-info
export_versions(the_export, plugin_version_info)
# Export the top-level entities (artifacts and repositories)
| PulpExport ArtifactResource.json has duplicate entries for overlapping content.
**Version**
main
**Describe the bug**
PulpExport is typically used to export the content of many repositories. It gathers up the unique Artifacts themselves into a single directory. However, the ArtifactResource.json file, which is used to import the database metadata, is just a **list** of artifacts found in all the repositories.
In the presence of exported repositories with duplicate content (e.g., Satellite CVs with many of the same base repositories in them), the resulting Artifact.json file can get very large. The deduplication is handled correctly, but it takes up more (sometimes A LOT more) space and takes a lot more time to import as the import-export engine figures out whether a row is a duplicate or not.
**To Reproduce**
```
pulp file remote create --name many --policy immediate --url https://fixtures.pulpproject.org/file-many/PULP_MANIFEST
for r in {0..100}; do
  pulp file repository create --name "repo-$r" --remote many
  pulp file repository sync --name "repo-$r"
done
pulp exporter pulp create --name many --path /tmp/exports/ --repository (href-of-one-repo) --repository...
pulp export pulp run --exporter many
# extract the Artifact.json from the resulting tar.gz
tar xvf export-01899388-9d86-75d1-8da0-8c2ebf466d87-20230726_1849.tar.gz pulpcore.app.modelresource.ArtifactResource.json
cat pulpcore.app.modelresource.ArtifactResource.json | jq '.[] | .sha256' | sort | uniq -c
```
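For environments without `jq`, the same duplicate check can be done with a few lines of Python against the file extracted above:
```
# Count how many times each sha256 appears in the exported resource file.
import json
from collections import Counter

with open("pulpcore.app.modelresource.ArtifactResource.json") as fp:
    counts = Counter(row["sha256"] for row in json.load(fp))

dupes = {sha: n for sha, n in counts.items() if n > 1}
print(f"{len(dupes)} sha256 values appear more than once")
```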
**Expected behavior**
Artifact.json should show 1 entry per sha256.
**Actual behavior**
Artifact.json shows **number-of-repositories** copies of a given sha256.
**Additional context**
To create the exporter with all the repositories, I did something like the following:
```
# get all the repo-hrefs into a file, "hrefs"
pulp file repository list --limit 200 --field pulp_href | jq -r '.[] | .pulp_href' > hrefs
# Edit the file to start each line with ` --repository ` and end it with a line-continuation backslash (`\`)
# Add this as the first line
pulp exporter pulp create --name many --path /tmp/exports/ \
# Now run as a single command
source ./hrefs
```
There are several end-users being affected by this, even though they may not know it yet. The larger the installation, the more painful this bug becomes.
| 2023-07-29T15:06:42 |
||
pulp/pulpcore | 4,199 | pulp__pulpcore-4199 | [
"4159"
] | eed822278c273822dcdc34aeeea7ec882eacfa7e | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -94,14 +94,14 @@ def export_artifacts(export, artifacts):
Args:
export (django.db.models.PulpExport): export instance that's doing the export
- artifacts (django.db.models.Artifacts): list of artifacts in all repos being exported
+ artifacts (django.db.models.Artifacts): QuerySet of artifacts in all repos being exported
Raises:
ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
"""
data = dict(message="Exporting Artifacts", code="export.artifacts", total=len(artifacts))
with ProgressReport(**data) as pb:
- for artifact in pb.iter(artifacts):
+ for artifact in artifacts.iterator(): # chunk_size= defaults to 2000 at a fetch
dest = artifact.file.name
if settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem":
with tempfile.TemporaryDirectory(dir=".") as temp_dir:
@@ -112,6 +112,7 @@ def export_artifacts(export, artifacts):
export.tarfile.add(temp_file.name, dest)
else:
export.tarfile.add(artifact.file.path, dest)
+ pb.increment()
resource = ArtifactResource()
resource.queryset = artifacts
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -500,7 +500,7 @@ def _do_export(pulp_exporter, tar, the_export):
starting_versions = _get_starting_versions(do_incremental, pulp_exporter, the_export)
vers_match = _version_match(ending_versions, starting_versions)
# Gather up versions and artifacts
- artifacts = []
+ artifacts = None # Will be a QuerySet selecting the Artifacts that need to be exported
for version in ending_versions:
# Check version-content to make sure we're not being asked to export
# an on_demand repo
@@ -509,10 +509,15 @@ def _do_export(pulp_exporter, tar, the_export):
raise RuntimeError(_("Remote artifacts cannot be exported."))
if do_incremental:
- vers_artifacts = version.artifacts.difference(vers_match[version].artifacts).all()
+ vers_artifacts = version.artifacts.difference(vers_match[version].artifacts)
else:
- vers_artifacts = version.artifacts.all()
- artifacts.extend(vers_artifacts)
+ vers_artifacts = version.artifacts
+
+ if artifacts:
+ artifacts.union(vers_artifacts)
+ else:
+ artifacts = vers_artifacts
+
# export plugin-version-info
export_versions(the_export, plugin_version_info)
# Export the top-level entities (artifacts and repositories)
| PulpExport ArtifactResource.json has duplicate entries for overlapping content.
**Version**
main
**Describe the bug**
PulpExport is typically used to export the content of many repositories. It gathers up the unique Artifacts themselves into a single directory. However, the ArtifactResource.json file, which is used to import the database metadata, is just a **list** of artifacts found in all the repositories.
In the presence of exported repositories with duplicate content (e.g., Satellite CVs with many of the same base repositories in them), the resulting Artifact.json file can get very large. The deduplication is handled correctly, but it takes up more (sometimes A LOT more) space and takes a lot more time to import as the import-export engine figures out whether a row is a duplicate or not.
**To Reproduce**
```
pulp file remote create --name many --policy immediate --url https://fixtures.pulpproject.org/file-many/PULP_MANIFEST
for r in {0..100}; do
  pulp file repository create --name "repo-$r" --remote many
  pulp file repository sync --name "repo-$r"
done
pulp exporter pulp create --name many --path /tmp/exports/ --repository (href-of-one-repo) --repository...
pulp export pulp run --exporter many
# extract the Artifact.json from the resulting tar.gz
tar xvf export-01899388-9d86-75d1-8da0-8c2ebf466d87-20230726_1849.tar.gz pulpcore.app.modelresource.ArtifactResource.json
cat pulpcore.app.modelresource.ArtifactResource.json | jq '.[] | .sha256' | sort | uniq -c
```
**Expected behavior**
Artifact.json should show 1 entry per sha256.
**Actual behavior**
Artifact.json shows **number-of-repositories** copies of a given sha256.
**Additional context**
To create the exporter with all the repositories, I did something like the following:
```
# get all the repo-hrefs into a file, "hrefs"
pulp file repository list --limit 200 --field pulp_href | jq -r '.[] | .pulp_href' > hrefs
# Edit the file to start each line with ` --repository ` and end it with a line-continuation backslash (`\`)
# Add this as the first line
pulp exporter pulp create --name many --path /tmp/exports/ \
# Now run as a single command
source ./hrefs
```
There are several end-users being affected by this, even though they may not know it yet. The larger the installation, the more painful this bug becomes.
| 2023-07-29T15:06:54 |
||
pulp/pulpcore | 4,200 | pulp__pulpcore-4200 | [
"4159"
] | b4e38e4f31b77fb6064c8cc0362b5a93812fb348 | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -94,14 +94,14 @@ def export_artifacts(export, artifacts):
Args:
export (django.db.models.PulpExport): export instance that's doing the export
- artifacts (django.db.models.Artifacts): list of artifacts in all repos being exported
+ artifacts (django.db.models.Artifacts): QuerySet of artifacts in all repos being exported
Raises:
ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
"""
data = dict(message="Exporting Artifacts", code="export.artifacts", total=len(artifacts))
with ProgressReport(**data) as pb:
- for artifact in pb.iter(artifacts):
+ for artifact in artifacts.iterator(): # chunk_size= defaults to 2000 at a fetch
dest = artifact.file.name
if settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem":
with tempfile.TemporaryDirectory(dir=".") as temp_dir:
@@ -112,6 +112,7 @@ def export_artifacts(export, artifacts):
export.tarfile.add(temp_file.name, dest)
else:
export.tarfile.add(artifact.file.path, dest)
+ pb.increment()
resource = ArtifactResource()
resource.queryset = artifacts
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -492,7 +492,7 @@ def _do_export(pulp_exporter, tar, the_export):
starting_versions = _get_starting_versions(do_incremental, pulp_exporter, the_export)
vers_match = _version_match(ending_versions, starting_versions)
# Gather up versions and artifacts
- artifacts = []
+ artifacts = None # Will be a QuerySet selecting the Artifacts that need to be exported
for version in ending_versions:
# Check version-content to make sure we're not being asked to export
# an on_demand repo
@@ -501,10 +501,15 @@ def _do_export(pulp_exporter, tar, the_export):
raise RuntimeError(_("Remote artifacts cannot be exported."))
if do_incremental:
- vers_artifacts = version.artifacts.difference(vers_match[version].artifacts).all()
+ vers_artifacts = version.artifacts.difference(vers_match[version].artifacts)
else:
- vers_artifacts = version.artifacts.all()
- artifacts.extend(vers_artifacts)
+ vers_artifacts = version.artifacts
+
+ if artifacts:
+ artifacts.union(vers_artifacts)
+ else:
+ artifacts = vers_artifacts
+
# export plugin-version-info
export_versions(the_export, plugin_version_info)
# Export the top-level entities (artifacts and repositories)
| PulpExport ArtifactResource.json has duplicate entries for overlapping content.
**Version**
main
**Describe the bug**
PulpExport is typically used to export the content of many repositories. It gathers up the unique Artifacts themselves into a single directory. However, the ArtifactResource.json file, which is used to import the database metadata, is just a **list** of artifacts found in all the repositories.
In the presence of exported repositories with duplicate content (e.g., Satellite CVs with many of the same base repositories in them), the resulting Artifact.json file can get very large. The deduplication is handled correctly, but it takes up more (sometimes A LOT more) space and takes a lot more time to import as the import-export engine figures out whether a row is a duplicate or not.
**To Reproduce**
```
pulp file remote create --name many --policy immediate --url https://fixtures.pulpproject.org/file-many/PULP_MANIFEST
for r in {0..100}; do
  pulp file repository create --name "repo-$r" --remote many
  pulp file repository sync --name "repo-$r"
done
pulp exporter pulp create --name many --path /tmp/exports/ --repository (href-of-one-repo) --repository...
pulp export pulp run --exporter many
# extract the Artifact.json from the resulting tar.gz
tar xvf export-01899388-9d86-75d1-8da0-8c2ebf466d87-20230726_1849.tar.gz pulpcore.app.modelresource.ArtifactResource.json
cat pulpcore.app.modelresource.ArtifactResource.json | jq '.[] | .sha256' | sort | uniq -c
```
**Expected behavior**
Artifact.json should show 1 entry per sha256.
**Actual behavior**
Artifact.json shows **number-of-repositories** copies of a given sha256.
**Additional context**
To create the exporter with all the repositories, I did something like the following:
```
# get all the repo-hrefs into a file, "hrefs"
pulp file repository list --limit 200 --field pulp_href | jq -r '.[] | .pulp_href' > hrefs
# Edit the file to start each line with ` --repository ` and end it with a line-continuation backslash (`\`)
# Add this as the first line
pulp exporter pulp create --name many --path /tmp/exports/ \
# Now run as a single command
source ./hrefs
```
There are several end-users being affected by this, even though they may not know it yet. The larger the installation, the more painful this bug becomes.
| 2023-07-29T15:07:07 |
||
pulp/pulpcore | 4,201 | pulp__pulpcore-4201 | [
"4159"
] | 63b3dddeb229710f09e2484ecbfe025cfd7f7171 | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -94,14 +94,14 @@ def export_artifacts(export, artifacts):
Args:
export (django.db.models.PulpExport): export instance that's doing the export
- artifacts (django.db.models.Artifacts): list of artifacts in all repos being exported
+ artifacts (django.db.models.Artifacts): QuerySet of artifacts in all repos being exported
Raises:
ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
"""
data = dict(message="Exporting Artifacts", code="export.artifacts", total=len(artifacts))
with ProgressReport(**data) as pb:
- for artifact in pb.iter(artifacts):
+ for artifact in artifacts.iterator(): # chunk_size= defaults to 2000 at a fetch
dest = artifact.file.name
if settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem":
with tempfile.TemporaryDirectory(dir=".") as temp_dir:
@@ -112,6 +112,7 @@ def export_artifacts(export, artifacts):
export.tarfile.add(temp_file.name, dest)
else:
export.tarfile.add(artifact.file.path, dest)
+ pb.increment()
resource = ArtifactResource()
resource.queryset = artifacts
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -495,7 +495,7 @@ def _do_export(pulp_exporter, tar, the_export):
starting_versions = _get_starting_versions(do_incremental, pulp_exporter, the_export)
vers_match = _version_match(ending_versions, starting_versions)
# Gather up versions and artifacts
- artifacts = []
+ artifacts = None # Will be a QuerySet selecting the Artifacts that need to be exported
for version in ending_versions:
# Check version-content to make sure we're not being asked to export
# an on_demand repo
@@ -504,10 +504,15 @@ def _do_export(pulp_exporter, tar, the_export):
raise RuntimeError(_("Remote artifacts cannot be exported."))
if do_incremental:
- vers_artifacts = version.artifacts.difference(vers_match[version].artifacts).all()
+ vers_artifacts = version.artifacts.difference(vers_match[version].artifacts)
else:
- vers_artifacts = version.artifacts.all()
- artifacts.extend(vers_artifacts)
+ vers_artifacts = version.artifacts
+
+ if artifacts:
+ artifacts.union(vers_artifacts)
+ else:
+ artifacts = vers_artifacts
+
# export plugin-version-info
export_versions(the_export, plugin_version_info)
# Export the top-level entities (artifacts and repositories)
| PulpExport ArtifactResource.json has duplicate entries for overlapping content.
**Version**
main
**Describe the bug**
PulpExport is typically used to export the content of many repositories. It gathers up the unique Artifacts themselves into a single directory. However, the ArtifactResource.json file, which is used to import the database metadata, is just a **list** of artifacts found in all the repositories.
In the presence of exported repositories with duplicate content (e.g., Satellite CVs with many of the same base repositories in them), the resulting Artifact.json file can get very large. The deduplication is handled correctly, but it takes up more (sometimes A LOT more) space and takes a lot more time to import as the import-export engine figures out whether a row is a duplicate or not.
**To Reproduce**
```
pulp file remote create --name many --policy immediate --url https://fixtures.pulpproject.org/file-many/PULP_MANIFEST
for r in {0..100}; do
  pulp file repository create --name "repo-$r" --remote many
  pulp file repository sync --name "repo-$r"
done
pulp exporter pulp create --name many --path /tmp/exports/ --repository (href-of-one-repo) --repository...
pulp export pulp run --exporter many
# extract the Artifact.json from the resulting tar.gz
tar xvf export-01899388-9d86-75d1-8da0-8c2ebf466d87-20230726_1849.tar.gz pulpcore.app.modelresource.ArtifactResource.json
cat pulpcore.app.modelresource.ArtifactResource.json | jq '.[] | .sha256' | sort | uniq -c
```
**Expected behavior**
Artifact.json should show 1 entry per sha256.
**Actual behavior**
Artifact.json shows **number-of-repositories** copies of a given sha256.
**Additional context**
To create the exporter with all the repositories, I did something like the following:
```
# get all the repo-hrefs into a file, "hrefs"
pulp file repository list --limit 200 --field pulp_href | jq -r '.[] | .pulp_href' > hrefs
# Edit the file to start each line with ` --repository ` and end it with a line-continuation backslash (`\`)
# Add this as the first line
pulp exporter pulp create --name many --path /tmp/exports/ \
# Now run as a single command
source ./hrefs
```
There are several end-users being affected by this, even though they may not know it yet. The larger the installation, the more painful this bug becomes.
| 2023-07-29T15:07:19 |
||
pulp/pulpcore | 4,202 | pulp__pulpcore-4202 | [
"4159"
] | 72e0a910641378b232c024b473a02851664694be | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -94,14 +94,14 @@ def export_artifacts(export, artifacts):
Args:
export (django.db.models.PulpExport): export instance that's doing the export
- artifacts (django.db.models.Artifacts): list of artifacts in all repos being exported
+ artifacts (django.db.models.Artifacts): QuerySet of artifacts in all repos being exported
Raises:
ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
"""
data = dict(message="Exporting Artifacts", code="export.artifacts", total=len(artifacts))
with ProgressReport(**data) as pb:
- for artifact in pb.iter(artifacts):
+ for artifact in artifacts.iterator(): # chunk_size= defaults to 2000 at a fetch
dest = artifact.file.name
if settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem":
with tempfile.TemporaryDirectory(dir=".") as temp_dir:
@@ -112,6 +112,7 @@ def export_artifacts(export, artifacts):
export.tarfile.add(temp_file.name, dest)
else:
export.tarfile.add(artifact.file.path, dest)
+ pb.increment()
resource = ArtifactResource()
resource.queryset = artifacts
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -500,7 +500,7 @@ def _do_export(pulp_exporter, tar, the_export):
starting_versions = _get_starting_versions(do_incremental, pulp_exporter, the_export)
vers_match = _version_match(ending_versions, starting_versions)
# Gather up versions and artifacts
- artifacts = []
+ artifacts = None # Will be a QuerySet selecting the Artifacts that need to be exported
for version in ending_versions:
# Check version-content to make sure we're not being asked to export
# an on_demand repo
@@ -509,10 +509,15 @@ def _do_export(pulp_exporter, tar, the_export):
raise RuntimeError(_("Remote artifacts cannot be exported."))
if do_incremental:
- vers_artifacts = version.artifacts.difference(vers_match[version].artifacts).all()
+ vers_artifacts = version.artifacts.difference(vers_match[version].artifacts)
else:
- vers_artifacts = version.artifacts.all()
- artifacts.extend(vers_artifacts)
+ vers_artifacts = version.artifacts
+
+ if artifacts:
+ artifacts.union(vers_artifacts)
+ else:
+ artifacts = vers_artifacts
+
# export plugin-version-info
export_versions(the_export, plugin_version_info)
# Export the top-level entities (artifacts and repositories)
| PulpExport ArtifactResource.json has duplicate entries for overlapping content.
**Version**
main
**Describe the bug**
PulpExport is typically used to export the content of many repositories. It gathers up the unique Artifacts themselves into a single directory. However, the ArtifactResource.json file, which is used to import the database metadata, is just a **list** of artifacts found in all the repositories.
In the presence of exported repositories with duplicate content (e.g., Satellite CVs with many of the same base repositories in them), the resulting Artifact.json file can get very large. The deduplication is handled correctly, but it takes up more (sometimes A LOT more) space and takes a lot more time to import as the import-export engine figures out whether a row is a duplicate or not.
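As a quick way to quantify the duplication, the exported resource file can be inspected directly — a small illustrative script, assuming the file is a JSON list of objects with a `sha256` field (as the `jq` check in the reproduction below implies):
```python
import collections
import json

# Count how many rows share the same sha256 in the exported resource file.
with open("pulpcore.app.modelresource.ArtifactResource.json") as fp:
    rows = json.load(fp)

counts = collections.Counter(row["sha256"] for row in rows)
duplicates = {digest: n for digest, n in counts.items() if n > 1}
print(f"{len(duplicates)} sha256 values appear more than once")
```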
**To Reproduce**
```
pulp file remote create --name many --policy immediate --url https://fixtures.pulpproject.org/file-many/PULP_MANIFEST
for r in {0..100}; do
  pulp file repository create --name "repo-$r" --remote many
  pulp file repository sync --name "repo-$r"
done
pulp exporter pulp create --name many --path /tmp/exports/ --repository (href-of-one-repo) --repository...
pulp export pulp run --exporter many
# extract the Artifact.json from the resulting tar.gz
tar xvf export-01899388-9d86-75d1-8da0-8c2ebf466d87-20230726_1849.tar.gz pulpcore.app.modelresource.ArtifactResource.json
cat pulpcore.app.modelresource.ArtifactResource.json | jq '.[] | .sha256' | sort | uniq -c
```
**Expected behavior**
Artifact.json should show one entry per sha256.
**Actual behavior**
Artifact.json shows **number-of-repositories** copies of a given sha256.
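One way to avoid the duplicate rows is to accumulate each version's artifacts into a single deduplicated collection before serializing — a minimal sketch only (names such as `ending_versions` and `export_artifacts` follow the export task shown in the diff above; this is not necessarily the exact shipped change):
```python
# Illustrative sketch: deduplicate Artifacts across all exported repo versions.
artifacts = set()
for version in ending_versions:            # RepositoryVersion objects being exported
    artifacts.update(version.artifacts.all())

# ArtifactResource then serializes one row per unique Artifact.
export_artifacts(the_export, list(artifacts))
```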
**Additional context**
To create the exporter with all the repositories, I did something like the following:
```
# get all the repo-hrefs into a file, "hrefs"
pulp file repository list --limit 200 --field pulp_href | jq -r '.[] | .pulp_href' > hrefs
# Edit the file to start each line with ` --repository ` and end it with `\$`
# Add this as the first line
pulp exporter pulp create --name many --path /tmp/exports/ \
# Now run as a single command
source ./hrefs
```
There are several end-users being affected by this, even though they may not know it yet. The larger the installation, the more painful this bug becomes.
| 2023-07-29T15:07:32 |
||
pulp/pulpcore | 4,203 | pulp__pulpcore-4203 | [
"4051"
] | c3078a9f5a4e32e0676ff979b0ddc41249a8b2a9 | diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -113,7 +113,6 @@ def _export_publication_to_file_system(
path (str): Path to place the exported data
publication_pk (str): Publication pk
"""
- difference_content_artifacts = []
content_artifacts = ContentArtifact.objects.filter(
pk__in=publication.published_artifact.values_list("content_artifact__pk", flat=True)
)
@@ -121,11 +120,6 @@ def _export_publication_to_file_system(
start_version_content_artifacts = ContentArtifact.objects.filter(
artifact__in=start_repo_version.artifacts
)
- difference_content_artifacts = set(
- content_artifacts.difference(start_version_content_artifacts).values_list(
- "pk", flat=True
- )
- )
if publication.pass_through:
content_artifacts |= ContentArtifact.objects.filter(
@@ -136,11 +130,20 @@ def _export_publication_to_file_system(
# In some cases we may want to disable this validation
_validate_fs_export(content_artifacts)
+ difference_content_artifacts = []
+ if start_repo_version:
+ difference_content_artifacts = set(
+ content_artifacts.difference(start_version_content_artifacts).values_list(
+ "pk", flat=True
+ )
+ )
+
relative_path_to_artifacts = {}
if publication.pass_through:
relative_path_to_artifacts = {
ca.relative_path: ca.artifact
for ca in content_artifacts.select_related("artifact").iterator()
+ if (start_repo_version is None) or (ca.pk in difference_content_artifacts)
}
for pa in publication.published_artifact.select_related(
| File System Exporter ignores start_repository_version and exports everything for pass-through publications
**Version**
Please provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.
Pulp 3.18 and later
**Describe the bug**
The file system exporter ignores start_repository_version and exports everything for pass-through publications. FilePublications, for example, are marked as pass_through publications, and the export task in this case seems to ignore the start_repository_version and dumps everything over.
Note: if you give it a repo version and a start_repo version it returns the right content. This behavior only occurs when passing a publication and a start_repo_version (which is what Katello uses).
**To Reproduce**
Steps to reproduce the behavior:
- Have a file repo with multiple versions and publications (make sure the versions have some new files)
- Try to export this repo via the file system exporter and provide a publication and start_version
**Expected behavior**
Exported content should have the published_metadata + difference in content i.e. publication.repository_version.content - start_repository_version.content
**Actual behavior**
Everything gets copied.
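The diff above addresses this by computing the content difference against `start_repo_version` and filtering the pass-through artifact map with it — roughly, as a fragment from the export task (shown for illustration, not runnable on its own):
```python
# Only keep content artifacts that are new relative to start_repo_version.
difference_content_artifacts = set(
    content_artifacts.difference(start_version_content_artifacts).values_list(
        "pk", flat=True
    )
)
relative_path_to_artifacts = {
    ca.relative_path: ca.artifact
    for ca in content_artifacts.select_related("artifact").iterator()
    if (start_repo_version is None) or (ca.pk in difference_content_artifacts)
}
```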
Bugzilla https://bugzilla.redhat.com/show_bug.cgi?id=2172564
| 2023-07-29T17:09:14 |
||
pulp/pulpcore | 4,204 | pulp__pulpcore-4204 | [
"3941"
] | c3078a9f5a4e32e0676ff979b0ddc41249a8b2a9 | diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -143,12 +143,17 @@ def _export_publication_to_file_system(
for ca in content_artifacts.select_related("artifact").iterator()
}
+ publication_metadata_paths = set(
+ publication.published_metadata.values_list("relative_path", flat=True)
+ )
for pa in publication.published_artifact.select_related(
"content_artifact", "content_artifact__artifact"
).iterator():
# Artifact isn't guaranteed to be present
if pa.content_artifact.artifact and (
- start_repo_version is None or pa.content_artifact.pk in difference_content_artifacts
+ start_repo_version is None
+ or pa.relative_path in publication_metadata_paths
+ or pa.content_artifact.pk in difference_content_artifacts
):
relative_path_to_artifacts[pa.relative_path] = pa.content_artifact.artifact
| fs_exporter does not include productid certificate on incremental exports
**Version**
3.18.16
**Describe the bug**
Incrementally exporting a repository that has a productid certificate with the fs_exporter will not copy the productid certificate.
This happens because we calculate the difference between the repository version being exported and start_version. As the productid is always the same, this artifact is not listed as new in the list of differences and is therefore not included in the export.
We should ensure all artifacts that belong to metadata are exported.
**To Reproduce**
1. Have a repository that contains a productid certificate
2. Generate a complete export of such repository
3. Export an incremental version of this repository using fs_exporter
4. Check that the productid certificate of the repository is missing from the export
**Expected behavior**
All metadata files should be exported, regardless of whether the export is complete or incremental.
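The diff above achieves this by always keeping paths that belong to the publication's published metadata, in addition to anything new since `start_repo_version` — a fragment for illustration (names come from the export task):
```python
# Always export published metadata, plus artifacts new since start_repo_version.
publication_metadata_paths = set(
    publication.published_metadata.values_list("relative_path", flat=True)
)
for pa in publication.published_artifact.select_related(
    "content_artifact", "content_artifact__artifact"
).iterator():
    if pa.content_artifact.artifact and (
        start_repo_version is None
        or pa.relative_path in publication_metadata_paths
        or pa.content_artifact.pk in difference_content_artifacts
    ):
        relative_path_to_artifacts[pa.relative_path] = pa.content_artifact.artifact
```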
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2212523
| Copying over my comment from the PR:
----
I would consider this either a Katello or a documentation bug then. This is the way this feature was described when it was requested / added:
https://github.com/pulp/pulpcore/issues/3413
> Idea is
>
> * User has already exported Rhel - 7 (using the fs exporter) - full export
> * User consumes content from the export
> * Several days later the user now wants the new content for RHEL 7. Instead of exporting 50GB worth of content again the user's export should only consist of rpms that got added after the start version and latest metadata
> * User should be able to **copy the contents of this export over the regular rhel export directory** and use that to sync repodata.
From Pulp's perspective I don't think there is a bug here.
So there were some misunderstandings around the original design. The intention was for the incremental filesystem export to also be syncable on its own, which means it needs to have all the metadata, even if it didn't change, whereas the current incremental implementation only includes artifacts that changed and expects the user to copy it on top of an existing filesystem export.
If copying isn't an option, then we could change the implementation to export metadata as well. Copying Grant's suggestion:
>Maybe somewhere around here https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/export.py#L146-L155 we could look for all PublishedMetadata associated with the passed-in publication, and force-add them to relative_path_to_artifacts - I think that will address the need, without exposing pulpcore to "knowing things about plugins Core Was Not Meant to Know" | 2023-07-29T17:09:51 |
|
pulp/pulpcore | 4,205 | pulp__pulpcore-4205 | [
"3737"
] | c3078a9f5a4e32e0676ff979b0ddc41249a8b2a9 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -337,6 +337,18 @@ def validate_toc(toc_filename):
raise ValidationError(_("Missing 'files' or 'meta' keys in table-of-contents!"))
base_dir = os.path.dirname(toc_filename)
+
+ # Regardless of what the TOC says, it's possible for a previous import to have
+ # failed after successfully creating the combined file. If the TOC specifies multiple
+ # chunks, but the "expected result" exists, ignore the chunk-list and process as if
+ # it's all there ever was.
+ top_level_file = os.path.join(base_dir, the_toc["meta"]["file"])
+ if len(the_toc["files"]) > 1 and os.path.isfile(top_level_file):
+ the_toc["files"] = {the_toc["meta"]["file"]: the_toc["meta"]["global_hash"]}
+
+ # At this point, we either have the original chunks, or we're validating the
+ # full-file as a single chunk. Validate the hash(es).
+
# Points at chunks that exist?
missing_files = []
for f in sorted(the_toc["files"].keys()):
@@ -371,17 +383,7 @@ def validate_toc(toc_filename):
return the_toc
- def validate_and_assemble(toc_filename):
- """Validate checksums of, and reassemble, chunks in table-of-contents file."""
- the_toc = validate_toc(toc_filename)
- toc_dir = os.path.dirname(toc_filename)
- result_file = os.path.join(toc_dir, the_toc["meta"]["file"])
-
- # if we have only one entry in "files", it must be the full .tar.gz - return it
- if len(the_toc["files"]) == 1:
- return os.path.join(toc_dir, list(the_toc["files"].keys())[0])
-
- # We have multiple chunks.
+ def reassemble(the_toc, toc_dir, result_file):
# reassemble into one file 'next to' the toc and return the resulting full-path
chunk_size = int(the_toc["meta"]["chunk_size"])
offset = 0
@@ -434,6 +436,20 @@ def validate_and_assemble(toc_filename):
# Let the rest of the import process do its thing on the new combined-file.
return result_file
+ def validate_and_assemble(toc_filename):
+ """Validate checksums of, and reassemble, chunks in table-of-contents file."""
+ the_toc = validate_toc(toc_filename)
+ toc_dir = os.path.dirname(toc_filename)
+ result_file = os.path.join(toc_dir, the_toc["meta"]["file"])
+
+ # if we have only one entry in "files", it must be the full .tar.gz.
+ # Return the filename from the meta-section.
+ if len(the_toc["files"]) == 1:
+ return result_file
+
+ # We have multiple chunks. Reassemble them and return the result.
+ return reassemble(the_toc, toc_dir, result_file)
+
if toc:
log.info(_("Validating TOC {}.").format(toc))
path = validate_and_assemble(toc)
| Can't rerun a failed content-import task if it was exported using chunks
**Version**
All?
**Describe the bug**
When importing content that was exported in chunks, the importer process concatenates the chunks into a single file in order to import it.
If that import task fails for some reason, after the chunks were already combined into a single file, the user can't simply re-run the same command to retry the import. Satellite will complain that the chunks are missing.
Checking the directory, we can see that the chunks are gone and only the master file is present (together with metadata.json and TOC file).
**To Reproduce**
Steps to Reproduce:
1. Export something using chunks
2. Start the import on another satellite. Monitor the data directory until all the chunk files are gone and only the main file is present. At this moment, kill the pulpcore-worker which is processing the import. Wait until the task returns with error.
3. Repeat the same import command. Pulp will error out with an error like this:
~~~
Feb 27 16:38:56 reproducer-import pulpcore-worker-3[156191]: pulp [6c91f855-9959-43b1-864f-925393ae025a]: pulpcore.tasking.pulpcore_worker:INFO: Task ddf45eb1-adea-480e-9114-748baa7bd7bf failed ([ErrorDetail(string="Missing import-chunks named in table-of-contents: ['export-fc4273a4-6320-4fd5-98c9-6adfe9461781-20230227_2132.tar.gz.0000', 'export-fc4273a4-6320-4fd5-98c9-6adfe9461781-20230227_2132.tar.gz.0001', 'export-fc4273a4-6320-4fd5-98c9-6adfe9461781-20230227_2132.tar.gz.0002'].", code='invalid')])
~~~
Actual results:
Re-running the same import fails, complaining about missing chunks. The user needs to modify the TOC file manually OR split the master file into chunks again OR copy the files again in order to run the import.
**Expected behavior**
Pulp could be smart enough to identify that either all the chunks are present OR the global file is. If the checksum matches, move forward with the import.
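That is essentially what the diff above does in `validate_toc()`: if the reassembled file already exists, the chunk list in the table-of-contents is swapped for the single combined file and its global hash, so the usual checksum validation and import can proceed. Fragment for illustration:
```python
# If a previous run already produced the combined file, validate it instead of
# the (now removed) chunks named in the table-of-contents.
top_level_file = os.path.join(base_dir, the_toc["meta"]["file"])
if len(the_toc["files"]) > 1 and os.path.isfile(top_level_file):
    the_toc["files"] = {the_toc["meta"]["file"]: the_toc["meta"]["global_hash"]}
```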
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2173757
| 2023-07-29T17:10:54 |
||
pulp/pulpcore | 4,211 | pulp__pulpcore-4211 | [
"4210"
] | ed3314079e7e199ae99db4e1f05ed1668e4664f7 | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -94,14 +94,14 @@ def export_artifacts(export, artifacts):
Args:
export (django.db.models.PulpExport): export instance that's doing the export
- artifacts (django.db.models.Artifacts): QuerySet of artifacts in all repos being exported
+ artifacts (django.db.models.Artifacts): Set of artifacts in all repos being exported
Raises:
ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
"""
data = dict(message="Exporting Artifacts", code="export.artifacts", total=len(artifacts))
with ProgressReport(**data) as pb:
- for artifact in artifacts.iterator(): # chunk_size= defaults to 2000 at a fetch
+ for artifact in pb.iter(artifacts):
dest = artifact.file.name
if settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem":
with tempfile.TemporaryDirectory(dir=".") as temp_dir:
@@ -112,7 +112,6 @@ def export_artifacts(export, artifacts):
export.tarfile.add(temp_file.name, dest)
else:
export.tarfile.add(artifact.file.path, dest)
- pb.increment()
resource = ArtifactResource()
resource.queryset = artifacts
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -500,7 +500,7 @@ def _do_export(pulp_exporter, tar, the_export):
starting_versions = _get_starting_versions(do_incremental, pulp_exporter, the_export)
vers_match = _version_match(ending_versions, starting_versions)
# Gather up versions and artifacts
- artifacts = None # Will be a QuerySet selecting the Artifacts that need to be exported
+ artifacts = set()
for version in ending_versions:
# Check version-content to make sure we're not being asked to export
# an on_demand repo
@@ -509,14 +509,11 @@ def _do_export(pulp_exporter, tar, the_export):
raise RuntimeError(_("Remote artifacts cannot be exported."))
if do_incremental:
- vers_artifacts = version.artifacts.difference(vers_match[version].artifacts)
+ vers_artifacts = version.artifacts.difference(vers_match[version].artifacts).all()
else:
- vers_artifacts = version.artifacts
+ vers_artifacts = version.artifacts.all()
- if artifacts:
- artifacts.union(vers_artifacts)
- else:
- artifacts = vers_artifacts
+ artifacts.update(vers_artifacts)
# export plugin-version-info
export_versions(the_export, plugin_version_info)
| Export artifact-dedup optimizations break pulp_rpm export.
**Version**
main, 18, 21, 22, 23, 28, 29
**Describe the bug**
After the changes for #4159 were released, pulp_rpm nightly tests started failing. This turns out to be the result of a subtle bug in the optimizations introduced for that fix.
This has to be fixed and released ASAP to all versions #4159 was released into.
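One likely culprit, judging from the diff above, is that `QuerySet.union()` returns a new queryset rather than modifying the one it is called on, so calling it without keeping the return value silently drops all but the first version's artifacts — a minimal illustration (not pulpcore code):
```python
# QuerySet.union() is not in-place; its result must be assigned to be kept.
artifacts = artifacts.union(vers_artifacts)   # combined queryset is kept
artifacts.union(vers_artifacts)               # no-op: combined queryset is discarded
```
The fix above sidesteps the issue by switching to a plain Python `set` and `set.update()`.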
**To Reproduce**
* Run https://github.com/pulp/pulp_rpm/blob/main/pulp_rpm/tests/functional/api/test_pulpimport.py#L287
| 2023-07-31T16:42:28 |
||
pulp/pulpcore | 4,212 | pulp__pulpcore-4212 | [
"4210"
] | 9b6cf397fd3907d500b347d7e9d5f3d2b50694fa | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -94,14 +94,14 @@ def export_artifacts(export, artifacts):
Args:
export (django.db.models.PulpExport): export instance that's doing the export
- artifacts (django.db.models.Artifacts): QuerySet of artifacts in all repos being exported
+ artifacts (django.db.models.Artifacts): Set of artifacts in all repos being exported
Raises:
ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
"""
data = dict(message="Exporting Artifacts", code="export.artifacts", total=len(artifacts))
with ProgressReport(**data) as pb:
- for artifact in artifacts.iterator(): # chunk_size= defaults to 2000 at a fetch
+ for artifact in pb.iter(artifacts):
dest = artifact.file.name
if settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem":
with tempfile.TemporaryDirectory(dir=".") as temp_dir:
@@ -112,7 +112,6 @@ def export_artifacts(export, artifacts):
export.tarfile.add(temp_file.name, dest)
else:
export.tarfile.add(artifact.file.path, dest)
- pb.increment()
resource = ArtifactResource()
resource.queryset = artifacts
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -500,7 +500,7 @@ def _do_export(pulp_exporter, tar, the_export):
starting_versions = _get_starting_versions(do_incremental, pulp_exporter, the_export)
vers_match = _version_match(ending_versions, starting_versions)
# Gather up versions and artifacts
- artifacts = None # Will be a QuerySet selecting the Artifacts that need to be exported
+ artifacts = set()
for version in ending_versions:
# Check version-content to make sure we're not being asked to export
# an on_demand repo
@@ -509,14 +509,11 @@ def _do_export(pulp_exporter, tar, the_export):
            raise RuntimeError(_("Remote artifacts cannot be exported."))
if do_incremental:
- vers_artifacts = version.artifacts.difference(vers_match[version].artifacts)
+ vers_artifacts = version.artifacts.difference(vers_match[version].artifacts).all()
else:
- vers_artifacts = version.artifacts
+ vers_artifacts = version.artifacts.all()
- if artifacts:
- artifacts.union(vers_artifacts)
- else:
- artifacts = vers_artifacts
+ artifacts.update(vers_artifacts)
# export plugin-version-info
export_versions(the_export, plugin_version_info)
| Export artifact-dedup optimizations break pulp_rpm export.
**Version**
main, 18, 21, 22, 23, 28, 29
**Describe the bug**
After the changes for #4159 were released, pulp_rpm nightly tests started failing. This turns out to be the result of a subtle bug in the optimizations introduced for that fix.
This has to be fixed and released ASAP to all versions #4159 was released into.
**To Reproduce**
* Run https://github.com/pulp/pulp_rpm/blob/main/pulp_rpm/tests/functional/api/test_pulpimport.py#L287
| 2023-07-31T17:30:40 |
||
pulp/pulpcore | 4,213 | pulp__pulpcore-4213 | [
"4210"
] | 4d3de452f303115c36eb74ee0e5e5e446a4b8715 | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -94,14 +94,14 @@ def export_artifacts(export, artifacts):
Args:
export (django.db.models.PulpExport): export instance that's doing the export
- artifacts (django.db.models.Artifacts): QuerySet of artifacts in all repos being exported
+ artifacts (django.db.models.Artifacts): Set of artifacts in all repos being exported
Raises:
ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
"""
data = dict(message="Exporting Artifacts", code="export.artifacts", total=len(artifacts))
with ProgressReport(**data) as pb:
- for artifact in artifacts.iterator(): # chunk_size= defaults to 2000 at a fetch
+ for artifact in pb.iter(artifacts):
dest = artifact.file.name
if settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem":
with tempfile.TemporaryDirectory(dir=".") as temp_dir:
@@ -112,7 +112,6 @@ def export_artifacts(export, artifacts):
export.tarfile.add(temp_file.name, dest)
else:
export.tarfile.add(artifact.file.path, dest)
- pb.increment()
resource = ArtifactResource()
resource.queryset = artifacts
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -500,7 +500,7 @@ def _do_export(pulp_exporter, tar, the_export):
starting_versions = _get_starting_versions(do_incremental, pulp_exporter, the_export)
vers_match = _version_match(ending_versions, starting_versions)
# Gather up versions and artifacts
- artifacts = None # Will be a QuerySet selecting the Artifacts that need to be exported
+ artifacts = set()
for version in ending_versions:
# Check version-content to make sure we're not being asked to export
# an on_demand repo
@@ -509,14 +509,11 @@ def _do_export(pulp_exporter, tar, the_export):
raise RuntimeError(_("Remote artifacts cannot be exported."))
if do_incremental:
- vers_artifacts = version.artifacts.difference(vers_match[version].artifacts)
+ vers_artifacts = version.artifacts.difference(vers_match[version].artifacts).all()
else:
- vers_artifacts = version.artifacts
+ vers_artifacts = version.artifacts.all()
- if artifacts:
- artifacts.union(vers_artifacts)
- else:
- artifacts = vers_artifacts
+ artifacts.update(vers_artifacts)
# export plugin-version-info
export_versions(the_export, plugin_version_info)
| Export artifact-dedup optimizations break pulp_rpm export.
**Version**
main, 18, 21, 22, 23, 28, 29
**Describe the bug**
After the changes for #4159 were released, pulp_rpm nightly tests started failing. This turns out to be the result of a subtle bug in the optimizations introduced for that fix.
This has to be fixed and released ASAP to all versions #4159 was released into.
**To Reproduce**
* Run https://github.com/pulp/pulp_rpm/blob/main/pulp_rpm/tests/functional/api/test_pulpimport.py#L287
| 2023-07-31T17:30:54 |
||
pulp/pulpcore | 4,214 | pulp__pulpcore-4214 | [
"4210"
] | c00a8337fa8875d90284975075582b5e975a61d4 | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -94,14 +94,14 @@ def export_artifacts(export, artifacts):
Args:
export (django.db.models.PulpExport): export instance that's doing the export
- artifacts (django.db.models.Artifacts): QuerySet of artifacts in all repos being exported
+ artifacts (django.db.models.Artifacts): Set of artifacts in all repos being exported
Raises:
ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
"""
data = dict(message="Exporting Artifacts", code="export.artifacts", total=len(artifacts))
with ProgressReport(**data) as pb:
- for artifact in artifacts.iterator(): # chunk_size= defaults to 2000 at a fetch
+ for artifact in pb.iter(artifacts):
dest = artifact.file.name
if settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem":
with tempfile.TemporaryDirectory(dir=".") as temp_dir:
@@ -112,7 +112,6 @@ def export_artifacts(export, artifacts):
export.tarfile.add(temp_file.name, dest)
else:
export.tarfile.add(artifact.file.path, dest)
- pb.increment()
resource = ArtifactResource()
resource.queryset = artifacts
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -500,7 +500,7 @@ def _do_export(pulp_exporter, tar, the_export):
starting_versions = _get_starting_versions(do_incremental, pulp_exporter, the_export)
vers_match = _version_match(ending_versions, starting_versions)
# Gather up versions and artifacts
- artifacts = None # Will be a QuerySet selecting the Artifacts that need to be exported
+ artifacts = set()
for version in ending_versions:
# Check version-content to make sure we're not being asked to export
# an on_demand repo
@@ -509,14 +509,11 @@ def _do_export(pulp_exporter, tar, the_export):
raise RuntimeError(_("Remote artifacts cannot be exported."))
if do_incremental:
- vers_artifacts = version.artifacts.difference(vers_match[version].artifacts)
+ vers_artifacts = version.artifacts.difference(vers_match[version].artifacts).all()
else:
- vers_artifacts = version.artifacts
+ vers_artifacts = version.artifacts.all()
- if artifacts:
- artifacts.union(vers_artifacts)
- else:
- artifacts = vers_artifacts
+ artifacts.update(vers_artifacts)
# export plugin-version-info
export_versions(the_export, plugin_version_info)
| Export artifact-dedup optimizations break pulp_rpm export.
**Version**
main, 18, 21, 22, 23, 28, 29
**Describe the bug**
After the changes for #4159 were released, pulp_rpm nightly tests started failing. This turns out to be the result of a subtle bug in the optimizations introduced for that fix.
This has to be fixed and released ASAP to all versions #4159 was released into.
**To Reproduce**
* Run https://github.com/pulp/pulp_rpm/blob/main/pulp_rpm/tests/functional/api/test_pulpimport.py#L287
| 2023-07-31T17:31:07 |
||
pulp/pulpcore | 4,215 | pulp__pulpcore-4215 | [
"4210"
] | 619f00ac262df5ff4a948993e31f717a81187987 | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -94,14 +94,14 @@ def export_artifacts(export, artifacts):
Args:
export (django.db.models.PulpExport): export instance that's doing the export
- artifacts (django.db.models.Artifacts): QuerySet of artifacts in all repos being exported
+ artifacts (django.db.models.Artifacts): Set of artifacts in all repos being exported
Raises:
ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
"""
data = dict(message="Exporting Artifacts", code="export.artifacts", total=len(artifacts))
with ProgressReport(**data) as pb:
- for artifact in artifacts.iterator(): # chunk_size= defaults to 2000 at a fetch
+ for artifact in pb.iter(artifacts):
dest = artifact.file.name
if settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem":
with tempfile.TemporaryDirectory(dir=".") as temp_dir:
@@ -112,7 +112,6 @@ def export_artifacts(export, artifacts):
export.tarfile.add(temp_file.name, dest)
else:
export.tarfile.add(artifact.file.path, dest)
- pb.increment()
resource = ArtifactResource()
resource.queryset = artifacts
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -500,7 +500,7 @@ def _do_export(pulp_exporter, tar, the_export):
starting_versions = _get_starting_versions(do_incremental, pulp_exporter, the_export)
vers_match = _version_match(ending_versions, starting_versions)
# Gather up versions and artifacts
- artifacts = None # Will be a QuerySet selecting the Artifacts that need to be exported
+ artifacts = set()
for version in ending_versions:
# Check version-content to make sure we're not being asked to export
# an on_demand repo
@@ -509,14 +509,11 @@ def _do_export(pulp_exporter, tar, the_export):
raise RuntimeError(_("Remote artifacts cannot be exported."))
if do_incremental:
- vers_artifacts = version.artifacts.difference(vers_match[version].artifacts)
+ vers_artifacts = version.artifacts.difference(vers_match[version].artifacts).all()
else:
- vers_artifacts = version.artifacts
+ vers_artifacts = version.artifacts.all()
- if artifacts:
- artifacts.union(vers_artifacts)
- else:
- artifacts = vers_artifacts
+ artifacts.update(vers_artifacts)
# export plugin-version-info
export_versions(the_export, plugin_version_info)
| Export artifact-dedup optimizations break pulp_rpm export.
**Version**
main, 18, 21, 22, 23, 28, 29
**Describe the bug**
After the changes for #4159 were released, pulp_rpm nightly tests started failing. This turns out to be the result of a subtle bug in the optimizations introduced for that fix.
This has to be fixed and released ASAP to all versions #4159 was released into.
**To Reproduce**
* Run https://github.com/pulp/pulp_rpm/blob/main/pulp_rpm/tests/functional/api/test_pulpimport.py#L287
| 2023-07-31T17:31:20 |
||
pulp/pulpcore | 4,216 | pulp__pulpcore-4216 | [
"4210"
] | 819ffd61cf45fb5f44ef71b201b3ce34379dabd6 | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -94,14 +94,14 @@ def export_artifacts(export, artifacts):
Args:
export (django.db.models.PulpExport): export instance that's doing the export
- artifacts (django.db.models.Artifacts): QuerySet of artifacts in all repos being exported
+ artifacts (django.db.models.Artifacts): Set of artifacts in all repos being exported
Raises:
ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
"""
data = dict(message="Exporting Artifacts", code="export.artifacts", total=len(artifacts))
with ProgressReport(**data) as pb:
- for artifact in artifacts.iterator(): # chunk_size= defaults to 2000 at a fetch
+ for artifact in pb.iter(artifacts):
dest = artifact.file.name
if settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem":
with tempfile.TemporaryDirectory(dir=".") as temp_dir:
@@ -112,7 +112,6 @@ def export_artifacts(export, artifacts):
export.tarfile.add(temp_file.name, dest)
else:
export.tarfile.add(artifact.file.path, dest)
- pb.increment()
resource = ArtifactResource()
resource.queryset = artifacts
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -495,7 +495,7 @@ def _do_export(pulp_exporter, tar, the_export):
starting_versions = _get_starting_versions(do_incremental, pulp_exporter, the_export)
vers_match = _version_match(ending_versions, starting_versions)
# Gather up versions and artifacts
- artifacts = None # Will be a QuerySet selecting the Artifacts that need to be exported
+ artifacts = set()
for version in ending_versions:
# Check version-content to make sure we're not being asked to export
# an on_demand repo
@@ -504,14 +504,11 @@ def _do_export(pulp_exporter, tar, the_export):
raise RuntimeError(_("Remote artifacts cannot be exported."))
if do_incremental:
- vers_artifacts = version.artifacts.difference(vers_match[version].artifacts)
+ vers_artifacts = version.artifacts.difference(vers_match[version].artifacts).all()
else:
- vers_artifacts = version.artifacts
+ vers_artifacts = version.artifacts.all()
- if artifacts:
- artifacts.union(vers_artifacts)
- else:
- artifacts = vers_artifacts
+ artifacts.update(vers_artifacts)
# export plugin-version-info
export_versions(the_export, plugin_version_info)
| Export artifact-dedup optimizations break pulp_rpm export.
**Version**
main, 18, 21, 22, 23, 28, 29
**Describe the bug**
After the changes for #4159 were released, pulp_rpm nightly tests started failing. This turns out to be the result of a subtle bug in the optimizations introduced for that fix.
This has to be fixed and released ASAP to all versions #4159 was released into.
**To Reproduce**
* Run https://github.com/pulp/pulp_rpm/blob/main/pulp_rpm/tests/functional/api/test_pulpimport.py#L287
| 2023-07-31T17:31:34 |
||
pulp/pulpcore | 4,217 | pulp__pulpcore-4217 | [
"4210"
] | db151b11f0140ab080e9ae2ff5af31124c1e1560 | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -94,14 +94,14 @@ def export_artifacts(export, artifacts):
Args:
export (django.db.models.PulpExport): export instance that's doing the export
- artifacts (django.db.models.Artifacts): QuerySet of artifacts in all repos being exported
+ artifacts (django.db.models.Artifacts): Set of artifacts in all repos being exported
Raises:
ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
"""
data = dict(message="Exporting Artifacts", code="export.artifacts", total=len(artifacts))
with ProgressReport(**data) as pb:
- for artifact in artifacts.iterator(): # chunk_size= defaults to 2000 at a fetch
+ for artifact in pb.iter(artifacts):
dest = artifact.file.name
if settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem":
with tempfile.TemporaryDirectory(dir=".") as temp_dir:
@@ -112,7 +112,6 @@ def export_artifacts(export, artifacts):
export.tarfile.add(temp_file.name, dest)
else:
export.tarfile.add(artifact.file.path, dest)
- pb.increment()
resource = ArtifactResource()
resource.queryset = artifacts
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -500,7 +500,7 @@ def _do_export(pulp_exporter, tar, the_export):
starting_versions = _get_starting_versions(do_incremental, pulp_exporter, the_export)
vers_match = _version_match(ending_versions, starting_versions)
# Gather up versions and artifacts
- artifacts = None # Will be a QuerySet selecting the Artifacts that need to be exported
+ artifacts = set()
for version in ending_versions:
# Check version-content to make sure we're not being asked to export
# an on_demand repo
@@ -509,14 +509,11 @@ def _do_export(pulp_exporter, tar, the_export):
raise RuntimeError(_("Remote artifacts cannot be exported."))
if do_incremental:
- vers_artifacts = version.artifacts.difference(vers_match[version].artifacts)
+ vers_artifacts = version.artifacts.difference(vers_match[version].artifacts).all()
else:
- vers_artifacts = version.artifacts
+ vers_artifacts = version.artifacts.all()
- if artifacts:
- artifacts.union(vers_artifacts)
- else:
- artifacts = vers_artifacts
+ artifacts.update(vers_artifacts)
# export plugin-version-info
export_versions(the_export, plugin_version_info)
| Export artifact-dedup optimizations break pulp_rpm export.
**Version**
main, 18, 21, 22, 23, 28, 29
**Describe the bug**
After the changes for #4159 were released, pulp_rpm nightly tests started failing. This turns out to be the result of a subtle bug in the optimizations introduced for that fix.
This has to be fixed and released ASAP to all versions #4159 was released into.
**To Reproduce**
* Run https://github.com/pulp/pulp_rpm/blob/main/pulp_rpm/tests/functional/api/test_pulpimport.py#L287
| 2023-07-31T17:31:48 |
||
pulp/pulpcore | 4,230 | pulp__pulpcore-4230 | [
"4229"
] | 8f9a9499347ebd966d862068f30ca5b26122c91d | diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py
--- a/pulpcore/download/base.py
+++ b/pulpcore/download/base.py
@@ -172,7 +172,7 @@ async def finalize(self):
self.validate_size()
log.debug(f"Downloaded file from {self.url}")
- def fetch(self):
+ def fetch(self, extra_data=None):
"""
Run the download synchronously and return the `DownloadResult`.
@@ -182,7 +182,7 @@ def fetch(self):
Raises:
Exception: Any fatal exception emitted during downloading
"""
- result = asyncio.get_event_loop().run_until_complete(self.run())
+ result = asyncio.get_event_loop().run_until_complete(self.run(extra_data=extra_data))
return result
def _record_size_and_digests_for_data(self, data):
| Enable passing extra data to the fetch method of BaseDownloader
This will enable plugins to issue HEAD requests if needed.
```
try:
response = downloader.fetch(
extra_data={"headers": V2_ACCEPT_HEADERS, "http_method": "head"}
)
except ClientResponseError:
raise ManifestNotFound(reference=pk)
```
| 2023-08-01T15:14:32 |
||
pulp/pulpcore | 4,232 | pulp__pulpcore-4232 | [
"4231"
] | 8f9a9499347ebd966d862068f30ca5b26122c91d | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -94,7 +94,7 @@ def export_artifacts(export, artifacts):
Args:
export (django.db.models.PulpExport): export instance that's doing the export
- artifacts (django.db.models.Artifacts): Set of artifacts in all repos being exported
+ artifacts (django.db.models.Artifacts): List of artifacts in all repos being exported
Raises:
ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -512,14 +512,13 @@ def _do_export(pulp_exporter, tar, the_export):
vers_artifacts = version.artifacts.difference(vers_match[version].artifacts).all()
else:
vers_artifacts = version.artifacts.all()
-
artifacts.update(vers_artifacts)
# export plugin-version-info
export_versions(the_export, plugin_version_info)
# Export the top-level entities (artifacts and repositories)
# Note: we've already handled "what about incrementals" when building the 'artifacts' list
- export_artifacts(the_export, artifacts)
+ export_artifacts(the_export, list(artifacts))
# Export the repository-version data, per-version
for version in ending_versions:
export_content(the_export, version)
| Export artifact-dedup fix from #4210 incomplete
Version
main, 18, 21, 22, 23, 28, 29
Describe the bug
The change for https://github.com/pulp/pulpcore/issues/4210 failed to completely fix the problem uncovered in pulp_rpm.
`django-import-export`'s `export()` apparently **really** wants either a QuerySet or a **list** (specifically).
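The change in the diff above is accordingly tiny — convert the accumulated set to a list right before handing it to the artifact export (fragment for illustration):
```python
# django-import-export's export() accepts a QuerySet or a list, not a set.
export_artifacts(the_export, list(artifacts))
```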
Fix and backport to the above versions.
| 2023-08-01T15:42:07 |
||
pulp/pulpcore | 4,233 | pulp__pulpcore-4233 | [
"4231"
] | 35cf6ed44c147794fe47f4dc5f5b888ed146aec3 | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -94,7 +94,7 @@ def export_artifacts(export, artifacts):
Args:
export (django.db.models.PulpExport): export instance that's doing the export
- artifacts (django.db.models.Artifacts): Set of artifacts in all repos being exported
+ artifacts (django.db.models.Artifacts): List of artifacts in all repos being exported
Raises:
ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -512,14 +512,13 @@ def _do_export(pulp_exporter, tar, the_export):
vers_artifacts = version.artifacts.difference(vers_match[version].artifacts).all()
else:
vers_artifacts = version.artifacts.all()
-
artifacts.update(vers_artifacts)
# export plugin-version-info
export_versions(the_export, plugin_version_info)
# Export the top-level entities (artifacts and repositories)
# Note: we've already handled "what about incrementals" when building the 'artifacts' list
- export_artifacts(the_export, artifacts)
+ export_artifacts(the_export, list(artifacts))
# Export the repository-version data, per-version
for version in ending_versions:
export_content(the_export, version)
| Export artifact-dedup fix from #4210 incomplete
Version
main, 18, 21, 22, 23, 28, 29
Describe the bug
The change for https://github.com/pulp/pulpcore/issues/4210 failed to completely fix the problem uncovered in pulp_rpm.
`django-import-export`'s `export()` apparently **really** wants either a QuerySet or a **list** (specifically).
Fix and backport to the above versions.
| 2023-08-01T16:31:24 |
||
pulp/pulpcore | 4,234 | pulp__pulpcore-4234 | [
"4231"
] | d5c5382a6f211cb4da32e1655c2dfd04f1202401 | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -94,7 +94,7 @@ def export_artifacts(export, artifacts):
Args:
export (django.db.models.PulpExport): export instance that's doing the export
- artifacts (django.db.models.Artifacts): Set of artifacts in all repos being exported
+ artifacts (django.db.models.Artifacts): List of artifacts in all repos being exported
Raises:
ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
diff --git a/pulpcore/app/tasks/export.py b/pulpcore/app/tasks/export.py
--- a/pulpcore/app/tasks/export.py
+++ b/pulpcore/app/tasks/export.py
@@ -507,14 +507,13 @@ def _do_export(pulp_exporter, tar, the_export):
vers_artifacts = version.artifacts.difference(vers_match[version].artifacts).all()
else:
vers_artifacts = version.artifacts.all()
-
artifacts.update(vers_artifacts)
# export plugin-version-info
export_versions(the_export, plugin_version_info)
# Export the top-level entities (artifacts and repositories)
# Note: we've already handled "what about incrementals" when building the 'artifacts' list
- export_artifacts(the_export, artifacts)
+ export_artifacts(the_export, list(artifacts))
# Export the repository-version data, per-version
for version in ending_versions:
export_content(the_export, version)
| Export artifact-dedup fix from #4210 incomplete
Version
main, 18, 21, 22, 23, 28, 29
Describe the bug
The change for https://github.com/pulp/pulpcore/issues/4210 failed to completely fix the problem uncovered in pulp_rpm.
`django-import-export`'s `export()` apparently **really** wants either a QuerySet or a **list** (specifically).
Fix and backport to the above versions.
| 2023-08-01T16:31:24 |