repo (stringclasses, 856 values) | pull_number (int64, 3 - 127k) | instance_id (stringlengths, 12 - 58) | issue_numbers (sequencelengths, 1 - 5) | base_commit (stringlengths, 40) | patch (stringlengths, 67 - 1.54M) | test_patch (stringlengths, 0 - 107M) | problem_statement (stringlengths, 3 - 307k) | hints_text (stringlengths, 0 - 908k) | created_at (timestamp[s])
---|---|---|---|---|---|---|---|---|---|
pulp/pulpcore | 2,232 | pulp__pulpcore-2232 | [
"2192"
] | 3debed4d7cdad4ee17b7749df877bb8b78596823 | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -118,10 +118,6 @@ def _combine_content_mappings(map1, map2):
)
)
- # Export the connection between content and artifacts
- resource = ContentArtifactResource(repository_version)
- _write_export(export.tarfile, resource, dest_dir)
-
# content mapping is used by repo versions with subrepos (eg distribution tree repos)
content_mapping = {}
@@ -138,6 +134,10 @@ def _combine_content_mappings(map1, map2):
content_mapping, resource.content_mapping
)
+ # Export the connection between content and artifacts
+ resource = ContentArtifactResource(repository_version, content_mapping)
+ _write_export(export.tarfile, resource, dest_dir)
+
msg = (
f"Exporting content for {plugin_name} "
f"repository-version {repository_version.repository.name}/{repository_version.number}"
diff --git a/pulpcore/app/modelresource.py b/pulpcore/app/modelresource.py
--- a/pulpcore/app/modelresource.py
+++ b/pulpcore/app/modelresource.py
@@ -66,12 +66,19 @@ class ContentArtifactResource(QueryModelResource):
ContentArtifact is different from other import-export entities because it has no 'natural key'
other than a pulp_id, which aren't shared across instances. We do some magic to link up
ContentArtifacts to their matching (already-imported) Content.
+
+ Some plugin-models have sub-repositories. We take advantage of the content-mapping
+ machinery to account for those contentartifacts as well.
"""
artifact = fields.Field(
column_name="artifact", attribute="artifact", widget=ForeignKeyWidget(Artifact, "sha256")
)
+ def __init__(self, repo_version=None, content_mapping=None):
+ self.content_mapping = content_mapping
+ super().__init__(repo_version)
+
def before_import_row(self, row, **kwargs):
"""
Fixes the content-ptr of an incoming content-artifact row at import time.
@@ -92,9 +99,15 @@ def before_import_row(self, row, **kwargs):
row["content"] = str(linked_content.pulp_id)
def set_up_queryset(self):
- return ContentArtifact.objects.filter(content__in=self.repo_version.content).order_by(
- "content", "relative_path"
- )
+ vers_content = ContentArtifact.objects.filter(content__in=self.repo_version.content)
+ if self.content_mapping:
+ all_content = []
+ for content_ids in self.content_mapping.values():
+ all_content.extend(content_ids)
+ vers_content = vers_content.union(
+ ContentArtifact.objects.filter(content__in=all_content)
+ )
+ return vers_content.order_by("content", "relative_path")
class Meta:
model = ContentArtifact
| PulpImport/Export of kickstart repos with subrepos broken
See https://bugzilla.redhat.com/show_bug.cgi?id=2040870 for details.
| The import/export engine in pulpcore needs to expose a hook to plugins to let them describe contained/dependent/sub repos. Content, Artifact, and ContentArtifact processing currently make assumptions that are incomplete in the presence of subrepos, and our testing was insufficient to uncover the problem. | 2022-02-16T16:37:04 |
|
pulp/pulpcore | 2,233 | pulp__pulpcore-2233 | [
"2192"
] | d3667988893dfed90f368efa477c4d8980793d30 | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -144,10 +144,6 @@ def _combine_content_mappings(map1, map2):
)
)
- # Export the connection between content and artifacts
- resource = ContentArtifactResource(repository_version)
- _write_export(export.tarfile, resource, dest_dir)
-
# content mapping is used by repo versions with subrepos (eg distribution tree repos)
content_mapping = {}
@@ -164,6 +160,10 @@ def _combine_content_mappings(map1, map2):
content_mapping, resource.content_mapping
)
+ # Export the connection between content and artifacts
+ resource = ContentArtifactResource(repository_version, content_mapping)
+ _write_export(export.tarfile, resource, dest_dir)
+
msg = (
f"Exporting content for {plugin_name} "
f"repository-version {repository_version.repository.name}/{repository_version.number}"
diff --git a/pulpcore/app/modelresource.py b/pulpcore/app/modelresource.py
--- a/pulpcore/app/modelresource.py
+++ b/pulpcore/app/modelresource.py
@@ -66,12 +66,19 @@ class ContentArtifactResource(QueryModelResource):
ContentArtifact is different from other import-export entities because it has no 'natural key'
other than a pulp_id, which aren't shared across instances. We do some magic to link up
ContentArtifacts to their matching (already-imported) Content.
+
+ Some plugin-models have sub-repositories. We take advantage of the content-mapping
+ machinery to account for those contentartifacts as well.
"""
artifact = fields.Field(
column_name="artifact", attribute="artifact", widget=ForeignKeyWidget(Artifact, "sha256")
)
+ def __init__(self, repo_version=None, content_mapping=None):
+ self.content_mapping = content_mapping
+ super().__init__(repo_version)
+
def before_import_row(self, row, **kwargs):
"""
Fixes the content-ptr of an incoming content-artifact row at import time.
@@ -92,9 +99,15 @@ def before_import_row(self, row, **kwargs):
row["content"] = str(linked_content.pulp_id)
def set_up_queryset(self):
- return ContentArtifact.objects.filter(content__in=self.repo_version.content).order_by(
- "content", "relative_path"
- )
+ vers_content = ContentArtifact.objects.filter(content__in=self.repo_version.content)
+ if self.content_mapping:
+ all_content = []
+ for content_ids in self.content_mapping.values():
+ all_content.extend(content_ids)
+ vers_content = vers_content.union(
+ ContentArtifact.objects.filter(content__in=all_content)
+ )
+ return vers_content.order_by("content", "relative_path")
class Meta:
model = ContentArtifact
| PulpImport/Export of kickstart repos with subrepos broken
See https://bugzilla.redhat.com/show_bug.cgi?id=2040870 for details.
| The import/export engine in pulpcore needs to expose a hook to plugins to let them describe contained/dependent/sub repos. Content, Artifact, and ContentArtifact processing currently make assumptions that are incomplete in the presence of subrepos, and our testing was insufficient to uncover the problem. | 2022-02-16T16:37:04 |
|
pulp/pulpcore | 2,234 | pulp__pulpcore-2234 | [
"2192"
] | b544c67e4b64ba8a6ca850836779ddb97b291226 | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -143,10 +143,6 @@ def _combine_content_mappings(map1, map2):
)
)
- # Export the connection between content and artifacts
- resource = ContentArtifactResource(repository_version)
- _write_export(export.tarfile, resource, dest_dir)
-
# content mapping is used by repo versions with subrepos (eg distribution tree repos)
content_mapping = {}
@@ -163,6 +159,10 @@ def _combine_content_mappings(map1, map2):
content_mapping, resource.content_mapping
)
+ # Export the connection between content and artifacts
+ resource = ContentArtifactResource(repository_version, content_mapping)
+ _write_export(export.tarfile, resource, dest_dir)
+
msg = (
f"Exporting content for {plugin_name} "
f"repository-version {repository_version.repository.name}/{repository_version.number}"
diff --git a/pulpcore/app/modelresource.py b/pulpcore/app/modelresource.py
--- a/pulpcore/app/modelresource.py
+++ b/pulpcore/app/modelresource.py
@@ -66,12 +66,19 @@ class ContentArtifactResource(QueryModelResource):
ContentArtifact is different from other import-export entities because it has no 'natural key'
other than a pulp_id, which aren't shared across instances. We do some magic to link up
ContentArtifacts to their matching (already-imported) Content.
+
+ Some plugin-models have sub-repositories. We take advantage of the content-mapping
+ machinery to account for those contentartifacts as well.
"""
artifact = fields.Field(
column_name="artifact", attribute="artifact", widget=ForeignKeyWidget(Artifact, "sha256")
)
+ def __init__(self, repo_version=None, content_mapping=None):
+ self.content_mapping = content_mapping
+ super().__init__(repo_version)
+
def before_import_row(self, row, **kwargs):
"""
Fixes the content-ptr of an incoming content-artifact row at import time.
@@ -92,9 +99,15 @@ def before_import_row(self, row, **kwargs):
row["content"] = str(linked_content.pulp_id)
def set_up_queryset(self):
- return ContentArtifact.objects.filter(content__in=self.repo_version.content).order_by(
- "content", "relative_path"
- )
+ vers_content = ContentArtifact.objects.filter(content__in=self.repo_version.content)
+ if self.content_mapping:
+ all_content = []
+ for content_ids in self.content_mapping.values():
+ all_content.extend(content_ids)
+ vers_content = vers_content.union(
+ ContentArtifact.objects.filter(content__in=all_content)
+ )
+ return vers_content.order_by("content", "relative_path")
class Meta:
model = ContentArtifact
| PulpImport/Export of kickstart repos with subrepos broken
See https://bugzilla.redhat.com/show_bug.cgi?id=2040870 for details.
| The import/export engine in pulpcore needs to expose a hook to plugins to let them describe contained/dependent/sub repos. Content, Artifact, and ContentArtifact processing currently make assumptions that are incomplete in the presence of subrepos, and our testing was insufficient to uncover the problem. | 2022-02-16T16:37:04 |
|
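The three rows above are the same fix backported to different branches: `ContentArtifactResource.set_up_queryset()` now unions the repository version's own content with everything listed in the plugin-supplied `content_mapping`, so ContentArtifacts that only live in sub-repositories get exported too. A minimal, ORM-free sketch of that selection rule (plain sets and the function name are illustrative stand-ins for the Django querysets in the patch):

```python
def content_to_export(version_content, content_mapping=None):
    """Collect ids of all content whose ContentArtifacts must be exported.

    version_content -- content ids directly present in the repository version
    content_mapping -- optional {subrepo_name: [content ids]} mapping supplied
                       by plugin resources that model sub-repositories
    """
    selected = set(version_content)
    if content_mapping:
        for content_ids in content_mapping.values():
            # union in content that only lives in sub-repositories
            selected |= set(content_ids)
    return sorted(selected)


# A distribution-tree style repo: "c3" and "c4" exist only in sub-repositories
# and were previously skipped by the exporter.
print(content_to_export(["c1", "c2"], {"subrepo-a": ["c3"], "subrepo-b": ["c2", "c4"]}))
# ['c1', 'c2', 'c3', 'c4']
```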
pulp/pulpcore | 2,238 | pulp__pulpcore-2238 | [
"2229"
] | d3667988893dfed90f368efa477c4d8980793d30 | diff --git a/pulpcore/app/models/content.py b/pulpcore/app/models/content.py
--- a/pulpcore/app/models/content.py
+++ b/pulpcore/app/models/content.py
@@ -14,12 +14,11 @@
from functools import lru_cache
from itertools import chain
-from psycopg2 import sql
from django.conf import settings
from django.core import validators
from django.core.files.storage import default_storage
-from django.db import IntegrityError, connection, models, transaction
+from django.db import IntegrityError, models, transaction
from django.forms.models import model_to_dict
from django.utils.timezone import now
from django_lifecycle import BEFORE_UPDATE, BEFORE_SAVE, hook
@@ -112,37 +111,9 @@ def touch(self):
that updating that row's timestamp-of-interest is the responsibility of whoever currently
owns it, this results in correct data, while closing the window on deadlocks.
"""
- # Build the list of ids we need to work on, since we're going to be building a
- # SQL-query "by hand" in a moment.
- pulp_ids = [f"'{uuid}'" for uuid in self.values_list("pk", flat=True)]
- if not pulp_ids:
- return None
- ids_str = ",".join(pulp_ids)
- # timestamp_of_interest exists on core_content and core_artifact, not on the Detail tables
- # If we are an instance-of Content or its subclasses, we want to update the Content table.
- # Otherwise, use the table associated w/ the query.
- db_table = (
- Content._meta.db_table if issubclass(self.model, Content) else self.model._meta.db_table
- )
- cursor = connection.cursor()
with transaction.atomic():
- # SQL-sanitizing the table-name here is certainly overkill - sql-injection here would
- # require code calling touch() on a Model whose table-name-str was carefully chosen to
- # be Bad - but, good habits...
- stmt = sql.SQL(
- "UPDATE {table_name} "
- " SET timestamp_of_interest = NOW() "
- " WHERE pulp_id IN ("
- " SELECT pulp_id "
- " FROM {table_name} "
- " WHERE pulp_id in ({ids}) "
- " ORDER BY pulp_id "
- " FOR UPDATE "
- " SKIP LOCKED)".format(table_name=sql.Identifier(db_table).string, ids=ids_str)
- )
- rslt = cursor.execute(stmt)
- cursor.close()
- return rslt
+ sub_q = self.order_by("pk").select_for_update(skip_locked=True)
+ return self.filter(pk__in=sub_q).update(timestamp_of_interest=now())
class QueryMixin:
| Update touch() to not have to use raw sql (!!)
**Version**
main
**Describe the bug**
In response to "why doesn't select_for_update() do what we expect", this response from Django team:
https://code.djangoproject.com/ticket/33516#comment:1
showed The Way to invoke it successfully.
Let's get rid of raw-sql-execution in the touch() method.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2021406
https://github.com/pulp/pulpcore/issues/2157
| 2022-02-16T17:03:56 |
||
pulp/pulpcore | 2,239 | pulp__pulpcore-2239 | [
"2229"
] | 3debed4d7cdad4ee17b7749df877bb8b78596823 | diff --git a/pulpcore/app/models/content.py b/pulpcore/app/models/content.py
--- a/pulpcore/app/models/content.py
+++ b/pulpcore/app/models/content.py
@@ -12,12 +12,11 @@
import gnupg
from itertools import chain
-from psycopg2 import sql
from django.conf import settings
from django.core import validators
from django.core.files.storage import default_storage
-from django.db import IntegrityError, connection, models, transaction
+from django.db import IntegrityError, models, transaction
from django.forms.models import model_to_dict
from django.utils.timezone import now
from django_lifecycle import BEFORE_UPDATE, BEFORE_SAVE, hook
@@ -109,37 +108,9 @@ def touch(self):
that updating that row's timestamp-of-interest is the responsibility of whoever currently
owns it, this results in correct data, while closing the window on deadlocks.
"""
- # Build the list of ids we need to work on, since we're going to be building a
- # SQL-query "by hand" in a moment.
- pulp_ids = [f"'{uuid}'" for uuid in self.values_list("pk", flat=True)]
- if not pulp_ids:
- return None
- ids_str = ",".join(pulp_ids)
- # timestamp_of_interest exists on core_content and core_artifact, not on the Detail tables
- # If we are an instance-of Content or its subclasses, we want to update the Content table.
- # Otherwise, use the table associated w/ the query.
- db_table = (
- Content._meta.db_table if issubclass(self.model, Content) else self.model._meta.db_table
- )
- cursor = connection.cursor()
with transaction.atomic():
- # SQL-sanitizing the table-name here is certainly overkill - sql-injection here would
- # require code calling touch() on a Model whose table-name-str was carefully chosen to
- # be Bad - but, good habits...
- stmt = sql.SQL(
- "UPDATE {table_name} "
- " SET timestamp_of_interest = NOW() "
- " WHERE pulp_id IN ("
- " SELECT pulp_id "
- " FROM {table_name} "
- " WHERE pulp_id in ({ids}) "
- " ORDER BY pulp_id "
- " FOR UPDATE "
- " SKIP LOCKED)".format(table_name=sql.Identifier(db_table).string, ids=ids_str)
- )
- rslt = cursor.execute(stmt)
- cursor.close()
- return rslt
+ sub_q = self.order_by("pk").select_for_update(skip_locked=True)
+ return self.filter(pk__in=sub_q).update(timestamp_of_interest=now())
class QueryMixin:
| Update touch() to not have to use raw sql (!!)
**Version**
main
**Describe the bug**
In response to "why doesn't select_for_update() do what we expect", this response from Django team:
https://code.djangoproject.com/ticket/33516#comment:1
showed The Way to invoke it successfully.
Let's get rid of raw-sql-execution in the touch() method.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2021406
https://github.com/pulp/pulpcore/issues/2157
| 2022-02-16T17:04:12 |
||
pulp/pulpcore | 2,240 | pulp__pulpcore-2240 | [
"2229"
] | b544c67e4b64ba8a6ca850836779ddb97b291226 | diff --git a/pulpcore/app/models/content.py b/pulpcore/app/models/content.py
--- a/pulpcore/app/models/content.py
+++ b/pulpcore/app/models/content.py
@@ -13,12 +13,11 @@
from functools import lru_cache
from itertools import chain
-from psycopg2 import sql
from django.conf import settings
from django.core import validators
from django.core.files.storage import default_storage
-from django.db import IntegrityError, connection, models, transaction
+from django.db import IntegrityError, models, transaction
from django.forms.models import model_to_dict
from django.utils.timezone import now
from django_lifecycle import BEFORE_UPDATE, BEFORE_SAVE, hook
@@ -111,37 +110,9 @@ def touch(self):
that updating that row's timestamp-of-interest is the responsibility of whoever currently
owns it, this results in correct data, while closing the window on deadlocks.
"""
- # Build the list of ids we need to work on, since we're going to be building a
- # SQL-query "by hand" in a moment.
- pulp_ids = [f"'{uuid}'" for uuid in self.values_list("pk", flat=True)]
- if not pulp_ids:
- return None
- ids_str = ",".join(pulp_ids)
- # timestamp_of_interest exists on core_content and core_artifact, not on the Detail tables
- # If we are an instance-of Content or its subclasses, we want to update the Content table.
- # Otherwise, use the table associated w/ the query.
- db_table = (
- Content._meta.db_table if issubclass(self.model, Content) else self.model._meta.db_table
- )
- cursor = connection.cursor()
with transaction.atomic():
- # SQL-sanitizing the table-name here is certainly overkill - sql-injection here would
- # require code calling touch() on a Model whose table-name-str was carefully chosen to
- # be Bad - but, good habits...
- stmt = sql.SQL(
- "UPDATE {table_name} "
- " SET timestamp_of_interest = NOW() "
- " WHERE pulp_id IN ("
- " SELECT pulp_id "
- " FROM {table_name} "
- " WHERE pulp_id in ({ids}) "
- " ORDER BY pulp_id "
- " FOR UPDATE "
- " SKIP LOCKED)".format(table_name=sql.Identifier(db_table).string, ids=ids_str)
- )
- rslt = cursor.execute(stmt)
- cursor.close()
- return rslt
+ sub_q = self.order_by("pk").select_for_update(skip_locked=True)
+ return self.filter(pk__in=sub_q).update(timestamp_of_interest=now())
class QueryMixin:
| Update touch() to not have to use raw sql (!!)
**Version**
main
**Describe the bug**
In response to "why doesn't select_for_update() do what we expect", this response from Django team:
https://code.djangoproject.com/ticket/33516#comment:1
showed The Way to invoke it successfully.
Let's get rid of raw-sql-execution in the touch() method.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2021406
https://github.com/pulp/pulpcore/issues/2157
| 2022-02-16T17:04:27 |
||
pulp/pulpcore | 2,242 | pulp__pulpcore-2242 | [
"2229"
] | 04099f19f304185ca8d60c8b71befe01d8904cfe | diff --git a/pulpcore/app/models/content.py b/pulpcore/app/models/content.py
--- a/pulpcore/app/models/content.py
+++ b/pulpcore/app/models/content.py
@@ -11,13 +11,13 @@
import gnupg
from itertools import chain
-from psycopg2 import sql
from django.conf import settings
from django.core import validators
from django.core.files.storage import default_storage
-from django.db import IntegrityError, connection, models, transaction
+from django.db import IntegrityError, models, transaction
from django.forms.models import model_to_dict
+from django.utils.timezone import now
from django_lifecycle import BEFORE_UPDATE, BEFORE_SAVE, hook
from pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS
@@ -108,37 +108,9 @@ def touch(self):
that updating that row's timestamp-of-interest is the responsibility of whoever currently
owns it, this results in correct data, while closing the window on deadlocks.
"""
- # Build the list of ids we need to work on, since we're going to be building a
- # SQL-query "by hand" in a moment.
- pulp_ids = [f"'{uuid}'" for uuid in self.values_list("pk", flat=True)]
- if not pulp_ids:
- return None
- ids_str = ",".join(pulp_ids)
- # timestamp_of_interest exists on core_content and core_artifact, not on the Detail tables
- # If we are an instance-of Content or its subclasses, we want to update the Content table.
- # Otherwise, use the table associated w/ the query.
- db_table = (
- Content._meta.db_table if issubclass(self.model, Content) else self.model._meta.db_table
- )
- cursor = connection.cursor()
with transaction.atomic():
- # SQL-sanitizing the table-name here is certainly overkill - sql-injection here would
- # require code calling touch() on a Model whose table-name-str was carefully chosen to
- # be Bad - but, good habits...
- stmt = sql.SQL(
- "UPDATE {table_name} "
- " SET timestamp_of_interest = NOW() "
- " WHERE pulp_id IN ("
- " SELECT pulp_id "
- " FROM {table_name} "
- " WHERE pulp_id in ({ids}) "
- " ORDER BY pulp_id "
- " FOR UPDATE "
- " SKIP LOCKED)".format(table_name=sql.Identifier(db_table).string, ids=ids_str)
- )
- rslt = cursor.execute(stmt)
- cursor.close()
- return rslt
+ sub_q = self.order_by("pk").select_for_update(skip_locked=True)
+ return self.filter(pk__in=sub_q).update(timestamp_of_interest=now())
class QueryMixin:
| Update touch() to not have to use raw sql (!!)
**Version**
main
**Describe the bug**
In response to "why doesn't select_for_update() do what we expect", this response from Django team:
https://code.djangoproject.com/ticket/33516#comment:1
showed The Way to invoke it successfully.
Let's get rid of raw-sql-execution in the touch() method.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2021406
https://github.com/pulp/pulpcore/issues/2157
| 2022-02-16T20:54:15 |
||
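The four rows above carry the same backported change: the hand-built UPDATE statement is replaced by the ORM pattern suggested in the Django ticket linked in the issue. Pulled out on its own, it looks like the sketch below (assumes a PostgreSQL-backed Django project; the queryset class name and the placement of `timestamp_of_interest` are illustrative):

```python
from django.db import models, transaction
from django.utils.timezone import now


class TouchQuerySet(models.QuerySet):
    def touch(self):
        """Bump timestamp_of_interest on the selected rows, skipping locked ones."""
        with transaction.atomic():
            # Lock the target rows in a stable order, skipping any row another
            # transaction already holds, then update exactly those rows.
            sub_q = self.order_by("pk").select_for_update(skip_locked=True)
            return self.filter(pk__in=sub_q).update(timestamp_of_interest=now())
```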
pulp/pulpcore | 2,245 | pulp__pulpcore-2245 | [
"2247"
] | b343e68a7a4ff35786da041ebbd2695f33ca185c | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -373,7 +373,7 @@ def validate_and_assemble(toc_filename):
)
CreatedResource.objects.create(content_object=the_import)
- with tempfile.TemporaryDirectory() as temp_dir:
+ with tempfile.TemporaryDirectory(dir=".") as temp_dir:
with tarfile.open(path, "r:gz") as tar:
tar.extractall(path=temp_dir)
| PulpImporter assumes tempfiles can always go to /tmp
This issue is a copy of https://pulp.plan.io/issues/8610 , to allow us to backport the fix from core/3.17 into 14/15/16 correctly.
**Version**
core/3.14+
**Describe the bug**
importer.pulp_import uses tempfile.TemporaryDirectory() in places like this:
https://github.com/pulp/pulpcore/blob/master/pulpcore/app/tasks/importer.py#L118
If your /tmp is small, and your export is Large, this can cause Bad Things to happen.
We should perhaps set dir= to the worker's work-directory?
| 2022-02-21T20:14:00 |
||
pulp/pulpcore | 2,246 | pulp__pulpcore-2246 | [
"2247"
] | 7ffa98926b532f44603f4c995a0fa0c24c205b7c | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -377,7 +377,7 @@ def validate_and_assemble(toc_filename):
current_task.refresh_from_db()
CreatedResource.objects.create(content_object=task_group)
- with tempfile.TemporaryDirectory() as temp_dir:
+ with tempfile.TemporaryDirectory(dir=".") as temp_dir:
with tarfile.open(path, "r:gz") as tar:
tar.extractall(path=temp_dir)
| PulpImporter assumes tempfiles can always go to /tmp
This issue is a copy of https://pulp.plan.io/issues/8610 , to allow us to backport the fix from core/3.17 into 14/15/16 correctly.
**Version**
core/3.14+
**Describe the bug**
importer.pulp_import uses tempfile.TemporaryDirectory() in places like this:
https://github.com/pulp/pulpcore/blob/master/pulpcore/app/tasks/importer.py#L118
If your /tmp is small, and your export is Large, this can cause Bad Things to happen.
We should perhaps set dir= to the worker's work-directory?
| 2022-02-21T20:21:42 |
||
pulp/pulpcore | 2,248 | pulp__pulpcore-2248 | [
"2247"
] | 211bd047caff90e887203ebaddf6da7276f23be7 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -378,7 +378,7 @@ def validate_and_assemble(toc_filename):
current_task.refresh_from_db()
CreatedResource.objects.create(content_object=task_group)
- with tempfile.TemporaryDirectory() as temp_dir:
+ with tempfile.TemporaryDirectory(dir=".") as temp_dir:
with tarfile.open(path, "r:gz") as tar:
tar.extractall(path=temp_dir)
| PulpImporter assumes tempfiles can always go to /tmp
This issue is a copy of https://pulp.plan.io/issues/8610 , to allow us to backport the fix from core/3.17 into 14/15/16 correctly.
**Version**
core/3.14+
**Describe the bug**
importer.pulp_import uses tempfile.TemporaryDirectory() in places like this:
https://github.com/pulp/pulpcore/blob/master/pulpcore/app/tasks/importer.py#L118
If your /tmp is small, and your export is Large, this can cause Bad Things to happen.
We should perhaps set dir= to the worker's work-directory?
| 2022-02-22T15:28:59 |
||
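The three rows above apply the same one-line fix to different branches. The idea, shown standalone below, is to anchor the scratch directory in the current working directory (a Pulp worker's task working dir) rather than the system-wide /tmp, which may be far smaller than the export being unpacked; the helper name is made up for illustration:

```python
import os
import tarfile
import tempfile


def unpack_export(tar_path):
    """Extract an export archive into scratch space beside the task, not /tmp."""
    with tempfile.TemporaryDirectory(dir=".") as temp_dir:
        with tarfile.open(tar_path, "r:gz") as tar:
            tar.extractall(path=temp_dir)
        # Inspect or import the extracted files before the directory is removed.
        return os.listdir(temp_dir)
```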
pulp/pulpcore | 2,261 | pulp__pulpcore-2261 | [
"2267"
] | d2f13d1322bb05a07f9cb206d634af2fc7647b65 | diff --git a/pulpcore/app/models/repository.py b/pulpcore/app/models/repository.py
--- a/pulpcore/app/models/repository.py
+++ b/pulpcore/app/models/repository.py
@@ -4,6 +4,7 @@
from contextlib import suppress
from gettext import gettext as _
from os import path
+from collections import defaultdict
import logging
import django
@@ -884,9 +885,11 @@ def _squash(self, repo_relations, next_version):
# delete any relationships added in the version being deleted and removed in the next one.
repo_relations.filter(version_added=self, version_removed=next_version).delete()
- # If the same content is deleted in version, but added back in next_version
- # set version_removed field in relation to None, and remove relation adding the content
- # in next_version
+ # If the same content is deleted in version, but added back in next_version then:
+ # - set version_removed field in relation to version_removed of the relation adding
+ # the content in next version because the content can be removed again after the
+ # next_version
+ # - and remove relation adding the content in next_version
content_added = repo_relations.filter(version_added=next_version).values_list("content_id")
# use list() to force the evaluation of the queryset, otherwise queryset is affected
@@ -897,13 +900,26 @@ def _squash(self, repo_relations, next_version):
)
)
- repo_relations.filter(
- version_removed=self, content_id__in=content_removed_and_readded
- ).update(version_removed=None)
-
- repo_relations.filter(
+ repo_contents_readded_in_next_version = repo_relations.filter(
version_added=next_version, content_id__in=content_removed_and_readded
- ).delete()
+ )
+
+ # Since the readded contents can be removed again by any subsequent version after the
+ # next version. Get the mapping of readded contents and their versions removed to use
+ # later. The version removed id will be None if a content is not removed.
+ version_removed_id_content_id_map = defaultdict(list)
+ for readded_repo_content in repo_contents_readded_in_next_version.iterator():
+ version_removed_id_content_id_map[readded_repo_content.version_removed_id].append(
+ readded_repo_content.content_id
+ )
+
+ repo_contents_readded_in_next_version.delete()
+
+ # Update the version removed of the readded contents
+ for version_removed_id, content_ids in version_removed_id_content_id_map.items():
+ repo_relations.filter(version_removed=self, content_id__in=content_ids).update(
+ version_removed_id=version_removed_id
+ )
# "squash" by moving other additions and removals forward to the next version
repo_relations.filter(version_added=self).update(version_added=next_version)
| diff --git a/pulpcore/tests/conftest_pulp_file.py b/pulpcore/tests/conftest_pulp_file.py
--- a/pulpcore/tests/conftest_pulp_file.py
+++ b/pulpcore/tests/conftest_pulp_file.py
@@ -8,6 +8,7 @@
from pulpcore.client.pulp_file import (
ContentFilesApi,
RepositoriesFileApi,
+ RepositoriesFileVersionsApi,
RemotesFileApi,
)
from pulp_smash.pulp3.utils import gen_repo
@@ -35,6 +36,11 @@ def file_repo_api_client(file_client):
return RepositoriesFileApi(file_client)
[email protected](scope="session")
+def file_repo_version_api_client(file_client):
+ return RepositoriesFileVersionsApi(file_client)
+
+
@pytest.fixture
def file_repo(file_repo_api_client, gen_object_with_cleanup):
return gen_object_with_cleanup(file_repo_api_client, gen_repo())
diff --git a/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py b/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
--- a/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
+++ b/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
@@ -1,8 +1,11 @@
"""Tests related to repository versions."""
import unittest
+import pytest
from random import choice, randint, sample
from time import sleep
from urllib.parse import urlsplit
+from tempfile import NamedTemporaryFile
+from hashlib import sha256
from pulp_smash import api, config, utils
from pulp_smash.exceptions import TaskReportError
@@ -396,6 +399,134 @@ def test_delete_publication(self):
self.client.get(publication["pulp_href"])
[email protected]
+def test_squash_repo_version(
+ file_repo_api_client, file_repo_version_api_client, content_file_api_client, file_repo
+):
+ """Test that the deletion of a repository version properly squashes the content.
+
+ - Setup versions like:
+ Version 0: <empty>
+ add: ABCDE
+ Version 1: ABCDE
+ delete: BCDE; add: FGHI
+ Version 2: AFGHI -- to be deleted
+ delete: GI; add: CD
+ Version 3: ACDFH -- to be squashed into
+ delete: DH; add: EI
+ Version 4: ACEFI
+ - Delete version 2.
+ - Check the content of all remaining versions.
+ """
+ content_units = {}
+ for name in ["A", "B", "C", "D", "E", "F", "G", "H", "I"]:
+ try:
+ content_units[name] = content_file_api_client.list(
+ relative_path=name, sha256=sha256(name.encode()).hexdigest()
+ ).results[0]
+ except IndexError:
+ with NamedTemporaryFile() as tf:
+ tf.write(name.encode())
+ tf.flush()
+ response = content_file_api_client.create(relative_path=name, file=tf.name)
+ result = monitor_task(response.task)
+ content_units[name] = content_file_api_client.read(result.created_resources[0])
+ response1 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "add_content_units": [
+ content.pulp_href
+ for key, content in content_units.items()
+ if key in ["A", "B", "C", "D", "E"]
+ ]
+ },
+ )
+
+ response2 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "remove_content_units": [
+ content.pulp_href
+ for key, content in content_units.items()
+ if key in ["B", "C", "D", "E"]
+ ],
+ "add_content_units": [
+ content.pulp_href
+ for key, content in content_units.items()
+ if key in ["F", "G", "H", "I"]
+ ],
+ },
+ )
+
+ response3 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "remove_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["G", "I"]
+ ],
+ "add_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["C", "D"]
+ ],
+ },
+ )
+
+ response4 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "remove_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["D", "H"]
+ ],
+ "add_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["E", "I"]
+ ],
+ },
+ )
+ version1 = file_repo_version_api_client.read(monitor_task(response1.task).created_resources[0])
+ version2 = file_repo_version_api_client.read(monitor_task(response2.task).created_resources[0])
+ version3 = file_repo_version_api_client.read(monitor_task(response3.task).created_resources[0])
+ version4 = file_repo_version_api_client.read(monitor_task(response4.task).created_resources[0])
+
+ # Check version state before deletion
+ assert version1.content_summary.added["file.file"]["count"] == 5
+ assert "file.file" not in version1.content_summary.removed
+ assert version2.content_summary.added["file.file"]["count"] == 4
+ assert version2.content_summary.removed["file.file"]["count"] == 4
+ assert version3.content_summary.added["file.file"]["count"] == 2
+ assert version3.content_summary.removed["file.file"]["count"] == 2
+ assert version4.content_summary.added["file.file"]["count"] == 2
+ assert version4.content_summary.removed["file.file"]["count"] == 2
+
+ content1 = content_file_api_client.list(repository_version=version1.pulp_href)
+ content2 = content_file_api_client.list(repository_version=version2.pulp_href)
+ content3 = content_file_api_client.list(repository_version=version3.pulp_href)
+ content4 = content_file_api_client.list(repository_version=version4.pulp_href)
+ assert set((content.relative_path for content in content1.results)) == {"A", "B", "C", "D", "E"}
+ assert set((content.relative_path for content in content2.results)) == {"A", "F", "G", "H", "I"}
+ assert set((content.relative_path for content in content3.results)) == {"A", "C", "D", "F", "H"}
+ assert set((content.relative_path for content in content4.results)) == {"A", "C", "E", "F", "I"}
+
+ monitor_task(file_repo_version_api_client.delete(version2.pulp_href).task)
+
+ # Check version state after deletion (Version 2 is gone...)
+ version1 = file_repo_version_api_client.read(version1.pulp_href)
+ version3 = file_repo_version_api_client.read(version3.pulp_href)
+ version4 = file_repo_version_api_client.read(version4.pulp_href)
+
+ assert version1.content_summary.added["file.file"]["count"] == 5
+ assert "file.file" not in version1.content_summary.removed
+ assert version3.content_summary.added["file.file"]["count"] == 2
+ assert version3.content_summary.removed["file.file"]["count"] == 2
+ assert version4.content_summary.added["file.file"]["count"] == 2
+ assert version4.content_summary.removed["file.file"]["count"] == 2
+
+ content1 = content_file_api_client.list(repository_version=version1.pulp_href)
+ content3 = content_file_api_client.list(repository_version=version3.pulp_href)
+ content4 = content_file_api_client.list(repository_version=version4.pulp_href)
+ assert set((content.relative_path for content in content1.results)) == {"A", "B", "C", "D", "E"}
+ assert set((content.relative_path for content in content3.results)) == {"A", "C", "D", "F", "H"}
+ assert set((content.relative_path for content in content4.results)) == {"A", "C", "E", "F", "I"}
+
+
class ContentImmutableRepoVersionTestCase(unittest.TestCase):
"""Test whether the content present in a repo version is immutable.
| Deleting repository versions can lose track of later content deletion
**Version**
pulpcore 3.18
**Describe the bug**
When deleting a repository version that deletes content that is added back in the subsequent version but deleted again in a later version, the deletion in that later version is lost.
**To Reproduce**
Steps to reproduce the behavior:
```bash
#!/bin/bash
set -eu
pulp file repository destroy --name test_delete_versions || true
pulp file repository create --name test_delete_versions
for NAME in "aaaa" "bbbb" "cccc" "dddd" "eeee" "ffff" "gggg" "hhhh" "jjjj"
do
echo "$NAME" > "$NAME"
pulp file content upload --relative-path "$NAME" --file "$NAME" || true
declare $NAME='{"sha256": "'"$(sha256sum --binary $NAME | cut -d" " -f1)"'", "relative_path": "'"$NAME"'"}'
done
pulp file repository content modify --repository test_delete_versions --add-content '['"$aaaa"', '"$bbbb"', '"$cccc"', '"$dddd"', '"$eeee"']'
pulp file repository content modify --repository test_delete_versions --remove-content '['"$bbbb"', '"$cccc"', '"$dddd"', '"$eeee"']' --add-content '['"$ffff"', '"$gggg"', '"$hhhh"', '"$jjjj"']'
pulp file repository content modify --repository test_delete_versions --remove-content '['"$gggg"', '"$jjjj"']' --add-content '['"$cccc"', '"$dddd"']'
pulp file repository content modify --repository test_delete_versions --remove-content '['"$dddd"', '"$hhhh"']' --add-content '['"$eeee"', '"$jjjj"']'
pulp file repository version list --repository test_delete_versions
# pulp file repository content list --repository test_delete_versions
pulp file repository version destroy --repository test_delete_versions --version 2
pulp file repository version list --repository test_delete_versions
if [ ! "$(pulp file repository content list --repository test_delete_versions --version 1 | jq -r '.[].relative_path' | sort)" = $'aaaa\nbbbb\ncccc\ndddd\neeee' ]
then
echo Version 1 is wrong.
fi
if [ ! "$(pulp file repository content list --repository test_delete_versions --version 3 | jq -r '.[].relative_path' | sort)" = $'aaaa\ncccc\ndddd\nffff\nhhhh' ]
then
echo Version 3 is wrong.
fi
if [ ! "$(pulp file repository content list --repository test_delete_versions --version 4 | jq -r '.[].relative_path' | sort)" = $'aaaa\ncccc\neeee\nffff\njjjj' ]
then
echo Version 4 is wrong.
fi
```
**Expected behavior**
Content in the repository versions should not change.
**Additional context**
| 2022-02-24T08:17:36 |
|
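For the row above, the expected per-version content can be worked out by replaying the add/remove operations from the reproducer. The ORM-free sketch below does exactly that, with letters standing in for content units; after version 2 is deleted, versions 3 and 4 must still resolve to the same sets (the removal of "D" in version 4, which was removed in version 2 and re-added in version 3, is the kind of relation the old squash logic lost):

```python
# (add, remove) operations per version, matching the reproducer script above.
ops = {
    1: ({"A", "B", "C", "D", "E"}, set()),
    2: ({"F", "G", "H", "I"}, {"B", "C", "D", "E"}),  # the version that gets deleted
    3: ({"C", "D"}, {"G", "I"}),
    4: ({"E", "I"}, {"D", "H"}),
}


def contents(up_to):
    """Replay the operations and return the content set of version `up_to`."""
    state = set()
    for number in sorted(ops):
        if number > up_to:
            break
        added, removed = ops[number]
        state = (state - removed) | added
    return sorted(state)


print(contents(3))  # ['A', 'C', 'D', 'F', 'H']
print(contents(4))  # ['A', 'C', 'E', 'F', 'I']
```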
pulp/pulpcore | 2,263 | pulp__pulpcore-2263 | [
"2262"
] | 0fd71da6666d07c7fa5c153ba427bfc90f6da96f | diff --git a/pulpcore/plugin/authentication/__init__.py b/pulpcore/plugin/authentication/__init__.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/plugin/authentication/__init__.py
@@ -0,0 +1 @@
+from pulpcore.app.authentication import PulpRemoteUserAuthentication # noqa
| Expose PulpRemoteUserAuthentication to plugin writers
This will allow plugin writers to take a rest_framework remote authentication class into account when building protected endpoints.
| 2022-02-24T12:50:34 |
||
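The row above only re-exports an existing class through the plugin API. A hypothetical plugin viewset using it could look like the sketch below (the viewset and its endpoint are made up; this assumes a Django/DRF environment with pulpcore installed):

```python
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response

from pulpcore.plugin.authentication import PulpRemoteUserAuthentication


class MyProtectedViewSet(viewsets.ViewSet):
    # Accept identities asserted by the fronting web server (REMOTE_USER style)
    # when deciding who may hit this endpoint.
    authentication_classes = [PulpRemoteUserAuthentication]
    permission_classes = [IsAuthenticated]

    def list(self, request):
        return Response({"user": str(request.user)})
```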
pulp/pulpcore | 2,264 | pulp__pulpcore-2264 | [
"2268"
] | a1788e9d5248ebb5e68aa3bc3f28d3db91d1d421 | diff --git a/pulpcore/app/models/repository.py b/pulpcore/app/models/repository.py
--- a/pulpcore/app/models/repository.py
+++ b/pulpcore/app/models/repository.py
@@ -11,8 +11,10 @@
from dynaconf import settings
from django.core.validators import MinValueValidator
from django.db import models, transaction
+from django.db.models import Q
from django.urls import reverse
from django_lifecycle import AFTER_UPDATE, BEFORE_DELETE, hook
+from rest_framework.exceptions import APIException
from pulpcore.app.util import batch_qs, get_view_name_for_model
from pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS
@@ -67,7 +69,7 @@ class Meta:
verbose_name_plural = "repositories"
def on_new_version(self, version):
- """Called when new repository versions are created.
+ """Called after a new repository version has been created.
Subclasses are expected to override this to do useful things.
@@ -219,10 +221,25 @@ def artifacts_for_version(version):
return Artifact.objects.filter(content__pk__in=version.content)
@hook(AFTER_UPDATE, when="retain_repo_versions", has_changed=True)
+ def _cleanup_old_versions_hook(self):
+ # Do not attempt to clean up anything, while there is a transaction involving repo versions
+ # still in flight.
+ transaction.on_commit(self.cleanup_old_versions)
+
def cleanup_old_versions(self):
"""Cleanup old repository versions based on retain_repo_versions."""
+ # I am still curious how, but it was reported that this state can happen in day to day
+ # operations but its easy to reproduce manually in the pulpcore shell:
+ # https://github.com/pulp/pulpcore/issues/2268
+ if self.versions.filter(complete=False).exists():
+ raise RuntimeError(
+ _("Attempt to cleanup old versions, while a new version is in flight.")
+ )
if self.retain_repo_versions:
- for version in self.versions.order_by("-number")[self.retain_repo_versions :]:
+ # Consider only completed versions for cleanup
+ for version in self.versions.complete().order_by("-number")[
+ self.retain_repo_versions :
+ ]:
_logger.info(
_("Deleting repository version {} due to version retention limit.").format(
version
@@ -527,7 +544,7 @@ class RepositoryVersionQuerySet(models.QuerySet):
"""A queryset that provides repository version filtering methods."""
def complete(self):
- return self.exclude(complete=False)
+ return self.filter(complete=True)
def with_content(self, content):
"""
@@ -923,22 +940,34 @@ def delete(self, **kwargs):
Deletion of a complete RepositoryVersion should be done in a RQ Job.
"""
if self.complete:
+ if self.repository.versions.complete().count() <= 1:
+ raise APIException(_("Attempt to delete the last remaining version."))
if settings.CACHE_ENABLED:
base_paths = self.distribution_set.values_list("base_path", flat=True)
if base_paths:
Cache().delete(base_key=base_paths)
- repo_relations = RepositoryContent.objects.filter(repository=self.repository)
- try:
- next_version = self.next()
- self._squash(repo_relations, next_version)
-
- except RepositoryVersion.DoesNotExist:
- # version is the latest version so simply update repo contents
- # and delete the version
- repo_relations.filter(version_added=self).delete()
- repo_relations.filter(version_removed=self).update(version_removed=None)
- super().delete(**kwargs)
+ # Handle the manipulation of the repository version content and its final deletion in
+ # the same transaction.
+ with transaction.atomic():
+ repo_relations = RepositoryContent.objects.filter(
+ repository=self.repository
+ ).select_for_update()
+ try:
+ next_version = self.next()
+ self._squash(repo_relations, next_version)
+
+ except RepositoryVersion.DoesNotExist:
+ # version is the latest version so simply update repo contents
+ # and delete the version
+ repo_relations.filter(version_added=self).delete()
+ repo_relations.filter(version_removed=self).update(version_removed=None)
+
+ if repo_relations.filter(Q(version_added=self) | Q(version_removed=self)).exists():
+ raise RuntimeError(
+ _("Some repo relations of this version were not translated.")
+ )
+ super().delete(**kwargs)
else:
with transaction.atomic():
@@ -983,6 +1012,10 @@ def __enter__(self):
Returns:
RepositoryVersion: self
"""
+ if self.complete:
+ raise RuntimeError(
+ _("This Repository version is complete. It cannot be modified further.")
+ )
repository = self.repository.cast()
repository.initialize_new_version(self)
return self
@@ -1011,15 +1044,16 @@ def __exit__(self, exc_type, exc_value, traceback):
unsupported_types = content_types_seen - content_types_supported
if unsupported_types:
raise ValueError(
- "Saw unsupported content types {}".format(unsupported_types)
+ _("Saw unsupported content types {}").format(unsupported_types)
)
self.complete = True
self.repository.next_version = self.number + 1
- self.repository.save()
- self.save()
+ with transaction.atomic():
+ self.repository.save()
+ self.save()
+ self._compute_counts()
self.repository.cleanup_old_versions()
- self._compute_counts()
repository.on_new_version(self)
except Exception:
self.delete()
| Repository new_version results in zero content in repo
**Version**
pulpcore==3.17.3, pulp_ansible==0.11.1 installed as part of galaxy_ng in openshift hosted cloud env
**Describe the bug**
For a Repository that had content, code ran to add content, and the resulting new RepositoryVersion had zero content.
**To Reproduce**
Steps to reproduce the behavior:
Run this code outside of a task (understood this should always be run in task via `dispatch()`)
```
with repo.new_version(base_version=repo.latest_version()) as dest_version:
dest_version.add_content(version_qs)
```
on a quiet system where no other code is interacting with the repo. When this is run with `repo.retain_repo_versions` = 1, it often results in `psycopg2.errors.ForeignKeyViolation: insert or update on table "core_repositoryversion" violates foreign key constraint "core_repositoryversi_base_version_id_5230045f_fk_core_repo"` and then no repository version and therefore no content in the repository. In attempting to reproduce, I have not seen this problem when `repo.retain_repo_versions` > 1.
**Expected behavior**
Expect RepositoryVersion to have previous Content plus newly added Content.
| 2022-02-24T15:56:48 |
||
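The guard added in the patch above can be read as two rules, restated outside the model for clarity: never run retention cleanup while an incomplete version exists, and only schedule cleanup once the surrounding transaction has committed. The function names below are illustrative; this assumes a Django project with pulpcore-like `Repository`/`RepositoryVersion` models:

```python
from django.db import transaction


def schedule_cleanup(repository):
    # Defer until the transaction that created/completed a version has
    # committed, so an in-flight version can never be swept up.
    transaction.on_commit(repository.cleanup_old_versions)


def cleanup_old_versions(repository):
    if repository.versions.filter(complete=False).exists():
        raise RuntimeError("Attempt to cleanup old versions, while a new version is in flight.")
    if repository.retain_repo_versions:
        # Only completed versions count toward (and are eligible for) retention.
        stale = repository.versions.filter(complete=True).order_by("-number")[
            repository.retain_repo_versions :
        ]
        for version in stale:
            version.delete()
```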
pulp/pulpcore | 2,271 | pulp__pulpcore-2271 | [
"2244"
] | 38fa0241f79fd526607da8c6d321e5ce5ef13574 | diff --git a/pulpcore/app/models/content.py b/pulpcore/app/models/content.py
--- a/pulpcore/app/models/content.py
+++ b/pulpcore/app/models/content.py
@@ -330,7 +330,7 @@ def init_and_validate(file, expected_digests=None, expected_size=None):
if expected_size:
if size != expected_size:
- raise SizeValidationError()
+ raise SizeValidationError(size, expected_size)
if expected_digests:
for algorithm, expected_digest in expected_digests.items():
@@ -340,8 +340,9 @@ def init_and_validate(file, expected_digests=None, expected_size=None):
algorithm
)
)
- if expected_digest != hashers[algorithm].hexdigest():
- raise DigestValidationError()
+ actual_digest = hashers[algorithm].hexdigest()
+ if expected_digest != actual_digest:
+ raise DigestValidationError(actual_digest, expected_digest)
attributes = {"size": size, "file": file}
for algorithm in Artifact.DIGEST_FIELDS:
@@ -443,7 +444,7 @@ def init_and_validate(file, expected_digests=None, expected_size=None):
if expected_size:
if size != expected_size:
- raise SizeValidationError()
+ raise SizeValidationError(size, expected_size)
if expected_digests:
for algorithm, expected_digest in expected_digests.items():
@@ -453,8 +454,9 @@ def init_and_validate(file, expected_digests=None, expected_size=None):
algorithm
)
)
- if expected_digest != hashers[algorithm].hexdigest():
- raise DigestValidationError()
+ actual_digest = hashers[algorithm].hexdigest()
+ if expected_digest != actual_digest:
+ raise DigestValidationError(actual_digest, expected_digest)
return PulpTemporaryFile(file=file)
diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py
--- a/pulpcore/download/base.py
+++ b/pulpcore/download/base.py
@@ -219,8 +219,9 @@ def validate_digests(self):
"""
if self.expected_digests:
for algorithm, expected_digest in self.expected_digests.items():
- if expected_digest != self._digests[algorithm].hexdigest():
- raise DigestValidationError(self.url)
+ actual_digest = self._digests[algorithm].hexdigest()
+ if actual_digest != expected_digest:
+ raise DigestValidationError(actual_digest, expected_digest, url=self.url)
def validate_size(self):
"""
@@ -232,8 +233,10 @@ def validate_size(self):
:meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.
"""
if self.expected_size:
- if self._size != self.expected_size:
- raise SizeValidationError(self.url)
+ actual_size = self._size
+ expected_size = self.expected_size
+ if actual_size != expected_size:
+ raise SizeValidationError(actual_size, expected_size, url=self.url)
async def run(self, extra_data=None):
"""
diff --git a/pulpcore/exceptions/validation.py b/pulpcore/exceptions/validation.py
--- a/pulpcore/exceptions/validation.py
+++ b/pulpcore/exceptions/validation.py
@@ -16,17 +16,24 @@ class DigestValidationError(ValidationError):
Raised when a file fails to validate a digest checksum.
"""
- def __init__(self, *args):
+ def __init__(self, actual, expected, *args, url=None, **kwargs):
super().__init__("PLP0003")
- if args:
- self.url = args[0]
+ self.url = url
+ self.actual = actual
+ self.expected = expected
def __str__(self):
- if hasattr(self, "url"):
- return _("A file located at the url {} failed validation due to checksum.").format(
- self.url
+ if self.url:
+ msg = _(
+ "A file located at the url {url} failed validation due to checksum. "
+ "Expected '{expected}', Actual '{actual}'"
)
- return _("A file failed validation due to checksum.")
+ return msg.format(url=self.url, expected=self.expected, actual=self.actual)
+ else:
+ msg = _(
+ "A file failed validation due to checksum. Expected '{expected}', Actual '{actual}'"
+ )
+ return msg.format(expected=self.expected, actual=self.actual)
class SizeValidationError(ValidationError):
@@ -34,15 +41,24 @@ class SizeValidationError(ValidationError):
Raised when a file fails to validate a size checksum.
"""
- def __init__(self, *args):
+ def __init__(self, actual, expected, *args, url=None, **kwargs):
super().__init__("PLP0004")
- if args:
- self.url = args[0]
+ self.url = url
+ self.actual = actual
+ self.expected = expected
def __str__(self):
- if hasattr(self, "url"):
- return _("A file located at the url {} failed validation due to size.").format(self.url)
- return _("A file failed validation due to size.")
+ if self.url:
+ msg = _(
+ "A file located at the url {url} failed validation due to size. "
+ "Expected '{expected}', Actual '{actual}'"
+ )
+ return msg.format(url=self.url, expected=self.expected, actual=self.actual)
+ else:
+ msg = _(
+ "A file failed validation due to size. Expected '{expected}', Actual '{actual}'"
+ )
+ return msg.format(expected=self.expected, actual=self.actual)
class MissingDigestValidationError(Exception):
| When an error occurs during content sync, due to hash conflicts, report conflicting hashes
**Is your feature request related to a problem? Please describe.**
Recently had an issue relating to synchronising content, due to a conflict between an expected hash and the actual hash of the data received.
Currently, we print out the URL of the content being grabbed, but no information relating to the conflicting hashes.
**Describe the solution you'd like**
I would like the error message to be extended to include both the expected hash as well as the hash of the retrieved data.
**Describe alternatives you've considered**
Spending a lot of time running around in a circle
**Additional context**
After making some code changes to achieve the above, it made tracking down https://github.com/pulp/pulp_rpm/issues/2407 much much much easier.
| Moving to pulpcore to be implemented in the core downloader error-reporting | 2022-03-01T04:48:04 |
|
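The change above threads the actual and expected values into the exception messages. A simplified, standalone equivalent of the digest path (the exception class here is a stand-in for pulpcore's, kept only to show the message format):

```python
import hashlib


class DigestValidationError(Exception):
    def __init__(self, actual, expected, url=None):
        where = f" located at the url {url}" if url else ""
        super().__init__(
            f"A file{where} failed validation due to checksum. "
            f"Expected '{expected}', Actual '{actual}'"
        )


def validate_sha256(path, expected_digest, url=None):
    """Hash the file in chunks and report both digests on mismatch."""
    hasher = hashlib.sha256()
    with open(path, "rb") as fp:
        for chunk in iter(lambda: fp.read(1024 * 1024), b""):
            hasher.update(chunk)
    actual_digest = hasher.hexdigest()
    if actual_digest != expected_digest:
        raise DigestValidationError(actual_digest, expected_digest, url=url)
```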
pulp/pulpcore | 2,272 | pulp__pulpcore-2272 | [
"2268"
] | cb3405436dba1968c806b3f3f4048f8f74760e54 | diff --git a/pulpcore/app/models/repository.py b/pulpcore/app/models/repository.py
--- a/pulpcore/app/models/repository.py
+++ b/pulpcore/app/models/repository.py
@@ -11,8 +11,10 @@
from dynaconf import settings
from django.core.validators import MinValueValidator
from django.db import models, transaction
+from django.db.models import Q
from django.urls import reverse
from django_lifecycle import AFTER_UPDATE, BEFORE_DELETE, hook
+from rest_framework.exceptions import APIException
from pulpcore.app.util import batch_qs, get_view_name_for_model
from pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS
@@ -67,7 +69,7 @@ class Meta:
verbose_name_plural = "repositories"
def on_new_version(self, version):
- """Called when new repository versions are created.
+ """Called after a new repository version has been created.
Subclasses are expected to override this to do useful things.
@@ -219,10 +221,25 @@ def artifacts_for_version(version):
return Artifact.objects.filter(content__pk__in=version.content)
@hook(AFTER_UPDATE, when="retain_repo_versions", has_changed=True)
+ def _cleanup_old_versions_hook(self):
+ # Do not attempt to clean up anything, while there is a transaction involving repo versions
+ # still in flight.
+ transaction.on_commit(self.cleanup_old_versions)
+
def cleanup_old_versions(self):
"""Cleanup old repository versions based on retain_repo_versions."""
+ # I am still curious how, but it was reported that this state can happen in day to day
+ # operations but its easy to reproduce manually in the pulpcore shell:
+ # https://github.com/pulp/pulpcore/issues/2268
+ if self.versions.filter(complete=False).exists():
+ raise RuntimeError(
+ _("Attempt to cleanup old versions, while a new version is in flight.")
+ )
if self.retain_repo_versions:
- for version in self.versions.order_by("-number")[self.retain_repo_versions :]:
+ # Consider only completed versions for cleanup
+ for version in self.versions.complete().order_by("-number")[
+ self.retain_repo_versions :
+ ]:
_logger.info(
_("Deleting repository version {} due to version retention limit.").format(
version
@@ -527,7 +544,7 @@ class RepositoryVersionQuerySet(models.QuerySet):
"""A queryset that provides repository version filtering methods."""
def complete(self):
- return self.exclude(complete=False)
+ return self.filter(complete=True)
def with_content(self, content):
"""
@@ -923,22 +940,34 @@ def delete(self, **kwargs):
Deletion of a complete RepositoryVersion should be done in a RQ Job.
"""
if self.complete:
+ if self.repository.versions.complete().count() <= 1:
+ raise APIException(_("Attempt to delete the last remaining version."))
if settings.CACHE_ENABLED:
base_paths = self.distribution_set.values_list("base_path", flat=True)
if base_paths:
Cache().delete(base_key=base_paths)
- repo_relations = RepositoryContent.objects.filter(repository=self.repository)
- try:
- next_version = self.next()
- self._squash(repo_relations, next_version)
-
- except RepositoryVersion.DoesNotExist:
- # version is the latest version so simply update repo contents
- # and delete the version
- repo_relations.filter(version_added=self).delete()
- repo_relations.filter(version_removed=self).update(version_removed=None)
- super().delete(**kwargs)
+ # Handle the manipulation of the repository version content and its final deletion in
+ # the same transaction.
+ with transaction.atomic():
+ repo_relations = RepositoryContent.objects.filter(
+ repository=self.repository
+ ).select_for_update()
+ try:
+ next_version = self.next()
+ self._squash(repo_relations, next_version)
+
+ except RepositoryVersion.DoesNotExist:
+ # version is the latest version so simply update repo contents
+ # and delete the version
+ repo_relations.filter(version_added=self).delete()
+ repo_relations.filter(version_removed=self).update(version_removed=None)
+
+ if repo_relations.filter(Q(version_added=self) | Q(version_removed=self)).exists():
+ raise RuntimeError(
+ _("Some repo relations of this version were not translated.")
+ )
+ super().delete(**kwargs)
else:
with transaction.atomic():
@@ -983,6 +1012,10 @@ def __enter__(self):
Returns:
RepositoryVersion: self
"""
+ if self.complete:
+ raise RuntimeError(
+ _("This Repository version is complete. It cannot be modified further.")
+ )
repository = self.repository.cast()
repository.initialize_new_version(self)
return self
@@ -1011,15 +1044,16 @@ def __exit__(self, exc_type, exc_value, traceback):
unsupported_types = content_types_seen - content_types_supported
if unsupported_types:
raise ValueError(
- "Saw unsupported content types {}".format(unsupported_types)
+ _("Saw unsupported content types {}").format(unsupported_types)
)
self.complete = True
self.repository.next_version = self.number + 1
- self.repository.save()
- self.save()
+ with transaction.atomic():
+ self.repository.save()
+ self.save()
+ self._compute_counts()
self.repository.cleanup_old_versions()
- self._compute_counts()
repository.on_new_version(self)
except Exception:
self.delete()
| Repository new_version results in zero content in repo
**Version**
pulpcore==3.17.3, pulp_ansible==0.11.1 installed as part of galaxy_ng in openshift hosted cloud env
**Describe the bug**
For a Repository that had content, code ran to add content, and the resulting new RepositoryVersion had zero content.
**To Reproduce**
Steps to reproduce the behavior:
Run this code outside of a task (understood this should always be run in task via `dispatch()`)
```
with repo.new_version(base_version=repo.latest_version()) as dest_version:
dest_version.add_content(version_qs)
```
on a quiet system where no other code is interacting with the repo. When this is run with `repo.retain_repo_versions` = 1, it often results in `psycopg2.errors.ForeignKeyViolation: insert or update on table "core_repositoryversion" violates foreign key constraint "core_repositoryversi_base_version_id_5230045f_fk_core_repo"` and then no repository version and therefore no content in the repository. In attempting to reproduce, I have not seen this problem when `repo.retain_repo_versions` > 1.
**Expected behavior**
Expect RepositoryVersion to have previous Content plus newly added Content.
| 2022-03-01T09:35:37 |
||
pulp/pulpcore | 2,273 | pulp__pulpcore-2273 | [
"2268"
] | fdb807d3048830b54b77269ca137f690c55ec25d | diff --git a/pulpcore/app/models/repository.py b/pulpcore/app/models/repository.py
--- a/pulpcore/app/models/repository.py
+++ b/pulpcore/app/models/repository.py
@@ -11,8 +11,10 @@
from dynaconf import settings
from django.core.validators import MinValueValidator
from django.db import models, transaction
+from django.db.models import Q
from django.urls import reverse
from django_lifecycle import AFTER_UPDATE, BEFORE_DELETE, hook
+from rest_framework.exceptions import APIException
from pulpcore.app.util import batch_qs, get_view_name_for_model
from pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS
@@ -67,7 +69,7 @@ class Meta:
verbose_name_plural = "repositories"
def on_new_version(self, version):
- """Called when new repository versions are created.
+ """Called after a new repository version has been created.
Subclasses are expected to override this to do useful things.
@@ -219,10 +221,25 @@ def artifacts_for_version(version):
return Artifact.objects.filter(content__pk__in=version.content)
@hook(AFTER_UPDATE, when="retain_repo_versions", has_changed=True)
+ def _cleanup_old_versions_hook(self):
+ # Do not attempt to clean up anything, while there is a transaction involving repo versions
+ # still in flight.
+ transaction.on_commit(self.cleanup_old_versions)
+
def cleanup_old_versions(self):
"""Cleanup old repository versions based on retain_repo_versions."""
+ # I am still curious how, but it was reported that this state can happen in day to day
+ # operations but its easy to reproduce manually in the pulpcore shell:
+ # https://github.com/pulp/pulpcore/issues/2268
+ if self.versions.filter(complete=False).exists():
+ raise RuntimeError(
+ _("Attempt to cleanup old versions, while a new version is in flight.")
+ )
if self.retain_repo_versions:
- for version in self.versions.order_by("-number")[self.retain_repo_versions :]:
+ # Consider only completed versions for cleanup
+ for version in self.versions.complete().order_by("-number")[
+ self.retain_repo_versions :
+ ]:
_logger.info(
_("Deleting repository version {} due to version retention limit.").format(
version
@@ -527,7 +544,7 @@ class RepositoryVersionQuerySet(models.QuerySet):
"""A queryset that provides repository version filtering methods."""
def complete(self):
- return self.exclude(complete=False)
+ return self.filter(complete=True)
def with_content(self, content):
"""
@@ -923,22 +940,34 @@ def delete(self, **kwargs):
Deletion of a complete RepositoryVersion should be done in a RQ Job.
"""
if self.complete:
+ if self.repository.versions.complete().count() <= 1:
+ raise APIException(_("Attempt to delete the last remaining version."))
if settings.CACHE_ENABLED:
base_paths = self.distribution_set.values_list("base_path", flat=True)
if base_paths:
Cache().delete(base_key=base_paths)
- repo_relations = RepositoryContent.objects.filter(repository=self.repository)
- try:
- next_version = self.next()
- self._squash(repo_relations, next_version)
-
- except RepositoryVersion.DoesNotExist:
- # version is the latest version so simply update repo contents
- # and delete the version
- repo_relations.filter(version_added=self).delete()
- repo_relations.filter(version_removed=self).update(version_removed=None)
- super().delete(**kwargs)
+ # Handle the manipulation of the repository version content and its final deletion in
+ # the same transaction.
+ with transaction.atomic():
+ repo_relations = RepositoryContent.objects.filter(
+ repository=self.repository
+ ).select_for_update()
+ try:
+ next_version = self.next()
+ self._squash(repo_relations, next_version)
+
+ except RepositoryVersion.DoesNotExist:
+ # version is the latest version so simply update repo contents
+ # and delete the version
+ repo_relations.filter(version_added=self).delete()
+ repo_relations.filter(version_removed=self).update(version_removed=None)
+
+ if repo_relations.filter(Q(version_added=self) | Q(version_removed=self)).exists():
+ raise RuntimeError(
+ _("Some repo relations of this version were not translated.")
+ )
+ super().delete(**kwargs)
else:
with transaction.atomic():
@@ -983,6 +1012,10 @@ def __enter__(self):
Returns:
RepositoryVersion: self
"""
+ if self.complete:
+ raise RuntimeError(
+ _("This Repository version is complete. It cannot be modified further.")
+ )
repository = self.repository.cast()
repository.initialize_new_version(self)
return self
@@ -1011,15 +1044,16 @@ def __exit__(self, exc_type, exc_value, traceback):
unsupported_types = content_types_seen - content_types_supported
if unsupported_types:
raise ValueError(
- "Saw unsupported content types {}".format(unsupported_types)
+ _("Saw unsupported content types {}").format(unsupported_types)
)
self.complete = True
self.repository.next_version = self.number + 1
- self.repository.save()
- self.save()
+ with transaction.atomic():
+ self.repository.save()
+ self.save()
+ self._compute_counts()
self.repository.cleanup_old_versions()
- self._compute_counts()
repository.on_new_version(self)
except Exception:
self.delete()
| Repository new_version results in zero content in repo
**Version**
pulpcore==3.17.3, pulp_ansible==0.11.1 installed as part of galaxy_ng in openshift hosted cloud env
**Describe the bug**
For a Repository that already had content, code was run to add more content, and the resulting new RepositoryVersion had zero content.
**To Reproduce**
Steps to reproduce the behavior:
Run this code outside of a task (understood that this should always be run in a task via `dispatch()`):
```
with repo.new_version(base_version=repo.latest_version()) as dest_version:
dest_version.add_content(version_qs)
```
on a quiet system where no other code is interacting with the repo. When this is run with `repo.retain_repo_versions` = 1, it often results in `psycopg2.errors.ForeignKeyViolation: insert or update on table "core_repositoryversion" violates foreign key constraint "core_repositoryversi_base_version_id_5230045f_fk_core_repo"`, after which there is no new repository version and therefore no content in the repository. While attempting to reproduce this, I have not seen the problem when `repo.retain_repo_versions` > 1.
**Expected behavior**
Expect RepositoryVersion to have previous Content plus newly added Content.
| 2022-03-01T09:35:59 |
||
pulp/pulpcore | 2,274 | pulp__pulpcore-2274 | [
"2084"
] | fd92f81d5adaa93093c5a93755696ec531e77e57 | diff --git a/pulpcore/app/models/repository.py b/pulpcore/app/models/repository.py
--- a/pulpcore/app/models/repository.py
+++ b/pulpcore/app/models/repository.py
@@ -11,8 +11,10 @@
from dynaconf import settings
from django.core.validators import MinValueValidator
from django.db import models, transaction
+from django.db.models import Q
from django.urls import reverse
from django_lifecycle import AFTER_UPDATE, BEFORE_DELETE, hook
+from rest_framework.exceptions import APIException
from pulpcore.app.util import batch_qs, get_view_name_for_model
from pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS
@@ -67,7 +69,7 @@ class Meta:
verbose_name_plural = "repositories"
def on_new_version(self, version):
- """Called when new repository versions are created.
+ """Called after a new repository version has been created.
Subclasses are expected to override this to do useful things.
@@ -200,10 +202,25 @@ def artifacts_for_version(version):
return Artifact.objects.filter(content__pk__in=version.content)
@hook(AFTER_UPDATE, when="retain_repo_versions", has_changed=True)
+ def _cleanup_old_versions_hook(self):
+ # Do not attempt to clean up anything, while there is a transaction involving repo versions
+ # still in flight.
+ transaction.on_commit(self.cleanup_old_versions)
+
def cleanup_old_versions(self):
"""Cleanup old repository versions based on retain_repo_versions."""
+ # I am still curious how, but it was reported that this state can happen in day to day
+ # operations but its easy to reproduce manually in the pulpcore shell:
+ # https://github.com/pulp/pulpcore/issues/2268
+ if self.versions.filter(complete=False).exists():
+ raise RuntimeError(
+ _("Attempt to cleanup old versions, while a new version is in flight.")
+ )
if self.retain_repo_versions:
- for version in self.versions.order_by("-number")[self.retain_repo_versions :]:
+ # Consider only completed versions for cleanup
+ for version in self.versions.complete().order_by("-number")[
+ self.retain_repo_versions :
+ ]:
_logger.info(
_("Deleting repository version {} due to version retention limit.").format(
version
@@ -496,7 +513,7 @@ class RepositoryVersionQuerySet(models.QuerySet):
"""A queryset that provides repository version filtering methods."""
def complete(self):
- return self.exclude(complete=False)
+ return self.filter(complete=True)
def with_content(self, content):
"""
@@ -878,6 +895,9 @@ def _squash(self, repo_relations, next_version):
repo_relations.filter(version_added=self).update(version_added=next_version)
repo_relations.filter(version_removed=self).update(version_removed=next_version)
+ # Update next version's counts as they have been modified
+ next_version._compute_counts()
+
def delete(self, **kwargs):
"""
Deletes a RepositoryVersion
@@ -889,22 +909,34 @@ def delete(self, **kwargs):
Deletion of a complete RepositoryVersion should be done in a RQ Job.
"""
if self.complete:
+ if self.repository.versions.complete().count() <= 1:
+ raise APIException(_("Attempt to delete the last remaining version."))
if settings.CACHE_ENABLED:
base_paths = self.distribution_set.values_list("base_path", flat=True)
if base_paths:
Cache().delete(base_key=base_paths)
- repo_relations = RepositoryContent.objects.filter(repository=self.repository)
- try:
- next_version = self.next()
- self._squash(repo_relations, next_version)
-
- except RepositoryVersion.DoesNotExist:
- # version is the latest version so simply update repo contents
- # and delete the version
- repo_relations.filter(version_added=self).delete()
- repo_relations.filter(version_removed=self).update(version_removed=None)
- super().delete(**kwargs)
+ # Handle the manipulation of the repository version content and its final deletion in
+ # the same transaction.
+ with transaction.atomic():
+ repo_relations = RepositoryContent.objects.filter(
+ repository=self.repository
+ ).select_for_update()
+ try:
+ next_version = self.next()
+ self._squash(repo_relations, next_version)
+
+ except RepositoryVersion.DoesNotExist:
+ # version is the latest version so simply update repo contents
+ # and delete the version
+ repo_relations.filter(version_added=self).delete()
+ repo_relations.filter(version_removed=self).update(version_removed=None)
+
+ if repo_relations.filter(Q(version_added=self) | Q(version_removed=self)).exists():
+ raise RuntimeError(
+ _("Some repo relations of this version were not translated.")
+ )
+ super().delete(**kwargs)
else:
with transaction.atomic():
@@ -949,6 +981,10 @@ def __enter__(self):
Returns:
RepositoryVersion: self
"""
+ if self.complete:
+ raise RuntimeError(
+ _("This Repository version is complete. It cannot be modified further.")
+ )
return self
def __exit__(self, exc_type, exc_value, traceback):
@@ -975,15 +1011,16 @@ def __exit__(self, exc_type, exc_value, traceback):
unsupported_types = content_types_seen - content_types_supported
if unsupported_types:
raise ValueError(
- "Saw unsupported content types {}".format(unsupported_types)
+ _("Saw unsupported content types {}").format(unsupported_types)
)
self.complete = True
self.repository.next_version = self.number + 1
- self.repository.save()
- self.save()
+ with transaction.atomic():
+ self.repository.save()
+ self.save()
+ self._compute_counts()
self.repository.cleanup_old_versions()
- self._compute_counts()
repository.on_new_version(self)
except Exception:
self.delete()
| diff --git a/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py b/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
--- a/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
+++ b/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
@@ -357,12 +357,16 @@ def test_delete_last_version(self):
def test_delete_middle_version(self):
"""Delete a middle version."""
- index = randint(1, len(self.repo_version_hrefs) - 2)
+ index = randint(1, len(self.repo_version_hrefs) - 3)
delete_version(self.repo, self.repo_version_hrefs[index])
with self.assertRaises(HTTPError):
get_content(self.repo, self.repo_version_hrefs[index])
+ # Check added count is updated properly
+ added = get_added_content_summary(self.repo, self.repo_version_hrefs[index + 1])
+ self.assertEqual(added["file.file"], 2)
+
for repo_version_href in self.repo_version_hrefs[index + 1 :]:
artifact_paths = get_artifact_paths(self.repo, repo_version_href)
self.assertIn(self.content[index]["artifact"], artifact_paths)
| Content summary shows incorrect numbers when previous version is deleted
Author: daviddavis (daviddavis)
Redmine Issue: 8798, https://pulp.plan.io/issues/8798
---
Not sure if this is a bug or intentional, but I noticed it while working on #8793.
If you create two repo versions (version 1 and 2), adding one content item in each, and then delete version 1, the "added" count for version 2 still shows 1, even though, compared to version 0, version 2 has 2 items added:
```
"content_summary": {
"added": {
"file.file": {
"count": 1,
"href": "/pulp/api/v3/content/file/files/?repository_version_added=/pulp/api/v3/repositories/file/file/f53f39d7-341d-4c4c-8835-1fcddb359791/versions/2/"
}
},
"removed": {},
"present": {
"file.file": {
"count": 2,
"href": "/pulp/api/v3/content/file/files/?repository_version=/pulp/api/v3/repositories/file/file/f53f39d7-341d-4c4c-8835-1fcddb359791/versions/2/"
}
}
```
Also, when you query the content through the href provided by the content summary, you get back 2 units, which doesn't match the count of 1 provided:
```
$ http :/pulp/api/v3/content/file/files/?repository_version_added=/pulp/api/v3/repositories/file/file/f53f39d7-341d-4c4c-8835-1fcddb359791/versions/2/
{
"count": 2,
"next": null,
"previous": null,
"results": [...]
}
```
## Steps to reproduce
```
pulp file repository create --name test1 --remote file
pulp file repository sync --name test1
c1=$(pulp file content list | jq -r ".[0] | with_entries(select([.key] | inside([\"sha256\", \"relative_path\"])))")
c2=$(pulp file content list | jq -r ".[1] | with_entries(select([.key] | inside([\"sha256\", \"relative_path\"])))")
pulp file repository create --name test2
pulp file repository content modify --repository test2 --add-content "[$c1]"
pulp file repository content modify --repository test2 --add-content "[$c2]"
pulp file repository version destroy --repository test2 --version 1
pulp file repository version show --repository test2 --version 2
# then query the content added
http :/pulp/api/v3/content/file/files/?repository_version_added=/pulp/api/v3/repositories/file/file/f53f39d7-341d-4c4c-8835-1fcddb359791/versions/2/
```
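If a version's stored counters are already wrong, one way to rebuild them from the pulpcore shell is sketched below. `_compute_counts()` is the same (private) helper the patch above calls after squashing; the repository name and version number come from the reproduction steps:
```python
# run via pulpcore-manager shell (or django-admin shell)
from pulpcore.app.models import RepositoryVersion

version = RepositoryVersion.objects.get(repository__name="test2", number=2)
version._compute_counts()  # rebuilds the added/removed/present counters
```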
| From: @gerrod3 (gerrod)
Date: 2022-01-10T10:54:28Z
---
Applied in changeset commit:pulpcore|d767beb104fa95da4ee6521711cf6953eabda7f2. | 2022-03-01T09:47:36 |
pulp/pulpcore | 2,275 | pulp__pulpcore-2275 | [
"2276"
] | 6b842585c80b964e236d7b02dcada2a4453bbf78 | diff --git a/pulpcore/app/viewsets/user.py b/pulpcore/app/viewsets/user.py
--- a/pulpcore/app/viewsets/user.py
+++ b/pulpcore/app/viewsets/user.py
@@ -481,6 +481,7 @@ class Meta:
model = Role
fields = {
"name": NAME_FILTER_OPTIONS,
+ "description": ["exact", "iexact", "icontains", "contains"],
"locked": ["exact"],
}
| Filter user roles by description
**Is your feature request related to a problem? Please describe.**
We would like to give users the ability to filter roles by name and by description. pulpcore does not allow filtering by description yet.
**Describe the solution you'd like**
Allow filtering by description. A case-insensitive contains filter is enough for this field.
**Additional context**
Connected issue: issues.redhat.com/browse/AAH-1409
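A rough usage sketch of the requested filter: the `description__icontains` parameter follows from the filter options added in the patch above, while the host, credentials, and search term are placeholders.
```python
import requests

# List roles whose description mentions "collection" (case-insensitive).
response = requests.get(
    "https://pulp.example.com/pulp/api/v3/roles/",
    params={"description__icontains": "collection"},
    auth=("admin", "password"),
)
for role in response.json()["results"]:
    print(role["name"], "-", role["description"])
```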
| 2022-03-01T14:42:00 |
||
pulp/pulpcore | 2,277 | pulp__pulpcore-2277 | [
"2084"
] | 0eb1067c5bddf26e8e9fa7e3a91f99d0adcc736b | diff --git a/pulpcore/app/models/repository.py b/pulpcore/app/models/repository.py
--- a/pulpcore/app/models/repository.py
+++ b/pulpcore/app/models/repository.py
@@ -878,6 +878,9 @@ def _squash(self, repo_relations, next_version):
repo_relations.filter(version_added=self).update(version_added=next_version)
repo_relations.filter(version_removed=self).update(version_removed=next_version)
+ # Update next version's counts as they have been modified
+ next_version._compute_counts()
+
def delete(self, **kwargs):
"""
Deletes a RepositoryVersion
| diff --git a/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py b/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
--- a/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
+++ b/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
@@ -357,12 +357,16 @@ def test_delete_last_version(self):
def test_delete_middle_version(self):
"""Delete a middle version."""
- index = randint(1, len(self.repo_version_hrefs) - 2)
+ index = randint(1, len(self.repo_version_hrefs) - 3)
delete_version(self.repo, self.repo_version_hrefs[index])
with self.assertRaises(HTTPError):
get_content(self.repo, self.repo_version_hrefs[index])
+ # Check added count is updated properly
+ added = get_added_content_summary(self.repo, self.repo_version_hrefs[index + 1])
+ self.assertEqual(added["file.file"], 2)
+
for repo_version_href in self.repo_version_hrefs[index + 1 :]:
artifact_paths = get_artifact_paths(self.repo, repo_version_href)
self.assertIn(self.content[index]["artifact"], artifact_paths)
| Content summary shows incorrect numbers when previous version is deleted
Author: daviddavis (daviddavis)
Redmine Issue: 8798, https://pulp.plan.io/issues/8798
---
Not sure if this is a bug or intentional, but I noticed it while working on #8793.
If you create two repo versions (version 1 and 2), adding one content item in each, and then delete version 1, the "added" count for version 2 still shows 1, even though, compared to version 0, version 2 has 2 items added:
```
"content_summary": {
"added": {
"file.file": {
"count": 1,
"href": "/pulp/api/v3/content/file/files/?repository_version_added=/pulp/api/v3/repositories/file/file/f53f39d7-341d-4c4c-8835-1fcddb359791/versions/2/"
}
},
"removed": {},
"present": {
"file.file": {
"count": 2,
"href": "/pulp/api/v3/content/file/files/?repository_version=/pulp/api/v3/repositories/file/file/f53f39d7-341d-4c4c-8835-1fcddb359791/versions/2/"
}
}
```
Also, when you query the content through the href provided by the content summary, you get back 2 units, which doesn't match the count of 1 provided:
```
$ http :/pulp/api/v3/content/file/files/?repository_version_added=/pulp/api/v3/repositories/file/file/f53f39d7-341d-4c4c-8835-1fcddb359791/versions/2/
{
"count": 2,
"next": null,
"previous": null,
"results": [...]
}
```
## Steps to reproduce
```
pulp file repository create --name test1 --remote file
pulp file repository sync --name test1
c1=$(pulp file content list | jq -r ".[0] | with_entries(select([.key] | inside([\"sha256\", \"relative_path\"])))")
c2=$(pulp file content list | jq -r ".[1] | with_entries(select([.key] | inside([\"sha256\", \"relative_path\"])))")
pulp file repository create --name test2
pulp file repository content modify --repository test2 --add-content "[$c1]"
pulp file repository content modify --repository test2 --add-content "[$c2]"
pulp file repository version destroy --repository test2 --version 1
pulp file repository version show --repository test2 --version 2
# then query the content added
http :/pulp/api/v3/content/file/files/?repository_version_added=/pulp/api/v3/repositories/file/file/f53f39d7-341d-4c4c-8835-1fcddb359791/versions/2/
```
| 2022-03-01T17:41:40 |
|
pulp/pulpcore | 2,290 | pulp__pulpcore-2290 | [
"2268"
] | 20e87f75e92a232ee610032ab4f30dab8c79774a | diff --git a/pulpcore/app/models/repository.py b/pulpcore/app/models/repository.py
--- a/pulpcore/app/models/repository.py
+++ b/pulpcore/app/models/repository.py
@@ -11,8 +11,10 @@
from dynaconf import settings
from django.core.validators import MinValueValidator
from django.db import models, transaction
+from django.db.models import Q
from django.urls import reverse
from django_lifecycle import AFTER_UPDATE, BEFORE_DELETE, hook
+from rest_framework.exceptions import APIException
from pulpcore.app.util import batch_qs, get_view_name_for_model
from pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS
@@ -67,7 +69,7 @@ class Meta:
verbose_name_plural = "repositories"
def on_new_version(self, version):
- """Called when new repository versions are created.
+ """Called after a new repository version has been created.
Subclasses are expected to override this to do useful things.
@@ -200,10 +202,25 @@ def artifacts_for_version(version):
return Artifact.objects.filter(content__pk__in=version.content)
@hook(AFTER_UPDATE, when="retain_repo_versions", has_changed=True)
+ def _cleanup_old_versions_hook(self):
+ # Do not attempt to clean up anything, while there is a transaction involving repo versions
+ # still in flight.
+ transaction.on_commit(self.cleanup_old_versions)
+
def cleanup_old_versions(self):
"""Cleanup old repository versions based on retain_repo_versions."""
+ # I am still curious how, but it was reported that this state can happen in day to day
+ # operations but its easy to reproduce manually in the pulpcore shell:
+ # https://github.com/pulp/pulpcore/issues/2268
+ if self.versions.filter(complete=False).exists():
+ raise RuntimeError(
+ _("Attempt to cleanup old versions, while a new version is in flight.")
+ )
if self.retain_repo_versions:
- for version in self.versions.order_by("-number")[self.retain_repo_versions :]:
+ # Consider only completed versions for cleanup
+ for version in self.versions.complete().order_by("-number")[
+ self.retain_repo_versions :
+ ]:
_logger.info(
_("Deleting repository version {} due to version retention limit.").format(
version
@@ -496,7 +513,7 @@ class RepositoryVersionQuerySet(models.QuerySet):
"""A queryset that provides repository version filtering methods."""
def complete(self):
- return self.exclude(complete=False)
+ return self.filter(complete=True)
def with_content(self, content):
"""
@@ -892,22 +909,34 @@ def delete(self, **kwargs):
Deletion of a complete RepositoryVersion should be done in a RQ Job.
"""
if self.complete:
+ if self.repository.versions.complete().count() <= 1:
+ raise APIException(_("Attempt to delete the last remaining version."))
if settings.CACHE_ENABLED:
base_paths = self.distribution_set.values_list("base_path", flat=True)
if base_paths:
Cache().delete(base_key=base_paths)
- repo_relations = RepositoryContent.objects.filter(repository=self.repository)
- try:
- next_version = self.next()
- self._squash(repo_relations, next_version)
-
- except RepositoryVersion.DoesNotExist:
- # version is the latest version so simply update repo contents
- # and delete the version
- repo_relations.filter(version_added=self).delete()
- repo_relations.filter(version_removed=self).update(version_removed=None)
- super().delete(**kwargs)
+ # Handle the manipulation of the repository version content and its final deletion in
+ # the same transaction.
+ with transaction.atomic():
+ repo_relations = RepositoryContent.objects.filter(
+ repository=self.repository
+ ).select_for_update()
+ try:
+ next_version = self.next()
+ self._squash(repo_relations, next_version)
+
+ except RepositoryVersion.DoesNotExist:
+ # version is the latest version so simply update repo contents
+ # and delete the version
+ repo_relations.filter(version_added=self).delete()
+ repo_relations.filter(version_removed=self).update(version_removed=None)
+
+ if repo_relations.filter(Q(version_added=self) | Q(version_removed=self)).exists():
+ raise RuntimeError(
+ _("Some repo relations of this version were not translated.")
+ )
+ super().delete(**kwargs)
else:
with transaction.atomic():
@@ -952,6 +981,10 @@ def __enter__(self):
Returns:
RepositoryVersion: self
"""
+ if self.complete:
+ raise RuntimeError(
+ _("This Repository version is complete. It cannot be modified further.")
+ )
return self
def __exit__(self, exc_type, exc_value, traceback):
@@ -978,15 +1011,16 @@ def __exit__(self, exc_type, exc_value, traceback):
unsupported_types = content_types_seen - content_types_supported
if unsupported_types:
raise ValueError(
- "Saw unsupported content types {}".format(unsupported_types)
+ _("Saw unsupported content types {}").format(unsupported_types)
)
self.complete = True
self.repository.next_version = self.number + 1
- self.repository.save()
- self.save()
+ with transaction.atomic():
+ self.repository.save()
+ self.save()
+ self._compute_counts()
self.repository.cleanup_old_versions()
- self._compute_counts()
repository.on_new_version(self)
except Exception:
self.delete()
| Repository new_version results in zero content in repo
**Version**
pulpcore==3.17.3, pulp_ansible==0.11.1 installed as part of galaxy_ng in openshift hosted cloud env
**Describe the bug**
For a Repository that already had content, code was run to add more content, and the resulting new RepositoryVersion had zero content.
**To Reproduce**
Steps to reproduce the behavior:
Run this code outside of a task (understood that this should always be run in a task via `dispatch()`):
```
with repo.new_version(base_version=repo.latest_version()) as dest_version:
dest_version.add_content(version_qs)
```
on a quiet system where no other code is interacting with the repo. When this is run with `repo.retain_repo_versions` = 1, it often results in `psycopg2.errors.ForeignKeyViolation: insert or update on table "core_repositoryversion" violates foreign key constraint "core_repositoryversi_base_version_id_5230045f_fk_core_repo"`, after which there is no new repository version and therefore no content in the repository. While attempting to reproduce this, I have not seen the problem when `repo.retain_repo_versions` > 1.
**Expected behavior**
Expect RepositoryVersion to have previous Content plus newly added Content.
| 2022-03-02T16:47:08 |
||
pulp/pulpcore | 2,292 | pulp__pulpcore-2292 | [
"2291"
] | 53f81bd15cfe9f49eaa082ccf5f36f64036f42d1 | diff --git a/pulpcore/app/urls.py b/pulpcore/app/urls.py
--- a/pulpcore/app/urls.py
+++ b/pulpcore/app/urls.py
@@ -8,6 +8,7 @@
SpectacularJSONAPIView,
SpectacularYAMLAPIView,
SpectacularRedocView,
+ SpectacularSwaggerView,
)
from rest_framework_nested import routers
@@ -167,12 +168,25 @@ def __repr__(self):
SpectacularRedocView.as_view(
authentication_classes=[],
permission_classes=[],
- url=f"{settings.V3_API_ROOT}docs/api.json?include_html=1",
+ url=f"{settings.V3_API_ROOT}docs/api.json?include_html=1&pk_path=1",
),
name="schema-redoc",
)
)
+urlpatterns.append(
+ path(
+ f"{API_ROOT}swagger/",
+ SpectacularSwaggerView.as_view(
+ authentication_classes=[],
+ permission_classes=[],
+ url=f"{settings.V3_API_ROOT}docs/api.json?include_html=1&pk_path=1",
+ ),
+ name="schema-swagger",
+ )
+)
+
+
all_routers = [root_router] + vs_tree.register_with(root_router)
for router in all_routers:
urlpatterns.append(path(API_ROOT, include(router.urls)))
diff --git a/pulpcore/openapi/__init__.py b/pulpcore/openapi/__init__.py
--- a/pulpcore/openapi/__init__.py
+++ b/pulpcore/openapi/__init__.py
@@ -363,6 +363,10 @@ def parse(self, input_request, public):
self._initialise_endpoints()
endpoints = self._get_paths_and_endpoints()
+ query_params = {}
+ if input_request:
+ query_params = {k.replace("amp;", ""): v for k, v in input_request.query_params.items()}
+
if spectacular_settings.SCHEMA_PATH_PREFIX is None:
# estimate common path prefix if none was given. only use it if we encountered more
# than one view to prevent emission of erroneous and unnecessary fallback names.
@@ -380,7 +384,7 @@ def parse(self, input_request, public):
# Adding plugin filter
plugins = None
# /pulp/api/v3/docs/api.json?plugin=pulp_file
- if input_request and "plugin" in input_request.query_params:
+ if input_request and "plugin" in query_params:
plugins = [input_request.query_params["plugin"]]
for path, path_regex, method, view in endpoints:
@@ -395,7 +399,8 @@ def parse(self, input_request, public):
schema = view.schema
- path = self.convert_endpoint_path_params(path, view, schema)
+ if input_request is None or "pk_path" not in query_params:
+ path = self.convert_endpoint_path_params(path, view, schema)
# beware that every access to schema yields a fresh object (descriptor pattern)
operation = schema.get_operation(path, path_regex, path_prefix, method, self.registry)
@@ -405,12 +410,12 @@ def parse(self, input_request, public):
continue
# Removes html tags from OpenAPI schema
- if input_request is None or "include_html" not in input_request.query_params:
+ if input_request is None or "include_html" not in query_params:
if "description" in operation:
operation["description"] = strip_tags(operation["description"])
# operationId as actions [list, read, sync, modify, create, delete, ...]
- if input_request and "bindings" in input_request.query_params:
+ if input_request and "bindings" in query_params:
tokenized_path = schema._tokenize_path()
tokenized_path = "_".join(
[t.replace("-", "_").replace("/", "_").lower() for t in tokenized_path]
| As a user I have a human-readable OpenAPI schema
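A small client-side sketch of what this enables, using the query parameters wired up in the patch above (`include_html=1` keeps the HTML descriptions, `pk_path=1` skips the `convert_endpoint_path_params` step); the host is a placeholder.
```python
import requests

# Fetch the generated schema; add auth if the instance requires it.
schema = requests.get(
    "https://pulp.example.com/pulp/api/v3/docs/api.json",
    params={"include_html": 1, "pk_path": 1},
).json()
print(schema["info"]["title"], "-", len(schema["paths"]), "paths")
```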
| 2022-03-02T17:10:41 |
||
pulp/pulpcore | 2,295 | pulp__pulpcore-2295 | [
"2101"
] | 878456d6c4a39c1d682685e0104b04fe3c10f794 | diff --git a/pulpcore/app/migrations/0064_add_new_style_task_columns.py b/pulpcore/app/migrations/0064_add_new_style_task_columns.py
--- a/pulpcore/app/migrations/0064_add_new_style_task_columns.py
+++ b/pulpcore/app/migrations/0064_add_new_style_task_columns.py
@@ -4,16 +4,34 @@
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
+TASK_BATCH_SIZE = 1000
+
def copy_reserved_resources_record(apps, schema_editor):
Task = apps.get_model('core', 'Task')
- for task in Task.objects.iterator():
+
+ # Update _reserved_resource_record for all tasks, 1000 tasks at a time.
+ # When we hit 1K tasks, go to the db for the batch.
+ # Make sure to update the final batch!
+ tasks = []
+ for task in Task.objects.iterator(chunk_size=TASK_BATCH_SIZE):
task._reserved_resources_record = list(task.reserved_resources_record.values_list('resource', flat=True))
- task.save()
+ tasks.append(task)
+ if len(tasks) == TASK_BATCH_SIZE:
+ Task.objects.bulk_update(tasks, ["_reserved_resources_record"])
+ tasks.clear()
+
+ # Update last set of tasks
+ if len(tasks) > 0:
+ Task.objects.bulk_update(tasks, ["_reserved_resources_record"])
+
+def purge_reservedresources(apps, schema_editor):
+ TaskReservedResource = apps.get_model('core', 'TaskReservedResource')
+ TaskReservedResource.objects.all().delete()
-def noop(apps, schema_editor):
- pass
+ ReservedResource = apps.get_model('core', 'ReservedResource')
+ ReservedResource.objects.all().delete()
class Migration(migrations.Migration):
@@ -23,6 +41,12 @@ class Migration(migrations.Migration):
]
operations = [
+ # Purge any ReservedResource entries - if there are any, they're orphans
+ migrations.RunPython(
+ code=purge_reservedresources,
+ reverse_code=migrations.RunPython.noop,
+ ),
+ # Update entities for the new task-system
migrations.AddField(
model_name='task',
name='args',
@@ -59,7 +83,7 @@ class Migration(migrations.Migration):
),
migrations.RunPython(
code=copy_reserved_resources_record,
- reverse_code=noop,
+ reverse_code=migrations.RunPython.noop,
),
migrations.RemoveField(
model_name='taskreservedresourcerecord',
@@ -80,4 +104,5 @@ class Migration(migrations.Migration):
old_name='_reserved_resources_record',
new_name='reserved_resources_record',
),
+
]
| Clean up TaskReservedResources/task-table at migration to new-tasking-system
See https://bugzilla.redhat.com/show_bug.cgi?id=2031154 for details.
Migration that needs to be updated to purge taskreservedresource entries: 0064_add_new_style_task_columns.py
This should be cherry-picked into 3.14/3.15/3.16 (after which the offending table no longer exists).
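A condensed sketch of the cleanup idea: the purge function mirrors the RunPython operation in the patch above, and the commented TRUNCATE line corresponds to the core_task truncation suggested in the note that follows, shown here only as an assumption rather than the shipped fix.
```python
from django.db import migrations


def purge_reserved_resources(apps, schema_editor):
    # Any reservation rows left over from the old tasking system are orphans
    # and can be dropped before the new-style task columns are added.
    apps.get_model("core", "TaskReservedResource").objects.all().delete()
    apps.get_model("core", "ReservedResource").objects.all().delete()


# Wired into the migration's operations as:
#     migrations.RunPython(purge_reserved_resources, migrations.RunPython.noop),
# The truncation idea would look roughly like:
#     migrations.RunSQL("TRUNCATE core_task CASCADE"),
```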
| Add core_task truncation - makes migration faster, may cascade-delete core_taskreservedresource as well? | 2022-03-02T21:16:58 |
|
pulp/pulpcore | 2,296 | pulp__pulpcore-2296 | [
"2267"
] | b5b5f3a860f89e6ab99ac4c26e29df3be89c057d | diff --git a/pulpcore/app/models/repository.py b/pulpcore/app/models/repository.py
--- a/pulpcore/app/models/repository.py
+++ b/pulpcore/app/models/repository.py
@@ -4,6 +4,7 @@
from contextlib import suppress
from gettext import gettext as _
from os import path
+from collections import defaultdict
import logging
import django
@@ -901,9 +902,11 @@ def _squash(self, repo_relations, next_version):
# delete any relationships added in the version being deleted and removed in the next one.
repo_relations.filter(version_added=self, version_removed=next_version).delete()
- # If the same content is deleted in version, but added back in next_version
- # set version_removed field in relation to None, and remove relation adding the content
- # in next_version
+ # If the same content is deleted in version, but added back in next_version then:
+ # - set version_removed field in relation to version_removed of the relation adding
+ # the content in next version because the content can be removed again after the
+ # next_version
+ # - and remove relation adding the content in next_version
content_added = repo_relations.filter(version_added=next_version).values_list("content_id")
# use list() to force the evaluation of the queryset, otherwise queryset is affected
@@ -914,13 +917,26 @@ def _squash(self, repo_relations, next_version):
)
)
- repo_relations.filter(
- version_removed=self, content_id__in=content_removed_and_readded
- ).update(version_removed=None)
-
- repo_relations.filter(
+ repo_contents_readded_in_next_version = repo_relations.filter(
version_added=next_version, content_id__in=content_removed_and_readded
- ).delete()
+ )
+
+ # Since the readded contents can be removed again by any subsequent version after the
+ # next version. Get the mapping of readded contents and their versions removed to use
+ # later. The version removed id will be None if a content is not removed.
+ version_removed_id_content_id_map = defaultdict(list)
+ for readded_repo_content in repo_contents_readded_in_next_version.iterator():
+ version_removed_id_content_id_map[readded_repo_content.version_removed_id].append(
+ readded_repo_content.content_id
+ )
+
+ repo_contents_readded_in_next_version.delete()
+
+ # Update the version removed of the readded contents
+ for version_removed_id, content_ids in version_removed_id_content_id_map.items():
+ repo_relations.filter(version_removed=self, content_id__in=content_ids).update(
+ version_removed_id=version_removed_id
+ )
# "squash" by moving other additions and removals forward to the next version
repo_relations.filter(version_added=self).update(version_added=next_version)
| diff --git a/pulpcore/tests/conftest_pulp_file.py b/pulpcore/tests/conftest_pulp_file.py
--- a/pulpcore/tests/conftest_pulp_file.py
+++ b/pulpcore/tests/conftest_pulp_file.py
@@ -8,6 +8,7 @@
from pulpcore.client.pulp_file import (
ContentFilesApi,
RepositoriesFileApi,
+ RepositoriesFileVersionsApi,
RemotesFileApi,
)
from pulp_smash.pulp3.utils import gen_repo
@@ -35,6 +36,11 @@ def file_repo_api_client(file_client):
return RepositoriesFileApi(file_client)
[email protected](scope="session")
+def file_repo_version_api_client(file_client):
+ return RepositoriesFileVersionsApi(file_client)
+
+
@pytest.fixture
def file_repo(file_repo_api_client, gen_object_with_cleanup):
return gen_object_with_cleanup(file_repo_api_client, gen_repo())
diff --git a/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py b/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
--- a/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
+++ b/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
@@ -1,8 +1,11 @@
"""Tests related to repository versions."""
import unittest
+import pytest
from random import choice, randint, sample
from time import sleep
from urllib.parse import urlsplit
+from tempfile import NamedTemporaryFile
+from hashlib import sha256
from pulp_smash import api, config, utils
from pulp_smash.exceptions import TaskReportError
@@ -396,6 +399,134 @@ def test_delete_publication(self):
self.client.get(publication["pulp_href"])
[email protected]
+def test_squash_repo_version(
+ file_repo_api_client, file_repo_version_api_client, content_file_api_client, file_repo
+):
+ """Test that the deletion of a repository version properly squashes the content.
+
+ - Setup versions like:
+ Version 0: <empty>
+ add: ABCDE
+ Version 1: ABCDE
+ delete: BCDE; add: FGHI
+ Version 2: AFGHI -- to be deleted
+ delete: GI; add: CD
+ Version 3: ACDFH -- to be squashed into
+ delete: DH; add: EI
+ Version 4: ACEFI
+ - Delete version 2.
+ - Check the content of all remaining versions.
+ """
+ content_units = {}
+ for name in ["A", "B", "C", "D", "E", "F", "G", "H", "I"]:
+ try:
+ content_units[name] = content_file_api_client.list(
+ relative_path=name, sha256=sha256(name.encode()).hexdigest()
+ ).results[0]
+ except IndexError:
+ with NamedTemporaryFile() as tf:
+ tf.write(name.encode())
+ tf.flush()
+ response = content_file_api_client.create(relative_path=name, file=tf.name)
+ result = monitor_task(response.task)
+ content_units[name] = content_file_api_client.read(result.created_resources[0])
+ response1 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "add_content_units": [
+ content.pulp_href
+ for key, content in content_units.items()
+ if key in ["A", "B", "C", "D", "E"]
+ ]
+ },
+ )
+
+ response2 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "remove_content_units": [
+ content.pulp_href
+ for key, content in content_units.items()
+ if key in ["B", "C", "D", "E"]
+ ],
+ "add_content_units": [
+ content.pulp_href
+ for key, content in content_units.items()
+ if key in ["F", "G", "H", "I"]
+ ],
+ },
+ )
+
+ response3 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "remove_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["G", "I"]
+ ],
+ "add_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["C", "D"]
+ ],
+ },
+ )
+
+ response4 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "remove_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["D", "H"]
+ ],
+ "add_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["E", "I"]
+ ],
+ },
+ )
+ version1 = file_repo_version_api_client.read(monitor_task(response1.task).created_resources[0])
+ version2 = file_repo_version_api_client.read(monitor_task(response2.task).created_resources[0])
+ version3 = file_repo_version_api_client.read(monitor_task(response3.task).created_resources[0])
+ version4 = file_repo_version_api_client.read(monitor_task(response4.task).created_resources[0])
+
+ # Check version state before deletion
+ assert version1.content_summary.added["file.file"]["count"] == 5
+ assert "file.file" not in version1.content_summary.removed
+ assert version2.content_summary.added["file.file"]["count"] == 4
+ assert version2.content_summary.removed["file.file"]["count"] == 4
+ assert version3.content_summary.added["file.file"]["count"] == 2
+ assert version3.content_summary.removed["file.file"]["count"] == 2
+ assert version4.content_summary.added["file.file"]["count"] == 2
+ assert version4.content_summary.removed["file.file"]["count"] == 2
+
+ content1 = content_file_api_client.list(repository_version=version1.pulp_href)
+ content2 = content_file_api_client.list(repository_version=version2.pulp_href)
+ content3 = content_file_api_client.list(repository_version=version3.pulp_href)
+ content4 = content_file_api_client.list(repository_version=version4.pulp_href)
+ assert set((content.relative_path for content in content1.results)) == {"A", "B", "C", "D", "E"}
+ assert set((content.relative_path for content in content2.results)) == {"A", "F", "G", "H", "I"}
+ assert set((content.relative_path for content in content3.results)) == {"A", "C", "D", "F", "H"}
+ assert set((content.relative_path for content in content4.results)) == {"A", "C", "E", "F", "I"}
+
+ monitor_task(file_repo_version_api_client.delete(version2.pulp_href).task)
+
+ # Check version state after deletion (Version 2 is gone...)
+ version1 = file_repo_version_api_client.read(version1.pulp_href)
+ version3 = file_repo_version_api_client.read(version3.pulp_href)
+ version4 = file_repo_version_api_client.read(version4.pulp_href)
+
+ assert version1.content_summary.added["file.file"]["count"] == 5
+ assert "file.file" not in version1.content_summary.removed
+ assert version3.content_summary.added["file.file"]["count"] == 2
+ assert version3.content_summary.removed["file.file"]["count"] == 2
+ assert version4.content_summary.added["file.file"]["count"] == 2
+ assert version4.content_summary.removed["file.file"]["count"] == 2
+
+ content1 = content_file_api_client.list(repository_version=version1.pulp_href)
+ content3 = content_file_api_client.list(repository_version=version3.pulp_href)
+ content4 = content_file_api_client.list(repository_version=version4.pulp_href)
+ assert set((content.relative_path for content in content1.results)) == {"A", "B", "C", "D", "E"}
+ assert set((content.relative_path for content in content3.results)) == {"A", "C", "D", "F", "H"}
+ assert set((content.relative_path for content in content4.results)) == {"A", "C", "E", "F", "I"}
+
+
class ContentImmutableRepoVersionTestCase(unittest.TestCase):
"""Test whether the content present in a repo version is immutable.
| Deleting repository versions can lose track of later content deletion
**Version**
pulpcore 3.18
**Describe the bug**
When a repository version is deleted that removes a content unit which is added back in the subsequent version but removed again in a later version, the removal in that later version is lost (a concrete trace is sketched under **Additional context** below).
**To Reproduce**
Steps to reproduce the behavior:
```bash
#!/bin/bash
set -eu
pulp file repository destroy --name test_delete_versions || true
pulp file repository create --name test_delete_versions
for NAME in "aaaa" "bbbb" "cccc" "dddd" "eeee" "ffff" "gggg" "hhhh" "jjjj"
do
echo "$NAME" > "$NAME"
pulp file content upload --relative-path "$NAME" --file "$NAME" || true
declare $NAME='{"sha256": "'"$(sha256sum --binary $NAME | cut -d" " -f1)"'", "relative_path": "'"$NAME"'"}'
done
pulp file repository content modify --repository test_delete_versions --add-content '['"$aaaa"', '"$bbbb"', '"$cccc"', '"$dddd"', '"$eeee"']'
pulp file repository content modify --repository test_delete_versions --remove-content '['"$bbbb"', '"$cccc"', '"$dddd"', '"$eeee"']' --add-content '['"$ffff"', '"$gggg"', '"$hhhh"', '"$jjjj"']'
pulp file repository content modify --repository test_delete_versions --remove-content '['"$gggg"', '"$jjjj"']' --add-content '['"$cccc"', '"$dddd"']'
pulp file repository content modify --repository test_delete_versions --remove-content '['"$dddd"', '"$hhhh"']' --add-content '['"$eeee"', '"$jjjj"']'
pulp file repository version list --repository test_delete_versions
# pulp file repository content list --repository test_delete_versions
pulp file repository version destroy --repository test_delete_versions --version 2
pulp file repository version list --repository test_delete_versions
if [ ! "$(pulp file repository content list --repository test_delete_versions --version 1 | jq -r '.[].relative_path' | sort)" = $'aaaa\nbbbb\ncccc\ndddd\neeee' ]
then
echo Version 1 is wrong.
fi
if [ ! "$(pulp file repository content list --repository test_delete_versions --version 3 | jq -r '.[].relative_path' | sort)" = $'aaaa\ncccc\ndddd\nffff\nhhhh' ]
then
echo Version 3 is wrong.
fi
if [ ! "$(pulp file repository content list --repository test_delete_versions --version 4 | jq -r '.[].relative_path' | sort)" = $'aaaa\ncccc\neeee\nffff\njjjj' ]
then
echo Version 4 is wrong.
fi
```
**Expected behavior**
Content in the repository versions should not change.
**Additional context**
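To make the failure mode concrete, here is a toy, runnable trace of the `dddd` unit from the script above, modelling each RepositoryContent row as a `(content, version_added, version_removed)` tuple instead of using the real models:
```python
def present_in(version, relations):
    return any(
        added <= version and (removed is None or removed > version)
        for _, added, removed in relations
    )


# added in v1, removed in v2, re-added in v3, removed again in v4
relations = [("dddd", 1, 2), ("dddd", 3, 4)]

# Old squash of version 2 into version 3: version_removed is reset to None and
# the re-adding relation (which recorded the removal in v4) is dropped.
after_old_squash = [("dddd", 1, None)]

# Fixed squash: the later removal is carried over when the two relations merge.
after_fixed_squash = [("dddd", 1, 4)]

assert present_in(4, after_old_squash)        # bug: dddd wrongly stays in version 4
assert not present_in(4, after_fixed_squash)  # expected: dddd is gone from version 4
```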
| 2022-03-03T08:55:44 |
|
pulp/pulpcore | 2,297 | pulp__pulpcore-2297 | [
"2267"
] | 54aed008fff1bdca2c067e7f1ac03e5a100368a2 | diff --git a/pulpcore/app/models/repository.py b/pulpcore/app/models/repository.py
--- a/pulpcore/app/models/repository.py
+++ b/pulpcore/app/models/repository.py
@@ -4,6 +4,7 @@
from contextlib import suppress
from gettext import gettext as _
from os import path
+from collections import defaultdict
import logging
import django
@@ -901,9 +902,11 @@ def _squash(self, repo_relations, next_version):
# delete any relationships added in the version being deleted and removed in the next one.
repo_relations.filter(version_added=self, version_removed=next_version).delete()
- # If the same content is deleted in version, but added back in next_version
- # set version_removed field in relation to None, and remove relation adding the content
- # in next_version
+ # If the same content is deleted in version, but added back in next_version then:
+ # - set version_removed field in relation to version_removed of the relation adding
+ # the content in next version because the content can be removed again after the
+ # next_version
+ # - and remove relation adding the content in next_version
content_added = repo_relations.filter(version_added=next_version).values_list("content_id")
# use list() to force the evaluation of the queryset, otherwise queryset is affected
@@ -914,13 +917,26 @@ def _squash(self, repo_relations, next_version):
)
)
- repo_relations.filter(
- version_removed=self, content_id__in=content_removed_and_readded
- ).update(version_removed=None)
-
- repo_relations.filter(
+ repo_contents_readded_in_next_version = repo_relations.filter(
version_added=next_version, content_id__in=content_removed_and_readded
- ).delete()
+ )
+
+ # Since the readded contents can be removed again by any subsequent version after the
+ # next version. Get the mapping of readded contents and their versions removed to use
+ # later. The version removed id will be None if a content is not removed.
+ version_removed_id_content_id_map = defaultdict(list)
+ for readded_repo_content in repo_contents_readded_in_next_version.iterator():
+ version_removed_id_content_id_map[readded_repo_content.version_removed_id].append(
+ readded_repo_content.content_id
+ )
+
+ repo_contents_readded_in_next_version.delete()
+
+ # Update the version removed of the readded contents
+ for version_removed_id, content_ids in version_removed_id_content_id_map.items():
+ repo_relations.filter(version_removed=self, content_id__in=content_ids).update(
+ version_removed_id=version_removed_id
+ )
# "squash" by moving other additions and removals forward to the next version
repo_relations.filter(version_added=self).update(version_added=next_version)
| diff --git a/pulpcore/tests/conftest.py b/pulpcore/tests/conftest.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/tests/conftest.py
@@ -0,0 +1 @@
+from .conftest_pulp_file import * # noqa
diff --git a/pulpcore/tests/conftest_pulp_file.py b/pulpcore/tests/conftest_pulp_file.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/tests/conftest_pulp_file.py
@@ -0,0 +1,141 @@
+import logging
+import uuid
+
+from pathlib import Path
+
+import pytest
+
+from pulpcore.client.pulp_file import (
+ ContentFilesApi,
+ RepositoriesFileApi,
+ RepositoriesFileVersionsApi,
+ RemotesFileApi,
+)
+from pulp_smash.pulp3.utils import gen_repo
+
+from pulpcore.tests.functional.api.using_plugin.utils import (
+ gen_file_client,
+)
+
+
+_logger = logging.getLogger(__name__)
+
+
[email protected](scope="session")
+def file_client():
+ return gen_file_client()
+
+
[email protected](scope="session")
+def content_file_api_client(file_client):
+ return ContentFilesApi(file_client)
+
+
[email protected](scope="session")
+def file_repo_api_client(file_client):
+ return RepositoriesFileApi(file_client)
+
+
[email protected](scope="session")
+def file_repo_version_api_client(file_client):
+ return RepositoriesFileVersionsApi(file_client)
+
+
[email protected]
+def file_repo(file_repo_api_client, gen_object_with_cleanup):
+ return gen_object_with_cleanup(file_repo_api_client, gen_repo())
+
+
[email protected](scope="session")
+def file_remote_api_client(file_client):
+ return RemotesFileApi(file_client)
+
+
[email protected](scope="session")
+def file_fixtures_root():
+ return Path(__file__).parent / "fixtures"
+
+
[email protected]
+def file_fixture_server_ssl_client_cert_req(
+ ssl_ctx_req_client_auth, file_fixtures_root, gen_fixture_server
+):
+ yield gen_fixture_server(file_fixtures_root, ssl_ctx_req_client_auth)
+
+
[email protected]
+def file_fixture_server_ssl(ssl_ctx, file_fixtures_root, gen_fixture_server):
+ yield gen_fixture_server(file_fixtures_root, ssl_ctx)
+
+
[email protected]
+def file_fixture_server(file_fixtures_root, gen_fixture_server):
+ yield gen_fixture_server(file_fixtures_root, None)
+
+
[email protected]
+def file_fixture_gen_remote(file_fixture_server, file_remote_api_client, gen_object_with_cleanup):
+ def _file_fixture_gen_remote(*, fixture_name, policy, **kwargs):
+ url = file_fixture_server.make_url(f"/{fixture_name}/PULP_MANIFEST")
+ kwargs.update({"url": str(url), "policy": policy, "name": str(uuid.uuid4())})
+ return gen_object_with_cleanup(file_remote_api_client, kwargs)
+
+ yield _file_fixture_gen_remote
+
+
[email protected]
+def file_fixture_gen_remote_ssl(
+ file_fixture_server_ssl,
+ file_remote_api_client,
+ tls_certificate_authority_cert,
+ gen_object_with_cleanup,
+):
+ def _file_fixture_gen_remote_ssl(*, fixture_name, policy, **kwargs):
+ url = file_fixture_server_ssl.make_url(f"/{fixture_name}/PULP_MANIFEST")
+ kwargs.update(
+ {
+ "url": str(url),
+ "policy": policy,
+ "name": str(uuid.uuid4()),
+ "ca_cert": tls_certificate_authority_cert,
+ }
+ )
+ return gen_object_with_cleanup(file_remote_api_client, kwargs)
+
+ yield _file_fixture_gen_remote_ssl
+
+
[email protected]
+def file_fixture_gen_remote_client_cert_req(
+ file_fixture_server_ssl_client_cert_req,
+ file_remote_api_client,
+ tls_certificate_authority_cert,
+ client_tls_certificate_cert_pem,
+ client_tls_certificate_key_pem,
+ gen_object_with_cleanup,
+):
+ def _file_fixture_gen_remote_client_cert_req(*, fixture_name, policy, **kwargs):
+ url = file_fixture_server_ssl_client_cert_req.make_url(f"/{fixture_name}/PULP_MANIFEST")
+ kwargs.update(
+ {
+ "url": str(url),
+ "policy": policy,
+ "name": str(uuid.uuid4()),
+ "ca_cert": tls_certificate_authority_cert,
+ "client_cert": client_tls_certificate_cert_pem,
+ "client_key": client_tls_certificate_key_pem,
+ }
+ )
+ return gen_object_with_cleanup(file_remote_api_client, kwargs)
+
+ yield _file_fixture_gen_remote_client_cert_req
+
+
[email protected]
+def file_fixture_gen_file_repo(file_repo_api_client, gen_object_with_cleanup):
+ """A factory to generate a File Repository with auto-deletion after the test run."""
+
+ def _file_fixture_gen_file_repo(**kwargs):
+ return gen_object_with_cleanup(file_repo_api_client, kwargs)
+
+ yield _file_fixture_gen_file_repo
diff --git a/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py b/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
--- a/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
+++ b/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
@@ -1,8 +1,11 @@
"""Tests related to repository versions."""
import unittest
+import pytest
from random import choice, randint, sample
from time import sleep
from urllib.parse import urlsplit
+from tempfile import NamedTemporaryFile
+from hashlib import sha256
from pulp_smash import api, config, utils
from pulp_smash.exceptions import TaskReportError
@@ -396,6 +399,134 @@ def test_delete_publication(self):
self.client.get(publication["pulp_href"])
[email protected]
+def test_squash_repo_version(
+ file_repo_api_client, file_repo_version_api_client, content_file_api_client, file_repo
+):
+ """Test that the deletion of a repository version properly squashes the content.
+
+ - Setup versions like:
+ Version 0: <empty>
+ add: ABCDE
+ Version 1: ABCDE
+ delete: BCDE; add: FGHI
+ Version 2: AFGHI -- to be deleted
+ delete: GI; add: CD
+ Version 3: ACDFH -- to be squashed into
+ delete: DH; add: EI
+ Version 4: ACEFI
+ - Delete version 2.
+ - Check the content of all remaining versions.
+ """
+ content_units = {}
+ for name in ["A", "B", "C", "D", "E", "F", "G", "H", "I"]:
+ try:
+ content_units[name] = content_file_api_client.list(
+ relative_path=name, sha256=sha256(name.encode()).hexdigest()
+ ).results[0]
+ except IndexError:
+ with NamedTemporaryFile() as tf:
+ tf.write(name.encode())
+ tf.flush()
+ response = content_file_api_client.create(relative_path=name, file=tf.name)
+ result = monitor_task(response.task)
+ content_units[name] = content_file_api_client.read(result.created_resources[0])
+ response1 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "add_content_units": [
+ content.pulp_href
+ for key, content in content_units.items()
+ if key in ["A", "B", "C", "D", "E"]
+ ]
+ },
+ )
+
+ response2 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "remove_content_units": [
+ content.pulp_href
+ for key, content in content_units.items()
+ if key in ["B", "C", "D", "E"]
+ ],
+ "add_content_units": [
+ content.pulp_href
+ for key, content in content_units.items()
+ if key in ["F", "G", "H", "I"]
+ ],
+ },
+ )
+
+ response3 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "remove_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["G", "I"]
+ ],
+ "add_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["C", "D"]
+ ],
+ },
+ )
+
+ response4 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "remove_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["D", "H"]
+ ],
+ "add_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["E", "I"]
+ ],
+ },
+ )
+ version1 = file_repo_version_api_client.read(monitor_task(response1.task).created_resources[0])
+ version2 = file_repo_version_api_client.read(monitor_task(response2.task).created_resources[0])
+ version3 = file_repo_version_api_client.read(monitor_task(response3.task).created_resources[0])
+ version4 = file_repo_version_api_client.read(monitor_task(response4.task).created_resources[0])
+
+ # Check version state before deletion
+ assert version1.content_summary.added["file.file"]["count"] == 5
+ assert "file.file" not in version1.content_summary.removed
+ assert version2.content_summary.added["file.file"]["count"] == 4
+ assert version2.content_summary.removed["file.file"]["count"] == 4
+ assert version3.content_summary.added["file.file"]["count"] == 2
+ assert version3.content_summary.removed["file.file"]["count"] == 2
+ assert version4.content_summary.added["file.file"]["count"] == 2
+ assert version4.content_summary.removed["file.file"]["count"] == 2
+
+ content1 = content_file_api_client.list(repository_version=version1.pulp_href)
+ content2 = content_file_api_client.list(repository_version=version2.pulp_href)
+ content3 = content_file_api_client.list(repository_version=version3.pulp_href)
+ content4 = content_file_api_client.list(repository_version=version4.pulp_href)
+ assert set((content.relative_path for content in content1.results)) == {"A", "B", "C", "D", "E"}
+ assert set((content.relative_path for content in content2.results)) == {"A", "F", "G", "H", "I"}
+ assert set((content.relative_path for content in content3.results)) == {"A", "C", "D", "F", "H"}
+ assert set((content.relative_path for content in content4.results)) == {"A", "C", "E", "F", "I"}
+
+ monitor_task(file_repo_version_api_client.delete(version2.pulp_href).task)
+
+ # Check version state after deletion (Version 2 is gone...)
+ version1 = file_repo_version_api_client.read(version1.pulp_href)
+ version3 = file_repo_version_api_client.read(version3.pulp_href)
+ version4 = file_repo_version_api_client.read(version4.pulp_href)
+
+ assert version1.content_summary.added["file.file"]["count"] == 5
+ assert "file.file" not in version1.content_summary.removed
+ assert version3.content_summary.added["file.file"]["count"] == 2
+ assert version3.content_summary.removed["file.file"]["count"] == 2
+ assert version4.content_summary.added["file.file"]["count"] == 2
+ assert version4.content_summary.removed["file.file"]["count"] == 2
+
+ content1 = content_file_api_client.list(repository_version=version1.pulp_href)
+ content3 = content_file_api_client.list(repository_version=version3.pulp_href)
+ content4 = content_file_api_client.list(repository_version=version4.pulp_href)
+ assert set((content.relative_path for content in content1.results)) == {"A", "B", "C", "D", "E"}
+ assert set((content.relative_path for content in content3.results)) == {"A", "C", "D", "F", "H"}
+ assert set((content.relative_path for content in content4.results)) == {"A", "C", "E", "F", "I"}
+
+
class ContentImmutableRepoVersionTestCase(unittest.TestCase):
"""Test whether the content present in a repo version is immutable.
| Deleting repository versions can lose track of later content deletion
**Version**
pulpcore 3.18
**Describe the bug**
When a repository version is deleted that removes a content unit which is added back in the subsequent version but removed again in a later version, the removal in that later version is lost.
**To Reproduce**
Steps to reproduce the behavior:
```bash
#!/bin/bash
set -eu
pulp file repository destroy --name test_delete_versions || true
pulp file repository create --name test_delete_versions
for NAME in "aaaa" "bbbb" "cccc" "dddd" "eeee" "ffff" "gggg" "hhhh" "jjjj"
do
echo "$NAME" > "$NAME"
pulp file content upload --relative-path "$NAME" --file "$NAME" || true
declare $NAME='{"sha256": "'"$(sha256sum --binary $NAME | cut -d" " -f1)"'", "relative_path": "'"$NAME"'"}'
done
pulp file repository content modify --repository test_delete_versions --add-content '['"$aaaa"', '"$bbbb"', '"$cccc"', '"$dddd"', '"$eeee"']'
pulp file repository content modify --repository test_delete_versions --remove-content '['"$bbbb"', '"$cccc"', '"$dddd"', '"$eeee"']' --add-content '['"$ffff"', '"$gggg"', '"$hhhh"', '"$jjjj"']'
pulp file repository content modify --repository test_delete_versions --remove-content '['"$gggg"', '"$jjjj"']' --add-content '['"$cccc"', '"$dddd"']'
pulp file repository content modify --repository test_delete_versions --remove-content '['"$dddd"', '"$hhhh"']' --add-content '['"$eeee"', '"$jjjj"']'
pulp file repository version list --repository test_delete_versions
# pulp file repository content list --repository test_delete_versions
pulp file repository version destroy --repository test_delete_versions --version 2
pulp file repository version list --repository test_delete_versions
if [ ! "$(pulp file repository content list --repository test_delete_versions --version 1 | jq -r '.[].relative_path' | sort)" = $'aaaa\nbbbb\ncccc\ndddd\neeee' ]
then
echo Version 1 is wrong.
fi
if [ ! "$(pulp file repository content list --repository test_delete_versions --version 3 | jq -r '.[].relative_path' | sort)" = $'aaaa\ncccc\ndddd\nffff\nhhhh' ]
then
echo Version 3 is wrong.
fi
if [ ! "$(pulp file repository content list --repository test_delete_versions --version 4 | jq -r '.[].relative_path' | sort)" = $'aaaa\ncccc\neeee\nffff\njjjj' ]
then
echo Version 4 is wrong.
fi
```
**Expected behavior**
Content in the repository versions should not change.
**Additional context**
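To make the failing scenario easier to follow, here is a small self-contained sketch (plain Python sets, no Pulp API involved) that recomputes what each version is supposed to contain; the four-letter names are just the files created by the reproducer above.
```python
# Illustrative only: each tuple is (content added, content removed) per version.
changes = [
    ({"aaaa", "bbbb", "cccc", "dddd", "eeee"}, set()),                     # version 1
    ({"ffff", "gggg", "hhhh", "jjjj"}, {"bbbb", "cccc", "dddd", "eeee"}),  # version 2
    ({"cccc", "dddd"}, {"gggg", "jjjj"}),                                  # version 3
    ({"eeee", "jjjj"}, {"dddd", "hhhh"}),                                  # version 4
]

content = set()
expected = []
for added, removed in changes:
    content = (content - removed) | added
    expected.append(sorted(content))

# expected[0], expected[2] and expected[3] are exactly the lists the script
# compares against for versions 1, 3 and 4; deleting version 2 must not change
# any of them.  The unit that loses track here is "dddd": removed in version 2,
# re-added in version 3, removed again in version 4 -- after deleting version 2
# it wrongly reappears in version 4.
print(expected)
```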
| 2022-03-03T09:42:55 |
|
pulp/pulpcore | 2,298 | pulp__pulpcore-2298 | [
"2267"
] | 0c900232a15e71a0d0703c3866966f3c9c130d1f | diff --git a/pulpcore/app/models/repository.py b/pulpcore/app/models/repository.py
--- a/pulpcore/app/models/repository.py
+++ b/pulpcore/app/models/repository.py
@@ -4,6 +4,7 @@
from contextlib import suppress
from gettext import gettext as _
from os import path
+from collections import defaultdict
import logging
import django
@@ -870,9 +871,11 @@ def _squash(self, repo_relations, next_version):
# delete any relationships added in the version being deleted and removed in the next one.
repo_relations.filter(version_added=self, version_removed=next_version).delete()
- # If the same content is deleted in version, but added back in next_version
- # set version_removed field in relation to None, and remove relation adding the content
- # in next_version
+ # If the same content is deleted in version, but added back in next_version then:
+ # - set version_removed field in relation to version_removed of the relation adding
+ # the content in next version because the content can be removed again after the
+ # next_version
+ # - and remove relation adding the content in next_version
content_added = repo_relations.filter(version_added=next_version).values_list("content_id")
# use list() to force the evaluation of the queryset, otherwise queryset is affected
@@ -883,13 +886,26 @@ def _squash(self, repo_relations, next_version):
)
)
- repo_relations.filter(
- version_removed=self, content_id__in=content_removed_and_readded
- ).update(version_removed=None)
-
- repo_relations.filter(
+ repo_contents_readded_in_next_version = repo_relations.filter(
version_added=next_version, content_id__in=content_removed_and_readded
- ).delete()
+ )
+
+ # Since the readded contents can be removed again by any subsequent version after the
+ # next version. Get the mapping of readded contents and their versions removed to use
+ # later. The version removed id will be None if a content is not removed.
+ version_removed_id_content_id_map = defaultdict(list)
+ for readded_repo_content in repo_contents_readded_in_next_version.iterator():
+ version_removed_id_content_id_map[readded_repo_content.version_removed_id].append(
+ readded_repo_content.content_id
+ )
+
+ repo_contents_readded_in_next_version.delete()
+
+ # Update the version removed of the readded contents
+ for version_removed_id, content_ids in version_removed_id_content_id_map.items():
+ repo_relations.filter(version_removed=self, content_id__in=content_ids).update(
+ version_removed_id=version_removed_id
+ )
# "squash" by moving other additions and removals forward to the next version
repo_relations.filter(version_added=self).update(version_added=next_version)
| diff --git a/pulpcore/tests/conftest.py b/pulpcore/tests/conftest.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/tests/conftest.py
@@ -0,0 +1 @@
+from .conftest_pulp_file import * # noqa
diff --git a/pulpcore/tests/conftest_pulp_file.py b/pulpcore/tests/conftest_pulp_file.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/tests/conftest_pulp_file.py
@@ -0,0 +1,141 @@
+import logging
+import uuid
+
+from pathlib import Path
+
+import pytest
+
+from pulpcore.client.pulp_file import (
+ ContentFilesApi,
+ RepositoriesFileApi,
+ RepositoriesFileVersionsApi,
+ RemotesFileApi,
+)
+from pulp_smash.pulp3.utils import gen_repo
+
+from pulpcore.tests.functional.api.using_plugin.utils import (
+ gen_file_client,
+)
+
+
+_logger = logging.getLogger(__name__)
+
+
[email protected](scope="session")
+def file_client():
+ return gen_file_client()
+
+
[email protected](scope="session")
+def content_file_api_client(file_client):
+ return ContentFilesApi(file_client)
+
+
[email protected](scope="session")
+def file_repo_api_client(file_client):
+ return RepositoriesFileApi(file_client)
+
+
[email protected](scope="session")
+def file_repo_version_api_client(file_client):
+ return RepositoriesFileVersionsApi(file_client)
+
+
[email protected]
+def file_repo(file_repo_api_client, gen_object_with_cleanup):
+ return gen_object_with_cleanup(file_repo_api_client, gen_repo())
+
+
[email protected](scope="session")
+def file_remote_api_client(file_client):
+ return RemotesFileApi(file_client)
+
+
[email protected](scope="session")
+def file_fixtures_root():
+ return Path(__file__).parent / "fixtures"
+
+
[email protected]
+def file_fixture_server_ssl_client_cert_req(
+ ssl_ctx_req_client_auth, file_fixtures_root, gen_fixture_server
+):
+ yield gen_fixture_server(file_fixtures_root, ssl_ctx_req_client_auth)
+
+
[email protected]
+def file_fixture_server_ssl(ssl_ctx, file_fixtures_root, gen_fixture_server):
+ yield gen_fixture_server(file_fixtures_root, ssl_ctx)
+
+
[email protected]
+def file_fixture_server(file_fixtures_root, gen_fixture_server):
+ yield gen_fixture_server(file_fixtures_root, None)
+
+
[email protected]
+def file_fixture_gen_remote(file_fixture_server, file_remote_api_client, gen_object_with_cleanup):
+ def _file_fixture_gen_remote(*, fixture_name, policy, **kwargs):
+ url = file_fixture_server.make_url(f"/{fixture_name}/PULP_MANIFEST")
+ kwargs.update({"url": str(url), "policy": policy, "name": str(uuid.uuid4())})
+ return gen_object_with_cleanup(file_remote_api_client, kwargs)
+
+ yield _file_fixture_gen_remote
+
+
[email protected]
+def file_fixture_gen_remote_ssl(
+ file_fixture_server_ssl,
+ file_remote_api_client,
+ tls_certificate_authority_cert,
+ gen_object_with_cleanup,
+):
+ def _file_fixture_gen_remote_ssl(*, fixture_name, policy, **kwargs):
+ url = file_fixture_server_ssl.make_url(f"/{fixture_name}/PULP_MANIFEST")
+ kwargs.update(
+ {
+ "url": str(url),
+ "policy": policy,
+ "name": str(uuid.uuid4()),
+ "ca_cert": tls_certificate_authority_cert,
+ }
+ )
+ return gen_object_with_cleanup(file_remote_api_client, kwargs)
+
+ yield _file_fixture_gen_remote_ssl
+
+
[email protected]
+def file_fixture_gen_remote_client_cert_req(
+ file_fixture_server_ssl_client_cert_req,
+ file_remote_api_client,
+ tls_certificate_authority_cert,
+ client_tls_certificate_cert_pem,
+ client_tls_certificate_key_pem,
+ gen_object_with_cleanup,
+):
+ def _file_fixture_gen_remote_client_cert_req(*, fixture_name, policy, **kwargs):
+ url = file_fixture_server_ssl_client_cert_req.make_url(f"/{fixture_name}/PULP_MANIFEST")
+ kwargs.update(
+ {
+ "url": str(url),
+ "policy": policy,
+ "name": str(uuid.uuid4()),
+ "ca_cert": tls_certificate_authority_cert,
+ "client_cert": client_tls_certificate_cert_pem,
+ "client_key": client_tls_certificate_key_pem,
+ }
+ )
+ return gen_object_with_cleanup(file_remote_api_client, kwargs)
+
+ yield _file_fixture_gen_remote_client_cert_req
+
+
[email protected]
+def file_fixture_gen_file_repo(file_repo_api_client, gen_object_with_cleanup):
+ """A factory to generate a File Repository with auto-deletion after the test run."""
+
+ def _file_fixture_gen_file_repo(**kwargs):
+ return gen_object_with_cleanup(file_repo_api_client, kwargs)
+
+ yield _file_fixture_gen_file_repo
diff --git a/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py b/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
--- a/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
+++ b/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
@@ -1,8 +1,11 @@
"""Tests related to repository versions."""
import unittest
+import pytest
from random import choice, randint, sample
from time import sleep
from urllib.parse import urlsplit
+from tempfile import NamedTemporaryFile
+from hashlib import sha256
from pulp_smash import api, config, utils
from pulp_smash.exceptions import TaskReportError
@@ -396,6 +399,134 @@ def test_delete_publication(self):
self.client.get(publication["pulp_href"])
[email protected]
+def test_squash_repo_version(
+ file_repo_api_client, file_repo_version_api_client, content_file_api_client, file_repo
+):
+ """Test that the deletion of a repository version properly squashes the content.
+
+ - Setup versions like:
+ Version 0: <empty>
+ add: ABCDE
+ Version 1: ABCDE
+ delete: BCDE; add: FGHI
+ Version 2: AFGHI -- to be deleted
+ delete: GI; add: CD
+ Version 3: ACDFH -- to be squashed into
+ delete: DH; add: EI
+ Version 4: ACEFI
+ - Delete version 2.
+ - Check the content of all remaining versions.
+ """
+ content_units = {}
+ for name in ["A", "B", "C", "D", "E", "F", "G", "H", "I"]:
+ try:
+ content_units[name] = content_file_api_client.list(
+ relative_path=name, sha256=sha256(name.encode()).hexdigest()
+ ).results[0]
+ except IndexError:
+ with NamedTemporaryFile() as tf:
+ tf.write(name.encode())
+ tf.flush()
+ response = content_file_api_client.create(relative_path=name, file=tf.name)
+ result = monitor_task(response.task)
+ content_units[name] = content_file_api_client.read(result.created_resources[0])
+ response1 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "add_content_units": [
+ content.pulp_href
+ for key, content in content_units.items()
+ if key in ["A", "B", "C", "D", "E"]
+ ]
+ },
+ )
+
+ response2 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "remove_content_units": [
+ content.pulp_href
+ for key, content in content_units.items()
+ if key in ["B", "C", "D", "E"]
+ ],
+ "add_content_units": [
+ content.pulp_href
+ for key, content in content_units.items()
+ if key in ["F", "G", "H", "I"]
+ ],
+ },
+ )
+
+ response3 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "remove_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["G", "I"]
+ ],
+ "add_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["C", "D"]
+ ],
+ },
+ )
+
+ response4 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "remove_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["D", "H"]
+ ],
+ "add_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["E", "I"]
+ ],
+ },
+ )
+ version1 = file_repo_version_api_client.read(monitor_task(response1.task).created_resources[0])
+ version2 = file_repo_version_api_client.read(monitor_task(response2.task).created_resources[0])
+ version3 = file_repo_version_api_client.read(monitor_task(response3.task).created_resources[0])
+ version4 = file_repo_version_api_client.read(monitor_task(response4.task).created_resources[0])
+
+ # Check version state before deletion
+ assert version1.content_summary.added["file.file"]["count"] == 5
+ assert "file.file" not in version1.content_summary.removed
+ assert version2.content_summary.added["file.file"]["count"] == 4
+ assert version2.content_summary.removed["file.file"]["count"] == 4
+ assert version3.content_summary.added["file.file"]["count"] == 2
+ assert version3.content_summary.removed["file.file"]["count"] == 2
+ assert version4.content_summary.added["file.file"]["count"] == 2
+ assert version4.content_summary.removed["file.file"]["count"] == 2
+
+ content1 = content_file_api_client.list(repository_version=version1.pulp_href)
+ content2 = content_file_api_client.list(repository_version=version2.pulp_href)
+ content3 = content_file_api_client.list(repository_version=version3.pulp_href)
+ content4 = content_file_api_client.list(repository_version=version4.pulp_href)
+ assert set((content.relative_path for content in content1.results)) == {"A", "B", "C", "D", "E"}
+ assert set((content.relative_path for content in content2.results)) == {"A", "F", "G", "H", "I"}
+ assert set((content.relative_path for content in content3.results)) == {"A", "C", "D", "F", "H"}
+ assert set((content.relative_path for content in content4.results)) == {"A", "C", "E", "F", "I"}
+
+ monitor_task(file_repo_version_api_client.delete(version2.pulp_href).task)
+
+ # Check version state after deletion (Version 2 is gone...)
+ version1 = file_repo_version_api_client.read(version1.pulp_href)
+ version3 = file_repo_version_api_client.read(version3.pulp_href)
+ version4 = file_repo_version_api_client.read(version4.pulp_href)
+
+ assert version1.content_summary.added["file.file"]["count"] == 5
+ assert "file.file" not in version1.content_summary.removed
+ assert version3.content_summary.added["file.file"]["count"] == 2
+ assert version3.content_summary.removed["file.file"]["count"] == 2
+ assert version4.content_summary.added["file.file"]["count"] == 2
+ assert version4.content_summary.removed["file.file"]["count"] == 2
+
+ content1 = content_file_api_client.list(repository_version=version1.pulp_href)
+ content3 = content_file_api_client.list(repository_version=version3.pulp_href)
+ content4 = content_file_api_client.list(repository_version=version4.pulp_href)
+ assert set((content.relative_path for content in content1.results)) == {"A", "B", "C", "D", "E"}
+ assert set((content.relative_path for content in content3.results)) == {"A", "C", "D", "F", "H"}
+ assert set((content.relative_path for content in content4.results)) == {"A", "C", "E", "F", "I"}
+
+
class ContentImmutableRepoVersionTestCase(unittest.TestCase):
"""Test whether the content present in a repo version is immutable.
| Deleting repository versions can lose track of later content deletion
**Version**
pulpcore 3.18
**Describe the bug**
When deleting a repository version that removes a content unit which is added back in the subsequent version but deleted again in a later version, the deletion recorded in that later version is lost.
**To Reproduce**
Steps to reproduce the behavior:
```bash
#!/bin/bash
set -eu
pulp file repository destroy --name test_delete_versions || true
pulp file repository create --name test_delete_versions
for NAME in "aaaa" "bbbb" "cccc" "dddd" "eeee" "ffff" "gggg" "hhhh" "jjjj"
do
echo "$NAME" > "$NAME"
pulp file content upload --relative-path "$NAME" --file "$NAME" || true
declare $NAME='{"sha256": "'"$(sha256sum --binary $NAME | cut -d" " -f1)"'", "relative_path": "'"$NAME"'"}'
done
pulp file repository content modify --repository test_delete_versions --add-content '['"$aaaa"', '"$bbbb"', '"$cccc"', '"$dddd"', '"$eeee"']'
pulp file repository content modify --repository test_delete_versions --remove-content '['"$bbbb"', '"$cccc"', '"$dddd"', '"$eeee"']' --add-content '['"$ffff"', '"$gggg"', '"$hhhh"', '"$jjjj"']'
pulp file repository content modify --repository test_delete_versions --remove-content '['"$gggg"', '"$jjjj"']' --add-content '['"$cccc"', '"$dddd"']'
pulp file repository content modify --repository test_delete_versions --remove-content '['"$dddd"', '"$hhhh"']' --add-content '['"$eeee"', '"$jjjj"']'
pulp file repository version list --repository test_delete_versions
# pulp file repository content list --repository test_delete_versions
pulp file repository version destroy --repository test_delete_versions --version 2
pulp file repository version list --repository test_delete_versions
if [ ! "$(pulp file repository content list --repository test_delete_versions --version 1 | jq -r '.[].relative_path' | sort)" = $'aaaa\nbbbb\ncccc\ndddd\neeee' ]
then
echo Version 1 is wrong.
fi
if [ ! "$(pulp file repository content list --repository test_delete_versions --version 3 | jq -r '.[].relative_path' | sort)" = $'aaaa\ncccc\ndddd\nffff\nhhhh' ]
then
echo Version 3 is wrong.
fi
if [ ! "$(pulp file repository content list --repository test_delete_versions --version 4 | jq -r '.[].relative_path' | sort)" = $'aaaa\ncccc\neeee\nffff\njjjj' ]
then
echo Version 4 is wrong.
fi
```
**Expected behavior**
Content in the repository versions should not change.
**Additional context**
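As additional context for the fix in the diff above, the bookkeeping can be sketched with plain tuples instead of the RepositoryContent model (illustrative only): each row is (version_added, version_removed), with None meaning the content was never removed, and content belongs to version X when version_added <= X < version_removed.
```python
# Relation rows for content "dddd" from the reproducer: added in v1, removed
# in v2, re-added in v3, removed again in v4.
before = [(1, 2), (3, 4)]

# Old squash of v2: the surviving row was re-opened (version_removed=None)
# and the re-add row dropped, so the v4 removal was lost.
old_after = [(1, None)]

# Fixed squash of v2: the surviving row inherits the re-add row's removal.
new_after = [(1, 4)]

def contains(rows, version):
    # membership rule: version_added <= version < version_removed
    return any(a <= version and (r is None or version < r) for a, r in rows)

assert [contains(new_after, v) for v in (1, 3, 4)] == [True, True, False]
assert contains(old_after, 4)  # the bug: "dddd" reappears in version 4
```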
| 2022-03-03T14:57:57 |
|
pulp/pulpcore | 2,302 | pulp__pulpcore-2302 | [
"2102"
] | f6abe778885c271f318137b5b31a8c0d56445e3b | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -54,14 +54,37 @@ def _destination_repo(importer, source_repo_name):
return Repository.objects.get(name=dest_repo_name)
-def _import_file(fpath, resource_class, do_raise=True):
+def _import_file(fpath, resource_class, retry=False):
try:
log.info(_("Importing file {}.").format(fpath))
with open(fpath, "r") as json_file:
data = Dataset().load(json_file, format="json")
resource = resource_class()
log.info(_("...Importing resource {}.").format(resource.__class__.__name__))
- return resource.import_data(data, raise_errors=do_raise)
+ if retry:
+ # django import-export can have a problem with concurrent-imports that are
+ # importing the same 'thing' (e.g., a Package that exists in two different
+ # repo-versions that are being imported at the same time). If we're asked to
+ # retry, we will try an import that will simply record errors as they happen
+ # (rather than failing with an exception) first. If errors happen, we'll do one
+ # retry before we give up on this repo-version's import.
+ a_result = resource.import_data(data, raise_errors=False)
+ if a_result.has_errors():
+ log.info(
+ _("...{} import-errors encountered importing {}, retrying").format(
+ a_result.totals["error"], fpath
+ )
+ )
+ # Second attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(_("FATAL import-failure importing {}").format(fpath))
+ raise
+ else:
+ a_result = resource.import_data(data, raise_errors=True)
+ return a_result
except AttributeError:
log.error(_("FAILURE importing file {}!").format(fpath))
raise
@@ -157,36 +180,14 @@ def import_repository_version(importer_pk, destination_repo_pk, source_repo_name
resulting_content_ids = []
for res_class in cfg.exportable_classes:
filename = f"{res_class.__module__}.{res_class.__name__}.json"
- a_result = _import_file(os.path.join(rv_path, filename), res_class, do_raise=False)
- # django import-export can have a problem with concurrent-imports that are
- # importing the same 'thing' (e.g., a Package that exists in two different
- # repo-versions that are being imported at the same time). We will try an import
- # that will simply record errors as they happen (rather than failing with an exception)
- # first. If errors happen, we'll do one retry before we give up on this repo-version's
- # import.
- if a_result.has_errors():
- log.info(
- _("...{} import-errors encountered importing {} from {}, retrying").format(
- a_result.totals["error"], filename, rv_name
- )
- )
- # Second attempt, we allow to raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
- a_result = _import_file(os.path.join(rv_path, filename), res_class)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(
- _("FATAL import-failure importing {} from {}").format(filename, rv_name)
- )
- raise
-
+ a_result = _import_file(os.path.join(rv_path, filename), res_class, retry=True)
resulting_content_ids.extend(
row.object_id for row in a_result.rows if row.import_type in ("new", "update")
)
# Once all content exists, create the ContentArtifact links
ca_path = os.path.join(rv_path, CA_FILE)
- _import_file(ca_path, ContentArtifactResource)
+ _import_file(ca_path, ContentArtifactResource, retry=True)
# see if we have a content mapping
mapping_path = f"{rv_name}/{CONTENT_MAPPING_FILE}"
| Occasional UQ error core_repositoryversion_repository_id_number_3c54ce50_uniq on DistTree PulpImport
Rare failure, on this occasion triggered during a pulp-import test case:
`pulp_rpm/tests/functional/api/test_pulpimport.py::DistributionTreePulpImportTestCase::test_import FAILED [ 81%]`
```
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.tasking.pulpcore_worker:INFO: Starting task c455492e-bc13-4517-b86a-6069bca53ed7
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.tasking.pulpcore_worker:INFO: Starting task 023517b6-401d-4f7e-be44-1d5814d0fe0c
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.ModulemdResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ModulemdResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.ModulemdDefaultsResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ModulemdDefaultsResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageGroupResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageGroupResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageCategoryResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageCategoryResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageEnvironmentResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageEnvironmentResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageLangpacksResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageLangpacksResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.UpdateRecordResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateRecordResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.RepoMetadataFileResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource RepoMetadataFileResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.DistributionTreeResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource DistributionTreeResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.DistributionTreeRepositoryResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource DistributionTreeRepositoryResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.ChecksumResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ChecksumResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.ImageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ImageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.AddonResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource AddonResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.ModulemdResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ModulemdResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.ModulemdDefaultsResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ModulemdDefaultsResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageGroupResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageGroupResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.VariantResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource VariantResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageCategoryResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageCategoryResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.UpdateReferenceResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateReferenceResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.UpdateCollectionResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateCollectionResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.UpdateCollectionPackageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateCollectionPackageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulpcore.app.modelresource.ContentArtifactResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ContentArtifactResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageEnvironmentResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageEnvironmentResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageLangpacksResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageLangpacksResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.UpdateRecordResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateRecordResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.RepoMetadataFileResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource RepoMetadataFileResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.DistributionTreeResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource DistributionTreeResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.DistributionTreeRepositoryResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource DistributionTreeRepositoryResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.ChecksumResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ChecksumResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.ImageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ImageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.AddonResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource AddonResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.VariantResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource VariantResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.UpdateReferenceResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateReferenceResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.UpdateCollectionResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateCollectionResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.UpdateCollectionPackageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateCollectionPackageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulpcore.app.modelresource.ContentArtifactResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ContentArtifactResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.tasking.pulpcore_worker:INFO: Task c455492e-bc13-4517-b86a-6069bca53ed7 failed (duplicate key value violates unique constraint "core_repositoryversion_repository_id_number_3c54ce50_uniq"
DETAIL: Key (repository_id, number)=(e84ff294-5d1b-4bfe-b5fb-e98d0c328537, 2) already exists.
)
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.tasking.pulpcore_worker:INFO: File "/usr/local/lib/python3.8/site-packages/pulpcore/tasking/pulpcore_worker.py", line 362, in _perform_task
result = func(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/pulpcore/app/tasks/importer.py", line 206, in import_repository_version
with repo.new_version() as new_version:
File "/usr/local/lib/python3.8/site-packages/pulpcore/app/models/repository.py", line 126, in new_version
version.save()
File "/usr/lib64/python3.8/contextlib.py", line 75, in inner
return func(*args, **kwds)
File "/usr/local/lib/python3.8/site-packages/django_lifecycle/mixins.py", line 134, in save
save(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 739, in save
self.save_base(using=using, force_insert=force_insert,
File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 776, in save_base
updated = self._save_table(
File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 881, in _save_table
results = self._do_insert(cls._base_manager, using, fields, returning_fields, raw)
File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 919, in _do_insert
return manager._insert(
File "/usr/local/lib/python3.8/site-packages/django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/django/db/models/query.py", line 1270, in _insert
return query.get_compiler(using=using).execute_sql(returning_fields)
File "/usr/local/lib/python3.8/site-packages/django/db/models/sql/compiler.py", line 1416, in execute_sql
cursor.execute(sql, params)
File "/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py", line 66, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py", line 75, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
File "/usr/local/lib/python3.8/site-packages/django/db/utils.py", line 90, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
```
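For anyone landing on this report: the patch attached above works around the concurrent-import race by tolerating row-level errors on a first pass and then retrying strictly. Stripped of the surrounding logging, it has roughly this shape (a sketch only, not the exact pulpcore code):
```python
import logging

log = logging.getLogger(__name__)

def import_with_retry(resource, dataset):
    """Import once tolerating row errors; if any occurred, retry once and raise."""
    result = resource.import_data(dataset, raise_errors=False)
    if result.has_errors():
        log.info("%s row errors on first pass, retrying once", result.totals["error"])
        # second pass: any remaining problem raises and fails the task
        result = resource.import_data(dataset, raise_errors=True)
    return result
```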
| 2022-03-04T15:45:23 |
||
pulp/pulpcore | 2,303 | pulp__pulpcore-2303 | [
"2102"
] | c0f04d8f991e45ccc6a5de13fcaf2564016ce4c7 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -54,14 +54,37 @@ def _destination_repo(importer, source_repo_name):
return Repository.objects.get(name=dest_repo_name)
-def _import_file(fpath, resource_class, do_raise=True):
+def _import_file(fpath, resource_class, retry=False):
try:
log.info(_("Importing file {}.").format(fpath))
with open(fpath, "r") as json_file:
data = Dataset().load(json_file, format="json")
resource = resource_class()
log.info(_("...Importing resource {}.").format(resource.__class__.__name__))
- return resource.import_data(data, raise_errors=do_raise)
+ if retry:
+ # django import-export can have a problem with concurrent-imports that are
+ # importing the same 'thing' (e.g., a Package that exists in two different
+ # repo-versions that are being imported at the same time). If we're asked to
+ # retry, we will try an import that will simply record errors as they happen
+ # (rather than failing with an exception) first. If errors happen, we'll do one
+ # retry before we give up on this repo-version's import.
+ a_result = resource.import_data(data, raise_errors=False)
+ if a_result.has_errors():
+ log.info(
+ _("...{} import-errors encountered importing {}, retrying").format(
+ a_result.totals["error"], fpath
+ )
+ )
+ # Second attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(_("FATAL import-failure importing {}").format(fpath))
+ raise
+ else:
+ a_result = resource.import_data(data, raise_errors=True)
+ return a_result
except AttributeError:
log.error(_("FAILURE importing file {}!").format(fpath))
raise
@@ -157,36 +180,14 @@ def import_repository_version(importer_pk, destination_repo_pk, source_repo_name
resulting_content_ids = []
for res_class in cfg.exportable_classes:
filename = f"{res_class.__module__}.{res_class.__name__}.json"
- a_result = _import_file(os.path.join(rv_path, filename), res_class, do_raise=False)
- # django import-export can have a problem with concurrent-imports that are
- # importing the same 'thing' (e.g., a Package that exists in two different
- # repo-versions that are being imported at the same time). We will try an import
- # that will simply record errors as they happen (rather than failing with an exception)
- # first. If errors happen, we'll do one retry before we give up on this repo-version's
- # import.
- if a_result.has_errors():
- log.info(
- _("...{} import-errors encountered importing {} from {}, retrying").format(
- a_result.totals["error"], filename, rv_name
- )
- )
- # Second attempt, we allow to raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
- a_result = _import_file(os.path.join(rv_path, filename), res_class)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(
- _("FATAL import-failure importing {} from {}").format(filename, rv_name)
- )
- raise
-
+ a_result = _import_file(os.path.join(rv_path, filename), res_class, retry=True)
resulting_content_ids.extend(
row.object_id for row in a_result.rows if row.import_type in ("new", "update")
)
# Once all content exists, create the ContentArtifact links
ca_path = os.path.join(rv_path, CA_FILE)
- _import_file(ca_path, ContentArtifactResource)
+ _import_file(ca_path, ContentArtifactResource, retry=True)
# see if we have a content mapping
mapping_path = f"{rv_name}/{CONTENT_MAPPING_FILE}"
| Occasional UQ error core_repositoryversion_repository_id_number_3c54ce50_uniq on DistTree PulpImport
Rare failure, on this occasion triggered during a pulp-import test case:
`pulp_rpm/tests/functional/api/test_pulpimport.py::DistributionTreePulpImportTestCase::test_import FAILED [ 81%]`
```
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.tasking.pulpcore_worker:INFO: Starting task c455492e-bc13-4517-b86a-6069bca53ed7
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.tasking.pulpcore_worker:INFO: Starting task 023517b6-401d-4f7e-be44-1d5814d0fe0c
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.ModulemdResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ModulemdResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.ModulemdDefaultsResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ModulemdDefaultsResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageGroupResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageGroupResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageCategoryResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageCategoryResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageEnvironmentResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageEnvironmentResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageLangpacksResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageLangpacksResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.UpdateRecordResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateRecordResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.RepoMetadataFileResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource RepoMetadataFileResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.DistributionTreeResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource DistributionTreeResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.DistributionTreeRepositoryResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource DistributionTreeRepositoryResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.ChecksumResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ChecksumResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.ImageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ImageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.AddonResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource AddonResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.ModulemdResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ModulemdResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.ModulemdDefaultsResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ModulemdDefaultsResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageGroupResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageGroupResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.VariantResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource VariantResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageCategoryResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageCategoryResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.UpdateReferenceResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateReferenceResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.UpdateCollectionResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateCollectionResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.UpdateCollectionPackageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateCollectionPackageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulpcore.app.modelresource.ContentArtifactResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ContentArtifactResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageEnvironmentResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageEnvironmentResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageLangpacksResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageLangpacksResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.UpdateRecordResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateRecordResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.RepoMetadataFileResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource RepoMetadataFileResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.DistributionTreeResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource DistributionTreeResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.DistributionTreeRepositoryResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource DistributionTreeRepositoryResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.ChecksumResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ChecksumResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.ImageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ImageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.AddonResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource AddonResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.VariantResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource VariantResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.UpdateReferenceResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateReferenceResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.UpdateCollectionResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateCollectionResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.UpdateCollectionPackageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateCollectionPackageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulpcore.app.modelresource.ContentArtifactResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ContentArtifactResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.tasking.pulpcore_worker:INFO: Task c455492e-bc13-4517-b86a-6069bca53ed7 failed (duplicate key value violates unique constraint "core_repositoryversion_repository_id_number_3c54ce50_uniq"
DETAIL: Key (repository_id, number)=(e84ff294-5d1b-4bfe-b5fb-e98d0c328537, 2) already exists.
)
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.tasking.pulpcore_worker:INFO: File "/usr/local/lib/python3.8/site-packages/pulpcore/tasking/pulpcore_worker.py", line 362, in _perform_task
result = func(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/pulpcore/app/tasks/importer.py", line 206, in import_repository_version
with repo.new_version() as new_version:
File "/usr/local/lib/python3.8/site-packages/pulpcore/app/models/repository.py", line 126, in new_version
version.save()
File "/usr/lib64/python3.8/contextlib.py", line 75, in inner
return func(*args, **kwds)
File "/usr/local/lib/python3.8/site-packages/django_lifecycle/mixins.py", line 134, in save
save(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 739, in save
self.save_base(using=using, force_insert=force_insert,
File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 776, in save_base
updated = self._save_table(
File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 881, in _save_table
results = self._do_insert(cls._base_manager, using, fields, returning_fields, raw)
File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 919, in _do_insert
return manager._insert(
File "/usr/local/lib/python3.8/site-packages/django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/django/db/models/query.py", line 1270, in _insert
return query.get_compiler(using=using).execute_sql(returning_fields)
File "/usr/local/lib/python3.8/site-packages/django/db/models/sql/compiler.py", line 1416, in execute_sql
cursor.execute(sql, params)
File "/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py", line 66, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py", line 75, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
File "/usr/local/lib/python3.8/site-packages/django/db/utils.py", line 90, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
```
| 2022-03-04T15:45:24 |
||
pulp/pulpcore | 2,304 | pulp__pulpcore-2304 | [
"2102"
] | 603a60d996c12f1f22bdf924ec5aff8d72306654 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -54,14 +54,37 @@ def _destination_repo(importer, source_repo_name):
return Repository.objects.get(name=dest_repo_name)
-def _import_file(fpath, resource_class, do_raise=True):
+def _import_file(fpath, resource_class, retry=False):
try:
log.info(_("Importing file {}.").format(fpath))
with open(fpath, "r") as json_file:
data = Dataset().load(json_file, format="json")
resource = resource_class()
log.info(_("...Importing resource {}.").format(resource.__class__.__name__))
- return resource.import_data(data, raise_errors=do_raise)
+ if retry:
+ # django import-export can have a problem with concurrent-imports that are
+ # importing the same 'thing' (e.g., a Package that exists in two different
+ # repo-versions that are being imported at the same time). If we're asked to
+ # retry, we will try an import that will simply record errors as they happen
+ # (rather than failing with an exception) first. If errors happen, we'll do one
+ # retry before we give up on this repo-version's import.
+ a_result = resource.import_data(data, raise_errors=False)
+ if a_result.has_errors():
+ log.info(
+ _("...{} import-errors encountered importing {}, retrying").format(
+ a_result.totals["error"], fpath
+ )
+ )
+ # Second attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(_("FATAL import-failure importing {}").format(fpath))
+ raise
+ else:
+ a_result = resource.import_data(data, raise_errors=True)
+ return a_result
except AttributeError:
log.error(_("FAILURE importing file {}!").format(fpath))
raise
@@ -157,36 +180,14 @@ def import_repository_version(importer_pk, destination_repo_pk, source_repo_name
resulting_content_ids = []
for res_class in cfg.exportable_classes:
filename = f"{res_class.__module__}.{res_class.__name__}.json"
- a_result = _import_file(os.path.join(rv_path, filename), res_class, do_raise=False)
- # django import-export can have a problem with concurrent-imports that are
- # importing the same 'thing' (e.g., a Package that exists in two different
- # repo-versions that are being imported at the same time). We will try an import
- # that will simply record errors as they happen (rather than failing with an exception)
- # first. If errors happen, we'll do one retry before we give up on this repo-version's
- # import.
- if a_result.has_errors():
- log.info(
- _("...{} import-errors encountered importing {} from {}, retrying").format(
- a_result.totals["error"], filename, rv_name
- )
- )
- # Second attempt, we allow to raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
- a_result = _import_file(os.path.join(rv_path, filename), res_class)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(
- _("FATAL import-failure importing {} from {}").format(filename, rv_name)
- )
- raise
-
+ a_result = _import_file(os.path.join(rv_path, filename), res_class, retry=True)
resulting_content_ids.extend(
row.object_id for row in a_result.rows if row.import_type in ("new", "update")
)
# Once all content exists, create the ContentArtifact links
ca_path = os.path.join(rv_path, CA_FILE)
- _import_file(ca_path, ContentArtifactResource)
+ _import_file(ca_path, ContentArtifactResource, retry=True)
# see if we have a content mapping
mapping_path = f"{rv_name}/{CONTENT_MAPPING_FILE}"
| Occasional UQ error core_repositoryversion_repository_id_number_3c54ce50_uniq on DistTree PulpImport
Rare failure, on this occasion triggered during a pulp-import testcase:
`pulp_rpm/tests/functional/api/test_pulpimport.py::DistributionTreePulpImportTestCase::test_import FAILED [ 81%]`
```
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.tasking.pulpcore_worker:INFO: Starting task c455492e-bc13-4517-b86a-6069bca53ed7
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.tasking.pulpcore_worker:INFO: Starting task 023517b6-401d-4f7e-be44-1d5814d0fe0c
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.ModulemdResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ModulemdResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.ModulemdDefaultsResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ModulemdDefaultsResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageGroupResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageGroupResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageCategoryResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageCategoryResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageEnvironmentResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageEnvironmentResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageLangpacksResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageLangpacksResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.UpdateRecordResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateRecordResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.RepoMetadataFileResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource RepoMetadataFileResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.DistributionTreeResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource DistributionTreeResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.DistributionTreeRepositoryResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource DistributionTreeRepositoryResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.ChecksumResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ChecksumResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.ImageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ImageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.AddonResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource AddonResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.ModulemdResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ModulemdResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.ModulemdDefaultsResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ModulemdDefaultsResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageGroupResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageGroupResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.VariantResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource VariantResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageCategoryResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageCategoryResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.UpdateReferenceResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateReferenceResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.UpdateCollectionResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateCollectionResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.UpdateCollectionPackageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateCollectionPackageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulpcore.app.modelresource.ContentArtifactResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ContentArtifactResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageEnvironmentResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageEnvironmentResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageLangpacksResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageLangpacksResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.UpdateRecordResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateRecordResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.RepoMetadataFileResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource RepoMetadataFileResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.DistributionTreeResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource DistributionTreeResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.DistributionTreeRepositoryResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource DistributionTreeRepositoryResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.ChecksumResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ChecksumResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.ImageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ImageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.AddonResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource AddonResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.VariantResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource VariantResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.UpdateReferenceResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateReferenceResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.UpdateCollectionResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateCollectionResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.UpdateCollectionPackageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateCollectionPackageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulpcore.app.modelresource.ContentArtifactResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ContentArtifactResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.tasking.pulpcore_worker:INFO: Task c455492e-bc13-4517-b86a-6069bca53ed7 failed (duplicate key value violates unique constraint "core_repositoryversion_repository_id_number_3c54ce50_uniq"
DETAIL: Key (repository_id, number)=(e84ff294-5d1b-4bfe-b5fb-e98d0c328537, 2) already exists.
)
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.tasking.pulpcore_worker:INFO: File "/usr/local/lib/python3.8/site-packages/pulpcore/tasking/pulpcore_worker.py", line 362, in _perform_task
result = func(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/pulpcore/app/tasks/importer.py", line 206, in import_repository_version
with repo.new_version() as new_version:
File "/usr/local/lib/python3.8/site-packages/pulpcore/app/models/repository.py", line 126, in new_version
version.save()
File "/usr/lib64/python3.8/contextlib.py", line 75, in inner
return func(*args, **kwds)
File "/usr/local/lib/python3.8/site-packages/django_lifecycle/mixins.py", line 134, in save
save(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 739, in save
self.save_base(using=using, force_insert=force_insert,
File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 776, in save_base
updated = self._save_table(
File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 881, in _save_table
results = self._do_insert(cls._base_manager, using, fields, returning_fields, raw)
File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 919, in _do_insert
return manager._insert(
File "/usr/local/lib/python3.8/site-packages/django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/django/db/models/query.py", line 1270, in _insert
return query.get_compiler(using=using).execute_sql(returning_fields)
File "/usr/local/lib/python3.8/site-packages/django/db/models/sql/compiler.py", line 1416, in execute_sql
cursor.execute(sql, params)
File "/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py", line 66, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py", line 75, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
File "/usr/local/lib/python3.8/site-packages/django/db/utils.py", line 90, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
```
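The patch above changes _import_file() to run a tolerant first pass (raise_errors=False) and, only if that pass records errors, one strict retry (raise_errors=True). A minimal plain-Python sketch of the same pattern; FakeResult and flaky_import are invented stand-ins for django-import-export's result object:

```python
# Tolerant-then-strict retry: the first pass only records errors, the second
# pass (taken only when needed) lets any failure propagate.
def import_with_retry(import_once):
    """import_once(raise_errors=...) must return an object with has_errors()."""
    result = import_once(raise_errors=False)
    if result.has_errors():
        # Something transient (e.g. a concurrent import of the same rows)
        # interfered; try once more and let a real failure raise this time.
        result = import_once(raise_errors=True)
    return result

class FakeResult:
    def __init__(self, errors):
        self._errors = errors

    def has_errors(self):
        return self._errors

attempts = []

def flaky_import(raise_errors):
    # Pretend the tolerant pass hit transient errors and the retry succeeded.
    attempts.append(raise_errors)
    return FakeResult(errors=(len(attempts) == 1))

print(import_with_retry(flaky_import).has_errors())  # False
print(attempts)                                      # [False, True]
```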
| 2022-03-04T15:45:24 |
||
pulp/pulpcore | 2,305 | pulp__pulpcore-2305 | [
"2102"
] | 69be1d6576064a6909bbfdac9d97c4433a8d4205 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -55,14 +55,37 @@ def _destination_repo(importer, source_repo_name):
return Repository.objects.get(name=dest_repo_name)
-def _import_file(fpath, resource_class, do_raise=True):
+def _import_file(fpath, resource_class, retry=False):
try:
log.info(_("Importing file {}.").format(fpath))
with open(fpath, "r") as json_file:
data = Dataset().load(json_file.read(), format="json")
resource = resource_class()
log.info(_("...Importing resource {}.").format(resource.__class__.__name__))
- return resource.import_data(data, raise_errors=do_raise)
+ if retry:
+ # django import-export can have a problem with concurrent-imports that are
+ # importing the same 'thing' (e.g., a Package that exists in two different
+ # repo-versions that are being imported at the same time). If we're asked to
+ # retry, we will try an import that will simply record errors as they happen
+ # (rather than failing with an exception) first. If errors happen, we'll do one
+ # retry before we give up on this repo-version's import.
+ a_result = resource.import_data(data, raise_errors=False)
+ if a_result.has_errors():
+ log.info(
+ _("...{} import-errors encountered importing {}, retrying").format(
+ a_result.totals["error"], fpath
+ )
+ )
+ # Second attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(_("FATAL import-failure importing {}").format(fpath))
+ raise
+ else:
+ a_result = resource.import_data(data, raise_errors=True)
+ return a_result
except AttributeError:
log.error(_("FAILURE importing file {}!").format(fpath))
raise
@@ -158,36 +181,14 @@ def import_repository_version(importer_pk, destination_repo_pk, source_repo_name
resulting_content_ids = []
for res_class in cfg.exportable_classes:
filename = f"{res_class.__module__}.{res_class.__name__}.json"
- a_result = _import_file(os.path.join(rv_path, filename), res_class, do_raise=False)
- # django import-export can have a problem with concurrent-imports that are
- # importing the same 'thing' (e.g., a Package that exists in two different
- # repo-versions that are being imported at the same time). We will try an import
- # that will simply record errors as they happen (rather than failing with an exception)
- # first. If errors happen, we'll do one retry before we give up on this repo-version's
- # import.
- if a_result.has_errors():
- log.info(
- _("...{} import-errors encountered importing {} from {}, retrying").format(
- a_result.totals["error"], filename, rv_name
- )
- )
- # Second attempt, we allow to raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
- a_result = _import_file(os.path.join(rv_path, filename), res_class)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(
- _("FATAL import-failure importing {} from {}").format(filename, rv_name)
- )
- raise
-
+ a_result = _import_file(os.path.join(rv_path, filename), res_class, retry=True)
resulting_content_ids.extend(
row.object_id for row in a_result.rows if row.import_type in ("new", "update")
)
# Once all content exists, create the ContentArtifact links
ca_path = os.path.join(rv_path, CA_FILE)
- _import_file(ca_path, ContentArtifactResource)
+ _import_file(ca_path, ContentArtifactResource, retry=True)
# see if we have a content mapping
mapping_path = f"{rv_name}/{CONTENT_MAPPING_FILE}"
| Occasional UQ error core_repositoryversion_repository_id_number_3c54ce50_uniq on DistTree PulpImport
Rare failure, on this occasion triggered during a pulp-import testcase:
`pulp_rpm/tests/functional/api/test_pulpimport.py::DistributionTreePulpImportTestCase::test_import FAILED [ 81%]`
```
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.tasking.pulpcore_worker:INFO: Starting task c455492e-bc13-4517-b86a-6069bca53ed7
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.tasking.pulpcore_worker:INFO: Starting task 023517b6-401d-4f7e-be44-1d5814d0fe0c
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.ModulemdResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ModulemdResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.ModulemdDefaultsResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ModulemdDefaultsResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageGroupResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageGroupResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageCategoryResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageCategoryResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageEnvironmentResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageEnvironmentResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageLangpacksResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageLangpacksResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.UpdateRecordResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateRecordResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.RepoMetadataFileResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource RepoMetadataFileResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.DistributionTreeResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource DistributionTreeResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.DistributionTreeRepositoryResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource DistributionTreeRepositoryResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.ChecksumResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ChecksumResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.ImageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ImageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.AddonResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource AddonResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.ModulemdResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ModulemdResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.ModulemdDefaultsResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ModulemdDefaultsResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageGroupResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageGroupResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.VariantResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource VariantResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageCategoryResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageCategoryResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.UpdateReferenceResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateReferenceResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.UpdateCollectionResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateCollectionResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.UpdateCollectionPackageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateCollectionPackageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulpcore.app.modelresource.ContentArtifactResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ContentArtifactResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageEnvironmentResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageEnvironmentResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageLangpacksResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageLangpacksResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.UpdateRecordResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateRecordResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.RepoMetadataFileResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource RepoMetadataFileResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.DistributionTreeResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource DistributionTreeResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.DistributionTreeRepositoryResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource DistributionTreeRepositoryResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.ChecksumResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ChecksumResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.ImageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ImageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.AddonResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource AddonResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.VariantResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource VariantResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.UpdateReferenceResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateReferenceResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.UpdateCollectionResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateCollectionResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.UpdateCollectionPackageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateCollectionPackageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulpcore.app.modelresource.ContentArtifactResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ContentArtifactResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.tasking.pulpcore_worker:INFO: Task c455492e-bc13-4517-b86a-6069bca53ed7 failed (duplicate key value violates unique constraint "core_repositoryversion_repository_id_number_3c54ce50_uniq"
DETAIL: Key (repository_id, number)=(e84ff294-5d1b-4bfe-b5fb-e98d0c328537, 2) already exists.
)
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.tasking.pulpcore_worker:INFO: File "/usr/local/lib/python3.8/site-packages/pulpcore/tasking/pulpcore_worker.py", line 362, in _perform_task
result = func(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/pulpcore/app/tasks/importer.py", line 206, in import_repository_version
with repo.new_version() as new_version:
File "/usr/local/lib/python3.8/site-packages/pulpcore/app/models/repository.py", line 126, in new_version
version.save()
File "/usr/lib64/python3.8/contextlib.py", line 75, in inner
return func(*args, **kwds)
File "/usr/local/lib/python3.8/site-packages/django_lifecycle/mixins.py", line 134, in save
save(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 739, in save
self.save_base(using=using, force_insert=force_insert,
File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 776, in save_base
updated = self._save_table(
File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 881, in _save_table
results = self._do_insert(cls._base_manager, using, fields, returning_fields, raw)
File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 919, in _do_insert
return manager._insert(
File "/usr/local/lib/python3.8/site-packages/django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/django/db/models/query.py", line 1270, in _insert
return query.get_compiler(using=using).execute_sql(returning_fields)
File "/usr/local/lib/python3.8/site-packages/django/db/models/sql/compiler.py", line 1416, in execute_sql
cursor.execute(sql, params)
File "/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py", line 66, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py", line 75, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
File "/usr/local/lib/python3.8/site-packages/django/db/utils.py", line 90, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
```
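This backport applies the same retry logic; for context on the Dataset().load(..., format="json") call it touches, and assuming Dataset here is tablib's Dataset (the library django-import-export builds on), a small sketch of how a JSON export string becomes importable rows; the payload is invented:

```python
# Load an invented JSON export into a tablib Dataset, the shape that
# resource.import_data(...) consumes in the patch above.
import tablib

exported = '[{"name": "1.iso", "sha256": "abc123"}, {"name": "2.iso", "sha256": "def456"}]'
data = tablib.Dataset().load(exported, format="json")
print(data.headers)  # e.g. ['name', 'sha256']
print(len(data))     # 2 rows ready for import
```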
| 2022-03-04T15:45:28 |
||
pulp/pulpcore | 2,314 | pulp__pulpcore-2314 | [
"2101"
] | e79e913d380d554de681bed609896dd62eb9f09f | diff --git a/pulpcore/app/migrations/0064_add_new_style_task_columns.py b/pulpcore/app/migrations/0064_add_new_style_task_columns.py
--- a/pulpcore/app/migrations/0064_add_new_style_task_columns.py
+++ b/pulpcore/app/migrations/0064_add_new_style_task_columns.py
@@ -4,16 +4,34 @@
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
+TASK_BATCH_SIZE = 1000
+
def copy_reserved_resources_record(apps, schema_editor):
Task = apps.get_model('core', 'Task')
- for task in Task.objects.iterator():
+
+ # Update _reserved_resource_record for all tasks, 1000 tasks at a time.
+ # When we hit 1K tasks, go to the db for the batch.
+ # Make sure to update the final batch!
+ tasks = []
+ for task in Task.objects.iterator(chunk_size=TASK_BATCH_SIZE):
task._reserved_resources_record = list(task.reserved_resources_record.values_list('resource', flat=True))
- task.save()
+ tasks.append(task)
+ if len(tasks) == TASK_BATCH_SIZE:
+ Task.objects.bulk_update(tasks, ["_reserved_resources_record"])
+ tasks.clear()
+
+ # Update last set of tasks
+ if len(tasks) > 0:
+ Task.objects.bulk_update(tasks, ["_reserved_resources_record"])
+
+def purge_reservedresources(apps, schema_editor):
+ TaskReservedResource = apps.get_model('core', 'TaskReservedResource')
+ TaskReservedResource.objects.all().delete()
-def noop(apps, schema_editor):
- pass
+ ReservedResource = apps.get_model('core', 'ReservedResource')
+ ReservedResource.objects.all().delete()
class Migration(migrations.Migration):
@@ -23,6 +41,12 @@ class Migration(migrations.Migration):
]
operations = [
+ # Purge any ReservedResource entries - if there are any, they're orphans
+ migrations.RunPython(
+ code=purge_reservedresources,
+ reverse_code=migrations.RunPython.noop,
+ ),
+ # Update entities for the new task-system
migrations.AddField(
model_name='task',
name='args',
@@ -59,7 +83,7 @@ class Migration(migrations.Migration):
),
migrations.RunPython(
code=copy_reserved_resources_record,
- reverse_code=noop,
+ reverse_code=migrations.RunPython.noop,
),
migrations.RemoveField(
model_name='taskreservedresourcerecord',
@@ -80,4 +104,5 @@ class Migration(migrations.Migration):
old_name='_reserved_resources_record',
new_name='reserved_resources_record',
),
+
]
| Clean up TaskReservedResources/task-table at migration to new-tasking-system
See https://bugzilla.redhat.com/show_bug.cgi?id=2031154 for details.
The migration that needs to be updated to purge the taskreservedresource entries is 0064_add_new_style_task_columns.py.
This fix should be cherry-picked into 3.14/3.15/3.16 (after which the offending table no longer exists).
| Add core_task truncation - makes migration faster, may cascade-delete core_taskreservedresource as well? | 2022-03-10T07:07:27 |
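A plain-Python sketch of the buffer-and-flush batching that copy_reserved_resources_record() uses above; flush() here just prints and is an invented stand-in for Task.objects.bulk_update():

```python
# Buffer up to BATCH_SIZE records, flush each full batch, and do not forget
# the final partial batch.
BATCH_SIZE = 1000

def flush(batch):
    print(f"flushing {len(batch)} records")

def process_in_batches(records, batch_size=BATCH_SIZE):
    batch = []
    for record in records:
        batch.append(record)
        if len(batch) == batch_size:
            flush(batch)
            batch.clear()
    if batch:  # the last, smaller batch
        flush(batch)

process_in_batches(range(2500))  # flushes 1000, 1000, then 500
```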
|
pulp/pulpcore | 2,315 | pulp__pulpcore-2315 | [
"2101"
] | 77c40e38a2f70001b264b2b1f0ab7d1818b9391d | diff --git a/pulpcore/app/migrations/0064_add_new_style_task_columns.py b/pulpcore/app/migrations/0064_add_new_style_task_columns.py
--- a/pulpcore/app/migrations/0064_add_new_style_task_columns.py
+++ b/pulpcore/app/migrations/0064_add_new_style_task_columns.py
@@ -4,16 +4,34 @@
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
+TASK_BATCH_SIZE = 1000
+
def copy_reserved_resources_record(apps, schema_editor):
Task = apps.get_model('core', 'Task')
- for task in Task.objects.iterator():
+
+ # Update _reserved_resource_record for all tasks, 1000 tasks at a time.
+ # When we hit 1K tasks, go to the db for the batch.
+ # Make sure to update the final batch!
+ tasks = []
+ for task in Task.objects.iterator(chunk_size=TASK_BATCH_SIZE):
task._reserved_resources_record = list(task.reserved_resources_record.values_list('resource', flat=True))
- task.save()
+ tasks.append(task)
+ if len(tasks) == TASK_BATCH_SIZE:
+ Task.objects.bulk_update(tasks, ["_reserved_resources_record"])
+ tasks.clear()
+
+ # Update last set of tasks
+ if len(tasks) > 0:
+ Task.objects.bulk_update(tasks, ["_reserved_resources_record"])
+
+def purge_reservedresources(apps, schema_editor):
+ TaskReservedResource = apps.get_model('core', 'TaskReservedResource')
+ TaskReservedResource.objects.all().delete()
-def noop(apps, schema_editor):
- pass
+ ReservedResource = apps.get_model('core', 'ReservedResource')
+ ReservedResource.objects.all().delete()
class Migration(migrations.Migration):
@@ -23,6 +41,12 @@ class Migration(migrations.Migration):
]
operations = [
+ # Purge any ReservedResource entries - if there are any, they're orphans
+ migrations.RunPython(
+ code=purge_reservedresources,
+ reverse_code=migrations.RunPython.noop,
+ ),
+ # Update entities for the new task-system
migrations.AddField(
model_name='task',
name='args',
@@ -59,7 +83,7 @@ class Migration(migrations.Migration):
),
migrations.RunPython(
code=copy_reserved_resources_record,
- reverse_code=noop,
+ reverse_code=migrations.RunPython.noop,
),
migrations.RemoveField(
model_name='taskreservedresourcerecord',
@@ -80,4 +104,5 @@ class Migration(migrations.Migration):
old_name='_reserved_resources_record',
new_name='reserved_resources_record',
),
+
]
| Clean up TaskReservedResources/task-table at migration to new-tasking-system
See https://bugzilla.redhat.com/show_bug.cgi?id=2031154 for details.
The migration that needs to be updated to purge the taskreservedresource entries is 0064_add_new_style_task_columns.py.
This fix should be cherry-picked into 3.14/3.15/3.16 (after which the offending table no longer exists).
| Add core_task truncation - makes migration faster, may cascade-delete core_taskreservedresource as well? | 2022-03-10T07:07:40 |
|
pulp/pulpcore | 2,316 | pulp__pulpcore-2316 | [
"2101"
] | c055f86dab4bbbd76a3a4cf120ae2bff73df9a22 | diff --git a/pulpcore/app/migrations/0064_add_new_style_task_columns.py b/pulpcore/app/migrations/0064_add_new_style_task_columns.py
--- a/pulpcore/app/migrations/0064_add_new_style_task_columns.py
+++ b/pulpcore/app/migrations/0064_add_new_style_task_columns.py
@@ -4,16 +4,34 @@
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
+TASK_BATCH_SIZE = 1000
+
def copy_reserved_resources_record(apps, schema_editor):
Task = apps.get_model('core', 'Task')
- for task in Task.objects.iterator():
+
+ # Update _reserved_resource_record for all tasks, 1000 tasks at a time.
+ # When we hit 1K tasks, go to the db for the batch.
+ # Make sure to update the final batch!
+ tasks = []
+ for task in Task.objects.iterator(chunk_size=TASK_BATCH_SIZE):
task._reserved_resources_record = list(task.reserved_resources_record.values_list('resource', flat=True))
- task.save()
+ tasks.append(task)
+ if len(tasks) == TASK_BATCH_SIZE:
+ Task.objects.bulk_update(tasks, ["_reserved_resources_record"])
+ tasks.clear()
+
+ # Update last set of tasks
+ if len(tasks) > 0:
+ Task.objects.bulk_update(tasks, ["_reserved_resources_record"])
+
+def purge_reservedresources(apps, schema_editor):
+ TaskReservedResource = apps.get_model('core', 'TaskReservedResource')
+ TaskReservedResource.objects.all().delete()
-def noop(apps, schema_editor):
- pass
+ ReservedResource = apps.get_model('core', 'ReservedResource')
+ ReservedResource.objects.all().delete()
class Migration(migrations.Migration):
@@ -23,6 +41,12 @@ class Migration(migrations.Migration):
]
operations = [
+ # Purge any ReservedResource entries - if there are any, they're orphans
+ migrations.RunPython(
+ code=purge_reservedresources,
+ reverse_code=migrations.RunPython.noop,
+ ),
+ # Update entities for the new task-system
migrations.AddField(
model_name='task',
name='args',
@@ -59,7 +83,7 @@ class Migration(migrations.Migration):
),
migrations.RunPython(
code=copy_reserved_resources_record,
- reverse_code=noop,
+ reverse_code=migrations.RunPython.noop,
),
migrations.RemoveField(
model_name='taskreservedresourcerecord',
@@ -80,4 +104,5 @@ class Migration(migrations.Migration):
old_name='_reserved_resources_record',
new_name='reserved_resources_record',
),
+
]
| Clean up TaskReservedResources/task-table at migration to new-tasking-system
See https://bugzilla.redhat.com/show_bug.cgi?id=2031154 for details.
The migration that needs to be updated to purge the taskreservedresource entries is 0064_add_new_style_task_columns.py.
This fix should be cherry-picked into 3.14/3.15/3.16 (after which the offending table no longer exists).
| Add core_task truncation - makes migration faster, may cascade-delete core_taskreservedresource as well? | 2022-03-10T07:07:56 |
|
pulp/pulpcore | 2,317 | pulp__pulpcore-2317 | [
"2101"
] | 48c7c4499ceea8dec9b7258ca73c75f332211d15 | diff --git a/pulpcore/app/migrations/0064_add_new_style_task_columns.py b/pulpcore/app/migrations/0064_add_new_style_task_columns.py
--- a/pulpcore/app/migrations/0064_add_new_style_task_columns.py
+++ b/pulpcore/app/migrations/0064_add_new_style_task_columns.py
@@ -4,16 +4,34 @@
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
+TASK_BATCH_SIZE = 1000
+
def copy_reserved_resources_record(apps, schema_editor):
Task = apps.get_model('core', 'Task')
- for task in Task.objects.iterator():
+
+ # Update _reserved_resource_record for all tasks, 1000 tasks at a time.
+ # When we hit 1K tasks, go to the db for the batch.
+ # Make sure to update the final batch!
+ tasks = []
+ for task in Task.objects.iterator(chunk_size=TASK_BATCH_SIZE):
task._reserved_resources_record = list(task.reserved_resources_record.values_list('resource', flat=True))
- task.save()
+ tasks.append(task)
+ if len(tasks) == TASK_BATCH_SIZE:
+ Task.objects.bulk_update(tasks, ["_reserved_resources_record"])
+ tasks.clear()
+
+ # Update last set of tasks
+ if len(tasks) > 0:
+ Task.objects.bulk_update(tasks, ["_reserved_resources_record"])
+
+def purge_reservedresources(apps, schema_editor):
+ TaskReservedResource = apps.get_model('core', 'TaskReservedResource')
+ TaskReservedResource.objects.all().delete()
-def noop(apps, schema_editor):
- pass
+ ReservedResource = apps.get_model('core', 'ReservedResource')
+ ReservedResource.objects.all().delete()
class Migration(migrations.Migration):
@@ -23,6 +41,12 @@ class Migration(migrations.Migration):
]
operations = [
+ # Purge any ReservedResource entries - if there are any, they're orphans
+ migrations.RunPython(
+ code=purge_reservedresources,
+ reverse_code=migrations.RunPython.noop,
+ ),
+ # Update entities for the new task-system
migrations.AddField(
model_name='task',
name='args',
@@ -59,7 +83,7 @@ class Migration(migrations.Migration):
),
migrations.RunPython(
code=copy_reserved_resources_record,
- reverse_code=noop,
+ reverse_code=migrations.RunPython.noop,
),
migrations.RemoveField(
model_name='taskreservedresourcerecord',
@@ -80,4 +104,5 @@ class Migration(migrations.Migration):
old_name='_reserved_resources_record',
new_name='reserved_resources_record',
),
+
]
| Clean up TaskReservedResources/task-table at migration to new-tasking-system
See https://bugzilla.redhat.com/show_bug.cgi?id=2031154 for details.
The migration that needs to be updated to purge the taskreservedresource entries is 0064_add_new_style_task_columns.py.
This fix should be cherry-picked into 3.14/3.15/3.16 (after which the offending table no longer exists).
| Add core_task truncation - makes migration faster, may cascade-delete core_taskreservedresource as well? | 2022-03-10T07:08:10 |
|
pulp/pulpcore | 2,318 | pulp__pulpcore-2318 | [
"2101"
] | 8a8ef9ab11a3e825099fdd27855557f1e806428e | diff --git a/pulpcore/app/migrations/0064_add_new_style_task_columns.py b/pulpcore/app/migrations/0064_add_new_style_task_columns.py
--- a/pulpcore/app/migrations/0064_add_new_style_task_columns.py
+++ b/pulpcore/app/migrations/0064_add_new_style_task_columns.py
@@ -4,16 +4,34 @@
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
+TASK_BATCH_SIZE = 1000
+
def copy_reserved_resources_record(apps, schema_editor):
Task = apps.get_model('core', 'Task')
- for task in Task.objects.iterator():
+
+ # Update _reserved_resource_record for all tasks, 1000 tasks at a time.
+ # When we hit 1K tasks, go to the db for the batch.
+ # Make sure to update the final batch!
+ tasks = []
+ for task in Task.objects.iterator(chunk_size=TASK_BATCH_SIZE):
task._reserved_resources_record = list(task.reserved_resources_record.values_list('resource', flat=True))
- task.save()
+ tasks.append(task)
+ if len(tasks) == TASK_BATCH_SIZE:
+ Task.objects.bulk_update(tasks, ["_reserved_resources_record"])
+ tasks.clear()
+
+ # Update last set of tasks
+ if len(tasks) > 0:
+ Task.objects.bulk_update(tasks, ["_reserved_resources_record"])
+
+def purge_reservedresources(apps, schema_editor):
+ TaskReservedResource = apps.get_model('core', 'TaskReservedResource')
+ TaskReservedResource.objects.all().delete()
-def noop(apps, schema_editor):
- pass
+ ReservedResource = apps.get_model('core', 'ReservedResource')
+ ReservedResource.objects.all().delete()
class Migration(migrations.Migration):
@@ -23,6 +41,12 @@ class Migration(migrations.Migration):
]
operations = [
+ # Purge any ReservedResource entries - if there are any, they're orphans
+ migrations.RunPython(
+ code=purge_reservedresources,
+ reverse_code=migrations.RunPython.noop,
+ ),
+ # Update entities for the new task-system
migrations.AddField(
model_name='task',
name='args',
@@ -59,7 +83,7 @@ class Migration(migrations.Migration):
),
migrations.RunPython(
code=copy_reserved_resources_record,
- reverse_code=noop,
+ reverse_code=migrations.RunPython.noop,
),
migrations.RemoveField(
model_name='taskreservedresourcerecord',
@@ -80,4 +104,5 @@ class Migration(migrations.Migration):
old_name='_reserved_resources_record',
new_name='reserved_resources_record',
),
+
]
| Clean up TaskReservedResources/task-table at migration to new-tasking-system
See https://bugzilla.redhat.com/show_bug.cgi?id=2031154 for details.
The migration that needs to be updated to purge the taskreservedresource entries is 0064_add_new_style_task_columns.py.
This fix should be cherry-picked into 3.14/3.15/3.16 (after which the offending table no longer exists).
| Add core_task truncation - makes migration faster, may cascade-delete core_taskreservedresource as well? | 2022-03-10T07:08:32 |
|
pulp/pulpcore | 2,319 | pulp__pulpcore-2319 | [
"2267"
] | 73b33e833c28a5fa6b06ff0cd5bdc49d658f3e3b | diff --git a/pulpcore/app/models/repository.py b/pulpcore/app/models/repository.py
--- a/pulpcore/app/models/repository.py
+++ b/pulpcore/app/models/repository.py
@@ -4,6 +4,7 @@
from contextlib import suppress
from gettext import gettext as _
from os import path
+from collections import defaultdict
import logging
import django
@@ -853,9 +854,11 @@ def _squash(self, repo_relations, next_version):
# delete any relationships added in the version being deleted and removed in the next one.
repo_relations.filter(version_added=self, version_removed=next_version).delete()
- # If the same content is deleted in version, but added back in next_version
- # set version_removed field in relation to None, and remove relation adding the content
- # in next_version
+ # If the same content is deleted in version, but added back in next_version then:
+ # - set version_removed field in relation to version_removed of the relation adding
+ # the content in next version because the content can be removed again after the
+ # next_version
+ # - and remove relation adding the content in next_version
content_added = repo_relations.filter(version_added=next_version).values_list("content_id")
# use list() to force the evaluation of the queryset, otherwise queryset is affected
@@ -866,13 +869,26 @@ def _squash(self, repo_relations, next_version):
)
)
- repo_relations.filter(
- version_removed=self, content_id__in=content_removed_and_readded
- ).update(version_removed=None)
-
- repo_relations.filter(
+ repo_contents_readded_in_next_version = repo_relations.filter(
version_added=next_version, content_id__in=content_removed_and_readded
- ).delete()
+ )
+
+ # Since the readded contents can be removed again by any subsequent version after the
+ # next version. Get the mapping of readded contents and their versions removed to use
+ # later. The version removed id will be None if a content is not removed.
+ version_removed_id_content_id_map = defaultdict(list)
+ for readded_repo_content in repo_contents_readded_in_next_version.iterator():
+ version_removed_id_content_id_map[readded_repo_content.version_removed_id].append(
+ readded_repo_content.content_id
+ )
+
+ repo_contents_readded_in_next_version.delete()
+
+ # Update the version removed of the readded contents
+ for version_removed_id, content_ids in version_removed_id_content_id_map.items():
+ repo_relations.filter(version_removed=self, content_id__in=content_ids).update(
+ version_removed_id=version_removed_id
+ )
# "squash" by moving other additions and removals forward to the next version
repo_relations.filter(version_added=self).update(version_added=next_version)
| diff --git a/pulpcore/tests/conftest.py b/pulpcore/tests/conftest.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/tests/conftest.py
@@ -0,0 +1 @@
+from .conftest_pulp_file import * # noqa
diff --git a/pulpcore/tests/conftest_pulp_file.py b/pulpcore/tests/conftest_pulp_file.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/tests/conftest_pulp_file.py
@@ -0,0 +1,141 @@
+import logging
+import uuid
+
+from pathlib import Path
+
+import pytest
+
+from pulpcore.client.pulp_file import (
+ ContentFilesApi,
+ RepositoriesFileApi,
+ RepositoriesFileVersionsApi,
+ RemotesFileApi,
+)
+from pulp_smash.pulp3.utils import gen_repo
+
+from pulpcore.tests.functional.api.using_plugin.utils import (
+ gen_file_client,
+)
+
+
+_logger = logging.getLogger(__name__)
+
+
[email protected](scope="session")
+def file_client():
+ return gen_file_client()
+
+
[email protected](scope="session")
+def content_file_api_client(file_client):
+ return ContentFilesApi(file_client)
+
+
[email protected](scope="session")
+def file_repo_api_client(file_client):
+ return RepositoriesFileApi(file_client)
+
+
[email protected](scope="session")
+def file_repo_version_api_client(file_client):
+ return RepositoriesFileVersionsApi(file_client)
+
+
[email protected]
+def file_repo(file_repo_api_client, gen_object_with_cleanup):
+ return gen_object_with_cleanup(file_repo_api_client, gen_repo())
+
+
[email protected](scope="session")
+def file_remote_api_client(file_client):
+ return RemotesFileApi(file_client)
+
+
[email protected](scope="session")
+def file_fixtures_root():
+ return Path(__file__).parent / "fixtures"
+
+
[email protected]
+def file_fixture_server_ssl_client_cert_req(
+ ssl_ctx_req_client_auth, file_fixtures_root, gen_fixture_server
+):
+ yield gen_fixture_server(file_fixtures_root, ssl_ctx_req_client_auth)
+
+
[email protected]
+def file_fixture_server_ssl(ssl_ctx, file_fixtures_root, gen_fixture_server):
+ yield gen_fixture_server(file_fixtures_root, ssl_ctx)
+
+
[email protected]
+def file_fixture_server(file_fixtures_root, gen_fixture_server):
+ yield gen_fixture_server(file_fixtures_root, None)
+
+
[email protected]
+def file_fixture_gen_remote(file_fixture_server, file_remote_api_client, gen_object_with_cleanup):
+ def _file_fixture_gen_remote(*, fixture_name, policy, **kwargs):
+ url = file_fixture_server.make_url(f"/{fixture_name}/PULP_MANIFEST")
+ kwargs.update({"url": str(url), "policy": policy, "name": str(uuid.uuid4())})
+ return gen_object_with_cleanup(file_remote_api_client, kwargs)
+
+ yield _file_fixture_gen_remote
+
+
[email protected]
+def file_fixture_gen_remote_ssl(
+ file_fixture_server_ssl,
+ file_remote_api_client,
+ tls_certificate_authority_cert,
+ gen_object_with_cleanup,
+):
+ def _file_fixture_gen_remote_ssl(*, fixture_name, policy, **kwargs):
+ url = file_fixture_server_ssl.make_url(f"/{fixture_name}/PULP_MANIFEST")
+ kwargs.update(
+ {
+ "url": str(url),
+ "policy": policy,
+ "name": str(uuid.uuid4()),
+ "ca_cert": tls_certificate_authority_cert,
+ }
+ )
+ return gen_object_with_cleanup(file_remote_api_client, kwargs)
+
+ yield _file_fixture_gen_remote_ssl
+
+
[email protected]
+def file_fixture_gen_remote_client_cert_req(
+ file_fixture_server_ssl_client_cert_req,
+ file_remote_api_client,
+ tls_certificate_authority_cert,
+ client_tls_certificate_cert_pem,
+ client_tls_certificate_key_pem,
+ gen_object_with_cleanup,
+):
+ def _file_fixture_gen_remote_client_cert_req(*, fixture_name, policy, **kwargs):
+ url = file_fixture_server_ssl_client_cert_req.make_url(f"/{fixture_name}/PULP_MANIFEST")
+ kwargs.update(
+ {
+ "url": str(url),
+ "policy": policy,
+ "name": str(uuid.uuid4()),
+ "ca_cert": tls_certificate_authority_cert,
+ "client_cert": client_tls_certificate_cert_pem,
+ "client_key": client_tls_certificate_key_pem,
+ }
+ )
+ return gen_object_with_cleanup(file_remote_api_client, kwargs)
+
+ yield _file_fixture_gen_remote_client_cert_req
+
+
[email protected]
+def file_fixture_gen_file_repo(file_repo_api_client, gen_object_with_cleanup):
+ """A factory to generate a File Repository with auto-deletion after the test run."""
+
+ def _file_fixture_gen_file_repo(**kwargs):
+ return gen_object_with_cleanup(file_repo_api_client, kwargs)
+
+ yield _file_fixture_gen_file_repo
diff --git a/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py b/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
--- a/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
+++ b/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
@@ -1,8 +1,11 @@
"""Tests related to repository versions."""
import unittest
+import pytest
from random import choice, randint, sample
from time import sleep
from urllib.parse import urlsplit
+from tempfile import NamedTemporaryFile
+from hashlib import sha256
from pulp_smash import api, config, utils
from pulp_smash.exceptions import TaskReportError
@@ -396,6 +399,134 @@ def test_delete_publication(self):
self.client.get(publication["pulp_href"])
[email protected]
+def test_squash_repo_version(
+ file_repo_api_client, file_repo_version_api_client, content_file_api_client, file_repo
+):
+ """Test that the deletion of a repository version properly squashes the content.
+
+ - Setup versions like:
+ Version 0: <empty>
+ add: ABCDE
+ Version 1: ABCDE
+ delete: BCDE; add: FGHI
+ Version 2: AFGHI -- to be deleted
+ delete: GI; add: CD
+ Version 3: ACDFH -- to be squashed into
+ delete: DH; add: EI
+ Version 4: ACEFI
+ - Delete version 2.
+ - Check the content of all remaining versions.
+ """
+ content_units = {}
+ for name in ["A", "B", "C", "D", "E", "F", "G", "H", "I"]:
+ try:
+ content_units[name] = content_file_api_client.list(
+ relative_path=name, sha256=sha256(name.encode()).hexdigest()
+ ).results[0]
+ except IndexError:
+ with NamedTemporaryFile() as tf:
+ tf.write(name.encode())
+ tf.flush()
+ response = content_file_api_client.create(relative_path=name, file=tf.name)
+ result = monitor_task(response.task)
+ content_units[name] = content_file_api_client.read(result.created_resources[0])
+ response1 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "add_content_units": [
+ content.pulp_href
+ for key, content in content_units.items()
+ if key in ["A", "B", "C", "D", "E"]
+ ]
+ },
+ )
+
+ response2 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "remove_content_units": [
+ content.pulp_href
+ for key, content in content_units.items()
+ if key in ["B", "C", "D", "E"]
+ ],
+ "add_content_units": [
+ content.pulp_href
+ for key, content in content_units.items()
+ if key in ["F", "G", "H", "I"]
+ ],
+ },
+ )
+
+ response3 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "remove_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["G", "I"]
+ ],
+ "add_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["C", "D"]
+ ],
+ },
+ )
+
+ response4 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "remove_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["D", "H"]
+ ],
+ "add_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["E", "I"]
+ ],
+ },
+ )
+ version1 = file_repo_version_api_client.read(monitor_task(response1.task).created_resources[0])
+ version2 = file_repo_version_api_client.read(monitor_task(response2.task).created_resources[0])
+ version3 = file_repo_version_api_client.read(monitor_task(response3.task).created_resources[0])
+ version4 = file_repo_version_api_client.read(monitor_task(response4.task).created_resources[0])
+
+ # Check version state before deletion
+ assert version1.content_summary.added["file.file"]["count"] == 5
+ assert "file.file" not in version1.content_summary.removed
+ assert version2.content_summary.added["file.file"]["count"] == 4
+ assert version2.content_summary.removed["file.file"]["count"] == 4
+ assert version3.content_summary.added["file.file"]["count"] == 2
+ assert version3.content_summary.removed["file.file"]["count"] == 2
+ assert version4.content_summary.added["file.file"]["count"] == 2
+ assert version4.content_summary.removed["file.file"]["count"] == 2
+
+ content1 = content_file_api_client.list(repository_version=version1.pulp_href)
+ content2 = content_file_api_client.list(repository_version=version2.pulp_href)
+ content3 = content_file_api_client.list(repository_version=version3.pulp_href)
+ content4 = content_file_api_client.list(repository_version=version4.pulp_href)
+ assert set((content.relative_path for content in content1.results)) == {"A", "B", "C", "D", "E"}
+ assert set((content.relative_path for content in content2.results)) == {"A", "F", "G", "H", "I"}
+ assert set((content.relative_path for content in content3.results)) == {"A", "C", "D", "F", "H"}
+ assert set((content.relative_path for content in content4.results)) == {"A", "C", "E", "F", "I"}
+
+ monitor_task(file_repo_version_api_client.delete(version2.pulp_href).task)
+
+ # Check version state after deletion (Version 2 is gone...)
+ version1 = file_repo_version_api_client.read(version1.pulp_href)
+ version3 = file_repo_version_api_client.read(version3.pulp_href)
+ version4 = file_repo_version_api_client.read(version4.pulp_href)
+
+ assert version1.content_summary.added["file.file"]["count"] == 5
+ assert "file.file" not in version1.content_summary.removed
+ assert version3.content_summary.added["file.file"]["count"] == 2
+ assert version3.content_summary.removed["file.file"]["count"] == 2
+ assert version4.content_summary.added["file.file"]["count"] == 2
+ assert version4.content_summary.removed["file.file"]["count"] == 2
+
+ content1 = content_file_api_client.list(repository_version=version1.pulp_href)
+ content3 = content_file_api_client.list(repository_version=version3.pulp_href)
+ content4 = content_file_api_client.list(repository_version=version4.pulp_href)
+ assert set((content.relative_path for content in content1.results)) == {"A", "B", "C", "D", "E"}
+ assert set((content.relative_path for content in content3.results)) == {"A", "C", "D", "F", "H"}
+ assert set((content.relative_path for content in content4.results)) == {"A", "C", "E", "F", "I"}
+
+
class ContentImmutableRepoVersionTestCase(unittest.TestCase):
"""Test whether the content present in a repo version is immutable.
 | Deleting repository versions can lose track of later content deletion
**Version**
pulpcore 3.18
**Describe the bug**
When deleting a repository version that removes a content unit which is added back in the subsequent version but removed again in a later version, the removal recorded in that later version is lost.
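As an illustration (not Pulp code), the repository-content relation rows for one affected unit look like this; the version numbers match the reproduction script below, where `dddd` is the unit whose later removal is lost:
```python
# Illustration only: each entry is (version_added, version_removed) for the
# unit "dddd" from the reproduction script below.
before_deleting_version_2 = [
    (1, 2),  # added in version 1, removed in version 2
    (3, 4),  # re-added in version 3, removed again in version 4
]
# Buggy squash of version 2: the surviving row gets version_removed=None and
# the re-adding row is dropped, so the removal in version 4 is lost.
buggy_result = [(1, None)]
# Expected squash: the surviving row inherits version_removed from the row
# that re-added the content, so version 4 still excludes "dddd".
expected_result = [(1, 4)]
```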
**To Reproduce**
Steps to reproduce the behavior:
```bash
#!/bin/bash
set -eu
pulp file repository destroy --name test_delete_versions || true
pulp file repository create --name test_delete_versions
for NAME in "aaaa" "bbbb" "cccc" "dddd" "eeee" "ffff" "gggg" "hhhh" "jjjj"
do
echo "$NAME" > "$NAME"
pulp file content upload --relative-path "$NAME" --file "$NAME" || true
declare $NAME='{"sha256": "'"$(sha256sum --binary $NAME | cut -d" " -f1)"'", "relative_path": "'"$NAME"'"}'
done
pulp file repository content modify --repository test_delete_versions --add-content '['"$aaaa"', '"$bbbb"', '"$cccc"', '"$dddd"', '"$eeee"']'
pulp file repository content modify --repository test_delete_versions --remove-content '['"$bbbb"', '"$cccc"', '"$dddd"', '"$eeee"']' --add-content '['"$ffff"', '"$gggg"', '"$hhhh"', '"$jjjj"']'
pulp file repository content modify --repository test_delete_versions --remove-content '['"$gggg"', '"$jjjj"']' --add-content '['"$cccc"', '"$dddd"']'
pulp file repository content modify --repository test_delete_versions --remove-content '['"$dddd"', '"$hhhh"']' --add-content '['"$eeee"', '"$jjjj"']'
pulp file repository version list --repository test_delete_versions
# pulp file repository content list --repository test_delete_versions
pulp file repository version destroy --repository test_delete_versions --version 2
pulp file repository version list --repository test_delete_versions
if [ ! "$(pulp file repository content list --repository test_delete_versions --version 1 | jq -r '.[].relative_path' | sort)" = $'aaaa\nbbbb\ncccc\ndddd\neeee' ]
then
echo Version 1 is wrong.
fi
if [ ! "$(pulp file repository content list --repository test_delete_versions --version 3 | jq -r '.[].relative_path' | sort)" = $'aaaa\ncccc\ndddd\nffff\nhhhh' ]
then
echo Version 3 is wrong.
fi
if [ ! "$(pulp file repository content list --repository test_delete_versions --version 4 | jq -r '.[].relative_path' | sort)" = $'aaaa\ncccc\neeee\nffff\njjjj' ]
then
echo Version 4 is wrong.
fi
```
**Expected behavior**
Content in the repository versions should not change.
**Additional context**
| 2022-03-10T16:53:22 |
|
pulp/pulpcore | 2,321 | pulp__pulpcore-2321 | [
"2215"
] | 0b7c22ca99bebaa3af8506cc8a039f21ed95734b | diff --git a/pulpcore/app/tasks/purge.py b/pulpcore/app/tasks/purge.py
--- a/pulpcore/app/tasks/purge.py
+++ b/pulpcore/app/tasks/purge.py
@@ -5,6 +5,48 @@
Task,
)
from pulpcore.app.role_util import get_objects_for_user
+from pulpcore.constants import TASK_STATES
+
+# Delete 1K at a time - better to use less memory, and take a little longer, with a utility
+# function like this.
+DELETE_LIMIT = 1000
+# Key that delete() returns for Tasks
+TASK_KEY = "core.Task"
+
+
+def _details_reporting(current_reports, current_details, totals_pb):
+ """
+ Create and update progress-reports for each detail-key returned from a delete() call.
+
+ We don't know how many entities will be deleted via cascade-delete until we're all done.
+
+ The function has one special case: we know how many Tasks we're expecting to delete right
+ from the beginning. Therefore, we "assume" that the key `core.Task` has been pre-seeded
+ with a ProgressReport whose total is correct, in advance, and therefore don't update
+ total for that key.
+
+ Args:
+ current_reports (dict): key:ProgressReport to record into
+ Returns:
+ updated current_reports
+ """
+ entity_count = 0
+ for key, curr_detail in current_details.items():
+ entity_count += current_details[key]
+ if key in current_reports:
+ current_reports[key].increase_by(curr_detail)
+ else:
+ pb = ProgressReport(
+ message=_("Purged task-objects of type {}".format(key)),
+ code="purge.tasks.key.{}".format(key),
+ total=None,
+ done=curr_detail,
+ )
+ pb.save()
+ current_reports[key] = pb
+ # Update/save totals once
+ totals_pb.increase_by(entity_count)
+ return current_reports
def purge(finished_before, states):
@@ -26,25 +68,48 @@ def purge(finished_before, states):
"""
current_user = get_current_authenticated_user()
- qs = Task.objects.filter(finished_at__lt=finished_before, state__in=states)
- units_deleted, details = get_objects_for_user(current_user, "core.delete_task", qs=qs).delete()
-
+ # Tasks, prior to the specified date, in the specified state, owned by the current-user
+ tasks_qs = Task.objects.filter(finished_at__lt=finished_before, state__in=states)
+ candidate_qs = get_objects_for_user(current_user, "core.delete_task", qs=tasks_qs)
+ delete_qs = get_objects_for_user(current_user, "core.delete_task", qs=tasks_qs[:DELETE_LIMIT])
# Progress bar reporting total-units
- progress_bar = ProgressReport(
- message=_("Purged task-objects total"),
- total=units_deleted,
+ totals_pb = ProgressReport(
+ message=_("Purged task-related-objects total"),
+ total=None,
code="purge.tasks.total",
- done=units_deleted,
- state="completed",
+ done=0,
+ )
+ totals_pb.save()
+ # Dictionary to hold progress-reports by delete-details-key
+ details_reports = {}
+
+ # Figure out how many Tasks owned by the current user we're about to delete
+ expected_total = candidate_qs.count()
+ # Build and save a progress-report for that detail
+ pb = ProgressReport(
+ message=_("Purged task-objects of type {}".format(TASK_KEY)),
+ total=expected_total,
+ code="purge.tasks.key.{}".format(TASK_KEY),
+ done=0,
)
- progress_bar.save()
- # This loop reports back the specific entities deleted and the number removed
- for key in details:
- progress_bar = ProgressReport(
- message=_("Purged task-objects of type {}".format(key)),
- total=details[key],
- code="purge.tasks.key.{}".format(key),
- done=details[key],
- state="completed",
- )
- progress_bar.save()
+ pb.save()
+ details_reports[TASK_KEY] = pb
+
+ # Our delete-query is going to deal with "the first DELETE_LIMIT tasks that match our
+ # criteria", looping until we've deleted everything that fits our parameters
+ units_deleted, details = delete_qs.delete()
+ # Until our query returns "No tasks deleted", add results into totals and Do It Again
+ while units_deleted > 0:
+ _details_reporting(details_reports, details, totals_pb)
+ units_deleted, details = delete_qs.delete()
+
+ # Complete the progress-reports for the specific entities deleted
+ for key, pb in details_reports.items():
+ pb.total = pb.done
+ pb.state = TASK_STATES.COMPLETED
+ pb.save()
+
+ # Complete the totals-ProgressReport
+ totals_pb.total = totals_pb.done
+ totals_pb.state = TASK_STATES.COMPLETED
+ totals_pb.save()
| Task purging endpoint loads every task in memory
**Version**
`pulpcore-3.17.3`
`pulp_rpm-3.17.3`
`pulp_ansible-0.12.0`
`pulp_container-2.10.0`
`pulp_deb-2.17.0`
`pulp_file-1.10.1`
`pulp_python-3.6.0`
**Describe the bug**
We have a large number of finished Pulp tasks (around 1.5 million). We tried to purge them with [`pulp/api/v3/tasks/purge`](https://docs.pulpproject.org/pulpcore/restapi.html#operation/tasks_purge) and quickly realized that this task tries to load the whole finished queue (all 1.5 million finished tasks) into memory, which causes it to be killed after consuming 15 GB of memory.
**To Reproduce**
Steps to reproduce the behavior:
1. Create a large number of Pulp tasks (more than 1 million will be enough).
2. Run [`pulp/api/v3/tasks/purge`](https://docs.pulpproject.org/pulpcore/restapi.html#operation/tasks_purge) with `finished_before` set to a future date.
3. The server runs out of memory and the task never completes.
**Expected behavior**
The task purging API endpoint does not load the entire finished queue into memory.
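A minimal sketch of the kind of chunked deletion that would satisfy this, assuming Pulp's Django `Task` model; the helper name and chunk size are illustrative, not the actual implementation:
```python
from pulpcore.app.models import Task  # assumed import path for Pulp's Task model


def purge_tasks_in_chunks(finished_before, states, chunk_size=1000):
    """Delete matching tasks in bounded chunks instead of all at once."""
    qs = Task.objects.filter(finished_at__lt=finished_before, state__in=states)
    total_deleted = 0
    while True:
        # Only a bounded list of primary keys is held in memory per round.
        pks = list(qs.values_list("pk", flat=True)[:chunk_size])
        if not pks:
            break
        deleted, _details = Task.objects.filter(pk__in=pks).delete()
        total_deleted += deleted
    return total_deleted
```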
| Related: https://github.com/pulp/pulpcore/issues/5048 | 2022-03-10T18:53:35 |
|
pulp/pulpcore | 2,324 | pulp__pulpcore-2324 | [
"2267"
] | 8bab542d2c10deb64bc32d61e29b0f051c5d325f | diff --git a/pulpcore/app/models/repository.py b/pulpcore/app/models/repository.py
--- a/pulpcore/app/models/repository.py
+++ b/pulpcore/app/models/repository.py
@@ -4,6 +4,7 @@
from contextlib import suppress
from gettext import gettext as _
from os import path
+from collections import defaultdict
import logging
import django
@@ -870,9 +871,11 @@ def _squash(self, repo_relations, next_version):
# delete any relationships added in the version being deleted and removed in the next one.
repo_relations.filter(version_added=self, version_removed=next_version).delete()
- # If the same content is deleted in version, but added back in next_version
- # set version_removed field in relation to None, and remove relation adding the content
- # in next_version
+ # If the same content is deleted in version, but added back in next_version then:
+ # - set version_removed field in relation to version_removed of the relation adding
+ # the content in next version because the content can be removed again after the
+ # next_version
+ # - and remove relation adding the content in next_version
content_added = repo_relations.filter(version_added=next_version).values_list("content_id")
# use list() to force the evaluation of the queryset, otherwise queryset is affected
@@ -883,13 +886,26 @@ def _squash(self, repo_relations, next_version):
)
)
- repo_relations.filter(
- version_removed=self, content_id__in=content_removed_and_readded
- ).update(version_removed=None)
-
- repo_relations.filter(
+ repo_contents_readded_in_next_version = repo_relations.filter(
version_added=next_version, content_id__in=content_removed_and_readded
- ).delete()
+ )
+
+ # Since the readded contents can be removed again by any subsequent version after the
+ # next version. Get the mapping of readded contents and their versions removed to use
+ # later. The version removed id will be None if a content is not removed.
+ version_removed_id_content_id_map = defaultdict(list)
+ for readded_repo_content in repo_contents_readded_in_next_version.iterator():
+ version_removed_id_content_id_map[readded_repo_content.version_removed_id].append(
+ readded_repo_content.content_id
+ )
+
+ repo_contents_readded_in_next_version.delete()
+
+ # Update the version removed of the readded contents
+ for version_removed_id, content_ids in version_removed_id_content_id_map.items():
+ repo_relations.filter(version_removed=self, content_id__in=content_ids).update(
+ version_removed_id=version_removed_id
+ )
# "squash" by moving other additions and removals forward to the next version
repo_relations.filter(version_added=self).update(version_added=next_version)
| diff --git a/pulpcore/tests/conftest.py b/pulpcore/tests/conftest.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/tests/conftest.py
@@ -0,0 +1 @@
+from .conftest_pulp_file import * # noqa
diff --git a/pulpcore/tests/conftest_pulp_file.py b/pulpcore/tests/conftest_pulp_file.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/tests/conftest_pulp_file.py
@@ -0,0 +1,141 @@
+import logging
+import uuid
+
+from pathlib import Path
+
+import pytest
+
+from pulpcore.client.pulp_file import (
+ ContentFilesApi,
+ RepositoriesFileApi,
+ RepositoriesFileVersionsApi,
+ RemotesFileApi,
+)
+from pulp_smash.pulp3.utils import gen_repo
+
+from pulpcore.tests.functional.api.using_plugin.utils import (
+ gen_file_client,
+)
+
+
+_logger = logging.getLogger(__name__)
+
+
[email protected](scope="session")
+def file_client():
+ return gen_file_client()
+
+
[email protected](scope="session")
+def content_file_api_client(file_client):
+ return ContentFilesApi(file_client)
+
+
[email protected](scope="session")
+def file_repo_api_client(file_client):
+ return RepositoriesFileApi(file_client)
+
+
[email protected](scope="session")
+def file_repo_version_api_client(file_client):
+ return RepositoriesFileVersionsApi(file_client)
+
+
[email protected]
+def file_repo(file_repo_api_client, gen_object_with_cleanup):
+ return gen_object_with_cleanup(file_repo_api_client, gen_repo())
+
+
[email protected](scope="session")
+def file_remote_api_client(file_client):
+ return RemotesFileApi(file_client)
+
+
[email protected](scope="session")
+def file_fixtures_root():
+ return Path(__file__).parent / "fixtures"
+
+
[email protected]
+def file_fixture_server_ssl_client_cert_req(
+ ssl_ctx_req_client_auth, file_fixtures_root, gen_fixture_server
+):
+ yield gen_fixture_server(file_fixtures_root, ssl_ctx_req_client_auth)
+
+
[email protected]
+def file_fixture_server_ssl(ssl_ctx, file_fixtures_root, gen_fixture_server):
+ yield gen_fixture_server(file_fixtures_root, ssl_ctx)
+
+
[email protected]
+def file_fixture_server(file_fixtures_root, gen_fixture_server):
+ yield gen_fixture_server(file_fixtures_root, None)
+
+
[email protected]
+def file_fixture_gen_remote(file_fixture_server, file_remote_api_client, gen_object_with_cleanup):
+ def _file_fixture_gen_remote(*, fixture_name, policy, **kwargs):
+ url = file_fixture_server.make_url(f"/{fixture_name}/PULP_MANIFEST")
+ kwargs.update({"url": str(url), "policy": policy, "name": str(uuid.uuid4())})
+ return gen_object_with_cleanup(file_remote_api_client, kwargs)
+
+ yield _file_fixture_gen_remote
+
+
[email protected]
+def file_fixture_gen_remote_ssl(
+ file_fixture_server_ssl,
+ file_remote_api_client,
+ tls_certificate_authority_cert,
+ gen_object_with_cleanup,
+):
+ def _file_fixture_gen_remote_ssl(*, fixture_name, policy, **kwargs):
+ url = file_fixture_server_ssl.make_url(f"/{fixture_name}/PULP_MANIFEST")
+ kwargs.update(
+ {
+ "url": str(url),
+ "policy": policy,
+ "name": str(uuid.uuid4()),
+ "ca_cert": tls_certificate_authority_cert,
+ }
+ )
+ return gen_object_with_cleanup(file_remote_api_client, kwargs)
+
+ yield _file_fixture_gen_remote_ssl
+
+
[email protected]
+def file_fixture_gen_remote_client_cert_req(
+ file_fixture_server_ssl_client_cert_req,
+ file_remote_api_client,
+ tls_certificate_authority_cert,
+ client_tls_certificate_cert_pem,
+ client_tls_certificate_key_pem,
+ gen_object_with_cleanup,
+):
+ def _file_fixture_gen_remote_client_cert_req(*, fixture_name, policy, **kwargs):
+ url = file_fixture_server_ssl_client_cert_req.make_url(f"/{fixture_name}/PULP_MANIFEST")
+ kwargs.update(
+ {
+ "url": str(url),
+ "policy": policy,
+ "name": str(uuid.uuid4()),
+ "ca_cert": tls_certificate_authority_cert,
+ "client_cert": client_tls_certificate_cert_pem,
+ "client_key": client_tls_certificate_key_pem,
+ }
+ )
+ return gen_object_with_cleanup(file_remote_api_client, kwargs)
+
+ yield _file_fixture_gen_remote_client_cert_req
+
+
[email protected]
+def file_fixture_gen_file_repo(file_repo_api_client, gen_object_with_cleanup):
+ """A factory to generate a File Repository with auto-deletion after the test run."""
+
+ def _file_fixture_gen_file_repo(**kwargs):
+ return gen_object_with_cleanup(file_repo_api_client, kwargs)
+
+ yield _file_fixture_gen_file_repo
diff --git a/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py b/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
--- a/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
+++ b/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
@@ -1,8 +1,11 @@
"""Tests related to repository versions."""
import unittest
+import pytest
from random import choice, randint, sample
from time import sleep
from urllib.parse import urlsplit
+from tempfile import NamedTemporaryFile
+from hashlib import sha256
from pulp_smash import api, config, utils
from pulp_smash.exceptions import TaskReportError
@@ -396,6 +399,134 @@ def test_delete_publication(self):
self.client.get(publication["pulp_href"])
[email protected]
+def test_squash_repo_version(
+ file_repo_api_client, file_repo_version_api_client, content_file_api_client, file_repo
+):
+ """Test that the deletion of a repository version properly squashes the content.
+
+ - Setup versions like:
+ Version 0: <empty>
+ add: ABCDE
+ Version 1: ABCDE
+ delete: BCDE; add: FGHI
+ Version 2: AFGHI -- to be deleted
+ delete: GI; add: CD
+ Version 3: ACDFH -- to be squashed into
+ delete: DH; add: EI
+ Version 4: ACEFI
+ - Delete version 2.
+ - Check the content of all remaining versions.
+ """
+ content_units = {}
+ for name in ["A", "B", "C", "D", "E", "F", "G", "H", "I"]:
+ try:
+ content_units[name] = content_file_api_client.list(
+ relative_path=name, sha256=sha256(name.encode()).hexdigest()
+ ).results[0]
+ except IndexError:
+ with NamedTemporaryFile() as tf:
+ tf.write(name.encode())
+ tf.flush()
+ response = content_file_api_client.create(relative_path=name, file=tf.name)
+ result = monitor_task(response.task)
+ content_units[name] = content_file_api_client.read(result.created_resources[0])
+ response1 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "add_content_units": [
+ content.pulp_href
+ for key, content in content_units.items()
+ if key in ["A", "B", "C", "D", "E"]
+ ]
+ },
+ )
+
+ response2 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "remove_content_units": [
+ content.pulp_href
+ for key, content in content_units.items()
+ if key in ["B", "C", "D", "E"]
+ ],
+ "add_content_units": [
+ content.pulp_href
+ for key, content in content_units.items()
+ if key in ["F", "G", "H", "I"]
+ ],
+ },
+ )
+
+ response3 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "remove_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["G", "I"]
+ ],
+ "add_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["C", "D"]
+ ],
+ },
+ )
+
+ response4 = file_repo_api_client.modify(
+ file_repo.pulp_href,
+ {
+ "remove_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["D", "H"]
+ ],
+ "add_content_units": [
+ content.pulp_href for key, content in content_units.items() if key in ["E", "I"]
+ ],
+ },
+ )
+ version1 = file_repo_version_api_client.read(monitor_task(response1.task).created_resources[0])
+ version2 = file_repo_version_api_client.read(monitor_task(response2.task).created_resources[0])
+ version3 = file_repo_version_api_client.read(monitor_task(response3.task).created_resources[0])
+ version4 = file_repo_version_api_client.read(monitor_task(response4.task).created_resources[0])
+
+ # Check version state before deletion
+ assert version1.content_summary.added["file.file"]["count"] == 5
+ assert "file.file" not in version1.content_summary.removed
+ assert version2.content_summary.added["file.file"]["count"] == 4
+ assert version2.content_summary.removed["file.file"]["count"] == 4
+ assert version3.content_summary.added["file.file"]["count"] == 2
+ assert version3.content_summary.removed["file.file"]["count"] == 2
+ assert version4.content_summary.added["file.file"]["count"] == 2
+ assert version4.content_summary.removed["file.file"]["count"] == 2
+
+ content1 = content_file_api_client.list(repository_version=version1.pulp_href)
+ content2 = content_file_api_client.list(repository_version=version2.pulp_href)
+ content3 = content_file_api_client.list(repository_version=version3.pulp_href)
+ content4 = content_file_api_client.list(repository_version=version4.pulp_href)
+ assert set((content.relative_path for content in content1.results)) == {"A", "B", "C", "D", "E"}
+ assert set((content.relative_path for content in content2.results)) == {"A", "F", "G", "H", "I"}
+ assert set((content.relative_path for content in content3.results)) == {"A", "C", "D", "F", "H"}
+ assert set((content.relative_path for content in content4.results)) == {"A", "C", "E", "F", "I"}
+
+ monitor_task(file_repo_version_api_client.delete(version2.pulp_href).task)
+
+ # Check version state after deletion (Version 2 is gone...)
+ version1 = file_repo_version_api_client.read(version1.pulp_href)
+ version3 = file_repo_version_api_client.read(version3.pulp_href)
+ version4 = file_repo_version_api_client.read(version4.pulp_href)
+
+ assert version1.content_summary.added["file.file"]["count"] == 5
+ assert "file.file" not in version1.content_summary.removed
+ assert version3.content_summary.added["file.file"]["count"] == 2
+ assert version3.content_summary.removed["file.file"]["count"] == 2
+ assert version4.content_summary.added["file.file"]["count"] == 2
+ assert version4.content_summary.removed["file.file"]["count"] == 2
+
+ content1 = content_file_api_client.list(repository_version=version1.pulp_href)
+ content3 = content_file_api_client.list(repository_version=version3.pulp_href)
+ content4 = content_file_api_client.list(repository_version=version4.pulp_href)
+ assert set((content.relative_path for content in content1.results)) == {"A", "B", "C", "D", "E"}
+ assert set((content.relative_path for content in content3.results)) == {"A", "C", "D", "F", "H"}
+ assert set((content.relative_path for content in content4.results)) == {"A", "C", "E", "F", "I"}
+
+
class ContentImmutableRepoVersionTestCase(unittest.TestCase):
"""Test whether the content present in a repo version is immutable.
 | Deleting repository versions can lose track of later content deletion
**Version**
pulpcore 3.18
**Describe the bug**
When deleting a repository version that deletes a content that is added back in the subsequent version, but deleted again in a later version that deletion in the later version is lost.
**To Reproduce**
Steps to reproduce the behavior:
```bash
#!/bin/bash
set -eu
pulp file repository destroy --name test_delete_versions || true
pulp file repository create --name test_delete_versions
for NAME in "aaaa" "bbbb" "cccc" "dddd" "eeee" "ffff" "gggg" "hhhh" "jjjj"
do
echo "$NAME" > "$NAME"
pulp file content upload --relative-path "$NAME" --file "$NAME" || true
declare $NAME='{"sha256": "'"$(sha256sum --binary $NAME | cut -d" " -f1)"'", "relative_path": "'"$NAME"'"}'
done
pulp file repository content modify --repository test_delete_versions --add-content '['"$aaaa"', '"$bbbb"', '"$cccc"', '"$dddd"', '"$eeee"']'
pulp file repository content modify --repository test_delete_versions --remove-content '['"$bbbb"', '"$cccc"', '"$dddd"', '"$eeee"']' --add-content '['"$ffff"', '"$gggg"', '"$hhhh"', '"$jjjj"']'
pulp file repository content modify --repository test_delete_versions --remove-content '['"$gggg"', '"$jjjj"']' --add-content '['"$cccc"', '"$dddd"']'
pulp file repository content modify --repository test_delete_versions --remove-content '['"$dddd"', '"$hhhh"']' --add-content '['"$eeee"', '"$jjjj"']'
pulp file repository version list --repository test_delete_versions
# pulp file repository content list --repository test_delete_versions
pulp file repository version destroy --repository test_delete_versions --version 2
pulp file repository version list --repository test_delete_versions
if [ ! "$(pulp file repository content list --repository test_delete_versions --version 1 | jq -r '.[].relative_path' | sort)" = $'aaaa\nbbbb\ncccc\ndddd\neeee' ]
then
echo Version 1 is wrong.
fi
if [ ! "$(pulp file repository content list --repository test_delete_versions --version 3 | jq -r '.[].relative_path' | sort)" = $'aaaa\ncccc\ndddd\nffff\nhhhh' ]
then
echo Version 3 is wrong.
fi
if [ ! "$(pulp file repository content list --repository test_delete_versions --version 4 | jq -r '.[].relative_path' | sort)" = $'aaaa\ncccc\neeee\nffff\njjjj' ]
then
echo Version 4 is wrong.
fi
```
**Expected behavior**
Content in the repository versions should not change.
**Additional context**
| 2022-03-11T09:27:10 |
|
pulp/pulpcore | 2,331 | pulp__pulpcore-2331 | [
"2329"
] | 2965bda3534685c266af0a098cc6cf10eec59252 | diff --git a/pulpcore/app/settings.py b/pulpcore/app/settings.py
--- a/pulpcore/app/settings.py
+++ b/pulpcore/app/settings.py
@@ -296,6 +296,8 @@
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
+TASK_DIAGNOSTICS = False
+
# HERE STARTS DYNACONF EXTENSION LOAD (Keep at the very bottom of settings.py)
# Read more at https://dynaconf.readthedocs.io/en/latest/guides/django.html
import dynaconf # noqa
diff --git a/pulpcore/constants.py b/pulpcore/constants.py
--- a/pulpcore/constants.py
+++ b/pulpcore/constants.py
@@ -1,6 +1,10 @@
+from pathlib import Path
from types import SimpleNamespace
+VAR_TMP_PULP = Path("/var/tmp/pulp")
+
+
#: All valid task states.
TASK_STATES = SimpleNamespace(
WAITING="waiting",
diff --git a/pulpcore/tasking/pulpcore_worker.py b/pulpcore/tasking/pulpcore_worker.py
--- a/pulpcore/tasking/pulpcore_worker.py
+++ b/pulpcore/tasking/pulpcore_worker.py
@@ -5,10 +5,13 @@
import logging
import os
import random
+import resource
import select
import signal
import socket
import sys
+import threading
+import time
import traceback
from contextlib import suppress
from datetime import timedelta
@@ -37,6 +40,7 @@
from pulpcore.constants import ( # noqa: E402: module level not at top of file
TASK_STATES,
TASK_INCOMPLETE_STATES,
+ VAR_TMP_PULP,
)
from pulpcore.exceptions import AdvisoryLockError # noqa: E402: module level not at top of file
@@ -377,6 +381,23 @@ def run_forever(self):
self.shutdown()
+def write_memory_usage(task_pk):
+ taskdata_dir = VAR_TMP_PULP / str(task_pk)
+ taskdata_dir.mkdir(parents=True, exist_ok=True)
+ memory_file_dir = taskdata_dir / "memory.datum"
+ _logger.info("Writing task memory data to {}".format(memory_file_dir))
+
+ with open(memory_file_dir, "w") as file:
+ file.write("# Seconds\tMemory in MB\n")
+ seconds = 0
+ while True:
+ current_mb_in_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024
+ file.write(f"{seconds}\t{current_mb_in_use:.2f}\n")
+ file.flush()
+ time.sleep(5)
+ seconds += 5
+
+
def child_signal_handler(sig, frame):
# Reset signal handlers to default
# If you kill the process a second time it's not graceful anymore.
@@ -394,6 +415,11 @@ def _perform_task(task_pk, task_working_dir_rel_path):
signal.signal(signal.SIGINT, child_signal_handler)
signal.signal(signal.SIGTERM, child_signal_handler)
signal.signal(signal.SIGUSR1, child_signal_handler)
+ if settings.TASK_DIAGNOSTICS:
+ # It would be better to have this recording happen in the parent process instead of here
+ # https://github.com/pulp/pulpcore/issues/2337
+ normal_thread = threading.Thread(target=write_memory_usage, args=(task_pk,), daemon=True)
+ normal_thread.start()
# All processes need to create their own postgres connection
connection.connection = None
task = Task.objects.get(pk=task_pk)
| As a user or developer, I can enable the memory recording of my tasks
This feature will be enabled by a setting named `RECORD_TASK_METRICS` which will default to `False`. When enabled, the directory `/var/lib/pulp/task_metrics/` will have files added to it.
The memory usage of each task should be recorded periodically and written to the file `/var/lib/pulp/task_metrics/80/54bdac-1cfe-4c1b-91c2-1139fc316aeb.datum`, where the UUID is split after the first two characters and the file name is the remaining portion of the UUID with `.datum` appended.
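A small sketch of the path convention described above; the helper name is made up and only shows how the UUID would be split:
```python
from pathlib import Path


def task_metrics_path(task_uuid, base="/var/lib/pulp/task_metrics"):
    """Hypothetical helper: split the UUID after its first two characters."""
    return Path(base) / task_uuid[:2] / f"{task_uuid[2:]}.datum"


# task_metrics_path("8054bdac-1cfe-4c1b-91c2-1139fc316aeb")
# -> /var/lib/pulp/task_metrics/80/54bdac-1cfe-4c1b-91c2-1139fc316aeb.datum
```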
| Use `/var/tmp/pulp/` instead of `/var/lib/pulp/`. The RPM plugin already uses it for dumping diagnostics, and they get cleaned up automatically by the OS after a period of time (I believe the default is 1 month).
https://github.com/pulp/pulp_rpm/blob/main/pulp_rpm/app/depsolving.py#L849-L851
I would also recommend creating a full directory for the task and calling the files inside things like `memory_tracking.datum` - that would let us put more than one kind of diagnostic there.
Great idea re the `/var/tmp/pulp/` dir. | 2022-03-11T17:44:31 |
|
pulp/pulpcore | 2,333 | pulp__pulpcore-2333 | [
"2327"
] | 2110008e54c277618693b5265938acf29df064f4 | diff --git a/pulpcore/app/management/commands/datarepair-2327.py b/pulpcore/app/management/commands/datarepair-2327.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/app/management/commands/datarepair-2327.py
@@ -0,0 +1,120 @@
+from gettext import gettext as _
+
+from django.db import connection
+from django.conf import settings
+from django.core.management import BaseCommand
+from django.db.models import Q
+from django.utils.encoding import force_bytes, force_str
+
+import cryptography
+
+from pulpcore.app.models import Remote
+
+
+class Command(BaseCommand):
+ """
+ Django management command for repairing incorrectly migrated remote data.
+ """
+
+ help = _(
+ "Repairs issue #2327. A small number of configuration settings may have been "
+ "corrupted during an upgrade from a previous version of Pulp to a Pulp version "
+ "between 3.15-3.18, resulting in trouble when syncing or viewing certain remotes. "
+ "This script repairs the data (which was not lost)."
+ )
+
+ def add_arguments(self, parser):
+ """Set up arguments."""
+ parser.add_argument(
+ "--dry-run",
+ action="store_true",
+ help=_("Don't modify anything, just collect results on how many Remotes are impacted."),
+ )
+
+ def handle(self, *args, **options):
+
+ dry_run = options["dry_run"]
+ fields = ("username", "password", "proxy_username", "proxy_password", "client_key")
+
+ with open(settings.DB_ENCRYPTION_KEY, "rb") as key_file:
+ fernet = cryptography.fernet.Fernet(key_file.read())
+
+ possibly_affected_remotes = (
+ Q(username__isnull=False)
+ | Q(password__isnull=False)
+ | Q(proxy_username__isnull=False)
+ | Q(proxy_password__isnull=False)
+ | Q(client_key__isnull=False)
+ )
+
+ number_unencrypted = 0
+ number_multi_encrypted = 0
+
+ for remote_pk in Remote.objects.filter(possibly_affected_remotes).values_list(
+ "pk", flat=True
+ ):
+ try:
+ remote = Remote.objects.get(pk=remote_pk)
+ # if we can get the remote successfully, it is either OK or the fields are
+ # encrypted more than once
+ except cryptography.fernet.InvalidToken:
+ # If decryption fails then it probably hasn't been encrypted yet
+ # get the raw column value, avoiding any Django field handling
+ with connection.cursor() as cursor:
+ cursor.execute(
+ "SELECT username, password, proxy_username, proxy_password, client_key "
+ "FROM core_remote WHERE pulp_id = %s",
+ [str(remote_pk)],
+ )
+ row = cursor.fetchone()
+
+ field_values = {}
+
+ for field, value in zip(fields, row):
+ field_values[field] = value
+
+ if not dry_run:
+ Remote.objects.filter(pk=remote_pk).update(**field_values)
+ number_unencrypted += 1
+ else:
+ times_decrypted = 0
+ keep_trying = True
+ needs_update = False
+
+ while keep_trying:
+ for field in fields:
+ field_value = getattr(remote, field) # value gets decrypted once on access
+ if not field_value:
+ continue
+
+ try:
+ # try to decrypt it again
+ field_value = force_str(fernet.decrypt(force_bytes(field_value)))
+ # it was decrypted successfully again time, so it was probably
+ # encrypted multiple times over. lets re-set the value with the
+ # newly decrypted value
+ setattr(remote, field, field_value)
+ needs_update = True
+ except cryptography.fernet.InvalidToken:
+ # couldn't be decrypted again, stop here
+ keep_trying = False
+
+ times_decrypted += 1
+
+ if needs_update:
+ if not dry_run:
+ remote.save()
+ number_multi_encrypted += 1
+
+ if dry_run:
+ print("Remotes with un-encrypted fields: {}".format(number_unencrypted))
+ print("Remotes encrypted multiple times: {}".format(number_multi_encrypted))
+ else:
+ if not number_unencrypted and not number_multi_encrypted:
+ print("Finished. (OK)")
+ else:
+ print(
+ "Finished. ({} remotes fixed)".format(
+ number_unencrypted + number_multi_encrypted
+ )
+ )
diff --git a/pulpcore/app/migrations/0073_encrypt_remote_fields.py b/pulpcore/app/migrations/0073_encrypt_remote_fields.py
--- a/pulpcore/app/migrations/0073_encrypt_remote_fields.py
+++ b/pulpcore/app/migrations/0073_encrypt_remote_fields.py
@@ -1,59 +1,53 @@
# Generated by Django 2.2.20 on 2021-04-29 14:33
-from django.db import connection, migrations
-from django.db.models import Q
-
+from django.db import migrations
import pulpcore.app.models.fields
fields = ("username", "password", "proxy_username", "proxy_password", "client_key")
+new_fields = ("_encrypted_username", "_encrypted_password", "_encrypted_proxy_username", "_encrypted_proxy_password", "_encrypted_client_key")
def encrypt_remote_fields(apps, schema_editor):
- offset = 0
- chunk_size = 100
Remote = apps.get_model("core", "Remote")
- with connection.cursor() as cursor:
- while True:
- cursor.execute(
- f"SELECT pulp_id, {(',').join(fields)} FROM "
- f"core_remote LIMIT {chunk_size} OFFSET {offset}"
- )
- records = cursor.fetchall()
- offset += chunk_size
+ remotes_needing_update = []
+ for remote in Remote.objects.all().iterator():
+ if not any([getattr(remote, field) for field in fields]):
+ continue
- if len(records) == 0:
- break
+ remote._encrypted_username = remote.username
+ remote._encrypted_password = remote.password
+ remote._encrypted_proxy_username = remote.proxy_username
+ remote._encrypted_proxy_password = remote.proxy_password
+ remote._encrypted_client_key = remote.client_key
+ remotes_needing_update.append(remote)
- for record in records:
- update = {
- field: record[i] for i, field in enumerate(fields, 1) if record[i] is not None
- }
- if not update:
- continue
+ if len(remotes_needing_update) > 100:
+ Remote.objects.bulk_update(remotes_needing_update, new_fields)
+ remotes_needing_update.clear()
- Remote.objects.filter(pk=record[0]).update(**update)
+ Remote.objects.bulk_update(remotes_needing_update, new_fields)
def unencrypt_remote_fields(apps, schema_editor):
Remote = apps.get_model("core", "Remote")
- q = Q()
- for field in fields:
- q &= Q(**{field: None}) | Q(**{field: ""})
+ remotes_needing_update = []
+ for remote in Remote.objects.all().iterator():
+ if not any([getattr(remote, field) for field in new_fields]):
+ continue
+ remote.username = remote._encrypted_username
+ remote.password = remote._encrypted_password
+ remote.proxy_username = remote._encrypted_proxy_username
+ remote.proxy_password = remote._encrypted_proxy_password
+ remote.client_key = remote._encrypted_client_key
+ remotes_needing_update.append(remote)
- for remote in Remote.objects.exclude(q):
- update = [
- f"{field} = '{getattr(remote, field)}'"
- for field in fields
- if getattr(remote, field) is not None
- ]
- query = (
- f"UPDATE core_remote cr SET {(', ').join(update)} WHERE pulp_id = '{remote.pulp_id}'"
- )
+ if len(remotes_needing_update) > 100:
+ Remote.objects.bulk_update(remotes_needing_update, fields)
+ remotes_needing_update.clear()
- with connection.cursor() as cursor:
- cursor.execute(query)
+ Remote.objects.bulk_update(remotes_needing_update, fields)
class Migration(migrations.Migration):
@@ -63,33 +57,82 @@ class Migration(migrations.Migration):
]
operations = [
- migrations.AlterField(
+ # Add new fields to temporarily hold the encrypted values
+ migrations.AddField(
model_name="remote",
- name="client_key",
+ name="_encrypted_client_key",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
- migrations.AlterField(
+ migrations.AddField(
model_name="remote",
- name="password",
+ name="_encrypted_password",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
- migrations.AlterField(
+ migrations.AddField(
model_name="remote",
- name="proxy_password",
+ name="_encrypted_proxy_password",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
- migrations.AlterField(
+ migrations.AddField(
model_name="remote",
- name="proxy_username",
+ name="_encrypted_proxy_username",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
- migrations.AlterField(
+ migrations.AddField(
model_name="remote",
- name="username",
+ name="_encrypted_username",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
+ # Populate the new fields with encrypted values computed from the unencrypted fields
migrations.RunPython(
code=encrypt_remote_fields,
reverse_code=unencrypt_remote_fields,
),
+ # Remove the unencrypted columns
+ migrations.RemoveField(
+ model_name="remote",
+ name="client_key",
+ ),
+ migrations.RemoveField(
+ model_name="remote",
+ name="password",
+ ),
+ migrations.RemoveField(
+ model_name="remote",
+ name="proxy_password",
+ ),
+ migrations.RemoveField(
+ model_name="remote",
+ name="proxy_username",
+ ),
+ migrations.RemoveField(
+ model_name="remote",
+ name="username",
+ ),
+ # Replace the formerly-unencrypted columns with the new encrypted ones
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_client_key",
+ new_name="client_key",
+ ),
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_password",
+ new_name="password",
+ ),
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_proxy_password",
+ new_name="proxy_password",
+ ),
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_proxy_username",
+ new_name="proxy_username",
+ ),
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_username",
+ new_name="username",
+ ),
]
| Migration of encrypted fields between 3.14 and 3.16 is broken for some remotes
**Version**
After upgrading from Katello 4.2 to 4.3, some users are encountering sync problems with some repositories. A 500 error is triggered when the /repositories/.../sync/ endpoint is used. The error seems to be related to the client_cert / client_key / ca_cert values.
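For context, a hedged sketch of how such a stored value can be probed: decryption only succeeds for proper Fernet tokens, while plaintext left behind by the broken migration raises `InvalidToken` (as in the traceback below); `key_bytes` stands in for the contents of the `DB_ENCRYPTION_KEY` file:
```python
from cryptography.fernet import Fernet, InvalidToken


def is_fernet_token(key_bytes, value):
    """Return True if `value` decrypts with this key, False for plaintext."""
    try:
        Fernet(key_bytes).decrypt(value.encode())
        return True
    except InvalidToken:
        return False
```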
```
Feb 24 11:28:57 foreman pulpcore-api: pulp [e2601bd6-70a5-4a87-ace3-a386280e3e30]: django.request:ERROR: Internal Server Error: /pulp/api/v3/remotes/rpm/rpm/89e5b587-2295-4f1d-a49d-c0e8ed518c67/
Feb 24 11:28:57 foreman pulpcore-api: Traceback (most recent call last):
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 96, in _get_unverified_token_data
Feb 24 11:28:57 foreman pulpcore-api: data = base64.urlsafe_b64decode(token)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/rh/rh-python38/root/usr/lib64/python3.8/base64.py", line 133, in urlsafe_b64decode
Feb 24 11:28:57 foreman pulpcore-api: return b64decode(s)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/rh/rh-python38/root/usr/lib64/python3.8/base64.py", line 87, in b64decode
Feb 24 11:28:57 foreman pulpcore-api: return binascii.a2b_base64(s)
Feb 24 11:28:57 foreman pulpcore-api: binascii.Error: Incorrect padding
Feb 24 11:28:57 foreman pulpcore-api: During handling of the above exception, another exception occurred:
Feb 24 11:28:57 foreman pulpcore-api: Traceback (most recent call last):
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/core/handlers/exception.py", line 47, in inner
Feb 24 11:28:57 foreman pulpcore-api: response = get_response(request)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/core/handlers/base.py", line 181, in _get_response
Feb 24 11:28:57 foreman pulpcore-api: response = wrapped_callback(request, *callback_args, **callback_kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view
Feb 24 11:28:57 foreman pulpcore-api: return view_func(*args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/viewsets.py", line 125, in view
Feb 24 11:28:57 foreman pulpcore-api: return self.dispatch(request, *args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 509, in dispatch
Feb 24 11:28:57 foreman pulpcore-api: response = self.handle_exception(exc)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 469, in handle_exception
Feb 24 11:28:57 foreman pulpcore-api: self.raise_uncaught_exception(exc)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 480, in raise_uncaught_exception
Feb 24 11:28:57 foreman pulpcore-api: raise exc
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 506, in dispatch
Feb 24 11:28:57 foreman pulpcore-api: response = handler(request, *args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/viewsets/base.py", line 470, in partial_update
Feb 24 11:28:57 foreman pulpcore-api: return self.update(request, *args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/viewsets/base.py", line 452, in update
Feb 24 11:28:57 foreman pulpcore-api: instance = self.get_object()
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/generics.py", line 96, in get_object
Feb 24 11:28:57 foreman pulpcore-api: obj = get_object_or_404(queryset, **filter_kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/generics.py", line 19, in get_object_or_404
Feb 24 11:28:57 foreman pulpcore-api: return _get_object_or_404(queryset, *filter_args, **filter_kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/shortcuts.py", line 76, in get_object_or_404
Feb 24 11:28:57 foreman pulpcore-api: return queryset.get(*args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 431, in get
Feb 24 11:28:57 foreman pulpcore-api: num = len(clone)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 262, in __len__
Feb 24 11:28:57 foreman pulpcore-api: self._fetch_all()
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 1324, in _fetch_all
Feb 24 11:28:57 foreman pulpcore-api: self._result_cache = list(self._iterable_class(self))
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 68, in __iter__
Feb 24 11:28:57 foreman pulpcore-api: for row in compiler.results_iter(results):
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/sql/compiler.py", line 1122, in apply_converters
Feb 24 11:28:57 foreman pulpcore-api: value = converter(value, expression, connection)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/models/fields.py", line 104, in from_db_value
Feb 24 11:28:57 foreman pulpcore-api: return force_str(self._fernet.decrypt(force_bytes(value)))
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 75, in decrypt
Feb 24 11:28:57 foreman pulpcore-api: timestamp, data = Fernet._get_unverified_token_data(token)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 98, in _get_unverified_token_data
Feb 24 11:28:57 foreman pulpcore-api: raise InvalidToken
Feb 24 11:28:57 foreman pulpcore-api: cryptography.fernet.InvalidToken
Feb 24 11:28:57 foreman pulpcore-api: pulp [e2601bd6-70a5-4a87-ace3-a386280e3e30]: - - [24/Feb/2022:10:28:57 +0000] "PATCH /pulp/api/v3/remotes/rpm/rpm/89e5b587-2295-4f1d-a49d-c0e8ed518c67/ HTTP/1.1" 500 145 "-" "OpenAPI-Generator/3.16.1/ruby"
Feb 24 11:28:58 foreman qdrouterd: SERVER (info) [C464018] Connection from 10.2.1.176:50898 (to :5647) failed: amqp:resource-limit-exceeded local-idle-timeout expired
Feb 24 11:28:58 foreman qdrouterd: ROUTER_CORE (info) [C464018][L1196864] Link detached: del=0 presett=0 psdrop=0 acc=0 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no
Feb 24 11:28:58 foreman qdrouterd: ROUTER_CORE (info) [C464018] Connection Closed
Feb 24 11:28:58 foreman qdrouterd: ROUTER_CORE (info) [C4][L1196865] Link detached: del=0 presett=0 psdrop=0 acc=0 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no
Feb 24 11:28:58 foreman pulpcore-api: pulp [e2601bd6-70a5-4a87-ace3-a386280e3e30]: django.request:ERROR: Internal Server Error: /pulp/api/v3/repositories/rpm/rpm/b4a448e8-896c-4fec-8ee2-fcdf4307f2fd/sync/
Feb 24 11:28:58 foreman pulpcore-api: Traceback (most recent call last):
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 96, in _get_unverified_token_data
Feb 24 11:28:58 foreman pulpcore-api: data = base64.urlsafe_b64decode(token)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/rh/rh-python38/root/usr/lib64/python3.8/base64.py", line 133, in urlsafe_b64decode
Feb 24 11:28:58 foreman pulpcore-api: return b64decode(s)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/rh/rh-python38/root/usr/lib64/python3.8/base64.py", line 87, in b64decode
Feb 24 11:28:58 foreman pulpcore-api: return binascii.a2b_base64(s)
Feb 24 11:28:58 foreman pulpcore-api: binascii.Error: Incorrect padding
Feb 24 11:28:58 foreman pulpcore-api: During handling of the above exception, another exception occurred:
Feb 24 11:28:58 foreman pulpcore-api: Traceback (most recent call last):
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/core/handlers/exception.py", line 47, in inner
Feb 24 11:28:58 foreman pulpcore-api: response = get_response(request)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/core/handlers/base.py", line 181, in _get_response
Feb 24 11:28:58 foreman pulpcore-api: response = wrapped_callback(request, *callback_args, **callback_kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view
Feb 24 11:28:58 foreman pulpcore-api: return view_func(*args, **kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/viewsets.py", line 125, in view
Feb 24 11:28:58 foreman pulpcore-api: return self.dispatch(request, *args, **kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 509, in dispatch
Feb 24 11:28:58 foreman pulpcore-api: response = self.handle_exception(exc)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 469, in handle_exception
Feb 24 11:28:58 foreman pulpcore-api: self.raise_uncaught_exception(exc)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 480, in raise_uncaught_exception
Feb 24 11:28:58 foreman pulpcore-api: raise exc
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 506, in dispatch
Feb 24 11:28:58 foreman pulpcore-api: response = handler(request, *args, **kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/viewsets.py", line 130, in sync
Feb 24 11:28:58 foreman pulpcore-api: serializer.is_valid(raise_exception=True)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/serializers.py", line 220, in is_valid
Feb 24 11:28:58 foreman pulpcore-api: self._validated_data = self.run_validation(self.initial_data)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/serializers.py", line 419, in run_validation
Feb 24 11:28:58 foreman pulpcore-api: value = self.to_internal_value(data)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/serializers.py", line 476, in to_internal_value
Feb 24 11:28:58 foreman pulpcore-api: validated_value = field.run_validation(primitive_value)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/relations.py", line 153, in run_validation
Feb 24 11:28:58 foreman pulpcore-api: return super().run_validation(data)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/fields.py", line 568, in run_validation
Feb 24 11:28:58 foreman pulpcore-api: value = self.to_internal_value(data)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/relations.py", line 373, in to_internal_value
Feb 24 11:28:58 foreman pulpcore-api: return self.get_object(match.view_name, match.args, match.kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/serializers/base.py", line 322, in get_object
Feb 24 11:28:58 foreman pulpcore-api: return super().get_object(*args, **kwargs).cast()
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/relations.py", line 318, in get_object
Feb 24 11:28:58 foreman pulpcore-api: return queryset.get(**lookup_kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 431, in get
Feb 24 11:28:58 foreman pulpcore-api: num = len(clone)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 262, in __len__
Feb 24 11:28:58 foreman pulpcore-api: self._fetch_all()
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 1324, in _fetch_all
Feb 24 11:28:58 foreman pulpcore-api: self._result_cache = list(self._iterable_class(self))
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 68, in __iter__
Feb 24 11:28:58 foreman pulpcore-api: for row in compiler.results_iter(results):
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/sql/compiler.py", line 1122, in apply_converters
Feb 24 11:28:58 foreman pulpcore-api: value = converter(value, expression, connection)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/models/fields.py", line 104, in from_db_value
Feb 24 11:28:58 foreman pulpcore-api: return force_str(self._fernet.decrypt(force_bytes(value)))
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 75, in decrypt
Feb 24 11:28:58 foreman pulpcore-api: timestamp, data = Fernet._get_unverified_token_data(token)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 98, in _get_unverified_token_data
Feb 24 11:28:58 foreman pulpcore-api: raise InvalidToken
Feb 24 11:28:58 foreman pulpcore-api: cryptography.fernet.InvalidToken
Feb 24 11:28:58 foreman pulpcore-api: pulp [e2601bd6-70a5-4a87-ace3-a386280e3e30]: - - [24/Feb/2022:10:28:58 +0000] "POST /pulp/api/v3/repositories/rpm/rpm/b4a448e8-896c-4fec-8ee2-fcdf4307f2fd/sync/ HTTP/1.1" 500 145
```
>
> What I did notice, though, is that on the old 4.2 server with pulpcore 3.14 all those remotes have an empty username and password set, i.e. an empty string “” and not NULL like many others. However, the list of remotes on the old server with an empty string username is longer than the affected ones on my 4.3 server.
>
> So it seems to be an issue with the database migration from 3.14 to 3.16 and encryption of some fields in the database. The migration/encryption seems to fail in some cases and the original content remains in the database, which makes decryption fail. This is also true for my affected rhel repository: I can see in the database that the remote contains the RSA private key in client_key, unlike the other rhel repos which contain some base64 encoded string in the client_key fields.
>
> Thus the problem is that for some rows in the database (at least the core_remote table) encryption of the fields client_key, username, password, (proxy_username, proxy_password, too, I guess) fails during migration from 3.14 to 3.16 leaving the original content in the database. This leads to decryption failure in 3.16.
>
> For whatever reason only a few repos are affected, mostly those which have empty strings for username, password instead of NULL like many others. However, even with empty strings for some remotes the encryption works while it doesn’t for others.
**Describe the bug**
API calls to trigger a sync return a 500 error for some remotes
**To Reproduce**
Unclear
**Expected behavior**
A sync task is correctly created, no 500 error.
**Additional context**
Discourse discussion: https://community.theforeman.org/t/pulprpmclient-apierror-http-500-during-sync-repository/27480/13?u=dralley
Looking closer into the encrypted strings in the database after the migration from 3.14 to 3.16, I noticed that the empty strings are consecutive in an unordered select. That made me suspect that the problem is the chunking and the assumption that the order of rows won't change between chunks. To verify, I decrypted all encrypted usernames in the migrated database and found what I had suspected:
```
>>> f.decrypt(b'gAAAAABiKvJgZhyIdXy__3X2rh8QdXezaWj-Y-RelFEfYIWg2mrTREsKTB7ydPY2gn3ZhveMwE3ocN1KO8YV3h5iA-wMibo_aw==')
b''
>>> f.decrypt(b'gAAAAABiKvJzCDPxmuSFmmAawerxPi1AqUCP4H8NxWiO0ypnYwFraXPj35EWQ4ABupu_KIBbBPFhW2elE_4Ru6FQQWRggn1yeg==')
b''
>>> f.decrypt(b'gAAAAABiKvGrd6IVPXjJZuTUPYxXg_F3jXvaMmbH3l_O2x1hNnxG8vBKeTHav_0Bz2rjsjcUc6CH_K4eapwLpV0tNGF_cJZKbRbqsSB_JZTQyjW8jSovvTipMSsbWeQJJZ-B5yLWk6vBnNk9cQ81I6kQOnXZolXbRfIPFdPM9AhwCJro8vnDcN4AQ5NKe9dyOVM80hHDquUW2IavogypDl9XLbsnr6m9KQ==')
b'gAAAAABiKe5e7RWrDl9cNAUTLHZ9CjN30uvPAZ_KZZskG_pyCBDCJBJ5pY6pvKaidZltPjWa0iqLP8RsKGakC8fpTi5xMz-c6Q=='
...
```
Obviously the last one above has been encrypted twice. So basically, the migration does encrypt some rows twice and some not at all.
I guess the reason is in 0073_encrypt_remote_fields.py:
```
def encrypt_remote_fields(apps, schema_editor):
offset = 0
chunk_size = 100
Remote = apps.get_model("core", "Remote")
with connection.cursor() as cursor:
while True:
cursor.execute(
f"SELECT pulp_id, {(',').join(fields)} FROM "
f"core_remote LIMIT {chunk_size} OFFSET {offset}"
)
records = cursor.fetchall()
offset += chunk_size
if len(records) == 0:
break
for record in records:
update = {
field: record[i] for i, field in enumerate(fields, 1) if record[i] is not None
}
if not update:
continue
Remote.objects.filter(pk=record[0]).update(**update)
```
I have more than 100 rows in core_remote, i.e. it runs two selects:
```
pulpcore=# select pulp_id, "username", "password", "proxy_username", "proxy_password", "client_key" from core_remote limit 100 offset 0;
pulpcore=# select pulp_id, "username", "password", "proxy_username", "proxy_password", "client_key" from core_remote limit 100 offset 100;
```
This assumes that the order of the rows in the whole table does not change between the first and second select. I suspect that this isn't true. Comparing the select on the database before and after the migration, the rows definitely come back in a different order.
So the migration should make sure to maintain the order of returned rows, e.g. by adding an `ORDER BY pulp_id`. That way the chunks should work unless some other operation in between inserts or deletes rows or modifies pulp_ids...
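For illustration, the ordered variant of that select could look roughly like this (a sketch only, reusing the `fields`, `chunk_size` and `offset` names from the migration code above — not the actual upstream fix):
```
cursor.execute(
    f"SELECT pulp_id, {(',').join(fields)} FROM core_remote "
    f"ORDER BY pulp_id LIMIT {chunk_size} OFFSET {offset}"
)
# With a stable ORDER BY, every OFFSET page sees the same row order,
# so no row is skipped or encrypted twice.
```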
Of course this leaves people who have already migrated and cannot go back with broken content in those fields: some fields may still contain plaintext content which causes decryption to fail with this InvalidToken exception. Other fields are double encrypted and the decrypt will return the single encrypted string instead of the plaintext causing authentication to fail.
To make matters worse: at least on my Katello 4.2 server there are quite a few rows in core_remote containing empty strings "" in those fields instead of NULL, causing the migration to encrypt the empty string. Due to this, I have 41 rows in the 3.16 table with encrypted usernames even though I actually only have 4 repositories having a non-empty username...
Thinking a little bit more about it, I think LIMIT/chunking in the migration is simply wrong. It should just use the standard postgresql cursor/buffering to go through all rows and not try to split it up manually. Using LIMIT/OFFSET for paging through an unsorted table is just conceptually flawed.
In addition: technically, the SELECT and the following updates should be in a transaction block to make sure it's consistent and no other process can get in between the select and the updates.
Or otherwise the migration could temporarily add encrypted columns for those 5 fields to be encrypted and write the encrypted text there. This allows the migration to loop through the table (even with limit) and find rows which still need to be encrypted. Once there are no rows left to be encrypted, a single update can move the encrypted texts from the temporary columns into the normal columns and after that you can drop the temporary columns.
>So the migration should make sure to maintain order of returned rows, e.g. by adding a ORDER by pulp_id. That way the chunks should work unless some other operation in between would insert or delete rows or modify pulp_ids...
Yes, your theory is almost certainly the correct root cause. That needs to be fixed as you've described, using ordered queries.
>In addition: technically, the SELECT and the following updates should be in a transaction block to make sure it's consistent and no other process can get in between the select and the updates.
This part should be fine though. All Django migrations implicitly run inside a transaction unless you manually specify otherwise, and that isn't the case here.
> Yes, your theory is almost certainly the correct root cause. That needs to be fixed as you've described, using ordered queries.
Do you really think splitting the table into chunks is necessary? For the core_remote table, even if someone has a couple of thousand repositories, a single select of the whole table should easily fit into memory. And more importantly: you could easily optimize the select by searching only for rows which actually have something set in one of the five columns to be encrypted. In my case, that would return 4 rows with username/password set and 5 redhat repositories with client keys (after setting the empty string "" columns to null)...
Chunks can be useful if it's a huge table with many (millions) of rows. But in that case, you actually wouldn't want to sort the table because it is much more efficient to deliver the rows in database order.
Isn't it possible with django to loop through the rows from the database directly, i.e. instead of using fetchall, which I assume retrieves all rows from the select, have something like a simple "fetch" to get the next row from the cursor until it hits the end?
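For what it's worth, the DB-API cursor already supports that kind of incremental fetching via `fetchmany()`. A rough sketch of the loop without LIMIT/OFFSET (hypothetical, reusing the `Remote`, `connection` and `fields` names from the migration above — not the fix that actually shipped):
```
with connection.cursor() as cursor:
    cursor.execute(f"SELECT pulp_id, {(',').join(fields)} FROM core_remote")
    while True:
        # Pull the next chunk from the same open cursor instead of
        # issuing a new LIMIT/OFFSET query per chunk.
        records = cursor.fetchmany(100)
        if not records:
            break
        for record in records:
            update = {
                field: record[i]
                for i, field in enumerate(fields, 1)
                if record[i] is not None
            }
            if update:
                Remote.objects.filter(pk=record[0]).update(**update)
```
Whether the driver buffers the whole result client-side then depends on the cursor type, but either way the unordered-paging problem goes away.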
>Do you really think splitting the table into chunks is necessary? For the core_remote table, even if someone has a couple of thousand repositories, a single select of the whole table should easily fit into memory.
I don't know why it was done this way in particular, the comments on that [PR](https://github.com/pulp/pulpcore/pull/1301) aren't illuminating, but I'll bring it up on Monday and see if that whole requirement can be dropped. | 2022-03-12T16:16:52 |
|
pulp/pulpcore | 2,338 | pulp__pulpcore-2338 | [
"2069"
] | 2965bda3534685c266af0a098cc6cf10eec59252 | diff --git a/pulpcore/plugin/stages/api.py b/pulpcore/plugin/stages/api.py
--- a/pulpcore/plugin/stages/api.py
+++ b/pulpcore/plugin/stages/api.py
@@ -179,12 +179,12 @@ def __str__(self):
return "[{id}] {name}".format(id=id(self), name=self.__class__.__name__)
-async def create_pipeline(stages, maxsize=1000):
+async def create_pipeline(stages, maxsize=1):
"""
A coroutine that builds a Stages API linear pipeline from the list `stages` and runs it.
Each stage is an instance of a class derived from :class:`pulpcore.plugin.stages.Stage` that
- implements the :meth:`run` coroutine. This coroutine reads asyncromously either from the
+ implements the :meth:`run` coroutine. This coroutine reads asynchronously either from the
`items()` iterator or the `batches()` iterator and outputs the items with `put()`. Here is an
example of the simplest stage that only passes data::
@@ -196,7 +196,7 @@ async def run(self):
Args:
stages (list of coroutines): A list of Stages API compatible coroutines.
maxsize (int): The maximum amount of items a queue between two stages should hold. Optional
- and defaults to 100.
+ and defaults to 1.
Returns:
A single coroutine that can be used to run, wait, or cancel the entire pipeline with.
| Reduce memory usage of the pipeline
Author: @bmbouter (bmbouter)
Redmine Issue: 9635, https://pulp.plan.io/issues/9635
---
## Motivation
It would be nice if users could specify a desired maximum amount of RAM to be used during sync. For example, a user can say I only want 1500 MB of RAM to be used max.
## What is already in place
The stages pipeline restricts memory usage by only allowing 1000 declarative content objects between each stage (so for 8-9 stages that's 8000-9000 declarative content objects). This happens [here](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L217).
Interestingly the docstring says this defaults to 100, but it seems to actually be 1000!
Also the stages perform batching, so they will only take in a limited number of items (the batch size). That happens [with minsize](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L84).
## Why this isn't enough
These are count-based mechanisms and don't correspond to actual MB or GB of memory used. Some content units vary a lot in how much memory each DeclarativeContent object takes up.
Another lesser problem is that it doesn't help plugin writers restrict their usage of memory in FirstStage.
## Idea
Add a new param called `max_mb` to base Remote, which defaults to None. If specified, the user will be specifying the desired maximum MB used by process syncing.
Have the queues between the stages, and the batcher implementation, both check the total memory the current process is using and poll with asyncio.sleep() until it goes down. This should keep the maximum amount used by all objects roughly to that number.
## Details
Introduce a new `MBSizeQueue` which is a wrapper around `asyncio.Queue` used today. It will have the same `put()` call, only wait if the amount of memory in use is greater than the remote is configured for.
Then introduce the same memory checking feature in the batcher. I'm not completely sure this second part is needed though.
We have to be very careful not to deadlock with this feature. For example, we have to account for the base case where even a single item is larger than the memory desired. Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock.....
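To make the idea concrete, a minimal sketch of such a queue might look like this (hypothetical code, Linux-only RSS check via /proc, with the `max_mb` value assumed to be plumbed in from the remote):
```
import asyncio
import os


def _current_rss_mb():
    # Current resident set size of this process; /proc/self/statm is Linux-only.
    with open("/proc/self/statm") as f:
        pages = int(f.read().split()[1])
    return pages * os.sysconf("SC_PAGE_SIZE") / (1024 * 1024)


class MBSizeQueue(asyncio.Queue):
    """asyncio.Queue whose put() waits while the process is over max_mb."""

    def __init__(self, *args, max_mb=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.max_mb = max_mb

    async def put(self, item):
        if self.max_mb is not None:
            # Only wait while the queue still holds items; once it is empty the
            # item goes through, so a single oversized unit cannot deadlock us.
            while not self.empty() and _current_rss_mb() > self.max_mb:
                await asyncio.sleep(0.1)
        await super().put(item)
```
The empty-queue check is only a rough guard against the deadlock case described above, not a complete answer to it.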
| From: @dralley (dalley)
Date: 2021-12-13T16:54:22Z
---
> Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock.....
This is true but:
1) the metadata is very messed up - 13 million duplicate "files" are listed for that package.
2) the postgresql maximum insert size is 1gb - so a single content unit exceeding that is a hard limitation regardless of anything else we do. Luckily I think that would be much much less frequent than an entire batch exceeding that limit, which I don't think we've ever seen happen either (but still a theoretical issue).
@bmbouter I finally have a PR for the pulp_rpm plugin that reduces the frontend memory consumption to ~50 megabytes. Unfortunately the whole sync (for a large repo like RHEL7) is still using ~1.8gb
So this effort is necessary to make any further improvements.
@dralley great I'm hoping to PoC it this week. It was further down on my list of things to do, but now it's close to the top. I'll take as assigned.
[I implemented the version](https://github.com/pulp/pulpcore/compare/main...bmbouter:memory-reduction-PoC?expand=1) which would have a setting and if the memory in-use was over that setting it would disable batching on the queues and let the queue drain.
It did significantly limit memory usage, but the runtime slows down hugely. For example, consider a pulp_rpm sync of EL7 with `on_demand=True` so no artifact downloading. Runtime-wise the regular one took 777 seconds, then 7671 seconds with the single-item pipeline. Here are two graphs of the memory usage. This was configured with `TASK_MEMORY_LIMIT = 128` which put it into effect almost immediately for all tests run.
Note the y-axis is MB of the process and the x-axis is from every call to put() after first-stage. So by the time the first call occurs first stage has already allocated a lot of memory.
# RPM Regular, No Memory Restriction

# RPM With Memory Restriction

Here are some results for pulp_file which also had a roughly 10x slowdown.
# pulp_file Regular, No Memory Restriction

# pulp_file With Memory Restriction

@bmbouter What about some intermediate value, like a limit of 1.2gb?
So I don't think we should merge or pursue this experiment, mainly for two reasons.
1) It doesn't actually limit the memory to the desired amount and it's off by a lot. In all tests it was supposed to hold to 128 M, so if you had the OOM set to 140 in all cases your OOM would still occur.
2) It runs crazy crazy slow
Here are a few ideas though:
1) We should add a debugging feature to produce memory usage graphs for every task run and dump them into a directory. This way users and developers can accurately record how their memory performance is doing. An adaptation of this PR can add this feature.
2) The batch sizes are really what is important. In my case my memory restriction was reducing them to 0, and by default they are 1000, so what if we made this a configurable option on BaseRemote allowing users to specify a different default.
3) We should try to optimize first stage more using cpu and memory profilers, but this will have to be a plugin-by-plugin effort.
> @bmbouter What about some intermediate value, like a limit of 1.2gb?
It takes so long to run (like hours) with memory restriction I don't know what effective memory it would be using. What do you think about my next-steps proposals [here](https://github.com/pulp/pulpcore/issues/2069#issuecomment-1065199607)?
> It doesn't actually limit the memory to the desired amount and it's off by a lot. In all tests it was supposed to hold to 128 M, so if you had the OOM set to 140 in all cases your OOM would still occur.
That's expected with the current frontend, all the work to reduce the amount of memory used by the rpm plugin frontend isn't merged yet. Probably 128mb would still be too low, but it could probably stay under 400mb.
> It runs crazy crazy slow
It sounds like you're disabling batching entirely though, what if we didn't go quite that far? Dynamically sized batches I mean. But perhaps that's not very easy with the current architecture.
Looking at the PR, all of the queues are swapped with memory restricting ones, when it seems like really the only one that needs to be done that way is the first stage.
>We should add a debugging feature to produce memory usage graphs for every task run and dump them into a directory. This way users and developers can accurate record how their memory performance is doing. An adaptation of this PR can add this feature.
How much overhead is there for the memory tracking alone? Is that using the pympler method or just a general "how much memory is this process using" graph?
>The batch sizes are really what is important. In my case my memory restriction was reducing them to 0, and by default they are 1000, so what if we made this a configurable option on BaseRemote allowing users to specify a different default.
I'm uncomfortable making an implementation detail part of our public API like that. I could agree with putting it in `settings.py`
Also could you write up how you were using scalene as a knowledge sharing exercise? It's really useful to be able to reference things like that months from now :)
In the PR, only one queue is swapped with the memory restricting one, the first one just after first stage. It does disable batching of the entire pipeline though. I've kind of determined that if we memory throttle we need to also disable batching because throttling and using batching to wait for additional put() calls would likely deadlock. I think what this experiment does show is that changing the batch size does influence the memory usage numbers significantly, which is good.
I'm hesitant to try to dynamically change the batch size because very likely the algorithm is going to continue to reduce it, since memory seems to still grow, just more slowly, and then you're in the super-slow no-batching situation again. I'd rather the user (or administrator) tell us, for all tasks or for a specific task, to use this value, and otherwise default to what it was doing before (1000).
The memory tracking was just periodic calls to the resource library, which I think just reads the values from the kernel. I ended up not even using pympler.
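(For reference, that kind of sampling can be as small as the snippet below; note that `ru_maxrss` is the peak RSS in kilobytes on Linux, so current usage would have to come from somewhere like /proc/self/statm instead.)
```
import resource

peak_mb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024
```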
The scalene stuff was pretty easy: in my dev env I `pip install scalene` and then `pclean`. I `pstart` all the processes, but then I shut down the workers with `sudo systemctl stop pulpcore-worker@1 pulpcore-worker@2`. I then use the script below as the scalene entry point by calling it with: `/usr/local/lib/pulp/bin/scalene --html --outfile profiling_output.html scalene_pulpcore_worker.py`.
### scalene_pulpcore_worker.py
```
import logging
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pulpcore.app.settings")
django.setup()
from pulpcore.tasking.pulpcore_worker import NewPulpWorker # noqa: E402: module level not at top
_logger = logging.getLogger(__name__)
NewPulpWorker().run_forever()
```
Then run a task, but you'll see tracebacks after the tasking code completes, it's the RuntimeError [here](https://github.com/pulp/pulpcore/blob/e3421edb42d669d2f0cd91403803892f37710dd9/pulpcore/app/models/task.py#L217). Also you'll see scalene not provide any actual tasking code in its analysis. It does profile the startup of the worker nicely though.
@pulp/core FYI ^
Here's the story I have on recording the memory: https://github.com/pulp/pulpcore/issues/2329
Here's the story I wrote on making batch_size a BaseRemote param https://github.com/pulp/pulpcore/issues/2332
Maybe we're looking at this wrong.
Right now the abridged version of what we do is:
* parse the full metadata, put it in the pipeline
* try to find any that already exist, swap the content models
* save the ones whose internal state says they aren't saved yet
But the entire pipeline basically holds these fat, completely populated models objects from beginning to end.
What if we merge the `QueryExistingContents` and `ContentSaver` stages, promoted them as far upwards in the pipeline as we can and then only hold on to the absolute minimum possible amount of data possible after those stages are finished. So amount of memory would roughly scale with the contents of the first one or two queues, rather than the number of stages. Then the whole pipeline could stay full.
The pipeline also handles downloading, creating of remote artifacts, resolving content futures, and the searching+reading of `Artifact` objects from the db too.
It does allow a lot of objects to be loaded into the pipeline: with 1000 in each queue, plus maybe 500 in the batch iterator (holding objects within each stage), that's like 6K objects in the pipeline that I've seen.
Overall reducing the number of items will reduce the memory footprint, but it will also slow down the sync with more RTTs to the database and possibly less parallel downloading. I think ultimately the batcher is really in control of that (500 items each), so what we could consider doing is reducing the queue size from 1000 to 500, because there is no performance gained from allowing more items in the queues between the stages than each stage could handle in one batch. This would reduce the number of items by like 40% (quick guess). That would be Pareto efficient because runtime should be unaffected, yet the memory amount is reduced.
Maybe we could benchmark a pre-post change after I finish my memory recording PR that needs revision. | 2022-03-15T20:56:53 |
|
pulp/pulpcore | 2,341 | pulp__pulpcore-2341 | [
"2069"
] | d273b6fb14826d1bb2069855205ab0af5f742cbd | diff --git a/pulpcore/plugin/stages/api.py b/pulpcore/plugin/stages/api.py
--- a/pulpcore/plugin/stages/api.py
+++ b/pulpcore/plugin/stages/api.py
@@ -179,12 +179,12 @@ def __str__(self):
return "[{id}] {name}".format(id=id(self), name=self.__class__.__name__)
-async def create_pipeline(stages, maxsize=1000):
+async def create_pipeline(stages, maxsize=1):
"""
A coroutine that builds a Stages API linear pipeline from the list `stages` and runs it.
Each stage is an instance of a class derived from :class:`pulpcore.plugin.stages.Stage` that
- implements the :meth:`run` coroutine. This coroutine reads asyncromously either from the
+ implements the :meth:`run` coroutine. This coroutine reads asynchronously either from the
`items()` iterator or the `batches()` iterator and outputs the items with `put()`. Here is an
example of the simplest stage that only passes data::
@@ -196,7 +196,7 @@ async def run(self):
Args:
stages (list of coroutines): A list of Stages API compatible coroutines.
maxsize (int): The maximum amount of items a queue between two stages should hold. Optional
- and defaults to 100.
+ and defaults to 1.
Returns:
A single coroutine that can be used to run, wait, or cancel the entire pipeline with.
| Reduce memory usage of the pipeline
Author: @bmbouter (bmbouter)
Redmine Issue: 9635, https://pulp.plan.io/issues/9635
---
## Motivation
It would be nice if users could specify a desired maximum amount of RAM to be used during sync. For example, a user can say I only want 1500 MB of RAM to be used max.
## What is already in place
The stages pipeline restricts memory usage by only allowing 1000 declarative content objects between each stage (so for 8-9 stages that's 8000-9000 declarative content objects). This happens [here](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L217).
Interestingly the docstring says this defaults to 100, but it seems to actually be 1000!
Also the stages perform batching, so they will only take in a limited number of items (the batch size). That happens [with minsize](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L84).
## Why this isn't enough
These are count-based mechanisms and don't correspond to actual MB or GB of memory used. Some content units vary a lot in how much memory each DeclarativeContent object takes up.
Another lesser problem is that it doesn't help plugin writers restrict their usage of memory in FirstStage.
## Idea
Add a new param called `max_mb` to base Remote, which defaults to None. If specified, the user will be specifying the desired maximum MB used by process syncing.
Have the queues between the stages, and the batcher implementation, both check the total memory the current process is using and poll with asyncio.sleep() until it goes down. This should keep the maximum amount used by all objects roughly to that number.
## Details
Introduce a new `MBSizeQueue` which is a wrapper around `asyncio.Queue` used today. It will have the same `put()` call, only wait if the amount of memory in use is greater than the remote is configured for.
Then introduce the same memory checking feature in the batcher. I'm not completely sure this second part is needed though.
We have to be very careful not to deadlock with this feature. For example, we have to account for the base case where even a single item is larger than the memory desired. Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock.....
| From: @dralley (dalley)
Date: 2021-12-13T16:54:22Z
---
> Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock.....
This is true but:
1) the metadata is very messed up - 13 million duplicate "files" are listed for that package.
2) the postgresql maximum insert size is 1gb - so a single content unit exceeding that is a hard limitation regardless of anything else we do. Luckily I think that would be much much less frequent than an entire batch exceeding that limit, which I don't think we've ever seen happen either (but still a theoretical issue).
@bmbouter I finally have a PR for the pulp_rpm plugin that reduces the frontend memory consumption to ~50 megabytes. Unfortunately the whole sync (for a large repo like RHEL7) is still using ~1.8gb
So this effort is necessary to make any further improvements.
@dralley great I'm hoping to PoC it this week. It was further down on my list of things to do, but now it's close to the top. I'll take as assigned.
[I implemented the version](https://github.com/pulp/pulpcore/compare/main...bmbouter:memory-reduction-PoC?expand=1) which would have a setting and if the memory in-use was over that setting it would disable batching on the queues and let the queue drain.
It did significantly limit memory usage, but the runtime slows down hugely. For example, consider a pulp_rpm sync of EL7 with `on_demand=True` so no artifact downloading. Runtime-wise the regular one took 777 seconds, then 7671 seconds with the single-item pipeline. Here are two graphs of the memory usage. This was configured with `TASK_MEMORY_LIMIT = 128` which put it into effect almost immediately for all tests run.
Note the y-axis is MB of the process and the x-axis is from every call to put() after first-stage. So by the time the first call occurs first stage has already allocated a lot of memory.
# RPM Regular, No Memory Restriction

# RPM With Memory Restriction

Here are some results for pulp_file which also had a roughly 10x slowdown.
# pulp_file Regular, No Memory Restriction

# pulp_file With Memory Restriction

@bmbouter What about some intermediate value, like a limit of 1.2gb?
So I don't think we should merge or pursue this experiment, mainly for two reasons.
1) It doesn't actually limit the memory to the desired amount and it's off by a lot. In all tests it was supposed to hold to 128 M, so if you had the OOM set to 140 in all cases your OOM would still occur.
2) It runs crazy crazy slow
Here are a few ideas though:
1) We should add a debugging feature to produce memory usage graphs for every task run and dump them into a directory. This way users and developers can accurately record how their memory performance is doing. An adaptation of this PR can add this feature.
2) The batch sizes are really what is important. In my case my memory restriction was reducing them to 0, and by default they are 1000, so what if we made this a configurable option on BaseRemote allowing users to specify a different default.
3) We should try to optimize first stage more using cpu and memory profilers, but this will have to be a plugin-by-plugin effort.
> @bmbouter What about some intermediate value, like a limit of 1.2gb?
It takes so long to run (like hours) with memory restriction I don't know what effective memory it would be using. What do you think about my next-steps proposals [here](https://github.com/pulp/pulpcore/issues/2069#issuecomment-1065199607)?
> It doesn't actually limit the memory to the desired amount and it's off by a lot. In all tests it was supposed to hold to 128 M, so if you had the OOM set to 140 in all cases your OOM would still occur.
That's expected with the current frontend, all the work to reduce the amount of memory used by the rpm plugin frontend isn't merged yet. Probably 128mb would still be too low, but it could probably stay under 400mb.
> It runs crazy crazy slow
It sounds like you're disabling batching entirely though, what if we didn't go quite that far? Dynamically sized batches I mean. But perhaps that's not very easy with the current architecture.
Looking at the PR, all of the queues are swapped with memory restricting ones, when it seems like really the only one that needs to be done that way is the first stage.
>We should add a debugging feature to produce memory usage graphs for every task run and dump them into a directory. This way users and developers can accurate record how their memory performance is doing. An adaptation of this PR can add this feature.
How much overhead is there for the memory tracking alone? Is that using the pympler method or just a general "how much memory is this process using" graph?
>The batch sizes are really what is important. In my case my memory restriction was reducing them to 0, and by default they are 1000, so what if we made this a configurable option on BaseRemote allowing users to specify a different default.
I'm uncomfortable making an implementation detail part of our public API like that. I could agree with putting it in `settings.py`
Also could you write up how you were using scalene as a knowledge sharing exercise? It's really useful to be able to reference things like that months from now :)
In the PR, only one queue is swapped with the memory restricting one, the first one just after first stage. It does disable batching of the entire pipeline though. I've kind of determined that if we memory throttle we need to also disable batching because throttling and using batching to wait for additional put() calls would likely deadlock. I think what this experiment does show is that changing the batch size does influence the memory usage numbers significantly, which is good.
I'm hesitant to try to dynamically change the batch size because very likely the algorithm is going to continue to reduce it because memory seems to still grow, just more slowly, and then you're in the super-slow no-batching situation again. I'd rather the user (or administrator) tell us and for all tasks or this specific task use this value and otherwise default to what it was doing before 1000.
The memory tracking was just periodic calls to the resource library, which I think just reads the values from the kernel. I ended up not even using pympler.
The scalene stuff was pretty easy, in my dev env I `pip install scalene` and the `pclean`. I `pstart` all the processes, but then I shutdown the workers with `sudo systemctl stop pulpcore-worker@1 pulpcore-worker@2`. I then use the script below as the scalene entry point by calling it with: `/usr/local/lib/pulp/bin/scalene --html --outfile profiling_output.html scalene_pulpcore_worker.py`.
### scalene_pulpcore_worker.py
```
import logging
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pulpcore.app.settings")
django.setup()
from pulpcore.tasking.pulpcore_worker import NewPulpWorker # noqa: E402: module level not at top
_logger = logging.getLogger(__name__)
NewPulpWorker().run_forever()
```
Then run a task, but you'll see tracebacks after the tasking code completes, it's the RuntimeError [here](https://github.com/pulp/pulpcore/blob/e3421edb42d669d2f0cd91403803892f37710dd9/pulpcore/app/models/task.py#L217). Also you'll see scalene not provide any actual tasking code in its analysis. It does profile the startup of the worker nicely though.
@pulp/core FYI ^
Here's the story I have on recording the memory: https://github.com/pulp/pulpcore/issues/2329
Here's the story I wrote on making batch_size a BaseRemote param https://github.com/pulp/pulpcore/issues/2332
Maybe we're looking at this wrong.
Right now the abridged version of what we do is:
* parse the full metadata, put it in the pipeline
* try to find any that already exist, swap the content models
* save the ones whose internal state says they aren't saved yet
But the entire pipeline basically holds these fat, completely populated models objects from beginning to end.
What if we merge the `QueryExistingContents` and `ContentSaver` stages, promoted them as far upwards in the pipeline as we can and then only hold on to the absolute minimum possible amount of data possible after those stages are finished. So amount of memory would roughly scale with the contents of the first one or two queues, rather than the number of stages. Then the whole pipeline could stay full.
The pipeline also handles downloading, creating of remote artifacts, resolving content futures, and the searching+reading of `Artifact` objects from the db too.
It does allow a lot of objects to be loaded into the pipeline, with 1000 in each queue, then maybe 500 as the batch iterator (holding objects within each stage) that's like 6K objects in the pipeline that I've seen.
Overall reducing the number of items will reduce the memory footprint, but also it will slow down the sync with more RTTs to the database and possibly less parallel downloading. I think ultimately the batcher is in real control of that (500) items each, so what we could consider doing is reducing the queue size from 1000 to 500 because there isn't performance gained from allowing more items in the queues between the stages than each stage could handle in one batch. This would reduce the number of items by like 40% (quick guess). That would be pareto efficient because runtime should be unaffected, yet the memory amount reduced.
Maybe we could benchmark a pre-post change after I finish my memory recording PR that needs revision. | 2022-03-16T15:37:59 |
|
pulp/pulpcore | 2,343 | pulp__pulpcore-2343 | [
"2069"
] | 30dd736681e797c5de82b383fe20b16ac0203196 | diff --git a/pulpcore/plugin/stages/api.py b/pulpcore/plugin/stages/api.py
--- a/pulpcore/plugin/stages/api.py
+++ b/pulpcore/plugin/stages/api.py
@@ -179,12 +179,12 @@ def __str__(self):
return "[{id}] {name}".format(id=id(self), name=self.__class__.__name__)
-async def create_pipeline(stages, maxsize=1000):
+async def create_pipeline(stages, maxsize=1):
"""
A coroutine that builds a Stages API linear pipeline from the list `stages` and runs it.
Each stage is an instance of a class derived from :class:`pulpcore.plugin.stages.Stage` that
- implements the :meth:`run` coroutine. This coroutine reads asyncromously either from the
+ implements the :meth:`run` coroutine. This coroutine reads asynchronously either from the
`items()` iterator or the `batches()` iterator and outputs the items with `put()`. Here is an
example of the simplest stage that only passes data::
@@ -196,7 +196,7 @@ async def run(self):
Args:
stages (list of coroutines): A list of Stages API compatible coroutines.
maxsize (int): The maximum amount of items a queue between two stages should hold. Optional
- and defaults to 100.
+ and defaults to 1.
Returns:
A single coroutine that can be used to run, wait, or cancel the entire pipeline with.
| Reduce memory usage of the pipeline
Author: @bmbouter (bmbouter)
Redmine Issue: 9635, https://pulp.plan.io/issues/9635
---
## Motivation
It would be nice if users could specify a desired maximum amount of RAM to be used during sync. For example, a user can say I only want 1500 MB of RAM to be used max.
## What is already in place
The stages pipeline restricts memory usage by only allowing 1000 declarative content objects between each stage (so for 8-9 stages that's 8000-9000 declarative content objects). This happens [here](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L217).
Interestingly the docstring says this defaults to 100, but it seems to actually be 1000!
Also the stages perform batching, so they will only take in a limited number of items (the batch size). That happens [with minsize](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L84).
## Why this isn't enough
These are count-based mechanisms and don't correspond to actual MB or GB of memory used. Some content units vary a lot in how much memory each DeclarativeContent object takes up.
Another lesser problem is that it doesn't help plugin writers restrict their usage of memory in FirstStage.
## Idea
Add a new param called `max_mb` to base Remote, which defaults to None. If specified, the user will be specifying the desired maximum MB used by process syncing.
Have the queues between the stages, and the batcher implementation, both check the total memory the current process is using and poll with asyncio.sleep() until it goes down. This should keep the maximum amount used by all objects roughly to that number.
## Details
Introduce a new `MBSizeQueue` which is a wrapper around `asyncio.Queue` used today. It will have the same `put()` call, only wait if the amount of memory in use is greater than the remote is configured for.
Then introduce the same memory checking feature in the batcher. I'm not completely sure this second part is needed though.
We have to be very careful not to deadlock with this feature. For example, we have to account for the base case where even a single item is larger than the memory desired. Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock.....
| From: @dralley (dalley)
Date: 2021-12-13T16:54:22Z
---
> Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock.....
This is true but:
1) the metadata is very messed up - 13 million duplicate "files" are listed for that package.
2) the postgresql maximum insert size is 1gb - so a single content unit exceeding that is a hard limitation regardless of anything else we do. Luckily I think that would be much much less frequent than an entire batch exceeding that limit, which I don't think we've ever seen happen either (but still a theoretical issue).
@bmbouter I finally have a PR for the pulp_rpm plugin that reduces the frontend memory consumption to ~50 megabytes. Unfortunately the whole sync (for a large repo like RHEL7) is still using ~1.8gb
So this effort is necessary to make any further improvements.
@dralley great I'm hoping to PoC it this week. It was further down on my list of things to do, but now it's close to the top. I'll take as assigned.
[I implemented the version](https://github.com/pulp/pulpcore/compare/main...bmbouter:memory-reduction-PoC?expand=1) which would have a setting and if the memory in-use was over that setting it would disable batching on the queues and let the queue drain.
It did significantly limit memory usage, but the runtime slows down hugely. For example, consider a pulp_rpm sync of EL7 with `on_demand=True` so no artifact downloading. Runtime-wise the regular one took 777 seconds, then 7671 seconds with the single-item pipeline. Here are two graphs of the memory usage. This was configured with `TASK_MEMORY_LIMIT = 128` which put it into effect almost immediately for all tests run.
Note the y-axis is MB of the process and the x-axis is from every call to put() after first-stage. So by the time the first call occurs first stage has already allocated a lot of memory.
# RPM Regular, No Memory Restriction

# RPM With Memory Restriction

Here are some results for pulp_file which also had a roughly 10x slowdown.
# pulp_file Regular, No Memory Restriction

# pulp_file With Memory Restriction

@bmbouter What about some intermediate value, like a limit of 1.2gb?
So I don't think we should merge or pursue this experiment, mainly for two reasons.
1) It doesn't actually limit the memory to the desired amount and it's off by a lot. In all tests it was supposed to hold to 128 M, so if you had the OOM set to 140 in all cases your OOM would still occur.
2) It runs crazy crazy slow
Here are a few ideas though:
1) We should add a debugging feature to produce memory usage graphs for every task run and dump them into a directory. This way users and developers can accurately record how their memory performance is doing. An adaptation of this PR can add this feature.
2) The batch sizes are really what is important. In my case my memory restriction was reducing them to 0, and by default they are 1000, so what if we made this a configurable option on BaseRemote allowing users to specify a different default.
3) We should try to optimize first stage more using cpu and memory profilers, but this will have to be a plugin-by-plugin effort.
> @bmbouter What about some intermediate value, like a limit of 1.2gb?
It takes so long to run (like hours) with memory restriction I don't know what effective memory it would be using. What do you think about my next-steps proposals [here](https://github.com/pulp/pulpcore/issues/2069#issuecomment-1065199607)?
> It doesn't actually limit the memory to the desired amount and it's off by a lot. In all tests it was supposed to hold to 128 M, so if you had the OOM set to 140 in all cases your OOM would still occur.
That's expected with the current frontend, all the work to reduce the amount of memory used by the rpm plugin frontend isn't merged yet. Probably 128mb would still be too low, but it could probably stay under 400mb.
> It runs crazy crazy slow
It sounds like you're disabling batching entirely though, what if we didn't go quite that far? Dynamically sized batches I mean. But perhaps that's not very easy with the current architecture.
Looking at the PR, all of the queues are swapped with memory restricting ones, when it seems like really the only one that needs to be done that way is the first stage.
>We should add a debugging feature to produce memory usage graphs for every task run and dump them into a directory. This way users and developers can accurate record how their memory performance is doing. An adaptation of this PR can add this feature.
How much overhead is there for the memory tracking alone? Is that using the pympler method or just a general "how much memory is this process using" graph?
>The batch sizes are really what is important. In my case my memory restriction was reducing them to 0, and by default they are 1000, so what if we made this a configurable option on BaseRemote allowing users to specify a different default.
I'm uncomfortable making an implementation detail part of our public API like that. I could agree with putting it in `settings.py`
Also could you write up how you were using scalene as a knowledge sharing exercise? It's really useful to be able to reference things like that months from now :)
In the PR, only one queue is swapped with the memory restricting one, the first one just after first stage. It does disable batching of the entire pipeline though. I've kind of determined that if we memory throttle we need to also disable batching because throttling and using batching to wait for additional put() calls would likely deadlock. I think what this experiment does show is that changing the batch size does influence the memory usage numbers significantly, which is good.
I'm hesitant to try to dynamically change the batch size because very likely the algorithm is going to continue to reduce it because memory seems to still grow, just more slowly, and then you're in the super-slow no-batching situation again. I'd rather the user (or administrator) tell us and for all tasks or this specific task use this value and otherwise default to what it was doing before 1000.
The memory tracking was just periodic calls to the resource library, which I think just reads the values from the kernel. I ended up not even using pympler.
The scalene stuff was pretty easy, in my dev env I `pip install scalene` and the `pclean`. I `pstart` all the processes, but then I shutdown the workers with `sudo systemctl stop pulpcore-worker@1 pulpcore-worker@2`. I then use the script below as the scalene entry point by calling it with: `/usr/local/lib/pulp/bin/scalene --html --outfile profiling_output.html scalene_pulpcore_worker.py`.
### scalene_pulpcore_worker.py
```
import logging
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pulpcore.app.settings")
django.setup()
from pulpcore.tasking.pulpcore_worker import NewPulpWorker # noqa: E402: module level not at top
_logger = logging.getLogger(__name__)
NewPulpWorker().run_forever()
```
Then run a task, but you'll see tracebacks after the tasking code completes, it's the RuntimeError [here](https://github.com/pulp/pulpcore/blob/e3421edb42d669d2f0cd91403803892f37710dd9/pulpcore/app/models/task.py#L217). Also you'll see scalene not provide any actual tasking code in its analysis. It does profile the startup of the worker nicely though.
@pulp/core FYI ^
Here's the story I have on recording the memory: https://github.com/pulp/pulpcore/issues/2329
Here's the story I wrote on making batch_size a BaseRemote param https://github.com/pulp/pulpcore/issues/2332
Maybe we're looking at this wrong.
Right now the abridged version of what we do is:
* parse the full metadata, put it in the pipeline
* try to find any that already exist, swap the content models
* save the ones whose internal state says they aren't saved yet
But the entire pipeline basically holds these fat, completely populated models objects from beginning to end.
What if we merge the `QueryExistingContents` and `ContentSaver` stages, promoted them as far upwards in the pipeline as we can and then only hold on to the absolute minimum possible amount of data possible after those stages are finished. So amount of memory would roughly scale with the contents of the first one or two queues, rather than the number of stages. Then the whole pipeline could stay full.
The pipeline also handles downloading, creating of remote artifacts, resolving content futures, and the searching+reading of `Artifact` objects from the db too.
It does allow a lot of objects to be loaded into the pipeline, with 1000 in each queue, then maybe 500 as the batch iterator (holding objects within each stage) that's like 6K objects in the pipeline that I've seen.
Overall reducing the number of items will reduce the memory footprint, but also it will slow down the sync with more RTTs to the database and possibly less parallel downloading. I think ultimately the batcher is in real control of that (500) items each, so what we could consider doing is reducing the queue size from 1000 to 500 because there isn't performance gained from allowing more items in the queues between the stages than each stage could handle in one batch. This would reduce the number of items by like 40% (quick guess). That would be pareto efficient because runtime should be unaffected, yet the memory amount reduced.
Maybe we could benchmark a pre-post change after I finish my memory recording PR that needs revision. | 2022-03-16T15:40:31 |
|
pulp/pulpcore | 2,344 | pulp__pulpcore-2344 | [
"2069"
] | 483cf66daf5d7321a538316eb74ea6dadcfe6ee7 | diff --git a/pulpcore/plugin/stages/api.py b/pulpcore/plugin/stages/api.py
--- a/pulpcore/plugin/stages/api.py
+++ b/pulpcore/plugin/stages/api.py
@@ -179,12 +179,12 @@ def __str__(self):
return "[{id}] {name}".format(id=id(self), name=self.__class__.__name__)
-async def create_pipeline(stages, maxsize=1000):
+async def create_pipeline(stages, maxsize=1):
"""
A coroutine that builds a Stages API linear pipeline from the list `stages` and runs it.
Each stage is an instance of a class derived from :class:`pulpcore.plugin.stages.Stage` that
- implements the :meth:`run` coroutine. This coroutine reads asyncromously either from the
+ implements the :meth:`run` coroutine. This coroutine reads asynchronously either from the
`items()` iterator or the `batches()` iterator and outputs the items with `put()`. Here is an
example of the simplest stage that only passes data::
@@ -196,7 +196,7 @@ async def run(self):
Args:
stages (list of coroutines): A list of Stages API compatible coroutines.
maxsize (int): The maximum amount of items a queue between two stages should hold. Optional
- and defaults to 100.
+ and defaults to 1.
Returns:
A single coroutine that can be used to run, wait, or cancel the entire pipeline with.
| Reduce memory usage of the pipeline
Author: @bmbouter (bmbouter)
Redmine Issue: 9635, https://pulp.plan.io/issues/9635
---
## Motivation
It would be nice if users could specify a desired maximum amount of RAM to be used during sync. For example, a user can say I only want 1500 MB of RAM to be used max.
## What is already in place
The stages pipeline restricts memory usage by only allowing 1000 declarative content objects between each stage (so for 8-9 stages that's 8000-9000 declarative content objects). This happens [here](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L217).
Interestingly the docstring says this defaults to 100, but it seems to actually be 1000!
Also the stages perform batching, so they will only take in a limited number of items (the batch size). That happens [with minsize](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L84).
## Why this isn't enough
These are count-based mechanisms and don't correspond to actual MB or GB of memory used. Some content units vary a lot in how much memory each DeclarativeContent object takes up.
Another lesser problem is that it doesn't help plugin writers restrict their usage of memory in FirstStage.
## Idea
Add a new param called `max_mb` to base Remote, which defaults to None. If specified, the user will be specifying the desired maximum MB used by process syncing.
Have the queues between the stages, and the batcher implementation, both check the total memory the current process is using and poll with asyncio.sleep() until it goes down. This should keep the maximum amount used by all objects roughly to that number.
## Details
Introduce a new `MBSizeQueue` which is a wrapper around `asyncio.Queue` used today. It will have the same `put()` call, only wait if the amount of memory in use is greater than the remote is configured for.
Then introduce the same memory checking feature in the batcher. I'm not completely sure this second part is needed though.
We have to be very careful not to deadlock with this feature. For example, we have to account for the base case where even a single item is larger than the memory desired. Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock.....
| From: @dralley (dalley)
Date: 2021-12-13T16:54:22Z
---
> Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock.....
This is true but:
1) the metadata is very messed up - 13 million duplicate "files" are listed for that package.
2) the postgresql maximum insert size is 1gb - so a single content unit exceeding that is a hard limitation regardless of anything else we do. Luckily I think that would be much much less frequent than an entire batch exceeding that limit, which I don't think we've ever seen happen either (but still a theoretical issue).
@bmbouter I finally have a PR for the pulp_rpm plugin that reduces the frontend memory consumption to ~50 megabytes. Unfortunately the whole sync (for a large repo like RHEL7) is still using ~1.8gb
So this effort is necessary to make any further improvements.
@dralley great I'm hoping to PoC it this week. It was further down on my list of things to do, but now it's close to the top. I'll take as assigned.
[I implemented the version](https://github.com/pulp/pulpcore/compare/main...bmbouter:memory-reduction-PoC?expand=1) which would have a setting and if the memory in-use was over that setting it would disable batching on the queues and let the queue drain.
It did significantly limit memory usage, but the runtime slows down hugely. For example consider a pulp_rpm sync of EL7 with `on_demand=True` so no artifact downloading. Runtime-wise the regular one took 777 seconds, then 7671 seconds with the single-item pipeline. Here are two graphs of the memory usage. This was configured with `TASK_MEMORY_LIMIT = 128` which put it into effect almost immediately for all tests run.
Note the y-axis is MB of the process and the x-axis is from every call to put() after first-stage. So by the time the first call occurs first stage has already allocated a lot of memory.
# RPM Regular, No Memory Restriction

# RPM With Memory Restriction

Here are some results for pulp_file which also had a roughly 10x slowdown.
# pulp_file Regular, No Memory Restriction

# pulp_file With Memory Restriction

@bmbouter What about some intermediate value, like a limit of 1.2gb?
So I don't think we should merge or pursue this experiment, mainly for two reasons.
1) It doesn't actually limit the memory to the desired amount and it's off by a lot. In all tests it was supposed to hold to 128 M, so if you had the OOM set to 140 in all cases your OOM would still occur.
2) It runs crazy crazy slow
Here are a few ideas though:
1) We should add a debugging feature to produce memory usage graphs for every task run and dump them into a directory. This way users and developers can accurately record how their memory performance is doing. An adaptation of this PR can add this feature.
2) The batch sizes are really what is important. In my case my memory restriction was reducing them to 0, and by default they are 1000, so what if we made this a configurable option on BaseRemote allowing users to specify a different default.
3) We should try to optimize first stage more using cpu and memory profilers, but this will have to be a plugin-by-plugin effort.
> @bmbouter What about some intermediate value, like a limit of 1.2gb?
It takes so long to run (like hours) with memory restriction that I don't know what effective memory it would be using. What do you think about my next-steps proposals [here](https://github.com/pulp/pulpcore/issues/2069#issuecomment-1065199607)?
> It doesn't actually limit the memory to the desired amount and it's off by a lot. In all tests it was supposed to hold to 128 M, so if you had the OOM set to 140 in all cases your OOM would still occur.
That's expected with the current frontend, all the work to reduce the amount of memory used by the rpm plugin frontend isn't merged yet. Probably 128mb would still be too low, but it could probably stay under 400mb.
> It runs crazy crazy slow
It sounds like you're disabling batching entirely though, what if we didn't go quite that far? Dynamically sized batches I mean. But perhaps that's not very easy with the current architecture.
Looking at the PR, all of the queues are swapped with memory restricting ones, when it seems like really the only one that needs to be done that way is the first stage.
>We should add a debugging feature to produce memory usage graphs for every task run and dump them into a directory. This way users and developers can accurately record how their memory performance is doing. An adaptation of this PR can add this feature.
How much overhead is there for the memory tracking alone? Is that using the pympler method or just a general "how much memory is this process using" graph?
>The batch sizes are really what is important. In my case my memory restriction was reducing them to 0, and by default they are 1000, so what if we made this a configurable option on BaseRemote allowing users to specify a different default.
I'm uncomfortable making an implementation detail part of our public API like that. I could agree with putting it in `settings.py`
Also could you write up how you were using scalene as a knowledge sharing exercise? It's really useful to be able to reference things like that months from now :)
In the PR, only one queue is swapped with the memory restricting one, the first one just after first stage. It does disable batching of the entire pipeline though. I've kind of determined that if we memory throttle we need to also disable batching because throttling and using batching to wait for additional put() calls would likely deadlock. I think what this experiment does show is that changing the batch size does influence the memory usage numbers significantly, which is good.
I'm hesitant to try to dynamically change the batch size because the algorithm would very likely keep reducing it (memory still grows, just more slowly), and then you're in the super-slow no-batching situation again. I'd rather the user (or administrator) tell us, and use that value for all tasks or for this specific task, otherwise defaulting to what it was doing before (1000).
The memory tracking was just periodic calls to the resource library, which I think just reads the values from the kernel. I ended up not even using pympler.
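In other words, roughly the snippet below, sampled on every `put()` call (sketch only; note `resource` reports the high-water mark rather than the instantaneous RSS):
```python
import resource


def peak_rss_mb():
    # high-water mark of this process' resident set size; reported in KiB on Linux
    return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024
```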
The scalene stuff was pretty easy: in my dev env I `pip install scalene` and then `pclean`. I `pstart` all the processes, but then I shut down the workers with `sudo systemctl stop pulpcore-worker@1 pulpcore-worker@2`. I then use the script below as the scalene entry point by calling it with: `/usr/local/lib/pulp/bin/scalene --html --outfile profiling_output.html scalene_pulpcore_worker.py`.
### scalene_pulpcore_worker.py
```
import logging
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pulpcore.app.settings")
django.setup()
from pulpcore.tasking.pulpcore_worker import NewPulpWorker # noqa: E402: module level not at top
_logger = logging.getLogger(__name__)
NewPulpWorker().run_forever()
```
Then run a task, but you'll see tracebacks after the tasking code completes; it's the RuntimeError [here](https://github.com/pulp/pulpcore/blob/e3421edb42d669d2f0cd91403803892f37710dd9/pulpcore/app/models/task.py#L217). Also you'll see that scalene doesn't include any actual tasking code in its analysis. It does profile the startup of the worker nicely though.
@pulp/core FYI ^
Here's the story I have on recording the memory: https://github.com/pulp/pulpcore/issues/2329
Here's the story I wrote on making batch_size a BaseRemote param https://github.com/pulp/pulpcore/issues/2332
Maybe we're looking at this wrong.
Right now the abridged version of what we do is:
* parse the full metadata, put it in the pipeline
* try to find any that already exist, swap the content models
* save the ones whose internal state says they aren't saved yet
But the entire pipeline basically holds these fat, completely populated models objects from beginning to end.
What if we merge the `QueryExistingContents` and `ContentSaver` stages, promote them as far upwards in the pipeline as we can, and then only hold on to the absolute minimum amount of data after those stages are finished? Then the amount of memory would roughly scale with the contents of the first one or two queues, rather than the number of stages, and the whole pipeline could stay full.
The pipeline also handles downloading, creating of remote artifacts, resolving content futures, and the searching+reading of `Artifact` objects from the db too.
It does allow a lot of objects to be loaded into the pipeline: with 1000 in each queue, plus maybe 500 in the batch iterator (holding objects within each stage), that's like 6K objects in the pipeline that I've seen.
Overall reducing the number of items will reduce the memory footprint, but it will also slow down the sync with more RTTs to the database and possibly less parallel downloading. I think ultimately the batcher is really in control of that (500 items each), so what we could consider doing is reducing the queue size from 1000 to 500, because there is no performance gained from allowing more items in the queues between the stages than each stage can handle in one batch. This would reduce the number of items by like 40% (quick guess). That would be Pareto efficient because runtime should be unaffected, yet the memory amount would be reduced.
Maybe we could benchmark a pre-post change after I finish my memory recording PR that needs revision. | 2022-03-16T15:40:31 |
|
pulp/pulpcore | 2,345 | pulp__pulpcore-2345 | [
"2069"
] | 3131158dea2eea5cb05ec18afce462f2d9cf8bb7 | diff --git a/pulpcore/plugin/stages/api.py b/pulpcore/plugin/stages/api.py
--- a/pulpcore/plugin/stages/api.py
+++ b/pulpcore/plugin/stages/api.py
@@ -179,12 +179,12 @@ def __str__(self):
return "[{id}] {name}".format(id=id(self), name=self.__class__.__name__)
-async def create_pipeline(stages, maxsize=1000):
+async def create_pipeline(stages, maxsize=1):
"""
A coroutine that builds a Stages API linear pipeline from the list `stages` and runs it.
Each stage is an instance of a class derived from :class:`pulpcore.plugin.stages.Stage` that
- implements the :meth:`run` coroutine. This coroutine reads asyncromously either from the
+ implements the :meth:`run` coroutine. This coroutine reads asynchronously either from the
`items()` iterator or the `batches()` iterator and outputs the items with `put()`. Here is an
example of the simplest stage that only passes data::
@@ -196,7 +196,7 @@ async def run(self):
Args:
stages (list of coroutines): A list of Stages API compatible coroutines.
maxsize (int): The maximum amount of items a queue between two stages should hold. Optional
- and defaults to 100.
+ and defaults to 1.
Returns:
A single coroutine that can be used to run, wait, or cancel the entire pipeline with.
| Reduce memory usage of the pipeline
Author: @bmbouter (bmbouter)
Redmine Issue: 9635, https://pulp.plan.io/issues/9635
---
## Motivation
It would be nice if users could specify a desired maximum amount of RAM to be used during sync. For example, a user can say I only want 1500 MB of RAM to be used max.
## What is already in place
The stages pipeline restricts memory usage by only allowing 1000 declarative content objects between each stage (so for 8-9 stages that's 8000-9000 declarative content objects). This happens [here](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L217).
Interestingly the docstring says this defaults to 100, but it seems to actually be 1000!
Also the stages perform batching, so they will only take in a limited number of items (the batch size). That happens [with minsize](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L84).
## Why this isn't enough
These are count-based mechanisms and don't correspond to actual MB or GB of memory used. Content units vary a lot in how much memory each DeclarativeContent object takes up.
Another lesser problem is that it doesn't help plugin writers restrict their usage of memory in FirstStage.
## Idea
Add a new param called `max_mb` to base Remote, which defaults to None. If specified, the user will be specifying the desired maximum MB used by the syncing process.
Have the queues between the stages, and the batcher implementation, both check the total memory the current process is using and poll with asyncio.sleep() until it goes down. This should keep the maximum amount used by all objects roughly to that number.
## Details
Introduce a new `MBSizeQueue` which is a wrapper around the `asyncio.Queue` used today. It will have the same `put()` call, but will only wait if the amount of memory in use is greater than what the remote is configured for.
Then introduce the same memory checking feature in the batcher. I'm not completely sure this second part is needed though.
We have to be very careful not to deadlock with this feature. For example, we have to account for the base case where even a single item is larger than the memory desired. Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock.....
| From: @dralley (dalley)
Date: 2021-12-13T16:54:22Z
---
> Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock.....
This is true but:
1) the metadata is very messed up - 13 million duplicate "files" are listed for that package.
2) the postgresql maximum insert size is 1gb - so a single content unit exceeding that is a hard limitation regardless of anything else we do. Luckily I think that would be much much less frequent than an entire batch exceeding that limit, which I don't think we've ever seen happen either (but still a theoretical issue).
@bmbouter I finally have a PR for the pulp_rpm plugin that reduces the frontend memory consumption to ~50 megabytes. Unfortunately the whole sync (for a large repo like RHEL7) is still using ~1.8gb
So this effort is necessary to make any further improvements.
@dralley great I'm hoping to PoC it this week. It was further down on my list of things to do, but now it's close to the top. I'll take as assigned.
[I implemented the version](https://github.com/pulp/pulpcore/compare/main...bmbouter:memory-reduction-PoC?expand=1) which would have a setting and if the memory in-use was over that setting it would disable batching on the queues and let the queue drain.
It did significantly limit memory usage, but the runtime slows down hugely. For example consider a pulp_rpm sync of EL7 with `on_demand=True` so no artifact downloading. Runtime-wise the regular one took 777 seconds, then 7671 seconds with the single-item pipeline. Here are two graphs of the memory usage. This was configured with `TASK_MEMORY_LIMIT = 128` which put it into effect almost immediately for all tests run.
Note the y-axis is MB of the process and the x-axis is from every call to put() after first-stage. So by the time the first call occurs first stage has already allocated a lot of memory.
# RPM Regular, No Memory Restriction

# RPM With Memory Restriction

Here are some results for pulp_file which also had a roughly 10x slowdown.
# pulp_file Regular, No Memory Restriction

# pulp_file With Memory Restriction

@bmbouter What about some intermediate value, like a limit of 1.2gb?
So I don't think we should merge or pursue this experiment, mainly for two reasons.
1) It doesn't actually limit the memory to the desired amount and it's off by a lot. In all tests it was supposed to hold to 128 M, so if you had the OOM set to 140 in all cases your OOM would still occur.
2) It runs crazy crazy slow
Here are a few ideas though:
1) We should add a debugging feature to produce memory usage graphs for every task run and dump them into a directory. This way users and developers can accurately record how their memory performance is doing. An adaptation of this PR can add this feature.
2) The batch sizes are really what is important. In my case my memory restriction was reducing them to 0, and by default they are 1000, so what if we made this a configurable option on BaseRemote allowing users to specify a different default.
3) We should try to optimize first stage more using cpu and memory profilers, but this will have to be a plugin-by-plugin effort.
> @bmbouter What about some intermediate value, like a limit of 1.2gb?
It takes so long to run (like hours) with memory restriction that I don't know what effective memory it would be using. What do you think about my next-steps proposals [here](https://github.com/pulp/pulpcore/issues/2069#issuecomment-1065199607)?
> It doesn't actually limit the memory to the desired amount and it's off by a lot. In all tests it was supposed to hold to 128 M, so if you had the OOM set to 140 in all cases your OOM would still occur.
That's expected with the current frontend, all the work to reduce the amount of memory used by the rpm plugin frontend isn't merged yet. Probably 128mb would still be too low, but it could probably stay under 400mb.
> It runs crazy crazy slow
It sounds like you're disabling batching entirely though, what if we didn't go quite that far? Dynamically sized batches I mean. But perhaps that's not very easy with the current architecture.
Looking at the PR, all of the queues are swapped with memory restricting ones, when it seems like really the only one that needs to be done that way is the first stage.
>We should add a debugging feature to produce memory usage graphs for every task run and dump them into a directory. This way users and developers can accurately record how their memory performance is doing. An adaptation of this PR can add this feature.
How much overhead is there for the memory tracking alone? Is that using the pympler method or just a general "how much memory is this process using" graph?
>The batch sizes are really what is important. In my case my memory restriction was reducing them to 0, and by default they are 1000, so what if we made this a configurable option on BaseRemote allowing users to specify a different default.
I'm uncomfortable making an implementation detail part of our public API like that. I could agree with putting it in `settings.py`
Also could you write up how you were using scalene as a knowledge sharing exercise? It's really useful to be able to reference things like that months from now :)
In the PR, only one queue is swapped with the memory restricting one, the first one just after first stage. It does disable batching of the entire pipeline though. I've kind of determined that if we memory throttle we need to also disable batching because throttling and using batching to wait for additional put() calls would likely deadlock. I think what this experiment does show is that changing the batch size does influence the memory usage numbers significantly, which is good.
I'm hesitant to try to dynamically change the batch size because the algorithm would very likely keep reducing it (memory still grows, just more slowly), and then you're in the super-slow no-batching situation again. I'd rather the user (or administrator) tell us, and use that value for all tasks or for this specific task, otherwise defaulting to what it was doing before (1000).
The memory tracking was just periodic calls to the resource library, which I think just reads the values from the kernel. I ended up not even using pympler.
The scalene stuff was pretty easy: in my dev env I `pip install scalene` and then `pclean`. I `pstart` all the processes, but then I shut down the workers with `sudo systemctl stop pulpcore-worker@1 pulpcore-worker@2`. I then use the script below as the scalene entry point by calling it with: `/usr/local/lib/pulp/bin/scalene --html --outfile profiling_output.html scalene_pulpcore_worker.py`.
### scalene_pulpcore_worker.py
```
import logging
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pulpcore.app.settings")
django.setup()
from pulpcore.tasking.pulpcore_worker import NewPulpWorker # noqa: E402: module level not at top
_logger = logging.getLogger(__name__)
NewPulpWorker().run_forever()
```
Then run a task, but you'll see tracebacks after the tasking code completes; it's the RuntimeError [here](https://github.com/pulp/pulpcore/blob/e3421edb42d669d2f0cd91403803892f37710dd9/pulpcore/app/models/task.py#L217). Also you'll see that scalene doesn't include any actual tasking code in its analysis. It does profile the startup of the worker nicely though.
@pulp/core FYI ^
Here's the story I have on recording the memory: https://github.com/pulp/pulpcore/issues/2329
Here's the story I wrote on making batch_size a BaseRemote param https://github.com/pulp/pulpcore/issues/2332
Maybe we're looking at this wrong.
Right now the abridged version of what we do is:
* parse the full metadata, put it in the pipeline
* try to find any that already exist, swap the content models
* save the ones whose internal state says they aren't saved yet
But the entire pipeline basically holds these fat, completely populated models objects from beginning to end.
What if we merge the `QueryExistingContents` and `ContentSaver` stages, promote them as far upwards in the pipeline as we can, and then only hold on to the absolute minimum amount of data after those stages are finished? Then the amount of memory would roughly scale with the contents of the first one or two queues, rather than the number of stages, and the whole pipeline could stay full.
The pipeline also handles downloading, creating of remote artifacts, resolving content futures, and the searching+reading of `Artifact` objects from the db too.
It does allow a lot of objects to be loaded into the pipeline: with 1000 in each queue, plus maybe 500 in the batch iterator (holding objects within each stage), that's like 6K objects in the pipeline that I've seen.
Overall reducing the number of items will reduce the memory footprint, but it will also slow down the sync with more RTTs to the database and possibly less parallel downloading. I think ultimately the batcher is really in control of that (500 items each), so what we could consider doing is reducing the queue size from 1000 to 500, because there is no performance gained from allowing more items in the queues between the stages than each stage can handle in one batch. This would reduce the number of items by like 40% (quick guess). That would be Pareto efficient because runtime should be unaffected, yet the memory amount would be reduced.
Maybe we could benchmark a pre-post change after I finish my memory recording PR that needs revision. | 2022-03-16T15:42:15 |
|
pulp/pulpcore | 2,354 | pulp__pulpcore-2354 | [
"2327"
] | f145409373a0515c2bee58b86eda76b4fc1244bf | diff --git a/pulpcore/app/management/commands/datarepair-2327.py b/pulpcore/app/management/commands/datarepair-2327.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/app/management/commands/datarepair-2327.py
@@ -0,0 +1,120 @@
+from gettext import gettext as _
+
+from django.db import connection
+from django.conf import settings
+from django.core.management import BaseCommand
+from django.db.models import Q
+from django.utils.encoding import force_bytes, force_str
+
+import cryptography
+
+from pulpcore.app.models import Remote
+
+
+class Command(BaseCommand):
+ """
+ Django management command for repairing incorrectly migrated remote data.
+ """
+
+ help = _(
+ "Repairs issue #2327. A small number of configuration settings may have been "
+ "corrupted during an upgrade from a previous version of Pulp to a Pulp version "
+ "between 3.15-3.18, resulting in trouble when syncing or viewing certain remotes. "
+ "This script repairs the data (which was not lost)."
+ )
+
+ def add_arguments(self, parser):
+ """Set up arguments."""
+ parser.add_argument(
+ "--dry-run",
+ action="store_true",
+ help=_("Don't modify anything, just collect results on how many Remotes are impacted."),
+ )
+
+ def handle(self, *args, **options):
+
+ dry_run = options["dry_run"]
+ fields = ("username", "password", "proxy_username", "proxy_password", "client_key")
+
+ with open(settings.DB_ENCRYPTION_KEY, "rb") as key_file:
+ fernet = cryptography.fernet.Fernet(key_file.read())
+
+ possibly_affected_remotes = (
+ Q(username__isnull=False)
+ | Q(password__isnull=False)
+ | Q(proxy_username__isnull=False)
+ | Q(proxy_password__isnull=False)
+ | Q(client_key__isnull=False)
+ )
+
+ number_unencrypted = 0
+ number_multi_encrypted = 0
+
+ for remote_pk in Remote.objects.filter(possibly_affected_remotes).values_list(
+ "pk", flat=True
+ ):
+ try:
+ remote = Remote.objects.get(pk=remote_pk)
+ # if we can get the remote successfully, it is either OK or the fields are
+ # encrypted more than once
+ except cryptography.fernet.InvalidToken:
+ # If decryption fails then it probably hasn't been encrypted yet
+ # get the raw column value, avoiding any Django field handling
+ with connection.cursor() as cursor:
+ cursor.execute(
+ "SELECT username, password, proxy_username, proxy_password, client_key "
+ "FROM core_remote WHERE pulp_id = %s",
+ [str(remote_pk)],
+ )
+ row = cursor.fetchone()
+
+ field_values = {}
+
+ for field, value in zip(fields, row):
+ field_values[field] = value
+
+ if not dry_run:
+ Remote.objects.filter(pk=remote_pk).update(**field_values)
+ number_unencrypted += 1
+ else:
+ times_decrypted = 0
+ keep_trying = True
+ needs_update = False
+
+ while keep_trying:
+ for field in fields:
+ field_value = getattr(remote, field) # value gets decrypted once on access
+ if not field_value:
+ continue
+
+ try:
+ # try to decrypt it again
+ field_value = force_str(fernet.decrypt(force_bytes(field_value)))
+ # it was decrypted successfully again time, so it was probably
+ # encrypted multiple times over. lets re-set the value with the
+ # newly decrypted value
+ setattr(remote, field, field_value)
+ needs_update = True
+ except cryptography.fernet.InvalidToken:
+ # couldn't be decrypted again, stop here
+ keep_trying = False
+
+ times_decrypted += 1
+
+ if needs_update:
+ if not dry_run:
+ remote.save()
+ number_multi_encrypted += 1
+
+ if dry_run:
+ print("Remotes with un-encrypted fields: {}".format(number_unencrypted))
+ print("Remotes encrypted multiple times: {}".format(number_multi_encrypted))
+ else:
+ if not number_unencrypted and not number_multi_encrypted:
+ print("Finished. (OK)")
+ else:
+ print(
+ "Finished. ({} remotes fixed)".format(
+ number_unencrypted + number_multi_encrypted
+ )
+ )
diff --git a/pulpcore/app/migrations/0073_encrypt_remote_fields.py b/pulpcore/app/migrations/0073_encrypt_remote_fields.py
--- a/pulpcore/app/migrations/0073_encrypt_remote_fields.py
+++ b/pulpcore/app/migrations/0073_encrypt_remote_fields.py
@@ -1,59 +1,53 @@
# Generated by Django 2.2.20 on 2021-04-29 14:33
-from django.db import connection, migrations
-from django.db.models import Q
-
+from django.db import migrations
import pulpcore.app.models.fields
fields = ("username", "password", "proxy_username", "proxy_password", "client_key")
+new_fields = ("_encrypted_username", "_encrypted_password", "_encrypted_proxy_username", "_encrypted_proxy_password", "_encrypted_client_key")
def encrypt_remote_fields(apps, schema_editor):
- offset = 0
- chunk_size = 100
Remote = apps.get_model("core", "Remote")
- with connection.cursor() as cursor:
- while True:
- cursor.execute(
- f"SELECT pulp_id, {(',').join(fields)} FROM "
- f"core_remote LIMIT {chunk_size} OFFSET {offset}"
- )
- records = cursor.fetchall()
- offset += chunk_size
+ remotes_needing_update = []
+ for remote in Remote.objects.all().iterator():
+ if not any([getattr(remote, field) for field in fields]):
+ continue
- if len(records) == 0:
- break
+ remote._encrypted_username = remote.username
+ remote._encrypted_password = remote.password
+ remote._encrypted_proxy_username = remote.proxy_username
+ remote._encrypted_proxy_password = remote.proxy_password
+ remote._encrypted_client_key = remote.client_key
+ remotes_needing_update.append(remote)
- for record in records:
- update = {
- field: record[i] for i, field in enumerate(fields, 1) if record[i] is not None
- }
- if not update:
- continue
+ if len(remotes_needing_update) > 100:
+ Remote.objects.bulk_update(remotes_needing_update, new_fields)
+ remotes_needing_update.clear()
- Remote.objects.filter(pk=record[0]).update(**update)
+ Remote.objects.bulk_update(remotes_needing_update, new_fields)
def unencrypt_remote_fields(apps, schema_editor):
Remote = apps.get_model("core", "Remote")
- q = Q()
- for field in fields:
- q &= Q(**{field: None}) | Q(**{field: ""})
+ remotes_needing_update = []
+ for remote in Remote.objects.all().iterator():
+ if not any([getattr(remote, field) for field in new_fields]):
+ continue
+ remote.username = remote._encrypted_username
+ remote.password = remote._encrypted_password
+ remote.proxy_username = remote._encrypted_proxy_username
+ remote.proxy_password = remote._encrypted_proxy_password
+ remote.client_key = remote._encrypted_client_key
+ remotes_needing_update.append(remote)
- for remote in Remote.objects.exclude(q):
- update = [
- f"{field} = '{getattr(remote, field)}'"
- for field in fields
- if getattr(remote, field) is not None
- ]
- query = (
- f"UPDATE core_remote cr SET {(', ').join(update)} WHERE pulp_id = '{remote.pulp_id}'"
- )
+ if len(remotes_needing_update) > 100:
+ Remote.objects.bulk_update(remotes_needing_update, fields)
+ remotes_needing_update.clear()
- with connection.cursor() as cursor:
- cursor.execute(query)
+ Remote.objects.bulk_update(remotes_needing_update, fields)
class Migration(migrations.Migration):
@@ -63,33 +57,82 @@ class Migration(migrations.Migration):
]
operations = [
- migrations.AlterField(
+ # Add new fields to temporarily hold the encrypted values
+ migrations.AddField(
model_name="remote",
- name="client_key",
+ name="_encrypted_client_key",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
- migrations.AlterField(
+ migrations.AddField(
model_name="remote",
- name="password",
+ name="_encrypted_password",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
- migrations.AlterField(
+ migrations.AddField(
model_name="remote",
- name="proxy_password",
+ name="_encrypted_proxy_password",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
- migrations.AlterField(
+ migrations.AddField(
model_name="remote",
- name="proxy_username",
+ name="_encrypted_proxy_username",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
- migrations.AlterField(
+ migrations.AddField(
model_name="remote",
- name="username",
+ name="_encrypted_username",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
+ # Populate the new fields with encrypted values computed from the unencrypted fields
migrations.RunPython(
code=encrypt_remote_fields,
reverse_code=unencrypt_remote_fields,
),
+ # Remove the unencrypted columns
+ migrations.RemoveField(
+ model_name="remote",
+ name="client_key",
+ ),
+ migrations.RemoveField(
+ model_name="remote",
+ name="password",
+ ),
+ migrations.RemoveField(
+ model_name="remote",
+ name="proxy_password",
+ ),
+ migrations.RemoveField(
+ model_name="remote",
+ name="proxy_username",
+ ),
+ migrations.RemoveField(
+ model_name="remote",
+ name="username",
+ ),
+ # Replace the formerly-unencrypted columns with the new encrypted ones
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_client_key",
+ new_name="client_key",
+ ),
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_password",
+ new_name="password",
+ ),
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_proxy_password",
+ new_name="proxy_password",
+ ),
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_proxy_username",
+ new_name="proxy_username",
+ ),
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_username",
+ new_name="username",
+ ),
]
| Migration of encrypted fields between 3.14 and 3.16 is broken for some remotes
**Version**
After upgrading from Katello 4.2 to 4.3, some users are encountering sync problems with some repositories. A 500 error is triggered when the /repositories/.../sync/ endpoint is used. The error seems to be related to the client_cert / client_key / ca_cert values.
```
Feb 24 11:28:57 foreman pulpcore-api: pulp [e2601bd6-70a5-4a87-ace3-a386280e3e30]: django.request:ERROR: Internal Server Error: /pulp/api/v3/remotes/rpm/rpm/89e5b587-2295-4f1d-a49d-c0e8ed518c67/
Feb 24 11:28:57 foreman pulpcore-api: Traceback (most recent call last):
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 96, in _get_unverified_token_data
Feb 24 11:28:57 foreman pulpcore-api: data = base64.urlsafe_b64decode(token)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/rh/rh-python38/root/usr/lib64/python3.8/base64.py", line 133, in urlsafe_b64decode
Feb 24 11:28:57 foreman pulpcore-api: return b64decode(s)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/rh/rh-python38/root/usr/lib64/python3.8/base64.py", line 87, in b64decode
Feb 24 11:28:57 foreman pulpcore-api: return binascii.a2b_base64(s)
Feb 24 11:28:57 foreman pulpcore-api: binascii.Error: Incorrect padding
Feb 24 11:28:57 foreman pulpcore-api: During handling of the above exception, another exception occurred:
Feb 24 11:28:57 foreman pulpcore-api: Traceback (most recent call last):
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/core/handlers/exception.py", line 47, in inner
Feb 24 11:28:57 foreman pulpcore-api: response = get_response(request)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/core/handlers/base.py", line 181, in _get_response
Feb 24 11:28:57 foreman pulpcore-api: response = wrapped_callback(request, *callback_args, **callback_kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view
Feb 24 11:28:57 foreman pulpcore-api: return view_func(*args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/viewsets.py", line 125, in view
Feb 24 11:28:57 foreman pulpcore-api: return self.dispatch(request, *args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 509, in dispatch
Feb 24 11:28:57 foreman pulpcore-api: response = self.handle_exception(exc)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 469, in handle_exception
Feb 24 11:28:57 foreman pulpcore-api: self.raise_uncaught_exception(exc)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 480, in raise_uncaught_exception
Feb 24 11:28:57 foreman pulpcore-api: raise exc
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 506, in dispatch
Feb 24 11:28:57 foreman pulpcore-api: response = handler(request, *args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/viewsets/base.py", line 470, in partial_update
Feb 24 11:28:57 foreman pulpcore-api: return self.update(request, *args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/viewsets/base.py", line 452, in update
Feb 24 11:28:57 foreman pulpcore-api: instance = self.get_object()
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/generics.py", line 96, in get_object
Feb 24 11:28:57 foreman pulpcore-api: obj = get_object_or_404(queryset, **filter_kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/generics.py", line 19, in get_object_or_404
Feb 24 11:28:57 foreman pulpcore-api: return _get_object_or_404(queryset, *filter_args, **filter_kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/shortcuts.py", line 76, in get_object_or_404
Feb 24 11:28:57 foreman pulpcore-api: return queryset.get(*args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 431, in get
Feb 24 11:28:57 foreman pulpcore-api: num = len(clone)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 262, in __len__
Feb 24 11:28:57 foreman pulpcore-api: self._fetch_all()
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 1324, in _fetch_all
Feb 24 11:28:57 foreman pulpcore-api: self._result_cache = list(self._iterable_class(self))
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 68, in __iter__
Feb 24 11:28:57 foreman pulpcore-api: for row in compiler.results_iter(results):
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/sql/compiler.py", line 1122, in apply_converters
Feb 24 11:28:57 foreman pulpcore-api: value = converter(value, expression, connection)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/models/fields.py", line 104, in from_db_value
Feb 24 11:28:57 foreman pulpcore-api: return force_str(self._fernet.decrypt(force_bytes(value)))
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 75, in decrypt
Feb 24 11:28:57 foreman pulpcore-api: timestamp, data = Fernet._get_unverified_token_data(token)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 98, in _get_unverified_token_data
Feb 24 11:28:57 foreman pulpcore-api: raise InvalidToken
Feb 24 11:28:57 foreman pulpcore-api: cryptography.fernet.InvalidToken
Feb 24 11:28:57 foreman pulpcore-api: pulp [e2601bd6-70a5-4a87-ace3-a386280e3e30]: - - [24/Feb/2022:10:28:57 +0000] "PATCH /pulp/api/v3/remotes/rpm/rpm/89e5b587-2295-4f1d-a49d-c0e8ed518c67/ HTTP/1.1" 500 145 "-" "OpenAPI-Generator/3.16.1/ruby"
Feb 24 11:28:58 foreman qdrouterd: SERVER (info) [C464018] Connection from 10.2.1.176:50898 (to :5647) failed: amqp:resource-limit-exceeded local-idle-timeout expired
Feb 24 11:28:58 foreman qdrouterd: ROUTER_CORE (info) [C464018][L1196864] Link detached: del=0 presett=0 psdrop=0 acc=0 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no
Feb 24 11:28:58 foreman qdrouterd: ROUTER_CORE (info) [C464018] Connection Closed
Feb 24 11:28:58 foreman qdrouterd: ROUTER_CORE (info) [C4][L1196865] Link detached: del=0 presett=0 psdrop=0 acc=0 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no
Feb 24 11:28:58 foreman pulpcore-api: pulp [e2601bd6-70a5-4a87-ace3-a386280e3e30]: django.request:ERROR: Internal Server Error: /pulp/api/v3/repositories/rpm/rpm/b4a448e8-896c-4fec-8ee2-fcdf4307f2fd/sync/
Feb 24 11:28:58 foreman pulpcore-api: Traceback (most recent call last):
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 96, in _get_unverified_token_data
Feb 24 11:28:58 foreman pulpcore-api: data = base64.urlsafe_b64decode(token)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/rh/rh-python38/root/usr/lib64/python3.8/base64.py", line 133, in urlsafe_b64decode
Feb 24 11:28:58 foreman pulpcore-api: return b64decode(s)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/rh/rh-python38/root/usr/lib64/python3.8/base64.py", line 87, in b64decode
Feb 24 11:28:58 foreman pulpcore-api: return binascii.a2b_base64(s)
Feb 24 11:28:58 foreman pulpcore-api: binascii.Error: Incorrect padding
Feb 24 11:28:58 foreman pulpcore-api: During handling of the above exception, another exception occurred:
Feb 24 11:28:58 foreman pulpcore-api: Traceback (most recent call last):
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/core/handlers/exception.py", line 47, in inner
Feb 24 11:28:58 foreman pulpcore-api: response = get_response(request)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/core/handlers/base.py", line 181, in _get_response
Feb 24 11:28:58 foreman pulpcore-api: response = wrapped_callback(request, *callback_args, **callback_kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view
Feb 24 11:28:58 foreman pulpcore-api: return view_func(*args, **kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/viewsets.py", line 125, in view
Feb 24 11:28:58 foreman pulpcore-api: return self.dispatch(request, *args, **kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 509, in dispatch
Feb 24 11:28:58 foreman pulpcore-api: response = self.handle_exception(exc)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 469, in handle_exception
Feb 24 11:28:58 foreman pulpcore-api: self.raise_uncaught_exception(exc)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 480, in raise_uncaught_exception
Feb 24 11:28:58 foreman pulpcore-api: raise exc
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 506, in dispatch
Feb 24 11:28:58 foreman pulpcore-api: response = handler(request, *args, **kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/viewsets.py", line 130, in sync
Feb 24 11:28:58 foreman pulpcore-api: serializer.is_valid(raise_exception=True)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/serializers.py", line 220, in is_valid
Feb 24 11:28:58 foreman pulpcore-api: self._validated_data = self.run_validation(self.initial_data)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/serializers.py", line 419, in run_validation
Feb 24 11:28:58 foreman pulpcore-api: value = self.to_internal_value(data)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/serializers.py", line 476, in to_internal_value
Feb 24 11:28:58 foreman pulpcore-api: validated_value = field.run_validation(primitive_value)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/relations.py", line 153, in run_validation
Feb 24 11:28:58 foreman pulpcore-api: return super().run_validation(data)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/fields.py", line 568, in run_validation
Feb 24 11:28:58 foreman pulpcore-api: value = self.to_internal_value(data)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/relations.py", line 373, in to_internal_value
Feb 24 11:28:58 foreman pulpcore-api: return self.get_object(match.view_name, match.args, match.kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/serializers/base.py", line 322, in get_object
Feb 24 11:28:58 foreman pulpcore-api: return super().get_object(*args, **kwargs).cast()
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/relations.py", line 318, in get_object
Feb 24 11:28:58 foreman pulpcore-api: return queryset.get(**lookup_kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 431, in get
Feb 24 11:28:58 foreman pulpcore-api: num = len(clone)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 262, in __len__
Feb 24 11:28:58 foreman pulpcore-api: self._fetch_all()
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 1324, in _fetch_all
Feb 24 11:28:58 foreman pulpcore-api: self._result_cache = list(self._iterable_class(self))
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 68, in __iter__
Feb 24 11:28:58 foreman pulpcore-api: for row in compiler.results_iter(results):
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/sql/compiler.py", line 1122, in apply_converters
Feb 24 11:28:58 foreman pulpcore-api: value = converter(value, expression, connection)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/models/fields.py", line 104, in from_db_value
Feb 24 11:28:58 foreman pulpcore-api: return force_str(self._fernet.decrypt(force_bytes(value)))
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 75, in decrypt
Feb 24 11:28:58 foreman pulpcore-api: timestamp, data = Fernet._get_unverified_token_data(token)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 98, in _get_unverified_token_data
Feb 24 11:28:58 foreman pulpcore-api: raise InvalidToken
Feb 24 11:28:58 foreman pulpcore-api: cryptography.fernet.InvalidToken
Feb 24 11:28:58 foreman pulpcore-api: pulp [e2601bd6-70a5-4a87-ace3-a386280e3e30]: - - [24/Feb/2022:10:28:58 +0000] "POST /pulp/api/v3/repositories/rpm/rpm/b4a448e8-896c-4fec-8ee2-fcdf4307f2fd/sync/ HTTP/1.1" 500 145
```
>
> What I did notice, though, is that on the old 4.2 server with pulpcore 3.14 all those remotes have an empty username and password set, i.e. an empty string “” and not NULL like many others. However, the list of remotes on the old server with empty string username is longer than the affected ones on my 4.3 server.
>
> So it seems to be an issue with the database migration from 3.14 to 3.16 and encryption of some fields in the database. The migration/encryption seems to fail in some cases and the original content remains in the database, which makes decryption fail. This is also true for my affected rhel repository: I can see in the database that the remote contains the RSA private key in client_key, unlike the other rhel repos which contain some base64 encoded string in the client_key fields.
>
> Thus the problem is that for some rows in the database (at least the core_remote table) encryption of the fields client_key, username, password (and proxy_username, proxy_password too, I guess) fails during migration from 3.14 to 3.16, leaving the original content in the database. This leads to decryption failure in 3.16.
>
> For whatever reason only a few repos are affected, mostly those which have empty strings for username, password instead of NULL like many others. However, even with empty strings for some remotes the encryption works while it doesn’t for others.
**Describe the bug**
API calls to trigger a sync return a 500 error for some remotes
**To Reproduce**
Unclear
**Expected behavior**
A sync task is correctly created, no 500 error.
**Additional context**
Discourse discussion: https://community.theforeman.org/t/pulprpmclient-apierror-http-500-during-sync-repository/27480/13?u=dralley
Looking closer into the encrypted strings in the database after the migration from 3.14 to 3.16, I noticed that the empty strings are consecutive in an unordered select. That made me suspect that the problem is the chunking and the assumption that the order of rows won't change between chunks. To verify, I decrypted all encrypted usernames in the migrated database and found what I had suspected:
```
>>> f.decrypt(b'gAAAAABiKvJgZhyIdXy__3X2rh8QdXezaWj-Y-RelFEfYIWg2mrTREsKTB7ydPY2gn3ZhveMwE3ocN1KO8YV3h5iA-wMibo_aw==')
b''
>>> f.decrypt(b'gAAAAABiKvJzCDPxmuSFmmAawerxPi1AqUCP4H8NxWiO0ypnYwFraXPj35EWQ4ABupu_KIBbBPFhW2elE_4Ru6FQQWRggn1yeg==')
b''
>>> f.decrypt(b'gAAAAABiKvGrd6IVPXjJZuTUPYxXg_F3jXvaMmbH3l_O2x1hNnxG8vBKeTHav_0Bz2rjsjcUc6CH_K4eapwLpV0tNGF_cJZKbRbqsSB_JZTQyjW8jSovvTipMSsbWeQJJZ-B5yLWk6vBnNk9cQ81I6kQOnXZolXbRfIPFdPM9AhwCJro8vnDcN4AQ5NKe9dyOVM80hHDquUW2IavogypDl9XLbsnr6m9KQ==')
b'gAAAAABiKe5e7RWrDl9cNAUTLHZ9CjN30uvPAZ_KZZskG_pyCBDCJBJ5pY6pvKaidZltPjWa0iqLP8RsKGakC8fpTi5xMz-c6Q=='
...
```
Obviously the last one above has been encrypted twice. So basically, the migration does encrypt some rows twice and some not at all.
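Which also means the damage is mechanically detectable after the fact. A sketch, reusing the `f` Fernet object from above (the function name and return strings are just illustrative):
```python
from cryptography.fernet import InvalidToken


def classify(f, stored_value):
    try:
        once = f.decrypt(stored_value)
    except InvalidToken:
        return "still plaintext (never encrypted)"
    try:
        f.decrypt(once)
        return "encrypted twice"
    except InvalidToken:
        return "encrypted exactly once (OK)"
```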
I guess the reason is in 0073_encrypt_remote_fields.py:
```
def encrypt_remote_fields(apps, schema_editor):
offset = 0
chunk_size = 100
Remote = apps.get_model("core", "Remote")
with connection.cursor() as cursor:
while True:
cursor.execute(
f"SELECT pulp_id, {(',').join(fields)} FROM "
f"core_remote LIMIT {chunk_size} OFFSET {offset}"
)
records = cursor.fetchall()
offset += chunk_size
if len(records) == 0:
break
for record in records:
update = {
field: record[i] for i, field in enumerate(fields, 1) if record[i] is not None
}
if not update:
continue
Remote.objects.filter(pk=record[0]).update(**update)
```
I have more than 100 rows in core_remote, i.e. it runs two selects:
```
pulpcore=# select pulp_id, "username", "password", "proxy_username", "proxy_password", "client_key" from core_remote limit 100 offset 0;
pulpcore=# select pulp_id, "username", "password", "proxy_username", "proxy_password", "client_key" from core_remote limit 100 offset 100;
```
This assumes that the order of the rows in the whole table does not change between the first and second select. I suspect that this isn't true. Comparing the select on the database before and after the migration definitely returns the rows in a different order.
So the migration should make sure to maintain the order of returned rows, e.g. by adding an `ORDER BY pulp_id`. That way the chunks should work, unless some other operation in between inserts or deletes rows or modifies pulp_ids...
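The smallest change along those lines would just be to stabilise the chunked select, e.g. (a sketch of this suggestion only; the reworked migration in the patch above instead drops the manual chunking entirely):
```python
def fetch_chunk(cursor, fields, chunk_size, offset):
    # same query as 0073_encrypt_remote_fields.py, but with a stable ordering
    cursor.execute(
        f"SELECT pulp_id, {(',').join(fields)} FROM core_remote "
        f"ORDER BY pulp_id LIMIT {chunk_size} OFFSET {offset}"
    )
    return cursor.fetchall()
```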
Of course this leaves people who have already migrated and cannot go back with broken content in those fields: some fields may still contain plaintext content, which causes decryption to fail with this InvalidToken exception. Other fields are double-encrypted, and decrypt will return the single-encrypted string instead of the plaintext, causing authentication to fail.
To make matters worse: at least on my Katello 4.2 server there are quite a few rows in core_remote containing empty strings "" in those fields instead of NULL, causing the migration to encrypt the empty string. Due to this, I have 41 rows in the 3.16 table with encrypted usernames even though I actually only have 4 repositories having a non-empty username...
Thinking a little bit more about it, I think LIMIT/chunking in the migration is simply wrong. It should just use the standard postgresql cursor/buffering to go through all rows and not try to split it up manually. Using LIMIT/OFFSET for paging through an unsorted table is just conceptually flawed.
In addition: technically, the SELECT and the following updates should be in a transaction block to make sure it's consistent and no other process can get in between the select and the updates.
Or otherwise the migration could temporarily add encrypted columns for those 5 fields to be encrypted and write the encrypted text there. This allows the migration to loop through the table (even with limit) and find rows which still need to be encrypted. Once there are no rows left to be encrypted, a single update can move the encrypted texts from the temporary columns into the normal columns, and after that you can drop the temporary columns.
>So the migration should make sure to maintain order of returned rows, e.g. by adding a ORDER by pulp_id. That way the chunks should work unless some other operation in between would insert or delete rows or modify pulp_ids...
Yes, your theory is almost certainly the correct root cause. That needs to be fixed as you've described, using ordered queries.
>In addition: technically, the SELECT and the following updates should be in a transaction block to make sure it's consistent and no other process can get in between the select and the updates.
This part should be fine though. All Django migrations implicitly run inside a transaction unless you manually specify otherwise, and that isn't the case here.
> Yes, your theory is almost certainly the correct root cause. That needs to be fixed as you've described, using ordered queries.
Do you really think splitting the table into chunks is necessary? For the core_remote table, even if someone has a couple of thousand repositories, a single select of the whole table should easily fit into memory. And more importantly: you could easily optimize the select by searching only for rows which actually have something set in one of the five columns to be encrypted. In my case, that would return 4 rows with username/password set and 5 redhat repositories with client keys (after setting the empty string "" columns to null)...
Chunks can be useful if it's a huge table with many (millions) of rows. But in that case, you actually wouldn't want to sort the table because it is much more efficient to deliver the rows in database order.
Isn't it possible with django to loop through the rows from the database directly, i.e. instead of using fetchall which I assume retrieves all rows from the select have something like a simple "fetch" to get the next row from the cursor until it hits the end?
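For what it's worth, Django's `QuerySet.iterator()` does roughly that: it streams rows in chunks (via a server-side cursor on PostgreSQL) instead of materialising everything like `fetchall()`, and the reworked migration in the patch above uses exactly this pattern:
```python
from pulpcore.app.models import Remote  # a migration would use apps.get_model("core", "Remote")

for remote in Remote.objects.all().iterator():
    # rows are fetched lazily in chunks, not all at once
    ...  # per-row encryption work goes here
```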
>Do you really think splitting the table into chunks is necessary? For the core_remote table, even if someone has a couple of thousand repositories, a single select of the whole table should easily fit into memory.
I don't know why it was done this way in particular, the comments on that [PR](https://github.com/pulp/pulpcore/pull/1301) aren't illuminating, but I'll bring it up on Monday and see if that whole requirement can be dropped. | 2022-03-17T19:22:10 |
|
pulp/pulpcore | 2,355 | pulp__pulpcore-2355 | [
"2327"
] | 749d75f4f7d353f5f221f4ab19c14c2c6ec56973 | diff --git a/pulpcore/app/management/commands/datarepair-2327.py b/pulpcore/app/management/commands/datarepair-2327.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/app/management/commands/datarepair-2327.py
@@ -0,0 +1,120 @@
+from gettext import gettext as _
+
+from django.db import connection
+from django.conf import settings
+from django.core.management import BaseCommand
+from django.db.models import Q
+from django.utils.encoding import force_bytes, force_str
+
+import cryptography
+
+from pulpcore.app.models import Remote
+
+
+class Command(BaseCommand):
+ """
+ Django management command for repairing incorrectly migrated remote data.
+ """
+
+ help = _(
+ "Repairs issue #2327. A small number of configuration settings may have been "
+ "corrupted during an upgrade from a previous version of Pulp to a Pulp version "
+ "between 3.15-3.18, resulting in trouble when syncing or viewing certain remotes. "
+ "This script repairs the data (which was not lost)."
+ )
+
+ def add_arguments(self, parser):
+ """Set up arguments."""
+ parser.add_argument(
+ "--dry-run",
+ action="store_true",
+ help=_("Don't modify anything, just collect results on how many Remotes are impacted."),
+ )
+
+ def handle(self, *args, **options):
+
+ dry_run = options["dry_run"]
+ fields = ("username", "password", "proxy_username", "proxy_password", "client_key")
+
+ with open(settings.DB_ENCRYPTION_KEY, "rb") as key_file:
+ fernet = cryptography.fernet.Fernet(key_file.read())
+
+ possibly_affected_remotes = (
+ Q(username__isnull=False)
+ | Q(password__isnull=False)
+ | Q(proxy_username__isnull=False)
+ | Q(proxy_password__isnull=False)
+ | Q(client_key__isnull=False)
+ )
+
+ number_unencrypted = 0
+ number_multi_encrypted = 0
+
+ for remote_pk in Remote.objects.filter(possibly_affected_remotes).values_list(
+ "pk", flat=True
+ ):
+ try:
+ remote = Remote.objects.get(pk=remote_pk)
+ # if we can get the remote successfully, it is either OK or the fields are
+ # encrypted more than once
+ except cryptography.fernet.InvalidToken:
+ # If decryption fails then it probably hasn't been encrypted yet
+ # get the raw column value, avoiding any Django field handling
+ with connection.cursor() as cursor:
+ cursor.execute(
+ "SELECT username, password, proxy_username, proxy_password, client_key "
+ "FROM core_remote WHERE pulp_id = %s",
+ [str(remote_pk)],
+ )
+ row = cursor.fetchone()
+
+ field_values = {}
+
+ for field, value in zip(fields, row):
+ field_values[field] = value
+
+ if not dry_run:
+ Remote.objects.filter(pk=remote_pk).update(**field_values)
+ number_unencrypted += 1
+ else:
+ times_decrypted = 0
+ keep_trying = True
+ needs_update = False
+
+ while keep_trying:
+ for field in fields:
+ field_value = getattr(remote, field) # value gets decrypted once on access
+ if not field_value:
+ continue
+
+ try:
+ # try to decrypt it again
+ field_value = force_str(fernet.decrypt(force_bytes(field_value)))
+ # it was decrypted successfully again time, so it was probably
+ # encrypted multiple times over. lets re-set the value with the
+ # newly decrypted value
+ setattr(remote, field, field_value)
+ needs_update = True
+ except cryptography.fernet.InvalidToken:
+ # couldn't be decrypted again, stop here
+ keep_trying = False
+
+ times_decrypted += 1
+
+ if needs_update:
+ if not dry_run:
+ remote.save()
+ number_multi_encrypted += 1
+
+ if dry_run:
+ print("Remotes with un-encrypted fields: {}".format(number_unencrypted))
+ print("Remotes encrypted multiple times: {}".format(number_multi_encrypted))
+ else:
+ if not number_unencrypted and not number_multi_encrypted:
+ print("Finished. (OK)")
+ else:
+ print(
+ "Finished. ({} remotes fixed)".format(
+ number_unencrypted + number_multi_encrypted
+ )
+ )
diff --git a/pulpcore/app/migrations/0073_encrypt_remote_fields.py b/pulpcore/app/migrations/0073_encrypt_remote_fields.py
--- a/pulpcore/app/migrations/0073_encrypt_remote_fields.py
+++ b/pulpcore/app/migrations/0073_encrypt_remote_fields.py
@@ -1,59 +1,53 @@
# Generated by Django 2.2.20 on 2021-04-29 14:33
-from django.db import connection, migrations
-from django.db.models import Q
-
+from django.db import migrations
import pulpcore.app.models.fields
fields = ("username", "password", "proxy_username", "proxy_password", "client_key")
+new_fields = ("_encrypted_username", "_encrypted_password", "_encrypted_proxy_username", "_encrypted_proxy_password", "_encrypted_client_key")
def encrypt_remote_fields(apps, schema_editor):
- offset = 0
- chunk_size = 100
Remote = apps.get_model("core", "Remote")
- with connection.cursor() as cursor:
- while True:
- cursor.execute(
- f"SELECT pulp_id, {(',').join(fields)} FROM "
- f"core_remote LIMIT {chunk_size} OFFSET {offset}"
- )
- records = cursor.fetchall()
- offset += chunk_size
+ remotes_needing_update = []
+ for remote in Remote.objects.all().iterator():
+ if not any([getattr(remote, field) for field in fields]):
+ continue
- if len(records) == 0:
- break
+ remote._encrypted_username = remote.username
+ remote._encrypted_password = remote.password
+ remote._encrypted_proxy_username = remote.proxy_username
+ remote._encrypted_proxy_password = remote.proxy_password
+ remote._encrypted_client_key = remote.client_key
+ remotes_needing_update.append(remote)
- for record in records:
- update = {
- field: record[i] for i, field in enumerate(fields, 1) if record[i] is not None
- }
- if not update:
- continue
+ if len(remotes_needing_update) > 100:
+ Remote.objects.bulk_update(remotes_needing_update, new_fields)
+ remotes_needing_update.clear()
- Remote.objects.filter(pk=record[0]).update(**update)
+ Remote.objects.bulk_update(remotes_needing_update, new_fields)
def unencrypt_remote_fields(apps, schema_editor):
Remote = apps.get_model("core", "Remote")
- q = Q()
- for field in fields:
- q &= Q(**{field: None}) | Q(**{field: ""})
+ remotes_needing_update = []
+ for remote in Remote.objects.all().iterator():
+ if not any([getattr(remote, field) for field in new_fields]):
+ continue
+ remote.username = remote._encrypted_username
+ remote.password = remote._encrypted_password
+ remote.proxy_username = remote._encrypted_proxy_username
+ remote.proxy_password = remote._encrypted_proxy_password
+ remote.client_key = remote._encrypted_client_key
+ remotes_needing_update.append(remote)
- for remote in Remote.objects.exclude(q):
- update = [
- f"{field} = '{getattr(remote, field)}'"
- for field in fields
- if getattr(remote, field) is not None
- ]
- query = (
- f"UPDATE core_remote cr SET {(', ').join(update)} WHERE pulp_id = '{remote.pulp_id}'"
- )
+ if len(remotes_needing_update) > 100:
+ Remote.objects.bulk_update(remotes_needing_update, fields)
+ remotes_needing_update.clear()
- with connection.cursor() as cursor:
- cursor.execute(query)
+ Remote.objects.bulk_update(remotes_needing_update, fields)
class Migration(migrations.Migration):
@@ -63,33 +57,82 @@ class Migration(migrations.Migration):
]
operations = [
- migrations.AlterField(
+ # Add new fields to temporarily hold the encrypted values
+ migrations.AddField(
model_name="remote",
- name="client_key",
+ name="_encrypted_client_key",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
- migrations.AlterField(
+ migrations.AddField(
model_name="remote",
- name="password",
+ name="_encrypted_password",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
- migrations.AlterField(
+ migrations.AddField(
model_name="remote",
- name="proxy_password",
+ name="_encrypted_proxy_password",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
- migrations.AlterField(
+ migrations.AddField(
model_name="remote",
- name="proxy_username",
+ name="_encrypted_proxy_username",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
- migrations.AlterField(
+ migrations.AddField(
model_name="remote",
- name="username",
+ name="_encrypted_username",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
+ # Populate the new fields with encrypted values computed from the unencrypted fields
migrations.RunPython(
code=encrypt_remote_fields,
reverse_code=unencrypt_remote_fields,
),
+ # Remove the unencrypted columns
+ migrations.RemoveField(
+ model_name="remote",
+ name="client_key",
+ ),
+ migrations.RemoveField(
+ model_name="remote",
+ name="password",
+ ),
+ migrations.RemoveField(
+ model_name="remote",
+ name="proxy_password",
+ ),
+ migrations.RemoveField(
+ model_name="remote",
+ name="proxy_username",
+ ),
+ migrations.RemoveField(
+ model_name="remote",
+ name="username",
+ ),
+ # Replace the formerly-unencrypted columns with the new encrypted ones
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_client_key",
+ new_name="client_key",
+ ),
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_password",
+ new_name="password",
+ ),
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_proxy_password",
+ new_name="proxy_password",
+ ),
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_proxy_username",
+ new_name="proxy_username",
+ ),
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_username",
+ new_name="username",
+ ),
]
| Migration of encrypted fields between 3.14 and 3.16 is broken for some remotes
**Version**
After upgrading from Katello 4.2 to 4.3, some users are encountering sync problems with some repositories. A 500 error is triggered when the /repositories/.../sync/ endpoint is used. The error seems to be related to the client_cert / client_key / ca_cert values.
```
Feb 24 11:28:57 foreman pulpcore-api: pulp [e2601bd6-70a5-4a87-ace3-a386280e3e30]: django.request:ERROR: Internal Server Error: /pulp/api/v3/remotes/rpm/rpm/89e5b587-2295-4f1d-a49d-c0e8ed518c67/
Feb 24 11:28:57 foreman pulpcore-api: Traceback (most recent call last):
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 96, in _get_unverified_token_data
Feb 24 11:28:57 foreman pulpcore-api: data = base64.urlsafe_b64decode(token)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/rh/rh-python38/root/usr/lib64/python3.8/base64.py", line 133, in urlsafe_b64decode
Feb 24 11:28:57 foreman pulpcore-api: return b64decode(s)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/rh/rh-python38/root/usr/lib64/python3.8/base64.py", line 87, in b64decode
Feb 24 11:28:57 foreman pulpcore-api: return binascii.a2b_base64(s)
Feb 24 11:28:57 foreman pulpcore-api: binascii.Error: Incorrect padding
Feb 24 11:28:57 foreman pulpcore-api: During handling of the above exception, another exception occurred:
Feb 24 11:28:57 foreman pulpcore-api: Traceback (most recent call last):
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/core/handlers/exception.py", line 47, in inner
Feb 24 11:28:57 foreman pulpcore-api: response = get_response(request)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/core/handlers/base.py", line 181, in _get_response
Feb 24 11:28:57 foreman pulpcore-api: response = wrapped_callback(request, *callback_args, **callback_kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view
Feb 24 11:28:57 foreman pulpcore-api: return view_func(*args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/viewsets.py", line 125, in view
Feb 24 11:28:57 foreman pulpcore-api: return self.dispatch(request, *args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 509, in dispatch
Feb 24 11:28:57 foreman pulpcore-api: response = self.handle_exception(exc)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 469, in handle_exception
Feb 24 11:28:57 foreman pulpcore-api: self.raise_uncaught_exception(exc)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 480, in raise_uncaught_exception
Feb 24 11:28:57 foreman pulpcore-api: raise exc
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 506, in dispatch
Feb 24 11:28:57 foreman pulpcore-api: response = handler(request, *args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/viewsets/base.py", line 470, in partial_update
Feb 24 11:28:57 foreman pulpcore-api: return self.update(request, *args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/viewsets/base.py", line 452, in update
Feb 24 11:28:57 foreman pulpcore-api: instance = self.get_object()
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/generics.py", line 96, in get_object
Feb 24 11:28:57 foreman pulpcore-api: obj = get_object_or_404(queryset, **filter_kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/generics.py", line 19, in get_object_or_404
Feb 24 11:28:57 foreman pulpcore-api: return _get_object_or_404(queryset, *filter_args, **filter_kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/shortcuts.py", line 76, in get_object_or_404
Feb 24 11:28:57 foreman pulpcore-api: return queryset.get(*args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 431, in get
Feb 24 11:28:57 foreman pulpcore-api: num = len(clone)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 262, in __len__
Feb 24 11:28:57 foreman pulpcore-api: self._fetch_all()
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 1324, in _fetch_all
Feb 24 11:28:57 foreman pulpcore-api: self._result_cache = list(self._iterable_class(self))
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 68, in __iter__
Feb 24 11:28:57 foreman pulpcore-api: for row in compiler.results_iter(results):
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/sql/compiler.py", line 1122, in apply_converters
Feb 24 11:28:57 foreman pulpcore-api: value = converter(value, expression, connection)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/models/fields.py", line 104, in from_db_value
Feb 24 11:28:57 foreman pulpcore-api: return force_str(self._fernet.decrypt(force_bytes(value)))
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 75, in decrypt
Feb 24 11:28:57 foreman pulpcore-api: timestamp, data = Fernet._get_unverified_token_data(token)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 98, in _get_unverified_token_data
Feb 24 11:28:57 foreman pulpcore-api: raise InvalidToken
Feb 24 11:28:57 foreman pulpcore-api: cryptography.fernet.InvalidToken
Feb 24 11:28:57 foreman pulpcore-api: pulp [e2601bd6-70a5-4a87-ace3-a386280e3e30]: - - [24/Feb/2022:10:28:57 +0000] "PATCH /pulp/api/v3/remotes/rpm/rpm/89e5b587-2295-4f1d-a49d-c0e8ed518c67/ HTTP/1.1" 500 145 "-" "OpenAPI-Generator/3.16.1/ruby"
Feb 24 11:28:58 foreman qdrouterd: SERVER (info) [C464018] Connection from 10.2.1.176:50898 (to :5647) failed: amqp:resource-limit-exceeded local-idle-timeout expired
Feb 24 11:28:58 foreman qdrouterd: ROUTER_CORE (info) [C464018][L1196864] Link detached: del=0 presett=0 psdrop=0 acc=0 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no
Feb 24 11:28:58 foreman qdrouterd: ROUTER_CORE (info) [C464018] Connection Closed
Feb 24 11:28:58 foreman qdrouterd: ROUTER_CORE (info) [C4][L1196865] Link detached: del=0 presett=0 psdrop=0 acc=0 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no
Feb 24 11:28:58 foreman pulpcore-api: pulp [e2601bd6-70a5-4a87-ace3-a386280e3e30]: django.request:ERROR: Internal Server Error: /pulp/api/v3/repositories/rpm/rpm/b4a448e8-896c-4fec-8ee2-fcdf4307f2fd/sync/
Feb 24 11:28:58 foreman pulpcore-api: Traceback (most recent call last):
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 96, in _get_unverified_token_data
Feb 24 11:28:58 foreman pulpcore-api: data = base64.urlsafe_b64decode(token)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/rh/rh-python38/root/usr/lib64/python3.8/base64.py", line 133, in urlsafe_b64decode
Feb 24 11:28:58 foreman pulpcore-api: return b64decode(s)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/rh/rh-python38/root/usr/lib64/python3.8/base64.py", line 87, in b64decode
Feb 24 11:28:58 foreman pulpcore-api: return binascii.a2b_base64(s)
Feb 24 11:28:58 foreman pulpcore-api: binascii.Error: Incorrect padding
Feb 24 11:28:58 foreman pulpcore-api: During handling of the above exception, another exception occurred:
Feb 24 11:28:58 foreman pulpcore-api: Traceback (most recent call last):
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/core/handlers/exception.py", line 47, in inner
Feb 24 11:28:58 foreman pulpcore-api: response = get_response(request)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/core/handlers/base.py", line 181, in _get_response
Feb 24 11:28:58 foreman pulpcore-api: response = wrapped_callback(request, *callback_args, **callback_kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view
Feb 24 11:28:58 foreman pulpcore-api: return view_func(*args, **kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/viewsets.py", line 125, in view
Feb 24 11:28:58 foreman pulpcore-api: return self.dispatch(request, *args, **kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 509, in dispatch
Feb 24 11:28:58 foreman pulpcore-api: response = self.handle_exception(exc)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 469, in handle_exception
Feb 24 11:28:58 foreman pulpcore-api: self.raise_uncaught_exception(exc)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 480, in raise_uncaught_exception
Feb 24 11:28:58 foreman pulpcore-api: raise exc
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 506, in dispatch
Feb 24 11:28:58 foreman pulpcore-api: response = handler(request, *args, **kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/viewsets.py", line 130, in sync
Feb 24 11:28:58 foreman pulpcore-api: serializer.is_valid(raise_exception=True)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/serializers.py", line 220, in is_valid
Feb 24 11:28:58 foreman pulpcore-api: self._validated_data = self.run_validation(self.initial_data)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/serializers.py", line 419, in run_validation
Feb 24 11:28:58 foreman pulpcore-api: value = self.to_internal_value(data)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/serializers.py", line 476, in to_internal_value
Feb 24 11:28:58 foreman pulpcore-api: validated_value = field.run_validation(primitive_value)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/relations.py", line 153, in run_validation
Feb 24 11:28:58 foreman pulpcore-api: return super().run_validation(data)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/fields.py", line 568, in run_validation
Feb 24 11:28:58 foreman pulpcore-api: value = self.to_internal_value(data)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/relations.py", line 373, in to_internal_value
Feb 24 11:28:58 foreman pulpcore-api: return self.get_object(match.view_name, match.args, match.kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/serializers/base.py", line 322, in get_object
Feb 24 11:28:58 foreman pulpcore-api: return super().get_object(*args, **kwargs).cast()
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/relations.py", line 318, in get_object
Feb 24 11:28:58 foreman pulpcore-api: return queryset.get(**lookup_kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 431, in get
Feb 24 11:28:58 foreman pulpcore-api: num = len(clone)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 262, in __len__
Feb 24 11:28:58 foreman pulpcore-api: self._fetch_all()
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 1324, in _fetch_all
Feb 24 11:28:58 foreman pulpcore-api: self._result_cache = list(self._iterable_class(self))
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 68, in __iter__
Feb 24 11:28:58 foreman pulpcore-api: for row in compiler.results_iter(results):
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/sql/compiler.py", line 1122, in apply_converters
Feb 24 11:28:58 foreman pulpcore-api: value = converter(value, expression, connection)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/models/fields.py", line 104, in from_db_value
Feb 24 11:28:58 foreman pulpcore-api: return force_str(self._fernet.decrypt(force_bytes(value)))
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 75, in decrypt
Feb 24 11:28:58 foreman pulpcore-api: timestamp, data = Fernet._get_unverified_token_data(token)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 98, in _get_unverified_token_data
Feb 24 11:28:58 foreman pulpcore-api: raise InvalidToken
Feb 24 11:28:58 foreman pulpcore-api: cryptography.fernet.InvalidToken
Feb 24 11:28:58 foreman pulpcore-api: pulp [e2601bd6-70a5-4a87-ace3-a386280e3e30]: - - [24/Feb/2022:10:28:58 +0000] "POST /pulp/api/v3/repositories/rpm/rpm/b4a448e8-896c-4fec-8ee2-fcdf4307f2fd/sync/ HTTP/1.1" 500 145
```
>
> What I did notice, though, is that on the old 4.2 server with pulpcore 3.14 all those remotes have an empty username and password set, i.e. an empty string “” and not NULL like many others. However, the list of remotes on the old server with an empty string username is longer than the list of affected ones on my 4.3 server.
>
> So it seems to be an issue with the database migration from 3.14 to 3.16 and the encryption of some fields in the database. The migration/encryption seems to fail in some cases and the original content remains in the database, which makes decryption fail. This is also true for my affected rhel repository: I can see in the database that the remote contains the RSA private key in client_key, unlike the other rhel repos which contain some base64 encoded string in the client_key fields.
>
> Thus the problem is that for some rows in the database (at least the core_remote table) encryption of the fields client_key, username, password, (proxy_username, proxy_password, too, I guess) fails during migration from 3.14 to 3.16 leaving the original content in the database. This leads to decryption failure in 3.16.
>
> For whatever reason only a few repos are affected, mostly those which have empty strings for username, password instead of NULL like many others. However, even with empty strings for some remotes the encryption works while it doesn’t for others.
**Describe the bug**
API calls to trigger a sync return a 500 error for some remotes
**To Reproduce**
Unclear
**Expected behavior**
A sync task is correctly created, no 500 error.
**Additional context**
Discourse discussion: https://community.theforeman.org/t/pulprpmclient-apierror-http-500-during-sync-repository/27480/13?u=dralley
| Looking closer into the encrypted strings in the database after the migration from 3.14 to 3.16, I have noticed that the empty strings are consecutive in an unordered select. That made me suspect that the problem is the chunking and the assumption that the order of rows won't change between chunks. To verify, I decrypted all encrypted usernames in the migrated database and found what I had suspected:
```
>>> f.decrypt(b'gAAAAABiKvJgZhyIdXy__3X2rh8QdXezaWj-Y-RelFEfYIWg2mrTREsKTB7ydPY2gn3ZhveMwE3ocN1KO8YV3h5iA-wMibo_aw==')
b''
>>> f.decrypt(b'gAAAAABiKvJzCDPxmuSFmmAawerxPi1AqUCP4H8NxWiO0ypnYwFraXPj35EWQ4ABupu_KIBbBPFhW2elE_4Ru6FQQWRggn1yeg==')
b''
>>> f.decrypt(b'gAAAAABiKvGrd6IVPXjJZuTUPYxXg_F3jXvaMmbH3l_O2x1hNnxG8vBKeTHav_0Bz2rjsjcUc6CH_K4eapwLpV0tNGF_cJZKbRbqsSB_JZTQyjW8jSovvTipMSsbWeQJJZ-B5yLWk6vBnNk9cQ81I6kQOnXZolXbRfIPFdPM9AhwCJro8vnDcN4AQ5NKe9dyOVM80hHDquUW2IavogypDl9XLbsnr6m9KQ==')
b'gAAAAABiKe5e7RWrDl9cNAUTLHZ9CjN30uvPAZ_KZZskG_pyCBDCJBJ5pY6pvKaidZltPjWa0iqLP8RsKGakC8fpTi5xMz-c6Q=='
...
```
Obviously the last one above has been encrypted twice. So basically, the migration does encrypt some rows twice and some not at all.
I guess the reason is in 0073_encrypt_remote_fields.py:
```
def encrypt_remote_fields(apps, schema_editor):
offset = 0
chunk_size = 100
Remote = apps.get_model("core", "Remote")
with connection.cursor() as cursor:
while True:
cursor.execute(
f"SELECT pulp_id, {(',').join(fields)} FROM "
f"core_remote LIMIT {chunk_size} OFFSET {offset}"
)
records = cursor.fetchall()
offset += chunk_size
if len(records) == 0:
break
for record in records:
update = {
field: record[i] for i, field in enumerate(fields, 1) if record[i] is not None
}
if not update:
continue
Remote.objects.filter(pk=record[0]).update(**update)
```
I have more than 100 rows in core_remote, i.e. it runs two selects:
```
pulpcore=# select pulp_id, "username", "password", "proxy_username", "proxy_password", "client_key" from core_remote limit 100 offset 0;
pulpcore=# select pulp_id, "username", "password", "proxy_username", "proxy_password", "client_key" from core_remote limit 100 offset 100;
```
This assumes that the order of the rows in the whole table does not change between the first and second select. I suspect that this isn't true. Comparing the select on the database before the migration and after, the rows definitely come back in a different order.
So the migration should make sure to maintain the order of returned rows, e.g. by adding an `ORDER BY pulp_id`. That way the chunks should work unless some other operation in between were to insert or delete rows or modify pulp_ids...
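(Illustrative only — the same chunked query with a stable ordering, so consecutive OFFSET windows can neither overlap nor skip rows; the fix that actually shipped in the patch above takes a different route:)
```
from django.db import connection

offset = 0
chunk_size = 100
with connection.cursor() as cursor:
    while True:
        cursor.execute(
            "SELECT pulp_id, username, password, proxy_username, proxy_password, client_key"
            " FROM core_remote ORDER BY pulp_id LIMIT %s OFFSET %s",
            [chunk_size, offset],
        )
        records = cursor.fetchall()
        if not records:
            break
        offset += chunk_size
        # ... encrypt and update each record as before ...
```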
Of course this leaves people who have already migrated and cannot go back with broken content in those fields: some fields may still contain plaintext content, which causes decryption to fail with this InvalidToken exception. Other fields are encrypted twice, and decrypting them will return the once-encrypted string instead of the plaintext, causing authentication to fail.
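(The `datarepair-2327.py` management command in the patch above handles exactly these two failure modes; a minimal sketch of the detection logic, where `fernet` is a `cryptography.fernet.Fernet` instance built from the DB_ENCRYPTION_KEY:)
```
from cryptography.fernet import InvalidToken

def classify(fernet, stored_value):
    """Classify a stored column value: plaintext, encrypted once, or encrypted twice+."""
    try:
        once = fernet.decrypt(stored_value.encode())
    except InvalidToken:
        return "plaintext"        # the migration never encrypted this value
    try:
        fernet.decrypt(once)
        return "multi-encrypted"  # decrypting a second time still succeeds
    except InvalidToken:
        return "encrypted-once"   # healthy value
```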
To make matters worse: at least on my Katello 4.2 server there are quite a few rows in core_remote containing empty strings "" in those fields instead of NULL, causing the migration to encrypt the empty string. Due to this, I have 41 rows in the 3.16 table with encrypted usernames even though I actually only have 4 repositories having a non-empty username...
Thinking a little bit more about it, I think LIMIT/chunking in the migration is simply wrong. It should just use the standard postgresql cursor/buffering to go through all rows rather than trying to split them up manually. Using LIMIT/OFFSET for paging through an unsorted table is just conceptually flawed.
In addition: technically, the SELECT and the following updates should be in a transaction block to make sure it's consistent and no other process can get in between the select and the updates.
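(Sketch only, assuming the psycopg2 backend: a named, i.e. server-side, cursor streams the rows, and `transaction.atomic()` keeps the reads and the per-row updates in one transaction — named cursors require a transaction anyway. `encrypt_row` is a placeholder name:)
```
from django.db import connection, transaction

with transaction.atomic():
    with connection.connection.cursor(name="encrypt_remotes") as cursor:
        cursor.execute("SELECT pulp_id, username, password FROM core_remote")
        while True:
            row = cursor.fetchone()
            if row is None:
                break
            encrypt_row(row)  # placeholder for the per-row encrypt-and-UPDATE step
```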
Or otherwise the migration could temporarily add encrypted columns for those 5 fields to be encrypted and write the encrypted text there. This allows the migration to loop through the table (even with a limit) and find rows which still need to be encrypted. Once there are no rows left to be encrypted, a single update can move the encrypted texts from the temporary columns into the normal columns, and after that you can drop the temporary columns.
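(This is essentially the shape of the fixed 0073 migration in the patch above — temporary `_encrypted_*` columns, a RunPython copy step, then drop-and-rename. Sketched here for a single field, with the RunPython callables left as placeholders:)
```
from django.db import migrations
import pulpcore.app.models.fields

operations = [
    migrations.AddField(
        model_name="remote",
        name="_encrypted_password",
        field=pulpcore.app.models.fields.EncryptedTextField(null=True),
    ),
    migrations.RunPython(code=copy_into_encrypted, reverse_code=copy_back),  # placeholders
    migrations.RemoveField(model_name="remote", name="password"),
    migrations.RenameField(
        model_name="remote",
        old_name="_encrypted_password",
        new_name="password",
    ),
]
```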
>So the migration should make sure to maintain order of returned rows, e.g. by adding a ORDER by pulp_id. That way the chunks should work unless some other operation in between would insert or delete rows or modify pulp_ids...
Yes, your theory is almost certainly the correct root cause. That needs to be fixed as you've described, using ordered queries.
>In addition: technically, the SELECT and the following updates should be in a transaction block to make sure it's consistent and no other process can get in between the select and the updates.
This part should be fine though. All Django migrations implicitly run inside a transaction unless you manually specify otherwise, and that isn't the case here.
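(For reference, the opt-out mentioned here is an explicit flag on the migration class:)
```
from django.db import migrations

class Migration(migrations.Migration):
    # On PostgreSQL each migration runs in a transaction by default;
    # opting out has to be requested explicitly like this.
    atomic = False
```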
> Yes, your theory is almost certainly the correct root cause. That needs to be fixed as you've described, using ordered queries.
Do you really think splitting the table into chunks is necessary? For the core_remote table, even if someone has a couple of thousand repositories, a single select of the whole table should easily fit into memory. And more importantly: you could easily optimize the select by searching only for rows which actually have something set in one of the five columns to be encrypted. In my case, that would return 4 rows with username/password set and 5 redhat repositories with client keys (after setting the empty string "" columns to null)...
Chunks can be useful if it's a huge table with many (millions) of rows. But in that case, you actually wouldn't want to sort the table because it is much more efficient to deliver the rows in database order.
Isn't it possible with django to loop through the rows from the database directly, i.e. instead of using fetchall, which I assume retrieves all rows from the select, have something like a simple "fetch" to get the next row from the cursor until it hits the end?
>Do you really think splitting the table into chunks is necessary? For the core_remote table, even if someone has a couple of thousand repositories, a single select of the whole table should easily fit into memory.
I don't know why it was done this way in particular, the comments on that [PR](https://github.com/pulp/pulpcore/pull/1301) aren't illuminating, but I'll bring it up on Monday and see if that whole requirement can be dropped. | 2022-03-17T20:04:20 |
|
pulp/pulpcore | 2,356 | pulp__pulpcore-2356 | [
"2327"
] | 1dbac29eea7d1ac2e35c000a2154fb40957a038a | diff --git a/pulpcore/app/management/commands/datarepair-2327.py b/pulpcore/app/management/commands/datarepair-2327.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/app/management/commands/datarepair-2327.py
@@ -0,0 +1,120 @@
+from gettext import gettext as _
+
+from django.db import connection
+from django.conf import settings
+from django.core.management import BaseCommand
+from django.db.models import Q
+from django.utils.encoding import force_bytes, force_str
+
+import cryptography
+
+from pulpcore.app.models import Remote
+
+
+class Command(BaseCommand):
+ """
+ Django management command for repairing incorrectly migrated remote data.
+ """
+
+ help = _(
+ "Repairs issue #2327. A small number of configuration settings may have been "
+ "corrupted during an upgrade from a previous version of Pulp to a Pulp version "
+ "between 3.15-3.18, resulting in trouble when syncing or viewing certain remotes. "
+ "This script repairs the data (which was not lost)."
+ )
+
+ def add_arguments(self, parser):
+ """Set up arguments."""
+ parser.add_argument(
+ "--dry-run",
+ action="store_true",
+ help=_("Don't modify anything, just collect results on how many Remotes are impacted."),
+ )
+
+ def handle(self, *args, **options):
+
+ dry_run = options["dry_run"]
+ fields = ("username", "password", "proxy_username", "proxy_password", "client_key")
+
+ with open(settings.DB_ENCRYPTION_KEY, "rb") as key_file:
+ fernet = cryptography.fernet.Fernet(key_file.read())
+
+ possibly_affected_remotes = (
+ Q(username__isnull=False)
+ | Q(password__isnull=False)
+ | Q(proxy_username__isnull=False)
+ | Q(proxy_password__isnull=False)
+ | Q(client_key__isnull=False)
+ )
+
+ number_unencrypted = 0
+ number_multi_encrypted = 0
+
+ for remote_pk in Remote.objects.filter(possibly_affected_remotes).values_list(
+ "pk", flat=True
+ ):
+ try:
+ remote = Remote.objects.get(pk=remote_pk)
+ # if we can get the remote successfully, it is either OK or the fields are
+ # encrypted more than once
+ except cryptography.fernet.InvalidToken:
+ # If decryption fails then it probably hasn't been encrypted yet
+ # get the raw column value, avoiding any Django field handling
+ with connection.cursor() as cursor:
+ cursor.execute(
+ "SELECT username, password, proxy_username, proxy_password, client_key "
+ "FROM core_remote WHERE pulp_id = %s",
+ [str(remote_pk)],
+ )
+ row = cursor.fetchone()
+
+ field_values = {}
+
+ for field, value in zip(fields, row):
+ field_values[field] = value
+
+ if not dry_run:
+ Remote.objects.filter(pk=remote_pk).update(**field_values)
+ number_unencrypted += 1
+ else:
+ times_decrypted = 0
+ keep_trying = True
+ needs_update = False
+
+ while keep_trying:
+ for field in fields:
+ field_value = getattr(remote, field) # value gets decrypted once on access
+ if not field_value:
+ continue
+
+ try:
+ # try to decrypt it again
+ field_value = force_str(fernet.decrypt(force_bytes(field_value)))
+ # it was decrypted successfully again time, so it was probably
+ # encrypted multiple times over. lets re-set the value with the
+ # newly decrypted value
+ setattr(remote, field, field_value)
+ needs_update = True
+ except cryptography.fernet.InvalidToken:
+ # couldn't be decrypted again, stop here
+ keep_trying = False
+
+ times_decrypted += 1
+
+ if needs_update:
+ if not dry_run:
+ remote.save()
+ number_multi_encrypted += 1
+
+ if dry_run:
+ print("Remotes with un-encrypted fields: {}".format(number_unencrypted))
+ print("Remotes encrypted multiple times: {}".format(number_multi_encrypted))
+ else:
+ if not number_unencrypted and not number_multi_encrypted:
+ print("Finished. (OK)")
+ else:
+ print(
+ "Finished. ({} remotes fixed)".format(
+ number_unencrypted + number_multi_encrypted
+ )
+ )
diff --git a/pulpcore/app/migrations/0073_encrypt_remote_fields.py b/pulpcore/app/migrations/0073_encrypt_remote_fields.py
--- a/pulpcore/app/migrations/0073_encrypt_remote_fields.py
+++ b/pulpcore/app/migrations/0073_encrypt_remote_fields.py
@@ -1,59 +1,53 @@
# Generated by Django 2.2.20 on 2021-04-29 14:33
-from django.db import connection, migrations
-from django.db.models import Q
-
+from django.db import migrations
import pulpcore.app.models.fields
fields = ("username", "password", "proxy_username", "proxy_password", "client_key")
+new_fields = ("_encrypted_username", "_encrypted_password", "_encrypted_proxy_username", "_encrypted_proxy_password", "_encrypted_client_key")
def encrypt_remote_fields(apps, schema_editor):
- offset = 0
- chunk_size = 100
Remote = apps.get_model("core", "Remote")
- with connection.cursor() as cursor:
- while True:
- cursor.execute(
- f"SELECT pulp_id, {(',').join(fields)} FROM "
- f"core_remote LIMIT {chunk_size} OFFSET {offset}"
- )
- records = cursor.fetchall()
- offset += chunk_size
+ remotes_needing_update = []
+ for remote in Remote.objects.all().iterator():
+ if not any([getattr(remote, field) for field in fields]):
+ continue
- if len(records) == 0:
- break
+ remote._encrypted_username = remote.username
+ remote._encrypted_password = remote.password
+ remote._encrypted_proxy_username = remote.proxy_username
+ remote._encrypted_proxy_password = remote.proxy_password
+ remote._encrypted_client_key = remote.client_key
+ remotes_needing_update.append(remote)
- for record in records:
- update = {
- field: record[i] for i, field in enumerate(fields, 1) if record[i] is not None
- }
- if not update:
- continue
+ if len(remotes_needing_update) > 100:
+ Remote.objects.bulk_update(remotes_needing_update, new_fields)
+ remotes_needing_update.clear()
- Remote.objects.filter(pk=record[0]).update(**update)
+ Remote.objects.bulk_update(remotes_needing_update, new_fields)
def unencrypt_remote_fields(apps, schema_editor):
Remote = apps.get_model("core", "Remote")
- q = Q()
- for field in fields:
- q &= Q(**{field: None}) | Q(**{field: ""})
+ remotes_needing_update = []
+ for remote in Remote.objects.all().iterator():
+ if not any([getattr(remote, field) for field in new_fields]):
+ continue
+ remote.username = remote._encrypted_username
+ remote.password = remote._encrypted_password
+ remote.proxy_username = remote._encrypted_proxy_username
+ remote.proxy_password = remote._encrypted_proxy_password
+ remote.client_key = remote._encrypted_client_key
+ remotes_needing_update.append(remote)
- for remote in Remote.objects.exclude(q):
- update = [
- f"{field} = '{getattr(remote, field)}'"
- for field in fields
- if getattr(remote, field) is not None
- ]
- query = (
- f"UPDATE core_remote cr SET {(', ').join(update)} WHERE pulp_id = '{remote.pulp_id}'"
- )
+ if len(remotes_needing_update) > 100:
+ Remote.objects.bulk_update(remotes_needing_update, fields)
+ remotes_needing_update.clear()
- with connection.cursor() as cursor:
- cursor.execute(query)
+ Remote.objects.bulk_update(remotes_needing_update, fields)
class Migration(migrations.Migration):
@@ -63,33 +57,82 @@ class Migration(migrations.Migration):
]
operations = [
- migrations.AlterField(
+ # Add new fields to temporarily hold the encrypted values
+ migrations.AddField(
model_name="remote",
- name="client_key",
+ name="_encrypted_client_key",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
- migrations.AlterField(
+ migrations.AddField(
model_name="remote",
- name="password",
+ name="_encrypted_password",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
- migrations.AlterField(
+ migrations.AddField(
model_name="remote",
- name="proxy_password",
+ name="_encrypted_proxy_password",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
- migrations.AlterField(
+ migrations.AddField(
model_name="remote",
- name="proxy_username",
+ name="_encrypted_proxy_username",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
- migrations.AlterField(
+ migrations.AddField(
model_name="remote",
- name="username",
+ name="_encrypted_username",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
+ # Populate the new fields with encrypted values computed from the unencrypted fields
migrations.RunPython(
code=encrypt_remote_fields,
reverse_code=unencrypt_remote_fields,
),
+ # Remove the unencrypted columns
+ migrations.RemoveField(
+ model_name="remote",
+ name="client_key",
+ ),
+ migrations.RemoveField(
+ model_name="remote",
+ name="password",
+ ),
+ migrations.RemoveField(
+ model_name="remote",
+ name="proxy_password",
+ ),
+ migrations.RemoveField(
+ model_name="remote",
+ name="proxy_username",
+ ),
+ migrations.RemoveField(
+ model_name="remote",
+ name="username",
+ ),
+ # Replace the formerly-unencrypted columns with the new encrypted ones
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_client_key",
+ new_name="client_key",
+ ),
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_password",
+ new_name="password",
+ ),
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_proxy_password",
+ new_name="proxy_password",
+ ),
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_proxy_username",
+ new_name="proxy_username",
+ ),
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_username",
+ new_name="username",
+ ),
]
| Migration of encrypted fields between 3.14 and 3.16 is broken for some remotes
**Version**
After upgrading from Katello 4.2 to 4.3, some users are encountering sync problems with some repositories. A 500 error is triggered when the /repositories/.../sync/ endpoint is used. The error seems to be related to the client_cert / client_key / ca_cert values.
```
Feb 24 11:28:57 foreman pulpcore-api: pulp [e2601bd6-70a5-4a87-ace3-a386280e3e30]: django.request:ERROR: Internal Server Error: /pulp/api/v3/remotes/rpm/rpm/89e5b587-2295-4f1d-a49d-c0e8ed518c67/
Feb 24 11:28:57 foreman pulpcore-api: Traceback (most recent call last):
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 96, in _get_unverified_token_data
Feb 24 11:28:57 foreman pulpcore-api: data = base64.urlsafe_b64decode(token)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/rh/rh-python38/root/usr/lib64/python3.8/base64.py", line 133, in urlsafe_b64decode
Feb 24 11:28:57 foreman pulpcore-api: return b64decode(s)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/rh/rh-python38/root/usr/lib64/python3.8/base64.py", line 87, in b64decode
Feb 24 11:28:57 foreman pulpcore-api: return binascii.a2b_base64(s)
Feb 24 11:28:57 foreman pulpcore-api: binascii.Error: Incorrect padding
Feb 24 11:28:57 foreman pulpcore-api: During handling of the above exception, another exception occurred:
Feb 24 11:28:57 foreman pulpcore-api: Traceback (most recent call last):
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/core/handlers/exception.py", line 47, in inner
Feb 24 11:28:57 foreman pulpcore-api: response = get_response(request)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/core/handlers/base.py", line 181, in _get_response
Feb 24 11:28:57 foreman pulpcore-api: response = wrapped_callback(request, *callback_args, **callback_kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view
Feb 24 11:28:57 foreman pulpcore-api: return view_func(*args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/viewsets.py", line 125, in view
Feb 24 11:28:57 foreman pulpcore-api: return self.dispatch(request, *args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 509, in dispatch
Feb 24 11:28:57 foreman pulpcore-api: response = self.handle_exception(exc)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 469, in handle_exception
Feb 24 11:28:57 foreman pulpcore-api: self.raise_uncaught_exception(exc)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 480, in raise_uncaught_exception
Feb 24 11:28:57 foreman pulpcore-api: raise exc
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 506, in dispatch
Feb 24 11:28:57 foreman pulpcore-api: response = handler(request, *args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/viewsets/base.py", line 470, in partial_update
Feb 24 11:28:57 foreman pulpcore-api: return self.update(request, *args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/viewsets/base.py", line 452, in update
Feb 24 11:28:57 foreman pulpcore-api: instance = self.get_object()
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/generics.py", line 96, in get_object
Feb 24 11:28:57 foreman pulpcore-api: obj = get_object_or_404(queryset, **filter_kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/generics.py", line 19, in get_object_or_404
Feb 24 11:28:57 foreman pulpcore-api: return _get_object_or_404(queryset, *filter_args, **filter_kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/shortcuts.py", line 76, in get_object_or_404
Feb 24 11:28:57 foreman pulpcore-api: return queryset.get(*args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 431, in get
Feb 24 11:28:57 foreman pulpcore-api: num = len(clone)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 262, in __len__
Feb 24 11:28:57 foreman pulpcore-api: self._fetch_all()
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 1324, in _fetch_all
Feb 24 11:28:57 foreman pulpcore-api: self._result_cache = list(self._iterable_class(self))
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 68, in __iter__
Feb 24 11:28:57 foreman pulpcore-api: for row in compiler.results_iter(results):
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/sql/compiler.py", line 1122, in apply_converters
Feb 24 11:28:57 foreman pulpcore-api: value = converter(value, expression, connection)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/models/fields.py", line 104, in from_db_value
Feb 24 11:28:57 foreman pulpcore-api: return force_str(self._fernet.decrypt(force_bytes(value)))
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 75, in decrypt
Feb 24 11:28:57 foreman pulpcore-api: timestamp, data = Fernet._get_unverified_token_data(token)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 98, in _get_unverified_token_data
Feb 24 11:28:57 foreman pulpcore-api: raise InvalidToken
Feb 24 11:28:57 foreman pulpcore-api: cryptography.fernet.InvalidToken
Feb 24 11:28:57 foreman pulpcore-api: pulp [e2601bd6-70a5-4a87-ace3-a386280e3e30]: - - [24/Feb/2022:10:28:57 +0000] "PATCH /pulp/api/v3/remotes/rpm/rpm/89e5b587-2295-4f1d-a49d-c0e8ed518c67/ HTTP/1.1" 500 145 "-" "OpenAPI-Generator/3.16.1/ruby"
Feb 24 11:28:58 foreman qdrouterd: SERVER (info) [C464018] Connection from 10.2.1.176:50898 (to :5647) failed: amqp:resource-limit-exceeded local-idle-timeout expired
Feb 24 11:28:58 foreman qdrouterd: ROUTER_CORE (info) [C464018][L1196864] Link detached: del=0 presett=0 psdrop=0 acc=0 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no
Feb 24 11:28:58 foreman qdrouterd: ROUTER_CORE (info) [C464018] Connection Closed
Feb 24 11:28:58 foreman qdrouterd: ROUTER_CORE (info) [C4][L1196865] Link detached: del=0 presett=0 psdrop=0 acc=0 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no
Feb 24 11:28:58 foreman pulpcore-api: pulp [e2601bd6-70a5-4a87-ace3-a386280e3e30]: django.request:ERROR: Internal Server Error: /pulp/api/v3/repositories/rpm/rpm/b4a448e8-896c-4fec-8ee2-fcdf4307f2fd/sync/
Feb 24 11:28:58 foreman pulpcore-api: Traceback (most recent call last):
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 96, in _get_unverified_token_data
Feb 24 11:28:58 foreman pulpcore-api: data = base64.urlsafe_b64decode(token)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/rh/rh-python38/root/usr/lib64/python3.8/base64.py", line 133, in urlsafe_b64decode
Feb 24 11:28:58 foreman pulpcore-api: return b64decode(s)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/rh/rh-python38/root/usr/lib64/python3.8/base64.py", line 87, in b64decode
Feb 24 11:28:58 foreman pulpcore-api: return binascii.a2b_base64(s)
Feb 24 11:28:58 foreman pulpcore-api: binascii.Error: Incorrect padding
Feb 24 11:28:58 foreman pulpcore-api: During handling of the above exception, another exception occurred:
Feb 24 11:28:58 foreman pulpcore-api: Traceback (most recent call last):
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/core/handlers/exception.py", line 47, in inner
Feb 24 11:28:58 foreman pulpcore-api: response = get_response(request)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/core/handlers/base.py", line 181, in _get_response
Feb 24 11:28:58 foreman pulpcore-api: response = wrapped_callback(request, *callback_args, **callback_kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view
Feb 24 11:28:58 foreman pulpcore-api: return view_func(*args, **kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/viewsets.py", line 125, in view
Feb 24 11:28:58 foreman pulpcore-api: return self.dispatch(request, *args, **kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 509, in dispatch
Feb 24 11:28:58 foreman pulpcore-api: response = self.handle_exception(exc)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 469, in handle_exception
Feb 24 11:28:58 foreman pulpcore-api: self.raise_uncaught_exception(exc)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 480, in raise_uncaught_exception
Feb 24 11:28:58 foreman pulpcore-api: raise exc
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 506, in dispatch
Feb 24 11:28:58 foreman pulpcore-api: response = handler(request, *args, **kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/viewsets.py", line 130, in sync
Feb 24 11:28:58 foreman pulpcore-api: serializer.is_valid(raise_exception=True)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/serializers.py", line 220, in is_valid
Feb 24 11:28:58 foreman pulpcore-api: self._validated_data = self.run_validation(self.initial_data)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/serializers.py", line 419, in run_validation
Feb 24 11:28:58 foreman pulpcore-api: value = self.to_internal_value(data)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/serializers.py", line 476, in to_internal_value
Feb 24 11:28:58 foreman pulpcore-api: validated_value = field.run_validation(primitive_value)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/relations.py", line 153, in run_validation
Feb 24 11:28:58 foreman pulpcore-api: return super().run_validation(data)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/fields.py", line 568, in run_validation
Feb 24 11:28:58 foreman pulpcore-api: value = self.to_internal_value(data)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/relations.py", line 373, in to_internal_value
Feb 24 11:28:58 foreman pulpcore-api: return self.get_object(match.view_name, match.args, match.kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/serializers/base.py", line 322, in get_object
Feb 24 11:28:58 foreman pulpcore-api: return super().get_object(*args, **kwargs).cast()
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/relations.py", line 318, in get_object
Feb 24 11:28:58 foreman pulpcore-api: return queryset.get(**lookup_kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 431, in get
Feb 24 11:28:58 foreman pulpcore-api: num = len(clone)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 262, in __len__
Feb 24 11:28:58 foreman pulpcore-api: self._fetch_all()
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 1324, in _fetch_all
Feb 24 11:28:58 foreman pulpcore-api: self._result_cache = list(self._iterable_class(self))
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 68, in __iter__
Feb 24 11:28:58 foreman pulpcore-api: for row in compiler.results_iter(results):
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/sql/compiler.py", line 1122, in apply_converters
Feb 24 11:28:58 foreman pulpcore-api: value = converter(value, expression, connection)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/models/fields.py", line 104, in from_db_value
Feb 24 11:28:58 foreman pulpcore-api: return force_str(self._fernet.decrypt(force_bytes(value)))
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 75, in decrypt
Feb 24 11:28:58 foreman pulpcore-api: timestamp, data = Fernet._get_unverified_token_data(token)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 98, in _get_unverified_token_data
Feb 24 11:28:58 foreman pulpcore-api: raise InvalidToken
Feb 24 11:28:58 foreman pulpcore-api: cryptography.fernet.InvalidToken
Feb 24 11:28:58 foreman pulpcore-api: pulp [e2601bd6-70a5-4a87-ace3-a386280e3e30]: - - [24/Feb/2022:10:28:58 +0000] "POST /pulp/api/v3/repositories/rpm/rpm/b4a448e8-896c-4fec-8ee2-fcdf4307f2fd/sync/ HTTP/1.1" 500 145
```
>
> What I did notice, though, is that on the old 4.2 server with pulpcore 3.14 all those remotes have an empty username and password set, i.e. an empty string “” and not NULL like many others. However, the list of remotes on the old server with an empty string username is longer than the list of affected ones on my 4.3 server.
>
> So it seems to be an issue with the database migration from 3.14 to 3.16 and the encryption of some fields in the database. The migration/encryption seems to fail in some cases and the original content remains in the database, which makes decryption fail. This is also true for my affected rhel repository: I can see in the database that the remote contains the RSA private key in client_key, unlike the other rhel repos which contain some base64 encoded string in the client_key fields.
>
> Thus the problem is that for some rows in the database (at least the core_remote table) encryption of the fields client_key, username, password, (proxy_username, proxy_password, too, I guess) fails during migration from 3.14 to 3.16 leaving the original content in the database. This leads to decryption failure in 3.16.
>
> For whatever reason only a few repos are affected, mostly those which have empty strings for username, password instead of NULL like many others. However, even with empty strings for some remotes the encryption works while it doesn’t for others.
**Describe the bug**
API calls to trigger a sync return a 500 error for some remotes
**To Reproduce**
Unclear
**Expected behavior**
A sync task is correctly created, no 500 error.
**Additional context**
Discourse discussion: https://community.theforeman.org/t/pulprpmclient-apierror-http-500-during-sync-repository/27480/13?u=dralley
| Looking closer into the encrypted strings in the database after the migration from 3.14 to 3.16, I have noticed that the empty strings are consecutive in an unordered select. That made me suspect that the problem is the chunking and the assumption that the order of rows won't change between chunks. To verify, I decrypted all encrypted usernames in the migrated database and found what I had suspected:
```
>>> f.decrypt(b'gAAAAABiKvJgZhyIdXy__3X2rh8QdXezaWj-Y-RelFEfYIWg2mrTREsKTB7ydPY2gn3ZhveMwE3ocN1KO8YV3h5iA-wMibo_aw==')
b''
>>> f.decrypt(b'gAAAAABiKvJzCDPxmuSFmmAawerxPi1AqUCP4H8NxWiO0ypnYwFraXPj35EWQ4ABupu_KIBbBPFhW2elE_4Ru6FQQWRggn1yeg==')
b''
>>> f.decrypt(b'gAAAAABiKvGrd6IVPXjJZuTUPYxXg_F3jXvaMmbH3l_O2x1hNnxG8vBKeTHav_0Bz2rjsjcUc6CH_K4eapwLpV0tNGF_cJZKbRbqsSB_JZTQyjW8jSovvTipMSsbWeQJJZ-B5yLWk6vBnNk9cQ81I6kQOnXZolXbRfIPFdPM9AhwCJro8vnDcN4AQ5NKe9dyOVM80hHDquUW2IavogypDl9XLbsnr6m9KQ==')
b'gAAAAABiKe5e7RWrDl9cNAUTLHZ9CjN30uvPAZ_KZZskG_pyCBDCJBJ5pY6pvKaidZltPjWa0iqLP8RsKGakC8fpTi5xMz-c6Q=='
...
```
Obviously the last one above has been encrypted twice. So basically, the migration does encrypt some rows twice and some not at all.
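For reference, a minimal sketch of how a stored value could be classified as plaintext, encrypted once, or encrypted twice (hypothetical helper, not part of Pulp; the key path is assumed to be the usual `DB_ENCRYPTION_KEY` default and may differ per install):
```
# Hypothetical helper: classify a core_remote field value by trying to
# decrypt it with the Fernet key Pulp uses for database field encryption.
from cryptography.fernet import Fernet, InvalidToken

KEY_PATH = "/etc/pulp/certs/database_fields.symmetric.key"  # assumed default


def classify(value):
    with open(KEY_PATH, "rb") as key_file:
        fernet = Fernet(key_file.read())
    try:
        inner = fernet.decrypt(value.encode())
    except InvalidToken:
        return "plaintext (the migration never encrypted it)"
    try:
        fernet.decrypt(inner)
    except InvalidToken:
        return "encrypted once (the expected state)"
    return "encrypted at least twice"
```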
I guess the reason is in 0073_encrypt_remote_fields.py:
```
def encrypt_remote_fields(apps, schema_editor):
offset = 0
chunk_size = 100
Remote = apps.get_model("core", "Remote")
with connection.cursor() as cursor:
while True:
cursor.execute(
f"SELECT pulp_id, {(',').join(fields)} FROM "
f"core_remote LIMIT {chunk_size} OFFSET {offset}"
)
records = cursor.fetchall()
offset += chunk_size
if len(records) == 0:
break
for record in records:
update = {
field: record[i] for i, field in enumerate(fields, 1) if record[i] is not None
}
if not update:
continue
Remote.objects.filter(pk=record[0]).update(**update)
```
I have more than 100 rows in core_remote, i.e. it runs two selects:
```
pulpcore=# select pulp_id, "username", "password", "proxy_username", "proxy_password", "client_key" from core_remote limit 100 offset 0;
pulpcore=# select pulp_id, "username", "password", "proxy_username", "proxy_password", "client_key" from core_remote limit 100 offset 100;
```
This assumes that the order of the rows in the whole table does not change between the first and second select. I suspect that this isn't true. Comparing the select on the database before and after the migration definitely returns rows in a different order.
So the migration should make sure to maintain order of returned rows, e.g. by adding a `ORDER by pulp_id`. That way the chunks should work unless some other operation in between would insert or delete rows or modify pulp_ids...
Of course this leaves people who have already migrated and cannot go back with broken content in those fields: some fields may still contain plaintext content which causes decryption to fail with this InvalidToken exception. Other fields are double encrypted and the decrypt will return the single encrypted string instead of the plaintext causing authentication to fail.
To make matters worse: at least on my Katello 4.2 server there are quite a few rows in core_remote containing empty strings "" in those fields instead of NULL, causing the migration to encrypt the empty string. Due to this, I have 41 rows in the 3.16 table with encrypted usernames even though I actually only have 4 repositories having a non-empty username...
Thinking a little bit more about it, I think LIMIT/chunking in the migration is simply wrong. It should just use the standard postgresql cursor/buffering to go through all rows and not try to split them up manually. Using LIMIT/OFFSET for paging through an unsorted table is just conceptually flawed.
In addition: technically, the SELECT and the following updates should be in a transaction block to make sure it's consistent and no other process can get in between the select and the updates.
Alternatively, the migration could temporarily add encrypted columns for those 5 fields and write the encrypted text there. This allows the migration to loop through the table (even with LIMIT) and find rows which still need to be encrypted. Once there are no rows left to be encrypted, a single update can move the encrypted texts from the temporary columns into the normal columns, and after that you can drop the temporary columns.
>So the migration should make sure to maintain order of returned rows, e.g. by adding a ORDER by pulp_id. That way the chunks should work unless some other operation in between would insert or delete rows or modify pulp_ids...
Yes, your theory is almost certainly the correct root cause. That needs to be fixed as you've described, using ordered queries.
>In addition: technically, the SELECT and the following updates should be in a transaction block to make sure it's consistent and no other process can get in between the select and the updates.
This part should be fine though. All Django migrations implicitly run inside a transaction unless you manually specify otherwise, and that isn't the case here.
> Yes, your theory is almost certainly the correct root cause. That needs to be fixed as you've described, using ordered queries.
Do you really think splitting the table into chunks is necessary? For the core_remote table, even if someone has a couple of thousand repositories, a single select of the whole table should easily fit into memory. And more importantly: you could easily optimize the select by searching only for rows which actually have something set in one of the five columns to be encrypted. In my case, that would return 4 rows with username/password set and 5 redhat repositories with client keys (after setting the empty string "" columns to null)...
Chunks can be useful if it's a huge table with many (millions) of rows. But in that case, you actually wouldn't want to sort the table because it is much more efficient to deliver the rows in database order.
Isn't it possible with django to loop through the rows from the database directly, i.e. instead of using fetchall which I assume retrieves all rows from the select have something like a simple "fetch" to get the next row from the cursor until it hits the end?
>Do you really think splitting the table into chunks is necessary? For the core_remote table, even if someone has a couple of thousand repositories, a single select of the whole table should easily fit into memory.
I don't know why it was done this way in particular, the comments on that [PR](https://github.com/pulp/pulpcore/pull/1301) aren't illuminating, but I'll bring it up on Monday and see if that whole requirement can be dropped. | 2022-03-17T20:04:22 |
|
pulp/pulpcore | 2,357 | pulp__pulpcore-2357 | [
"2327"
] | 799c9c639f9424ad6f7a4c0c7ce779c29300cecd | diff --git a/pulpcore/app/management/commands/datarepair-2327.py b/pulpcore/app/management/commands/datarepair-2327.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/app/management/commands/datarepair-2327.py
@@ -0,0 +1,120 @@
+from gettext import gettext as _
+
+from django.db import connection
+from django.conf import settings
+from django.core.management import BaseCommand
+from django.db.models import Q
+from django.utils.encoding import force_bytes, force_str
+
+import cryptography
+
+from pulpcore.app.models import Remote
+
+
+class Command(BaseCommand):
+ """
+ Django management command for repairing incorrectly migrated remote data.
+ """
+
+ help = _(
+ "Repairs issue #2327. A small number of configuration settings may have been "
+ "corrupted during an upgrade from a previous version of Pulp to a Pulp version "
+ "between 3.15-3.18, resulting in trouble when syncing or viewing certain remotes. "
+ "This script repairs the data (which was not lost)."
+ )
+
+ def add_arguments(self, parser):
+ """Set up arguments."""
+ parser.add_argument(
+ "--dry-run",
+ action="store_true",
+ help=_("Don't modify anything, just collect results on how many Remotes are impacted."),
+ )
+
+ def handle(self, *args, **options):
+
+ dry_run = options["dry_run"]
+ fields = ("username", "password", "proxy_username", "proxy_password", "client_key")
+
+ with open(settings.DB_ENCRYPTION_KEY, "rb") as key_file:
+ fernet = cryptography.fernet.Fernet(key_file.read())
+
+ possibly_affected_remotes = (
+ Q(username__isnull=False)
+ | Q(password__isnull=False)
+ | Q(proxy_username__isnull=False)
+ | Q(proxy_password__isnull=False)
+ | Q(client_key__isnull=False)
+ )
+
+ number_unencrypted = 0
+ number_multi_encrypted = 0
+
+ for remote_pk in Remote.objects.filter(possibly_affected_remotes).values_list(
+ "pk", flat=True
+ ):
+ try:
+ remote = Remote.objects.get(pk=remote_pk)
+ # if we can get the remote successfully, it is either OK or the fields are
+ # encrypted more than once
+ except cryptography.fernet.InvalidToken:
+ # If decryption fails then it probably hasn't been encrypted yet
+ # get the raw column value, avoiding any Django field handling
+ with connection.cursor() as cursor:
+ cursor.execute(
+ "SELECT username, password, proxy_username, proxy_password, client_key "
+ "FROM core_remote WHERE pulp_id = %s",
+ [str(remote_pk)],
+ )
+ row = cursor.fetchone()
+
+ field_values = {}
+
+ for field, value in zip(fields, row):
+ field_values[field] = value
+
+ if not dry_run:
+ Remote.objects.filter(pk=remote_pk).update(**field_values)
+ number_unencrypted += 1
+ else:
+ times_decrypted = 0
+ keep_trying = True
+ needs_update = False
+
+ while keep_trying:
+ for field in fields:
+ field_value = getattr(remote, field) # value gets decrypted once on access
+ if not field_value:
+ continue
+
+ try:
+ # try to decrypt it again
+ field_value = force_str(fernet.decrypt(force_bytes(field_value)))
+ # it was decrypted successfully again time, so it was probably
+ # encrypted multiple times over. lets re-set the value with the
+ # newly decrypted value
+ setattr(remote, field, field_value)
+ needs_update = True
+ except cryptography.fernet.InvalidToken:
+ # couldn't be decrypted again, stop here
+ keep_trying = False
+
+ times_decrypted += 1
+
+ if needs_update:
+ if not dry_run:
+ remote.save()
+ number_multi_encrypted += 1
+
+ if dry_run:
+ print("Remotes with un-encrypted fields: {}".format(number_unencrypted))
+ print("Remotes encrypted multiple times: {}".format(number_multi_encrypted))
+ else:
+ if not number_unencrypted and not number_multi_encrypted:
+ print("Finished. (OK)")
+ else:
+ print(
+ "Finished. ({} remotes fixed)".format(
+ number_unencrypted + number_multi_encrypted
+ )
+ )
diff --git a/pulpcore/app/migrations/0073_encrypt_remote_fields.py b/pulpcore/app/migrations/0073_encrypt_remote_fields.py
--- a/pulpcore/app/migrations/0073_encrypt_remote_fields.py
+++ b/pulpcore/app/migrations/0073_encrypt_remote_fields.py
@@ -1,59 +1,53 @@
# Generated by Django 2.2.20 on 2021-04-29 14:33
-from django.db import connection, migrations
-from django.db.models import Q
-
+from django.db import migrations
import pulpcore.app.models.fields
fields = ("username", "password", "proxy_username", "proxy_password", "client_key")
+new_fields = ("_encrypted_username", "_encrypted_password", "_encrypted_proxy_username", "_encrypted_proxy_password", "_encrypted_client_key")
def encrypt_remote_fields(apps, schema_editor):
- offset = 0
- chunk_size = 100
Remote = apps.get_model("core", "Remote")
- with connection.cursor() as cursor:
- while True:
- cursor.execute(
- f"SELECT pulp_id, {(',').join(fields)} FROM "
- f"core_remote LIMIT {chunk_size} OFFSET {offset}"
- )
- records = cursor.fetchall()
- offset += chunk_size
+ remotes_needing_update = []
+ for remote in Remote.objects.all().iterator():
+ if not any([getattr(remote, field) for field in fields]):
+ continue
- if len(records) == 0:
- break
+ remote._encrypted_username = remote.username
+ remote._encrypted_password = remote.password
+ remote._encrypted_proxy_username = remote.proxy_username
+ remote._encrypted_proxy_password = remote.proxy_password
+ remote._encrypted_client_key = remote.client_key
+ remotes_needing_update.append(remote)
- for record in records:
- update = {
- field: record[i] for i, field in enumerate(fields, 1) if record[i] is not None
- }
- if not update:
- continue
+ if len(remotes_needing_update) > 100:
+ Remote.objects.bulk_update(remotes_needing_update, new_fields)
+ remotes_needing_update.clear()
- Remote.objects.filter(pk=record[0]).update(**update)
+ Remote.objects.bulk_update(remotes_needing_update, new_fields)
def unencrypt_remote_fields(apps, schema_editor):
Remote = apps.get_model("core", "Remote")
- q = Q()
- for field in fields:
- q &= Q(**{field: None}) | Q(**{field: ""})
+ remotes_needing_update = []
+ for remote in Remote.objects.all().iterator():
+ if not any([getattr(remote, field) for field in new_fields]):
+ continue
+ remote.username = remote._encrypted_username
+ remote.password = remote._encrypted_password
+ remote.proxy_username = remote._encrypted_proxy_username
+ remote.proxy_password = remote._encrypted_proxy_password
+ remote.client_key = remote._encrypted_client_key
+ remotes_needing_update.append(remote)
- for remote in Remote.objects.exclude(q):
- update = [
- f"{field} = '{getattr(remote, field)}'"
- for field in fields
- if getattr(remote, field) is not None
- ]
- query = (
- f"UPDATE core_remote cr SET {(', ').join(update)} WHERE pulp_id = '{remote.pulp_id}'"
- )
+ if len(remotes_needing_update) > 100:
+ Remote.objects.bulk_update(remotes_needing_update, fields)
+ remotes_needing_update.clear()
- with connection.cursor() as cursor:
- cursor.execute(query)
+ Remote.objects.bulk_update(remotes_needing_update, fields)
class Migration(migrations.Migration):
@@ -63,33 +57,82 @@ class Migration(migrations.Migration):
]
operations = [
- migrations.AlterField(
+ # Add new fields to temporarily hold the encrypted values
+ migrations.AddField(
model_name="remote",
- name="client_key",
+ name="_encrypted_client_key",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
- migrations.AlterField(
+ migrations.AddField(
model_name="remote",
- name="password",
+ name="_encrypted_password",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
- migrations.AlterField(
+ migrations.AddField(
model_name="remote",
- name="proxy_password",
+ name="_encrypted_proxy_password",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
- migrations.AlterField(
+ migrations.AddField(
model_name="remote",
- name="proxy_username",
+ name="_encrypted_proxy_username",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
- migrations.AlterField(
+ migrations.AddField(
model_name="remote",
- name="username",
+ name="_encrypted_username",
field=pulpcore.app.models.fields.EncryptedTextField(null=True),
),
+ # Populate the new fields with encrypted values computed from the unencrypted fields
migrations.RunPython(
code=encrypt_remote_fields,
reverse_code=unencrypt_remote_fields,
),
+ # Remove the unencrypted columns
+ migrations.RemoveField(
+ model_name="remote",
+ name="client_key",
+ ),
+ migrations.RemoveField(
+ model_name="remote",
+ name="password",
+ ),
+ migrations.RemoveField(
+ model_name="remote",
+ name="proxy_password",
+ ),
+ migrations.RemoveField(
+ model_name="remote",
+ name="proxy_username",
+ ),
+ migrations.RemoveField(
+ model_name="remote",
+ name="username",
+ ),
+ # Replace the formerly-unencrypted columns with the new encrypted ones
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_client_key",
+ new_name="client_key",
+ ),
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_password",
+ new_name="password",
+ ),
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_proxy_password",
+ new_name="proxy_password",
+ ),
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_proxy_username",
+ new_name="proxy_username",
+ ),
+ migrations.RenameField(
+ model_name="remote",
+ old_name="_encrypted_username",
+ new_name="username",
+ ),
]
| Migration of encrypted fields between 3.14 and 3.16 is broken for some remotes
**Version**
After upgrading from Katello 4.2 to 4.3, some users are encountering sync problems with some repositories. A 500 error is triggered when the /repositories/.../sync/ endpoint is used. The error seems to be related to the client_cert / client_key / ca_cert values.
```
Feb 24 11:28:57 foreman pulpcore-api: pulp [e2601bd6-70a5-4a87-ace3-a386280e3e30]: django.request:ERROR: Internal Server Error: /pulp/api/v3/remotes/rpm/rpm/89e5b587-2295-4f1d-a49d-c0e8ed518c67/
Feb 24 11:28:57 foreman pulpcore-api: Traceback (most recent call last):
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 96, in _get_unverified_token_data
Feb 24 11:28:57 foreman pulpcore-api: data = base64.urlsafe_b64decode(token)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/rh/rh-python38/root/usr/lib64/python3.8/base64.py", line 133, in urlsafe_b64decode
Feb 24 11:28:57 foreman pulpcore-api: return b64decode(s)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/rh/rh-python38/root/usr/lib64/python3.8/base64.py", line 87, in b64decode
Feb 24 11:28:57 foreman pulpcore-api: return binascii.a2b_base64(s)
Feb 24 11:28:57 foreman pulpcore-api: binascii.Error: Incorrect padding
Feb 24 11:28:57 foreman pulpcore-api: During handling of the above exception, another exception occurred:
Feb 24 11:28:57 foreman pulpcore-api: Traceback (most recent call last):
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/core/handlers/exception.py", line 47, in inner
Feb 24 11:28:57 foreman pulpcore-api: response = get_response(request)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/core/handlers/base.py", line 181, in _get_response
Feb 24 11:28:57 foreman pulpcore-api: response = wrapped_callback(request, *callback_args, **callback_kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view
Feb 24 11:28:57 foreman pulpcore-api: return view_func(*args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/viewsets.py", line 125, in view
Feb 24 11:28:57 foreman pulpcore-api: return self.dispatch(request, *args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 509, in dispatch
Feb 24 11:28:57 foreman pulpcore-api: response = self.handle_exception(exc)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 469, in handle_exception
Feb 24 11:28:57 foreman pulpcore-api: self.raise_uncaught_exception(exc)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 480, in raise_uncaught_exception
Feb 24 11:28:57 foreman pulpcore-api: raise exc
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 506, in dispatch
Feb 24 11:28:57 foreman pulpcore-api: response = handler(request, *args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/viewsets/base.py", line 470, in partial_update
Feb 24 11:28:57 foreman pulpcore-api: return self.update(request, *args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/viewsets/base.py", line 452, in update
Feb 24 11:28:57 foreman pulpcore-api: instance = self.get_object()
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/generics.py", line 96, in get_object
Feb 24 11:28:57 foreman pulpcore-api: obj = get_object_or_404(queryset, **filter_kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/generics.py", line 19, in get_object_or_404
Feb 24 11:28:57 foreman pulpcore-api: return _get_object_or_404(queryset, *filter_args, **filter_kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/shortcuts.py", line 76, in get_object_or_404
Feb 24 11:28:57 foreman pulpcore-api: return queryset.get(*args, **kwargs)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 431, in get
Feb 24 11:28:57 foreman pulpcore-api: num = len(clone)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 262, in __len__
Feb 24 11:28:57 foreman pulpcore-api: self._fetch_all()
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 1324, in _fetch_all
Feb 24 11:28:57 foreman pulpcore-api: self._result_cache = list(self._iterable_class(self))
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 68, in __iter__
Feb 24 11:28:57 foreman pulpcore-api: for row in compiler.results_iter(results):
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/sql/compiler.py", line 1122, in apply_converters
Feb 24 11:28:57 foreman pulpcore-api: value = converter(value, expression, connection)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/models/fields.py", line 104, in from_db_value
Feb 24 11:28:57 foreman pulpcore-api: return force_str(self._fernet.decrypt(force_bytes(value)))
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 75, in decrypt
Feb 24 11:28:57 foreman pulpcore-api: timestamp, data = Fernet._get_unverified_token_data(token)
Feb 24 11:28:57 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 98, in _get_unverified_token_data
Feb 24 11:28:57 foreman pulpcore-api: raise InvalidToken
Feb 24 11:28:57 foreman pulpcore-api: cryptography.fernet.InvalidToken
Feb 24 11:28:57 foreman pulpcore-api: pulp [e2601bd6-70a5-4a87-ace3-a386280e3e30]: - - [24/Feb/2022:10:28:57 +0000] "PATCH /pulp/api/v3/remotes/rpm/rpm/89e5b587-2295-4f1d-a49d-c0e8ed518c67/ HTTP/1.1" 500 145 "-" "OpenAPI-Generator/3.16.1/ruby"
Feb 24 11:28:58 foreman qdrouterd: SERVER (info) [C464018] Connection from 10.2.1.176:50898 (to :5647) failed: amqp:resource-limit-exceeded local-idle-timeout expired
Feb 24 11:28:58 foreman qdrouterd: ROUTER_CORE (info) [C464018][L1196864] Link detached: del=0 presett=0 psdrop=0 acc=0 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no
Feb 24 11:28:58 foreman qdrouterd: ROUTER_CORE (info) [C464018] Connection Closed
Feb 24 11:28:58 foreman qdrouterd: ROUTER_CORE (info) [C4][L1196865] Link detached: del=0 presett=0 psdrop=0 acc=0 rej=0 rel=0 mod=0 delay1=0 delay10=0 blocked=no
Feb 24 11:28:58 foreman pulpcore-api: pulp [e2601bd6-70a5-4a87-ace3-a386280e3e30]: django.request:ERROR: Internal Server Error: /pulp/api/v3/repositories/rpm/rpm/b4a448e8-896c-4fec-8ee2-fcdf4307f2fd/sync/
Feb 24 11:28:58 foreman pulpcore-api: Traceback (most recent call last):
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 96, in _get_unverified_token_data
Feb 24 11:28:58 foreman pulpcore-api: data = base64.urlsafe_b64decode(token)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/rh/rh-python38/root/usr/lib64/python3.8/base64.py", line 133, in urlsafe_b64decode
Feb 24 11:28:58 foreman pulpcore-api: return b64decode(s)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/rh/rh-python38/root/usr/lib64/python3.8/base64.py", line 87, in b64decode
Feb 24 11:28:58 foreman pulpcore-api: return binascii.a2b_base64(s)
Feb 24 11:28:58 foreman pulpcore-api: binascii.Error: Incorrect padding
Feb 24 11:28:58 foreman pulpcore-api: During handling of the above exception, another exception occurred:
Feb 24 11:28:58 foreman pulpcore-api: Traceback (most recent call last):
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/core/handlers/exception.py", line 47, in inner
Feb 24 11:28:58 foreman pulpcore-api: response = get_response(request)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/core/handlers/base.py", line 181, in _get_response
Feb 24 11:28:58 foreman pulpcore-api: response = wrapped_callback(request, *callback_args, **callback_kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view
Feb 24 11:28:58 foreman pulpcore-api: return view_func(*args, **kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/viewsets.py", line 125, in view
Feb 24 11:28:58 foreman pulpcore-api: return self.dispatch(request, *args, **kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 509, in dispatch
Feb 24 11:28:58 foreman pulpcore-api: response = self.handle_exception(exc)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 469, in handle_exception
Feb 24 11:28:58 foreman pulpcore-api: self.raise_uncaught_exception(exc)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 480, in raise_uncaught_exception
Feb 24 11:28:58 foreman pulpcore-api: raise exc
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/views.py", line 506, in dispatch
Feb 24 11:28:58 foreman pulpcore-api: response = handler(request, *args, **kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/viewsets.py", line 130, in sync
Feb 24 11:28:58 foreman pulpcore-api: serializer.is_valid(raise_exception=True)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/serializers.py", line 220, in is_valid
Feb 24 11:28:58 foreman pulpcore-api: self._validated_data = self.run_validation(self.initial_data)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/serializers.py", line 419, in run_validation
Feb 24 11:28:58 foreman pulpcore-api: value = self.to_internal_value(data)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/serializers.py", line 476, in to_internal_value
Feb 24 11:28:58 foreman pulpcore-api: validated_value = field.run_validation(primitive_value)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/relations.py", line 153, in run_validation
Feb 24 11:28:58 foreman pulpcore-api: return super().run_validation(data)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/fields.py", line 568, in run_validation
Feb 24 11:28:58 foreman pulpcore-api: value = self.to_internal_value(data)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/relations.py", line 373, in to_internal_value
Feb 24 11:28:58 foreman pulpcore-api: return self.get_object(match.view_name, match.args, match.kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/serializers/base.py", line 322, in get_object
Feb 24 11:28:58 foreman pulpcore-api: return super().get_object(*args, **kwargs).cast()
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/rest_framework/relations.py", line 318, in get_object
Feb 24 11:28:58 foreman pulpcore-api: return queryset.get(**lookup_kwargs)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 431, in get
Feb 24 11:28:58 foreman pulpcore-api: num = len(clone)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 262, in __len__
Feb 24 11:28:58 foreman pulpcore-api: self._fetch_all()
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 1324, in _fetch_all
Feb 24 11:28:58 foreman pulpcore-api: self._result_cache = list(self._iterable_class(self))
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py", line 68, in __iter__
Feb 24 11:28:58 foreman pulpcore-api: for row in compiler.results_iter(results):
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/sql/compiler.py", line 1122, in apply_converters
Feb 24 11:28:58 foreman pulpcore-api: value = converter(value, expression, connection)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/models/fields.py", line 104, in from_db_value
Feb 24 11:28:58 foreman pulpcore-api: return force_str(self._fernet.decrypt(force_bytes(value)))
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 75, in decrypt
Feb 24 11:28:58 foreman pulpcore-api: timestamp, data = Fernet._get_unverified_token_data(token)
Feb 24 11:28:58 foreman pulpcore-api: File "/opt/theforeman/tfm-pulpcore/root/usr/lib64/python3.8/site-packages/cryptography/fernet.py", line 98, in _get_unverified_token_data
Feb 24 11:28:58 foreman pulpcore-api: raise InvalidToken
Feb 24 11:28:58 foreman pulpcore-api: cryptography.fernet.InvalidToken
Feb 24 11:28:58 foreman pulpcore-api: pulp [e2601bd6-70a5-4a87-ace3-a386280e3e30]: - - [24/Feb/2022:10:28:58 +0000] "POST /pulp/api/v3/repositories/rpm/rpm/b4a448e8-896c-4fec-8ee2-fcdf4307f2fd/sync/ HTTP/1.1" 500 145
```
>
> What I did notice, though, is that on the old 4.2 server with pulpcore 3.14 all those remotes have an empty username and password set, i.e. an empty string “” and not NULL like many others. However, the list of remotes on the old server with an empty-string username is longer than the affected ones on my 4.3 server.
>
> So it seems to be an issue with the database migration from 3.14 to 3.16 and the encryption of some fields in the database. The migration/encryption seems to fail in some cases and the original content remains in the database, which makes decryption fail. This is also true for my affected rhel repository: I can see in the database that the remote contains the RSA private key in client_key, unlike the other rhel repos which contain some base64-encoded string in the client_key fields.
>
> Thus the problem is that for some rows in the database (at least the core_remote table) encryption of the fields client_key, username, password, (proxy_username, proxy_password, too, I guess) fails during migration from 3.14 to 3.16 leaving the original content in the database. This leads to decryption failure in 3.16.
>
> For whatever reason only a few repos are affected, mostly those which have empty strings for username, password instead of NULL like many others. However, even with empty strings for some remotes the encryption works while it doesn’t for others.
**Describe the bug**
API calls to trigger a sync return a 500 error for some remotes
**To Reproduce**
Unclear
**Expected behavior**
A sync task is correctly created, no 500 error.
**Additional context**
Discourse discussion: https://community.theforeman.org/t/pulprpmclient-apierror-http-500-during-sync-repository/27480/13?u=dralley
| Looking closer into the encrypted strings in the database after the migration from 3.14 to 3.16, I noticed that the empty strings are consecutive in an unordered select. That made me suspect that the problem is the chunking and the assumption that the order of rows won't change between chunks. To verify, I decrypted all encrypted usernames in the migrated database and found what I had suspected:
```
>>> f.decrypt(b'gAAAAABiKvJgZhyIdXy__3X2rh8QdXezaWj-Y-RelFEfYIWg2mrTREsKTB7ydPY2gn3ZhveMwE3ocN1KO8YV3h5iA-wMibo_aw==')
b''
>>> f.decrypt(b'gAAAAABiKvJzCDPxmuSFmmAawerxPi1AqUCP4H8NxWiO0ypnYwFraXPj35EWQ4ABupu_KIBbBPFhW2elE_4Ru6FQQWRggn1yeg==')
b''
>>> f.decrypt(b'gAAAAABiKvGrd6IVPXjJZuTUPYxXg_F3jXvaMmbH3l_O2x1hNnxG8vBKeTHav_0Bz2rjsjcUc6CH_K4eapwLpV0tNGF_cJZKbRbqsSB_JZTQyjW8jSovvTipMSsbWeQJJZ-B5yLWk6vBnNk9cQ81I6kQOnXZolXbRfIPFdPM9AhwCJro8vnDcN4AQ5NKe9dyOVM80hHDquUW2IavogypDl9XLbsnr6m9KQ==')
b'gAAAAABiKe5e7RWrDl9cNAUTLHZ9CjN30uvPAZ_KZZskG_pyCBDCJBJ5pY6pvKaidZltPjWa0iqLP8RsKGakC8fpTi5xMz-c6Q=='
...
```
Obviously the last one above has been encrypted twice. So basically, the migration does encrypt some rows twice and some not at all.
I guess the reason is in 0073_encrypt_remote_fields.py:
```
def encrypt_remote_fields(apps, schema_editor):
offset = 0
chunk_size = 100
Remote = apps.get_model("core", "Remote")
with connection.cursor() as cursor:
while True:
cursor.execute(
f"SELECT pulp_id, {(',').join(fields)} FROM "
f"core_remote LIMIT {chunk_size} OFFSET {offset}"
)
records = cursor.fetchall()
offset += chunk_size
if len(records) == 0:
break
for record in records:
update = {
field: record[i] for i, field in enumerate(fields, 1) if record[i] is not None
}
if not update:
continue
Remote.objects.filter(pk=record[0]).update(**update)
```
I have more than 100 rows in core_remote, i.e. it runs two selects:
```
pulpcore=# select pulp_id, "username", "password", "proxy_username", "proxy_password", "client_key" from core_remote limit 100 offset 0;
pulpcore=# select pulp_id, "username", "password", "proxy_username", "proxy_password", "client_key" from core_remote limit 100 offset 100;
```
This assumes that the order of the rows in the whole table does not change between the first and second select. I suspect that this isn't true. Comparing the select on the database before and after the migration definitely returns rows in a different order.
So the migration should make sure to maintain order of returned rows, e.g. by adding a `ORDER by pulp_id`. That way the chunks should work unless some other operation in between would insert or delete rows or modify pulp_ids...
Of course this leaves people who have already migrated and cannot go back with broken content in those fields: some fields may still contain plaintext content which causes decryption to fail with this InvalidToken exception. Other fields are double encrypted and the decrypt will return the single encrypted string instead of the plaintext causing authentication to fail.
To make matters worse: at least on my Katello 4.2 server there are quite a few rows in core_remote containing empty strings "" in those fields instead of NULL, causing the migration to encrypt the empty string. Due to this, I have 41 rows in the 3.16 table with encrypted usernames even though I actually only have 4 repositories having a non-empty username...
Thinking a little bit more about it, I think LIMIT/chunking in the migration is simply wrong. It should just use the standard postgresql cursor/buffering to go through all rows and not try to split them up manually. Using LIMIT/OFFSET for paging through an unsorted table is just conceptually flawed.
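Django can indeed do that: on PostgreSQL, `QuerySet.iterator()` streams rows through a server-side cursor, so there is no LIMIT/OFFSET paging and no assumption about row order. A minimal sketch of a data migration written that way — essentially what the rewritten migration in the patch above does, combined with the temporary `_encrypted_*` columns so that reads never pass through `EncryptedTextField` decryption:
```
def encrypt_remote_fields(apps, schema_editor):
    # Illustrative sketch; the column names follow the patch above.
    fields = ("username", "password", "proxy_username", "proxy_password", "client_key")
    new_fields = tuple(f"_encrypted_{field}" for field in fields)
    Remote = apps.get_model("core", "Remote")

    to_update = []
    # iterator() uses a server-side cursor on PostgreSQL, so every row is
    # visited exactly once regardless of the physical row order.
    for remote in Remote.objects.all().iterator():
        if not any(getattr(remote, field) for field in fields):
            continue
        for old, new in zip(fields, new_fields):
            # The old columns are still plain text at this point; assigning to
            # the new EncryptedTextField columns encrypts the value on write.
            setattr(remote, new, getattr(remote, old))
        to_update.append(remote)

    # The real migration flushes every 100 objects; collecting everything
    # first is fine for a table of this size.
    Remote.objects.bulk_update(to_update, new_fields)
```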
In addition: technically, the SELECT and the following updates should be in a transaction block to make sure it's consistent and no other process can get in between the select and the updates.
Alternatively, the migration could temporarily add encrypted columns for those 5 fields and write the encrypted text there. This allows the migration to loop through the table (even with LIMIT) and find rows which still need to be encrypted. Once there are no rows left to be encrypted, a single update can move the encrypted texts from the temporary columns into the normal columns, and after that you can drop the temporary columns.
>So the migration should make sure to maintain order of returned rows, e.g. by adding a ORDER by pulp_id. That way the chunks should work unless some other operation in between would insert or delete rows or modify pulp_ids...
Yes, your theory is almost certainly the correct root cause. That needs to be fixed as you've described, using ordered queries.
>In addition: technically, the SELECT and the following updates should be in a transaction block to make sure it's consistent and no other process can get in between the select and the updates.
This part should be fine though. All Django migrations implicitly run inside a transaction unless you manually specify otherwise, and that isn't the case here.
> Yes, your theory is almost certainly the correct root cause. That needs to be fixed as you've described, using ordered queries.
Do you really think splitting the table into chunks is necessary? For the core_remote table, even if someone has a couple of thousand repositories, a single select of the whole table should easily fit into memory. And more importantly: you could easily optimize the select by searching only for rows which actually have something set in one of the five columns to be encrypted. In my case, that would return 4 rows with username/password set and 5 redhat repositories with client keys (after setting the empty string "" columns to null)...
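That narrowing is also what the `datarepair-2327` command in the patch above does — a minimal sketch of the filter (ORM names as in pulpcore; note that empty strings still match, only NULLs are excluded):
```
from django.db.models import Q

from pulpcore.app.models import Remote

# Only remotes that actually have one of the five candidate fields set.
possibly_affected = (
    Q(username__isnull=False)
    | Q(password__isnull=False)
    | Q(proxy_username__isnull=False)
    | Q(proxy_password__isnull=False)
    | Q(client_key__isnull=False)
)
affected_remotes = Remote.objects.filter(possibly_affected)
```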
Chunks can be useful if it's a huge table with many (millions) of rows. But in that case, you actually wouldn't want to sort the table because it is much more efficient to deliver the rows in database order.
Isn't it possible with django to loop through the rows from the database directly, i.e. instead of using fetchall which I assume retrieves all rows from the select have something like a simple "fetch" to get the next row from the cursor until it hits the end?
>Do you really think splitting the table into chunks is necessary? For the core_remote table, even if someone has a couple of thousand repositories, a single select of the whole table should easily fit into memory.
I don't know why it was done this way in particular, the comments on that [PR](https://github.com/pulp/pulpcore/pull/1301) aren't illuminating, but I'll bring it up on Monday and see if that whole requirement can be dropped. | 2022-03-17T20:04:31 |
|
pulp/pulpcore | 2,378 | pulp__pulpcore-2378 | [
"2374"
] | 022ce83040f24d8a5fb74a6c6c3325b07691dc36 | diff --git a/pulpcore/app/admin.py b/pulpcore/app/admin.py
deleted file mode 100644
--- a/pulpcore/app/admin.py
+++ /dev/null
@@ -1,56 +0,0 @@
-from django.contrib import admin
-from guardian.admin import GuardedModelAdmin
-
-from pulpcore.app.models import Task, RBACContentGuard
-
-
[email protected](Task)
-class TaskAdmin(GuardedModelAdmin):
- list_display = (
- "pulp_id",
- "pulp_created",
- "pulp_last_updated",
- "state",
- "name",
- "started_at",
- "finished_at",
- "error",
- "worker",
- "parent_task",
- "task_group",
- )
- list_filter = (
- "pulp_created",
- "pulp_last_updated",
- "started_at",
- "finished_at",
- )
- raw_id_fields = ("worker",)
- search_fields = ("name",)
- readonly_fields = (
- "pulp_id",
- "pulp_created",
- "pulp_last_updated",
- "state",
- "name",
- "started_at",
- "finished_at",
- "error",
- "worker",
- "parent_task",
- "task_group",
- )
-
-
[email protected](RBACContentGuard)
-class RBACContentGuardAdmin(GuardedModelAdmin):
- list_display = (
- "name",
- "description",
- )
- list_filter = (
- "name",
- "pulp_created",
- "pulp_last_updated",
- )
- search_fields = ("name",)
diff --git a/pulpcore/app/settings.py b/pulpcore/app/settings.py
--- a/pulpcore/app/settings.py
+++ b/pulpcore/app/settings.py
@@ -37,8 +37,6 @@
DEPLOY_ROOT = Path("/var/lib/pulp")
MEDIA_ROOT = str(DEPLOY_ROOT / "media") # Django 3.1 adds support for pathlib.Path
-ADMIN_SITE_URL = "admin/"
-
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
@@ -67,7 +65,6 @@
INSTALLED_APPS = [
# django stuff
- "django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
diff --git a/pulpcore/app/urls.py b/pulpcore/app/urls.py
--- a/pulpcore/app/urls.py
+++ b/pulpcore/app/urls.py
@@ -2,7 +2,6 @@
import logging
from django.conf import settings
-from django.contrib import admin
from django.urls import path, include
from drf_spectacular.views import (
SpectacularJSONAPIView,
@@ -143,7 +142,6 @@ def __repr__(self):
PulpImporterImportCheckView.as_view(),
),
path("auth/", include("rest_framework.urls")),
- path(settings.ADMIN_SITE_URL, admin.site.urls),
]
urlpatterns.append(
| Remove the django admin UI served at `/admin/`.
## Background + Proposal
We enabled the django admin as tech-preview so that we could view and modify permissions and group membership, back before we had APIs for all of these things. Now that we have APIs for all of this, we should discontinue the use of the django admin UI.
## Why?
Pulp's data is highly relational and having users interact directly with the database via forms is not a safe situation.
## Details
Removing it would involve removing these parts roughly:
* The [admin.py](https://github.com/pulp/pulpcore/blob/fcb355c1f8fec6780524c010cf5bba7e1aa1d51b/pulpcore/app/admin.py)
* The [settings.py](https://github.com/pulp/pulpcore/blob/fcb355c1f8fec6780524c010cf5bba7e1aa1d51b/pulpcore/app/settings.py#L67) and [the setting](https://github.com/pulp/pulpcore/blob/main/pulpcore/app/settings.py#L40).
* The [urls.py usage](https://github.com/pulp/pulpcore/blob/fcb355c1f8fec6780524c010cf5bba7e1aa1d51b/pulpcore/app/urls.py#L144)
* The [docs](https://github.com/pulp/pulpcore/blob/901499a3566a940516664ae6ca042455efee0115/docs/configuration/settings.rst#admin_site_url).
| 2022-03-22T16:25:21 |
||
pulp/pulpcore | 2,380 | pulp__pulpcore-2380 | [
"2379"
] | 3e5b68377088616c9ec55884f614b00c4a5dfc4e | diff --git a/pulpcore/app/viewsets/__init__.py b/pulpcore/app/viewsets/__init__.py
--- a/pulpcore/app/viewsets/__init__.py
+++ b/pulpcore/app/viewsets/__init__.py
@@ -47,6 +47,7 @@
DistributionFilter,
DistributionViewSet,
ListContentGuardViewSet,
+ ListDistributionViewSet,
ListPublicationViewSet,
PublicationFilter,
PublicationViewSet,
diff --git a/pulpcore/app/viewsets/publication.py b/pulpcore/app/viewsets/publication.py
--- a/pulpcore/app/viewsets/publication.py
+++ b/pulpcore/app/viewsets/publication.py
@@ -238,6 +238,18 @@ class Meta:
}
+class ListDistributionViewSet(NamedModelViewSet, mixins.ListModelMixin):
+ endpoint_name = "distributions"
+ queryset = Distribution.objects.all()
+ serializer_class = DistributionSerializer
+ filterset_class = DistributionFilter
+
+ @classmethod
+ def is_master_viewset(cls):
+ """Do not hide from the routers."""
+ return False
+
+
class DistributionViewSet(
NamedModelViewSet,
mixins.RetrieveModelMixin,
| diff --git a/pulpcore/tests/functional/api/using_plugin/test_distributions.py b/pulpcore/tests/functional/api/using_plugin/test_distributions.py
--- a/pulpcore/tests/functional/api/using_plugin/test_distributions.py
+++ b/pulpcore/tests/functional/api/using_plugin/test_distributions.py
@@ -32,6 +32,7 @@
RepositorySyncURL,
)
from pulpcore.tests.functional.api.using_plugin.constants import (
+ BASE_DISTRIBUTION_PATH,
FILE_CHUNKED_FIXTURE_MANIFEST_URL,
FILE_CONTENT_NAME,
FILE_DISTRIBUTION_PATH,
@@ -160,7 +161,13 @@ def test_03_fully_update(self):
self.do_fully_update_attr(key)
@skip_if(bool, "distribution", False)
- def test_04_delete_distribution(self):
+ def test_04_list(self):
+ """Test the generic distribution list endpoint."""
+ distributions = self.client.get(BASE_DISTRIBUTION_PATH)
+ assert self.distribution["pulp_href"] in [distro["pulp_href"] for distro in distributions]
+
+ @skip_if(bool, "distribution", False)
+ def test_05_delete_distribution(self):
"""Delete a distribution."""
self.client.delete(self.distribution["pulp_href"])
with self.assertRaises(HTTPError):
| Add a generic distributions list endpoint
| 2022-03-22T16:44:36 |
|
pulp/pulpcore | 2,382 | pulp__pulpcore-2382 | [
"2381"
] | 3e5b68377088616c9ec55884f614b00c4a5dfc4e | diff --git a/pulpcore/plugin/stages/artifact_stages.py b/pulpcore/plugin/stages/artifact_stages.py
--- a/pulpcore/plugin/stages/artifact_stages.py
+++ b/pulpcore/plugin/stages/artifact_stages.py
@@ -293,6 +293,11 @@ async def _handle_remote_artifacts(self, batch):
"""
remotes_present = set()
for d_content in batch:
+ # https://code.djangoproject.com/ticket/33596
+ # If the content was pre-fetched previously, remove that cached data, which could be out
+ # of date.
+ if hasattr(d_content.content, "_remote_artifact_saver_cas"):
+ delattr(d_content.content, "_remote_artifact_saver_cas")
for d_artifact in d_content.d_artifacts:
if d_artifact.remote:
remotes_present.add(d_artifact.remote)
| Duplicate Key Exception Raised during Sync related to duplicate RemoteArtifact objects
**Version**
pulpcore 3.18 at 3e5b68377088616c9ec55884f614b00c4a5dfc4e and pulp_container at d801f93ee4f50b08bd9e8c699be668e2ed5732c1
**Describe the bug**
Sync with pulp_container until you receive the duplicate RemoteArtifact exception:
```
pulp [bf9ddb0e6acf4ade98840d4890e01b6e]: pulpcore.tasking.pulpcore_worker:INFO: Task 24e40894-bd11-446e-bccb-08de4bbfdce4 failed (duplicate key value violates unique constraint "core_remoteartifact_content_artifact_id_remote_id_be64c19e_uniq"
DETAIL: Key (content_artifact_id, remote_id)=(6225848a-55ca-4635-862c-e77db4bc98b7, c60e2039-a95e-4480-bbf7-329158ae4b17) already exists.
)
pulp [bf9ddb0e6acf4ade98840d4890e01b6e]: pulpcore.tasking.pulpcore_worker:INFO: File "/home/vagrant/devel/pulpcore/pulpcore/tasking/pulpcore_worker.py", line 442, in _perform_task
result = func(*args, **kwargs)
File "/home/vagrant/devel/pulp_container/pulp_container/app/tasks/synchronize.py", line 44, in synchronize
return dv.create()
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/declarative_version.py", line 161, in create
loop.run_until_complete(pipeline)
File "/usr/lib64/python3.10/asyncio/base_events.py", line 641, in run_until_complete
return future.result()
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/api.py", line 225, in create_pipeline
await asyncio.gather(*futures)
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/api.py", line 43, in __call__
await self.run()
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/artifact_stages.py", line 279, in run
await self._handle_remote_artifacts(batch)
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/artifact_stages.py", line 399, in _handle_remote_artifacts
await sync_to_async(RemoteArtifact.objects.bulk_create)(ras_to_create_ordered)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/asgiref/sync.py", line 414, in __call__
ret = await asyncio.wait_for(future, timeout=None)
File "/usr/lib64/python3.10/asyncio/tasks.py", line 408, in wait_for
return await fut
File "/usr/lib64/python3.10/concurrent/futures/thread.py", line 58, in run
result = self.fn(*self.args, **self.kwargs)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/asgiref/sync.py", line 455, in thread_handler
return func(*args, **kwargs)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py", line 502, in bulk_create
returned_columns = self._batched_insert(
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py", line 1287, in _batched_insert
inserted_rows.extend(self._insert(
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py", line 1270, in _insert
return query.get_compiler(using=using).execute_sql(returning_fields)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/sql/compiler.py", line 1416, in execute_sql
cursor.execute(sql, params)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 66, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 75, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 79, in _execute
with self.db.wrap_database_errors:
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/utils.py", line 90, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
```
**To Reproduce**
Run the pulp_container sync `pytest -v pulp_container/tests/functional/ -k test_sync` over and over until you receive an exception and the sync fails.
**Expected behavior**
For the sync to not fail on a duplicate key error
**Additional context**
It's easier to reproduce with the refactored pulp_container sync pipeline contained in d801f93ee4f50b08bd9e8c699be668e2ed5732c1
| The reason is that the prefetcher provides caching; see this Django bug: https://code.djangoproject.com/ticket/33596. I learned from @dkliban on Matrix that the last time we had this problem we used a solution [like this](https://github.com/pulp/pulpcore/commit/8dcdcd2dfb6d1f217e9725db9bbead1eef86c079). I'm going to try to put that into a PR now. | 2022-03-22T19:39:40
|
pulp/pulpcore | 2,383 | pulp__pulpcore-2383 | [
"2381"
] | 558f72e4a5dfb5e39c60bff0eb571ab479d711e9 | diff --git a/pulpcore/plugin/stages/artifact_stages.py b/pulpcore/plugin/stages/artifact_stages.py
--- a/pulpcore/plugin/stages/artifact_stages.py
+++ b/pulpcore/plugin/stages/artifact_stages.py
@@ -293,6 +293,11 @@ async def _handle_remote_artifacts(self, batch):
"""
remotes_present = set()
for d_content in batch:
+ # https://code.djangoproject.com/ticket/33596
+ # If the content was pre-fetched previously, remove that cached data, which could be out
+ # of date.
+ if hasattr(d_content.content, "_remote_artifact_saver_cas"):
+ delattr(d_content.content, "_remote_artifact_saver_cas")
for d_artifact in d_content.d_artifacts:
if d_artifact.remote:
remotes_present.add(d_artifact.remote)
| Duplicate Key Exception Raised during Sync related to duplicate RemoteArtifact objects
**Version**
pulpcore 3.18 at 3e5b68377088616c9ec55884f614b00c4a5dfc4e and pulp_container at d801f93ee4f50b08bd9e8c699be668e2ed5732c1
**Describe the bug**
Sync with pulp_container until you receive the duplicate RemoteArtifact exception:
```
pulp [bf9ddb0e6acf4ade98840d4890e01b6e]: pulpcore.tasking.pulpcore_worker:INFO: Task 24e40894-bd11-446e-bccb-08de4bbfdce4 failed (duplicate key value violates unique constraint "core_remoteartifact_content_artifact_id_remote_id_be64c19e_uniq"
DETAIL: Key (content_artifact_id, remote_id)=(6225848a-55ca-4635-862c-e77db4bc98b7, c60e2039-a95e-4480-bbf7-329158ae4b17) already exists.
)
pulp [bf9ddb0e6acf4ade98840d4890e01b6e]: pulpcore.tasking.pulpcore_worker:INFO: File "/home/vagrant/devel/pulpcore/pulpcore/tasking/pulpcore_worker.py", line 442, in _perform_task
result = func(*args, **kwargs)
File "/home/vagrant/devel/pulp_container/pulp_container/app/tasks/synchronize.py", line 44, in synchronize
return dv.create()
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/declarative_version.py", line 161, in create
loop.run_until_complete(pipeline)
File "/usr/lib64/python3.10/asyncio/base_events.py", line 641, in run_until_complete
return future.result()
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/api.py", line 225, in create_pipeline
await asyncio.gather(*futures)
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/api.py", line 43, in __call__
await self.run()
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/artifact_stages.py", line 279, in run
await self._handle_remote_artifacts(batch)
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/artifact_stages.py", line 399, in _handle_remote_artifacts
await sync_to_async(RemoteArtifact.objects.bulk_create)(ras_to_create_ordered)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/asgiref/sync.py", line 414, in __call__
ret = await asyncio.wait_for(future, timeout=None)
File "/usr/lib64/python3.10/asyncio/tasks.py", line 408, in wait_for
return await fut
File "/usr/lib64/python3.10/concurrent/futures/thread.py", line 58, in run
result = self.fn(*self.args, **self.kwargs)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/asgiref/sync.py", line 455, in thread_handler
return func(*args, **kwargs)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py", line 502, in bulk_create
returned_columns = self._batched_insert(
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py", line 1287, in _batched_insert
inserted_rows.extend(self._insert(
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py", line 1270, in _insert
return query.get_compiler(using=using).execute_sql(returning_fields)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/sql/compiler.py", line 1416, in execute_sql
cursor.execute(sql, params)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 66, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 75, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 79, in _execute
with self.db.wrap_database_errors:
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/utils.py", line 90, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
```
**To Reproduce**
Run the pulp_container sync `pytest -v pulp_container/tests/functional/ -k test_sync` over and over until you receive an exception and the sync fails.
**Expected behavior**
For the sync to not fail on a duplicate key error
**Additional context**
It's easier to reproduce with the refactored pulp_container sync pipeline contained in d801f93ee4f50b08bd9e8c699be668e2ed5732c1
| The reason is the prefetcher provides caching, see this bug https://code.djangoproject.com/ticket/33596 I learned from @dkliban on matrix that the last time we had this problem we used a solution [like this](https://github.com/pulp/pulpcore/commit/8dcdcd2dfb6d1f217e9725db9bbead1eef86c079). I'm going to try to put that into a PR now. | 2022-03-23T07:28:27 |
|
pulp/pulpcore | 2,384 | pulp__pulpcore-2384 | [
"2381"
] | 05c4f3db18198ee03c58068f3e2e2ae01b706a87 | diff --git a/pulpcore/plugin/stages/artifact_stages.py b/pulpcore/plugin/stages/artifact_stages.py
--- a/pulpcore/plugin/stages/artifact_stages.py
+++ b/pulpcore/plugin/stages/artifact_stages.py
@@ -289,6 +289,11 @@ async def _needed_remote_artifacts(self, batch):
"""
remotes_present = set()
for d_content in batch:
+ # https://code.djangoproject.com/ticket/33596
+ # If the content was pre-fetched previously, remove that cached data, which could be out
+ # of date.
+ if hasattr(d_content.content, "_remote_artifact_saver_cas"):
+ delattr(d_content.content, "_remote_artifact_saver_cas")
for d_artifact in d_content.d_artifacts:
if d_artifact.remote:
remotes_present.add(d_artifact.remote)
| Duplicate Key Exception Raised during Sync related to duplicate RemoteArtifact objects
**Version**
pulpcore 3.18 at 3e5b68377088616c9ec55884f614b00c4a5dfc4e and pulp_container at d801f93ee4f50b08bd9e8c699be668e2ed5732c1
**Describe the bug**
Sync with pulp_container until you receive the duplicate RemoteArtifact exception:
```
pulp [bf9ddb0e6acf4ade98840d4890e01b6e]: pulpcore.tasking.pulpcore_worker:INFO: Task 24e40894-bd11-446e-bccb-08de4bbfdce4 failed (duplicate key value violates unique constraint "core_remoteartifact_content_artifact_id_remote_id_be64c19e_uniq"
DETAIL: Key (content_artifact_id, remote_id)=(6225848a-55ca-4635-862c-e77db4bc98b7, c60e2039-a95e-4480-bbf7-329158ae4b17) already exists.
)
pulp [bf9ddb0e6acf4ade98840d4890e01b6e]: pulpcore.tasking.pulpcore_worker:INFO: File "/home/vagrant/devel/pulpcore/pulpcore/tasking/pulpcore_worker.py", line 442, in _perform_task
result = func(*args, **kwargs)
File "/home/vagrant/devel/pulp_container/pulp_container/app/tasks/synchronize.py", line 44, in synchronize
return dv.create()
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/declarative_version.py", line 161, in create
loop.run_until_complete(pipeline)
File "/usr/lib64/python3.10/asyncio/base_events.py", line 641, in run_until_complete
return future.result()
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/api.py", line 225, in create_pipeline
await asyncio.gather(*futures)
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/api.py", line 43, in __call__
await self.run()
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/artifact_stages.py", line 279, in run
await self._handle_remote_artifacts(batch)
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/artifact_stages.py", line 399, in _handle_remote_artifacts
await sync_to_async(RemoteArtifact.objects.bulk_create)(ras_to_create_ordered)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/asgiref/sync.py", line 414, in __call__
ret = await asyncio.wait_for(future, timeout=None)
File "/usr/lib64/python3.10/asyncio/tasks.py", line 408, in wait_for
return await fut
File "/usr/lib64/python3.10/concurrent/futures/thread.py", line 58, in run
result = self.fn(*self.args, **self.kwargs)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/asgiref/sync.py", line 455, in thread_handler
return func(*args, **kwargs)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py", line 502, in bulk_create
returned_columns = self._batched_insert(
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py", line 1287, in _batched_insert
inserted_rows.extend(self._insert(
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py", line 1270, in _insert
return query.get_compiler(using=using).execute_sql(returning_fields)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/sql/compiler.py", line 1416, in execute_sql
cursor.execute(sql, params)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 66, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 75, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 79, in _execute
with self.db.wrap_database_errors:
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/utils.py", line 90, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
```
**To Reproduce**
Run the pulp_container sync `pytest -v pulp_container/tests/functional/ -k test_sync` over and over until you receive an exception and the sync fails.
**Expected behavior**
For the sync to not fail on a duplicate key error
**Additional context**
It's easier to reproduce with the refactored pulp_container sync pipeline contained in d801f93ee4f50b08bd9e8c699be668e2ed5732c1
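For illustration, here is the core of the workaround applied by this PR's patch (the attribute name comes from the patch above; the `Prefetch(..., to_attr=...)` mentioned in the comment is an assumption about how that cache gets populated):
```
# Before deriving RemoteArtifacts for a batch, drop any to_attr prefetch cache.
# Such a cache is frozen at query time (https://code.djangoproject.com/ticket/33596),
# so a stale copy can make the stage re-create RemoteArtifacts that already exist,
# which is what raises the duplicate key error above.
for d_content in batch:
    if hasattr(d_content.content, "_remote_artifact_saver_cas"):
        delattr(d_content.content, "_remote_artifact_saver_cas")
```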
| The reason is that the prefetcher provides caching; see this bug: https://code.djangoproject.com/ticket/33596. I learned from @dkliban on Matrix that the last time we had this problem we used a solution [like this](https://github.com/pulp/pulpcore/commit/8dcdcd2dfb6d1f217e9725db9bbead1eef86c079). I'm going to try to put that into a PR now. | 2022-03-23T07:28:40 |
|
pulp/pulpcore | 2,385 | pulp__pulpcore-2385 | [
"2381"
] | 43218f91f0ae66aa0926933966187bff4aeda847 | diff --git a/pulpcore/plugin/stages/artifact_stages.py b/pulpcore/plugin/stages/artifact_stages.py
--- a/pulpcore/plugin/stages/artifact_stages.py
+++ b/pulpcore/plugin/stages/artifact_stages.py
@@ -293,6 +293,11 @@ async def _handle_remote_artifacts(self, batch):
"""
remotes_present = set()
for d_content in batch:
+ # https://code.djangoproject.com/ticket/33596
+ # If the content was pre-fetched previously, remove that cached data, which could be out
+ # of date.
+ if hasattr(d_content.content, "_remote_artifact_saver_cas"):
+ delattr(d_content.content, "_remote_artifact_saver_cas")
for d_artifact in d_content.d_artifacts:
if d_artifact.remote:
remotes_present.add(d_artifact.remote)
| Duplicate Key Exception Raised during Sync related to duplicate RemoteArtifact objects
**Version**
pulpcore 3.18 at 3e5b68377088616c9ec55884f614b00c4a5dfc4e and pulp_container at d801f93ee4f50b08bd9e8c699be668e2ed5732c1
**Describe the bug**
Sync with pulp_container until you receive the duplicate RemoteArtifact exception:
```
pulp [bf9ddb0e6acf4ade98840d4890e01b6e]: pulpcore.tasking.pulpcore_worker:INFO: Task 24e40894-bd11-446e-bccb-08de4bbfdce4 failed (duplicate key value violates unique constraint "core_remoteartifact_content_artifact_id_remote_id_be64c19e_uniq"
DETAIL: Key (content_artifact_id, remote_id)=(6225848a-55ca-4635-862c-e77db4bc98b7, c60e2039-a95e-4480-bbf7-329158ae4b17) already exists.
)
pulp [bf9ddb0e6acf4ade98840d4890e01b6e]: pulpcore.tasking.pulpcore_worker:INFO: File "/home/vagrant/devel/pulpcore/pulpcore/tasking/pulpcore_worker.py", line 442, in _perform_task
result = func(*args, **kwargs)
File "/home/vagrant/devel/pulp_container/pulp_container/app/tasks/synchronize.py", line 44, in synchronize
return dv.create()
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/declarative_version.py", line 161, in create
loop.run_until_complete(pipeline)
File "/usr/lib64/python3.10/asyncio/base_events.py", line 641, in run_until_complete
return future.result()
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/api.py", line 225, in create_pipeline
await asyncio.gather(*futures)
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/api.py", line 43, in __call__
await self.run()
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/artifact_stages.py", line 279, in run
await self._handle_remote_artifacts(batch)
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/artifact_stages.py", line 399, in _handle_remote_artifacts
await sync_to_async(RemoteArtifact.objects.bulk_create)(ras_to_create_ordered)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/asgiref/sync.py", line 414, in __call__
ret = await asyncio.wait_for(future, timeout=None)
File "/usr/lib64/python3.10/asyncio/tasks.py", line 408, in wait_for
return await fut
File "/usr/lib64/python3.10/concurrent/futures/thread.py", line 58, in run
result = self.fn(*self.args, **self.kwargs)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/asgiref/sync.py", line 455, in thread_handler
return func(*args, **kwargs)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py", line 502, in bulk_create
returned_columns = self._batched_insert(
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py", line 1287, in _batched_insert
inserted_rows.extend(self._insert(
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py", line 1270, in _insert
return query.get_compiler(using=using).execute_sql(returning_fields)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/sql/compiler.py", line 1416, in execute_sql
cursor.execute(sql, params)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 66, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 75, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 79, in _execute
with self.db.wrap_database_errors:
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/utils.py", line 90, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
```
**To Reproduce**
Run the pulp_container sync `pytest -v pulp_container/tests/functional/ -k test_sync` over and over until you receive an exception and the sync fails.
**Expected behavior**
For the sync to not fail on a duplicate key error
**Additional context**
It's easier to reproduce with the refactored pulp_container sync pipeline contained in d801f93ee4f50b08bd9e8c699be668e2ed5732c1
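For context on the `bulk_create` frame in the traceback, this is the general shape of that call; a sketch, assuming `RemoteArtifact` is importable from `pulpcore.plugin.models`:
```
from asgiref.sync import sync_to_async

from pulpcore.plugin.models import RemoteArtifact


async def save_remote_artifacts(remote_artifacts):
    # The Django ORM is synchronous, so the async stage wraps it with asgiref's
    # sync_to_async, which runs bulk_create in a worker thread; an IntegrityError
    # raised there (like the duplicate key above) propagates back to the task.
    await sync_to_async(RemoteArtifact.objects.bulk_create)(remote_artifacts)
```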
| The reason is that the prefetcher provides caching; see this bug: https://code.djangoproject.com/ticket/33596. I learned from @dkliban on Matrix that the last time we had this problem we used a solution [like this](https://github.com/pulp/pulpcore/commit/8dcdcd2dfb6d1f217e9725db9bbead1eef86c079). I'm going to try to put that into a PR now. | 2022-03-23T07:28:53 |
|
pulp/pulpcore | 2,386 | pulp__pulpcore-2386 | [
"2381"
] | c71a83544d421c3b363e3f31ed6be52b72884222 | diff --git a/pulpcore/plugin/stages/artifact_stages.py b/pulpcore/plugin/stages/artifact_stages.py
--- a/pulpcore/plugin/stages/artifact_stages.py
+++ b/pulpcore/plugin/stages/artifact_stages.py
@@ -293,6 +293,11 @@ async def _handle_remote_artifacts(self, batch):
"""
remotes_present = set()
for d_content in batch:
+ # https://code.djangoproject.com/ticket/33596
+ # If the content was pre-fetched previously, remove that cached data, which could be out
+ # of date.
+ if hasattr(d_content.content, "_remote_artifact_saver_cas"):
+ delattr(d_content.content, "_remote_artifact_saver_cas")
for d_artifact in d_content.d_artifacts:
if d_artifact.remote:
remotes_present.add(d_artifact.remote)
| Duplicate Key Exception Raised during Sync related to duplicate RemoteArtifact objects
**Version**
pulpcore 3.18 at 3e5b68377088616c9ec55884f614b00c4a5dfc4e and pulp_container at d801f93ee4f50b08bd9e8c699be668e2ed5732c1
**Describe the bug**
Sync with pulp_container until you receive the duplicate RemoteArtifact exception:
```
pulp [bf9ddb0e6acf4ade98840d4890e01b6e]: pulpcore.tasking.pulpcore_worker:INFO: Task 24e40894-bd11-446e-bccb-08de4bbfdce4 failed (duplicate key value violates unique constraint "core_remoteartifact_content_artifact_id_remote_id_be64c19e_uniq"
DETAIL: Key (content_artifact_id, remote_id)=(6225848a-55ca-4635-862c-e77db4bc98b7, c60e2039-a95e-4480-bbf7-329158ae4b17) already exists.
)
pulp [bf9ddb0e6acf4ade98840d4890e01b6e]: pulpcore.tasking.pulpcore_worker:INFO: File "/home/vagrant/devel/pulpcore/pulpcore/tasking/pulpcore_worker.py", line 442, in _perform_task
result = func(*args, **kwargs)
File "/home/vagrant/devel/pulp_container/pulp_container/app/tasks/synchronize.py", line 44, in synchronize
return dv.create()
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/declarative_version.py", line 161, in create
loop.run_until_complete(pipeline)
File "/usr/lib64/python3.10/asyncio/base_events.py", line 641, in run_until_complete
return future.result()
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/api.py", line 225, in create_pipeline
await asyncio.gather(*futures)
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/api.py", line 43, in __call__
await self.run()
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/artifact_stages.py", line 279, in run
await self._handle_remote_artifacts(batch)
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/artifact_stages.py", line 399, in _handle_remote_artifacts
await sync_to_async(RemoteArtifact.objects.bulk_create)(ras_to_create_ordered)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/asgiref/sync.py", line 414, in __call__
ret = await asyncio.wait_for(future, timeout=None)
File "/usr/lib64/python3.10/asyncio/tasks.py", line 408, in wait_for
return await fut
File "/usr/lib64/python3.10/concurrent/futures/thread.py", line 58, in run
result = self.fn(*self.args, **self.kwargs)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/asgiref/sync.py", line 455, in thread_handler
return func(*args, **kwargs)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py", line 502, in bulk_create
returned_columns = self._batched_insert(
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py", line 1287, in _batched_insert
inserted_rows.extend(self._insert(
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py", line 1270, in _insert
return query.get_compiler(using=using).execute_sql(returning_fields)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/sql/compiler.py", line 1416, in execute_sql
cursor.execute(sql, params)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 66, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 75, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 79, in _execute
with self.db.wrap_database_errors:
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/utils.py", line 90, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
```
**To Reproduce**
Run the pulp_container sync `pytest -v pulp_container/tests/functional/ -k test_sync` over and over until you receive an exception and the sync fails.
**Expected behavior**
For the sync to not fail on a duplicate key error
**Additional context**
It's easier to reproduce with the refactored pulp_container sync pipeline contained in d801f93ee4f50b08bd9e8c699be668e2ed5732c1
| The reason is that the prefetcher provides caching; see this bug: https://code.djangoproject.com/ticket/33596. I learned from @dkliban on Matrix that the last time we had this problem we used a solution [like this](https://github.com/pulp/pulpcore/commit/8dcdcd2dfb6d1f217e9725db9bbead1eef86c079). I'm going to try to put that into a PR now. | 2022-03-23T07:29:05 |
|
pulp/pulpcore | 2,387 | pulp__pulpcore-2387 | [
"2069"
] | 702350e8697302fe59fde80ddbe5932548ff3033 | diff --git a/pulpcore/plugin/stages/api.py b/pulpcore/plugin/stages/api.py
--- a/pulpcore/plugin/stages/api.py
+++ b/pulpcore/plugin/stages/api.py
@@ -179,12 +179,12 @@ def __str__(self):
return "[{id}] {name}".format(id=id(self), name=self.__class__.__name__)
-async def create_pipeline(stages, maxsize=1000):
+async def create_pipeline(stages, maxsize=1):
"""
A coroutine that builds a Stages API linear pipeline from the list `stages` and runs it.
Each stage is an instance of a class derived from :class:`pulpcore.plugin.stages.Stage` that
- implements the :meth:`run` coroutine. This coroutine reads asyncromously either from the
+ implements the :meth:`run` coroutine. This coroutine reads asynchronously either from the
`items()` iterator or the `batches()` iterator and outputs the items with `put()`. Here is an
example of the simplest stage that only passes data::
@@ -196,7 +196,7 @@ async def run(self):
Args:
stages (list of coroutines): A list of Stages API compatible coroutines.
maxsize (int): The maximum amount of items a queue between two stages should hold. Optional
- and defaults to 100.
+ and defaults to 1.
Returns:
A single coroutine that can be used to run, wait, or cancel the entire pipeline with.
| Reduce memory usage of the pipeline
Author: @bmbouter (bmbouter)
Redmine Issue: 9635, https://pulp.plan.io/issues/9635
---
## Motivation
It would be nice if users could specify a desired maximum amount of RAM to be used during sync. For example, a user can say I only want 1500 MB of RAM to be used max.
## What is already in place
The stages pipeline restricts memory usage by only allowing 1000 declarative content objects between each stage (so for 8-9 stages that's 8000-9000 declarative content objects). This happens [here](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L217).
Interestingly the docstring says this defaults to 100, but it seems to actually be 1000!
Also the stages perform batching, so they will only take in a limited number of items (the batch size). That happens [with minsize](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L84).
## Why this isn't enough
These are count-based mechanisms and don't correspond to actual MB or GB of memory used. Some content units vary a lot in how much memory each DeclarativeContent object takes up.
Another lesser problem is that it doesn't help plugin writers restrict their usage of memory in FirstStage.
## Idea
Add a new param called `max_mb` to base Remote, which defaults to None. If specified, the user will be specifying the desired maximum MB used by process syncing.
Have the queues between the stages, and the batcher implementation, both check the total memory the current process is using and poll with asyncio.sleep() until it goes down. This should keep the maximum amount used by all objects roughly at that number.
## Details
Introduce a new `MBSizeQueue` which is a wrapper around `asyncio.Queue` used today. It will have the same `put()` call, only wait if the amount of memory in use is greater than the remote is configured for.
Then introduce the same memory checking feature in the batcher. I'm not completely sure this second part is needed though.
We have to be very careful not to deadlock with this feature. For example, we have to account for the base case where even a single item is larger than the memory desired. Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock.....
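A minimal sketch of the `MBSizeQueue` idea (assumptions: the budget comes from the proposed `max_mb` value, current RSS is read from `/proc/self/statm` on Linux, and the not-empty check is one way to avoid the deadlock case above):
```
import asyncio
import resource


def _rss_mb():
    # Current resident set size (Linux); resource.getrusage only reports the peak.
    with open("/proc/self/statm") as fp:
        resident_pages = int(fp.read().split()[1])
    return resident_pages * resource.getpagesize() / (1024 * 1024)


class MBSizeQueue(asyncio.Queue):
    """asyncio.Queue that delays put() while the process is over a memory budget."""

    def __init__(self, *args, max_mb=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.max_mb = max_mb  # e.g. the proposed Remote.max_mb value

    async def put(self, item):
        # If the queue is already empty, accept the item anyway so a single
        # oversized unit can still flow through the pipeline instead of deadlocking.
        while self.max_mb is not None and _rss_mb() > self.max_mb and not self.empty():
            await asyncio.sleep(0.5)
        await super().put(item)
```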
| From: @dralley (dalley)
Date: 2021-12-13T16:54:22Z
---
> Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock.....
This is true but:
1) the metadata is very messed up - 13 million duplicate "files" are listed for that package.
2) the postgresql maximum insert size is 1gb - so a single content unit exceeding that is a hard limitation regardless of anything else we do. Luckily I think that would be much much less frequent than an entire batch exceeding that limit, which I don't think we've ever seen happen either (but still a theoretical issue).
@bmbouter I finally have a PR for the pulp_rpm plugin that reduces the frontend memory consumption to ~50 megabytes. Unfortunately the whole sync (for a large repo like RHEL7) is still using ~1.8gb
So this effort is necessary to make any further improvements.
@dralley great I'm hoping to PoC it this week. It was further down on my list of things to do, but now it's close to the top. I'll take as assigned.
[I implemented the version](https://github.com/pulp/pulpcore/compare/main...bmbouter:memory-reduction-PoC?expand=1) which would have a setting and if the memory in-use was over that setting it would disable batching on the queues and let the queue drain.
It did significantly limit memory usage, but the runtime slows down hugely. For example, consider a pulp_rpm sync of EL7 with `on_demand=True`, so no artifact downloading. Runtime-wise the regular one took 777 seconds, versus 7671 seconds with the single-item pipeline. Here are two graphs of the memory usage. This was configured with `TASK_MEMORY_LIMIT = 128`, which put it into effect almost immediately for all tests run.
Note the y-axis is MB of the process and the x-axis is from every call to put() after first-stage. So by the time the first call occurs first stage has already allocated a lot of memory.
# RPM Regular, No Memory Restriction

# RPM With Memory Restriction

Here are some results for pulp_file which also had a roughly 10x slowdown.
# pulp_file Regular, No Memory Restriction

# pulp_file With Memory Restriction

@bmbouter What about some intermediate value, like a limit of 1.2gb?
So I don't think we should merge or pursue this experiment, mainly for two reasons.
1) It doesn't actually limit the memory to the desired amount and it's off by a lot. In all tests it was supposed to hold to 128 M, so if you had the OOM set to 140 in all cases your OOM would still occur.
2) It runs crazy crazy slow
Here are a few ideas though:
1) We should add a debugging feature to produce memory usage graphs for every task run and dump them into a directory. This way users and developers can accurately record how their memory performance is doing. An adaptation of this PR can add this feature.
2) The batch sizes are really what is important. In my case my memory restriction was reducing them to 0, and by default they are 1000, so what if we made this a configurable option on BaseRemote allowing users to specify a different default.
3) We should try to optimize first stage more using cpu and memory profilers, but this will have to be a plugin-by-plugin effort.
> @bmbouter What about some intermediate value, like a limit of 1.2gb?
It takes so long to run (like hours) with memory restriction I don't know what effective memory it would be using. What do you think about my next-steps proposals [here](https://github.com/pulp/pulpcore/issues/2069#issuecomment-1065199607)?
> It doesn't actually limit the memory to the desired amount and it's off by a lot. In all tests it was supposed to hold to 128 M, so if you had the OOM set to 140 in all cases your OOM would still occur.
That's expected with the current frontend, all the work to reduce the amount of memory used by the rpm plugin frontend isn't merged yet. Probably 128mb would still be too low, but it could probably stay under 400mb.
> It runs crazy crazy slow
It sounds like you're disabling batching entirely though, what if we didn't go quite that far? Dynamically sized batches I mean. But perhaps that's not very easy with the current architecture.
Looking at the PR, all of the queues are swapped with memory restricting ones, when it seems like really the only one that needs to be done that way is the first stage.
>We should add a debugging feature to produce memory usage graphs for every task run and dump them into a directory. This way users and developers can accurate record how their memory performance is doing. An adaptation of this PR can add this feature.
How much overhead is there for the memory tracking alone? Is that using the pympler method or just a general "how much memory is this process using" graph?
>The batch sizes are really what is important. In my case my memory restriction was reducing them to 0, and by default they are 1000, so what if we made this a configurable option on BaseRemote allowing users to specify a different default.
I'm uncomfortable making an implementation detail part of our public API like that. I could agree with putting it in `settings.py`
Also could you write up how you were using scalene as a knowledge sharing exercise? It's really useful to be able to reference things like that months from now :)
In the PR, only one queue is swapped with the memory restricting one, the first one just after first stage. It does disable batching of the entire pipeline though. I've kind of determined that if we memory throttle we need to also disable batching because throttling and using batching to wait for additional put() calls would likely deadlock. I think what this experiment does show is that changing the batch size does influence the memory usage numbers significantly, which is good.
I'm hesitant to try to dynamically change the batch size because very likely the algorithm is going to continue to reduce it because memory seems to still grow, just more slowly, and then you're in the super-slow no-batching situation again. I'd rather the user (or administrator) tell us and for all tasks or this specific task use this value and otherwise default to what it was doing before 1000.
The memory tracking was just periodic calls to the resource library, which I think just reads the values from the kernel. I ended up not even using pympler.
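For reference, that kind of `resource` call looks roughly like this (ru_maxrss is in KB on Linux; the exact call used in the PoC may differ):
```
import resource

# Peak resident set size of the current process, as reported by the kernel.
peak_kb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
print(f"peak RSS: {peak_kb / 1024:.1f} MB")
```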
The scalene stuff was pretty easy: in my dev env I `pip install scalene` and then `pclean`. I `pstart` all the processes, but then I shut down the workers with `sudo systemctl stop pulpcore-worker@1 pulpcore-worker@2`. I then use the script below as the scalene entry point by calling it with: `/usr/local/lib/pulp/bin/scalene --html --outfile profiling_output.html scalene_pulpcore_worker.py`.
### scalene_pulpcore_worker.py
```
import logging
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pulpcore.app.settings")
django.setup()
from pulpcore.tasking.pulpcore_worker import NewPulpWorker # noqa: E402: module level not at top
_logger = logging.getLogger(__name__)
NewPulpWorker().run_forever()
```
Then run a task, but you'll see tracebacks after the tasking code completes, it's the RuntimeError [here](https://github.com/pulp/pulpcore/blob/e3421edb42d669d2f0cd91403803892f37710dd9/pulpcore/app/models/task.py#L217). Also you'll see scalene not provide any actual tasking code in its analysis. It does profile the startup of the worker nicely though.
@pulp/core FYI ^
Here's the story I have on recording the memory: https://github.com/pulp/pulpcore/issues/2329
Here's the story I wrote on making batch_size a BaseRemote param https://github.com/pulp/pulpcore/issues/2332
Maybe we're looking at this wrong.
Right now the abridged version of what we do is:
* parse the full metadata, put it in the pipeline
* try to find any that already exist, swap the content models
* save the ones whose internal state says they aren't saved yet
But the entire pipeline basically holds these fat, completely populated models objects from beginning to end.
What if we merge the `QueryExistingContents` and `ContentSaver` stages, promote them as far upward in the pipeline as we can, and then only hold on to the absolute minimum amount of data possible after those stages are finished? The amount of memory would then roughly scale with the contents of the first one or two queues, rather than the number of stages. Then the whole pipeline could stay full.
The pipeline also handles downloading, creating of remote artifacts, resolving content futures, and the searching+reading of `Artifact` objects from the db too.
It does allow a lot of objects to be loaded into the pipeline: with 1000 in each queue, plus maybe 500 in the batch iterator (holding objects within each stage), that's like 6K objects in the pipeline that I've seen.
Overall, reducing the number of items will reduce the memory footprint, but it will also slow down the sync with more RTTs to the database and possibly less parallel downloading. I think ultimately the batcher is in real control of that (500 items each), so what we could consider doing is reducing the queue size from 1000 to 500, because there isn't performance to be gained from allowing more items in the queues between the stages than each stage can handle in one batch. This would reduce the number of items by roughly 40% (quick guess). That would be Pareto efficient because runtime should be unaffected, yet the memory amount reduced.
Maybe we could benchmark a pre-post change after I finish my memory recording PR that needs revision. | 2022-03-23T11:13:41 |
|
pulp/pulpcore | 2,388 | pulp__pulpcore-2388 | [
"2069"
] | a785b8b853fca4eaaf8714a92a359f40035f9a81 | diff --git a/pulpcore/plugin/stages/api.py b/pulpcore/plugin/stages/api.py
--- a/pulpcore/plugin/stages/api.py
+++ b/pulpcore/plugin/stages/api.py
@@ -179,12 +179,12 @@ def __str__(self):
return "[{id}] {name}".format(id=id(self), name=self.__class__.__name__)
-async def create_pipeline(stages, maxsize=1000):
+async def create_pipeline(stages, maxsize=1):
"""
A coroutine that builds a Stages API linear pipeline from the list `stages` and runs it.
Each stage is an instance of a class derived from :class:`pulpcore.plugin.stages.Stage` that
- implements the :meth:`run` coroutine. This coroutine reads asyncromously either from the
+ implements the :meth:`run` coroutine. This coroutine reads asynchronously either from the
`items()` iterator or the `batches()` iterator and outputs the items with `put()`. Here is an
example of the simplest stage that only passes data::
@@ -196,7 +196,7 @@ async def run(self):
Args:
stages (list of coroutines): A list of Stages API compatible coroutines.
maxsize (int): The maximum amount of items a queue between two stages should hold. Optional
- and defaults to 100.
+ and defaults to 1.
Returns:
A single coroutine that can be used to run, wait, or cancel the entire pipeline with.
| Reduce memory usage of the pipeline
Author: @bmbouter (bmbouter)
Redmine Issue: 9635, https://pulp.plan.io/issues/9635
---
## Motivation
It would be nice if users could specify a desired maximum amount of RAM to be used during sync. For example, a user can say I only want 1500 MB of RAM to be used max.
## What is already in place
The stages pipeline restricts memory usage by only allowing 1000 declarative content objects between each stage (so for 8-9 stages that's 8000-9000 declarative content objects). This happens [here](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L217).
Interestingly the docstring says this defaults to 100, but it seems to actually be 1000!
Also the stages perform batching, so they will only take in a limited number of items (the batch size). That happens [with minsize](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L84).
## Why this isn't enough
These are count-based mechanisms and don't correspond to actual MB or GB of memory used. Some content units vary a lot in how much memory each DeclarativeContent object takes up.
Another lesser problem is that it doesn't help plugin writers restrict their usage of memory in FirstStage.
## Idea
Add a new param called `max_mb` to base Remote, which defaults to None. If specified, the user will be specifying the desired maximum MB used by process syncing.
Have the queues between the stages, and the batcher implementation, both check the total memory the current process is using and poll with asyncio.sleep() until it goes down. This should keep the maximum amount used by all objects roughly at that number.
## Details
Introduce a new `MBSizeQueue` which is a wrapper around `asyncio.Queue` used today. It will have the same `put()` call, only wait if the amount of memory in use is greater than the remote is configured for.
Then introduce the same memory checking feature in the batcher. I'm not completely sure this second part is needed though.
We have to be very careful not to deadlock with this feature. For example, we have to account for the base case where even a single item is larger than the memory desired. Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock.....
| From: @dralley (dalley)
Date: 2021-12-13T16:54:22Z
---
> Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock.....
This is true but:
1) the metadata is very messed up - 13 million duplicate "files" are listed for that package.
2) the postgresql maximum insert size is 1gb - so a single content unit exceeding that is a hard limitation regardless of anything else we do. Luckily I think that would be much much less frequent than an entire batch exceeding that limit, which I don't think we've ever seen happen either (but still a theoretical issue).
@bmbouter I finally have a PR for the pulp_rpm plugin that reduces the frontend memory consumption to ~50 megabytes. Unfortunately the whole sync (for a large repo like RHEL7) is still using ~1.8gb
So this effort is necessary to make any further improvements.
@dralley great I'm hoping to PoC it this week. It was further down on my list of things to do, but now it's close to the top. I'll take as assigned.
[I implemented the version](https://github.com/pulp/pulpcore/compare/main...bmbouter:memory-reduction-PoC?expand=1) which would have a setting and if the memory in-use was over that setting it would disable batching on the queues and let the queue drain.
It did significantly limit memory usage, but the runtime slows down hugely. For example, consider a pulp_rpm sync of EL7 with `on_demand=True`, so no artifact downloading. Runtime-wise the regular one took 777 seconds, versus 7671 seconds with the single-item pipeline. Here are two graphs of the memory usage. This was configured with `TASK_MEMORY_LIMIT = 128`, which put it into effect almost immediately for all tests run.
Note the y-axis is MB of the process and the x-axis is from every call to put() after first-stage. So by the time the first call occurs first stage has already allocated a lot of memory.
# RPM Regular, No Memory Restriction

# RPM With Memory Restriction

Here are some results for pulp_file which also had a roughly 10x slowdown.
# pulp_file Regular, No Memory Restriction

# pulp_file With Memory Restriction

@bmbouter What about some intermediate value, like a limit of 1.2gb?
So I don't think we should merge or pursue this experiment, mainly for two reasons.
1) It doesn't actually limit the memory to the desired amount and it's off by a lot. In all tests it was supposed to hold to 128 M, so if you had the OOM set to 140 in all cases your OOM would still occur.
2) It runs crazy crazy slow
Here are a few ideas though:
1) We should add a debugging feature to produce memory usage graphs for every task run and dump them into a directory. This way users and developers can accurately record how their memory performance is doing. An adaptation of this PR can add this feature.
2) The batch sizes are really what is important. In my case my memory restriction was reducing them to 0, and by default they are 1000, so what if we made this a configurable option on BaseRemote allowing users to specify a different default.
3) We should try to optimize first stage more using cpu and memory profilers, but this will have to be a plugin-by-plugin effort.
> @bmbouter What about some intermediate value, like a limit of 1.2gb?
It takes so long to run (like hours) with memory restriction I don't know what effective memory it would be using. What do you think about my next-steps proposals [here](https://github.com/pulp/pulpcore/issues/2069#issuecomment-1065199607)?
> It doesn't actually limit the memory to the desired amount and it's off by a lot. In all tests it was supposed to hold to 128 M, so if you had the OOM set to 140 in all cases your OOM would still occur.
That's expected with the current frontend, all the work to reduce the amount of memory used by the rpm plugin frontend isn't merged yet. Probably 128mb would still be too low, but it could probably stay under 400mb.
> It runs crazy crazy slow
It sounds like you're disabling batching entirely though, what if we didn't go quite that far? Dynamically sized batches I mean. But perhaps that's not very easy with the current architecture.
Looking at the PR, all of the queues are swapped with memory restricting ones, when it seems like really the only one that needs to be done that way is the first stage.
>We should add a debugging feature to produce memory usage graphs for every task run and dump them into a directory. This way users and developers can accurate record how their memory performance is doing. An adaptation of this PR can add this feature.
How much overhead is there for the memory tracking alone? Is that using the pympler method or just a general "how much memory is this process using" graph?
>The batch sizes are really what is important. In my case my memory restriction was reducing them to 0, and by default they are 1000, so what if we made this a configurable option on BaseRemote allowing users to specify a different default.
I'm uncomfortable making an implementation detail part of our public API like that. I could agree with putting it in `settings.py`
Also could you write up how you were using scalene as a knowledge sharing exercise? It's really useful to be able to reference things like that months from now :)
In the PR, only one queue is swapped with the memory restricting one, the first one just after first stage. It does disable batching of the entire pipeline though. I've kind of determined that if we memory throttle we need to also disable batching because throttling and using batching to wait for additional put() calls would likely deadlock. I think what this experiment does show is that changing the batch size does influence the memory usage numbers significantly, which is good.
I'm hesitant to try to dynamically change the batch size because very likely the algorithm is going to continue to reduce it because memory seems to still grow, just more slowly, and then you're in the super-slow no-batching situation again. I'd rather the user (or administrator) tell us and for all tasks or this specific task use this value and otherwise default to what it was doing before 1000.
The memory tracking was just periodic calls to the resource library, which I think just reads the values from the kernel. I ended up not even using pympler.
The scalene stuff was pretty easy: in my dev env I `pip install scalene` and then `pclean`. I `pstart` all the processes, but then I shut down the workers with `sudo systemctl stop pulpcore-worker@1 pulpcore-worker@2`. I then use the script below as the scalene entry point by calling it with: `/usr/local/lib/pulp/bin/scalene --html --outfile profiling_output.html scalene_pulpcore_worker.py`.
### scalene_pulpcore_worker.py
```
import logging
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pulpcore.app.settings")
django.setup()
from pulpcore.tasking.pulpcore_worker import NewPulpWorker # noqa: E402: module level not at top
_logger = logging.getLogger(__name__)
NewPulpWorker().run_forever()
```
Then run a task, but you'll see tracebacks after the tasking code completes, it's the RuntimeError [here](https://github.com/pulp/pulpcore/blob/e3421edb42d669d2f0cd91403803892f37710dd9/pulpcore/app/models/task.py#L217). Also you'll see scalene not provide any actual tasking code in its analysis. It does profile the startup of the worker nicely though.
@pulp/core FYI ^
Here's the story I have on recording the memory: https://github.com/pulp/pulpcore/issues/2329
Here's the story I wrote on making batch_size a BaseRemote param https://github.com/pulp/pulpcore/issues/2332
Maybe we're looking at this wrong.
Right now the abridged version of what we do is:
* parse the full metadata, put it in the pipeline
* try to find any that already exist, swap the content models
* save the ones whose internal state says they aren't saved yet
But the entire pipeline basically holds these fat, completely populated models objects from beginning to end.
What if we merge the `QueryExistingContents` and `ContentSaver` stages, promote them as far upward in the pipeline as we can, and then only hold on to the absolute minimum amount of data possible after those stages are finished? The amount of memory would then roughly scale with the contents of the first one or two queues, rather than the number of stages. Then the whole pipeline could stay full.
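A rough sketch of what such a merged stage could look like, using only the documented `Stage` API (`batches()` / `put()`); the class name and the commented steps are hypothetical, not existing pulpcore code:
```
from pulpcore.plugin.stages import Stage


class QueryAndSaveContent(Stage):
    """Hypothetical merged QueryExistingContents + ContentSaver stage."""

    async def run(self):
        async for batch in self.batches():
            # 1) one query to swap in content that already exists in the DB
            # 2) bulk-save whatever is still unsaved
            # 3) drop everything later stages don't need before passing items on
            for d_content in batch:
                await self.put(d_content)
```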
The pipeline also handles downloading, creating of remote artifacts, resolving content futures, and the searching+reading of `Artifact` objects from the db too.
It does allow a lot of objects to be loaded into the pipeline: with 1000 in each queue, plus maybe 500 in the batch iterator (holding objects within each stage), that's like 6K objects in the pipeline that I've seen.
Overall, reducing the number of items will reduce the memory footprint, but it will also slow down the sync with more RTTs to the database and possibly less parallel downloading. I think ultimately the batcher is in real control of that (500 items each), so what we could consider doing is reducing the queue size from 1000 to 500, because there isn't performance to be gained from allowing more items in the queues between the stages than each stage can handle in one batch. This would reduce the number of items by roughly 40% (quick guess). That would be Pareto efficient because runtime should be unaffected, yet the memory amount reduced.
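For scale, the knob being discussed is the `maxsize` argument of `create_pipeline` shown in this PR's patch, so halving the queue depth would be a one-argument change wherever the pipeline is built (sketch):
```
from pulpcore.plugin.stages import create_pipeline


async def run_pipeline(stages):
    # Cap each inter-stage queue at 500 items instead of the old default of 1000.
    await create_pipeline(stages, maxsize=500)
```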
Maybe we could benchmark a pre-post change after I finish my memory recording PR that needs revision. | 2022-03-23T11:13:42 |
|
pulp/pulpcore | 2,389 | pulp__pulpcore-2389 | [
"2069"
] | e302891263d62fadd0fefe156db8addaa2fc1bcf | diff --git a/pulpcore/plugin/stages/api.py b/pulpcore/plugin/stages/api.py
--- a/pulpcore/plugin/stages/api.py
+++ b/pulpcore/plugin/stages/api.py
@@ -179,12 +179,12 @@ def __str__(self):
return "[{id}] {name}".format(id=id(self), name=self.__class__.__name__)
-async def create_pipeline(stages, maxsize=1000):
+async def create_pipeline(stages, maxsize=1):
"""
A coroutine that builds a Stages API linear pipeline from the list `stages` and runs it.
Each stage is an instance of a class derived from :class:`pulpcore.plugin.stages.Stage` that
- implements the :meth:`run` coroutine. This coroutine reads asyncromously either from the
+ implements the :meth:`run` coroutine. This coroutine reads asynchronously either from the
`items()` iterator or the `batches()` iterator and outputs the items with `put()`. Here is an
example of the simplest stage that only passes data::
@@ -196,7 +196,7 @@ async def run(self):
Args:
stages (list of coroutines): A list of Stages API compatible coroutines.
maxsize (int): The maximum amount of items a queue between two stages should hold. Optional
- and defaults to 100.
+ and defaults to 1.
Returns:
A single coroutine that can be used to run, wait, or cancel the entire pipeline with.
| Reduce memory usage of the pipeline
Author: @bmbouter (bmbouter)
Redmine Issue: 9635, https://pulp.plan.io/issues/9635
---
## Motivation
It would be nice if users could specify a desired maximum amount of RAM to be used during sync. For example, a user can say I only want 1500 MB of RAM to be used max.
## What is already in place
The stages pipeline restricts memory usage by only allowing 1000 declarative content objects between each stage (so for 8-9 stages that's 8000-9000 declarative content objects). This happens [here](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L217).
Interestingly the docstring says this defaults to 100, but it seems to actually be 1000!
Also the stages perform batching, so they will only take in a limited number of items (the batch size). That happens [with minsize](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L84).
## Why this isn't enough
These are count-based mechanisms and don't correspond to actual MB or GB of memory used. Some content units vary a lot in how much memory each DeclarativeContent object takes up.
Another lesser problem is that it doesn't help plugin writers restrict their usage of memory in FirstStage.
## Idea
Add a new param called `max_mb` to base Remote, which defaults to None. If specified, the user will be specifying the desired maximum MB used by process syncing.
Have the queues between the stages, and the batcher implementation, both check the total memory the current process is using and poll with asyncio.sleep() until it goes down. This should keep the maximum amount used by all objects roughly at that number.
## Details
Introduce a new `MBSizeQueue` which is a wrapper around `asyncio.Queue` used today. It will have the same `put()` call, only wait if the amount of memory in use is greater than the remote is configured for.
Then introduce the same memory checking feature in the batcher. I'm not completely sure this second part is needed though.
We have to be very careful not to deadlock with this feature. For example, we have to account for the base case where even a single item is larger than the memory desired. Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock.....
| From: @dralley (dalley)
Date: 2021-12-13T16:54:22Z
---
> Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock.....
This is true but:
1) the metadata is very messed up - 13 million duplicate "files" are listed for that package.
2) the postgresql maximum insert size is 1gb - so a single content unit exceeding that is a hard limitation regardless of anything else we do. Luckily I think that would be much much less frequent than an entire batch exceeding that limit, which I don't think we've ever seen happen either (but still a theoretical issue).
@bmbouter I finally have a PR for the pulp_rpm plugin that reduces the frontend memory consumption to ~50 megabytes. Unfortunately the whole sync (for a large repo like RHEL7) is still using ~1.8gb
So this effort is necessary to make any further improvements.
@dralley great I'm hoping to PoC it this week. It was further down on my list of things to do, but now it's close to the top. I'll take as assigned.
[I implemented the version](https://github.com/pulp/pulpcore/compare/main...bmbouter:memory-reduction-PoC?expand=1) which would have a setting and if the memory in-use was over that setting it would disable batching on the queues and let the queue drain.
It did significantly limit memory usage, but the runtime slows down hugely. For example, consider a pulp_rpm sync of EL7 with `on_demand=True`, so no artifact downloading. Runtime-wise the regular one took 777 seconds, versus 7671 seconds with the single-item pipeline. Here are two graphs of the memory usage. This was configured with `TASK_MEMORY_LIMIT = 128`, which put it into effect almost immediately for all tests run.
Note the y-axis is MB of the process and the x-axis is from every call to put() after first-stage. So by the time the first call occurs first stage has already allocated a lot of memory.
# RPM Regular, No Memory Restriction

# RPM With Memory Restriction

Here are some results for pulp_file which also had a roughly 10x slowdown.
# pulp_file Regular, No Memory Restriction

# pulp_file With Memory Restriction

@bmbouter What about some intermediate value, like a limit of 1.2gb?
So I don't think we should merge or pursue this experiment, mainly for two reasons.
1) It doesn't actually limit the memory to the desired amount and it's off by a lot. In all tests it was supposed to hold to 128 M, so if you had the OOM set to 140 in all cases your OOM would still occur.
2) It runs crazy crazy slow
Here are a few ideas though:
1) We should add a debugging feature to produce memory usage graphs for every task run and dump them into a directory. This way users and developers can accurately record how their memory performance is doing. An adaptation of this PR can add this feature.
2) The batch sizes are really what is important. In my case my memory restriction was reducing them to 0, and by default they are 1000, so what if we made this a configurable option on BaseRemote allowing users to specify a different default.
3) We should try to optimize first stage more using cpu and memory profilers, but this will have to be a plugin-by-plugin effort.
> @bmbouter What about some intermediate value, like a limit of 1.2gb?
It takes so long to run (like hours) with memory restriction I don't know what effective memory it would be using. What do you think about my next-steps proposals [here](https://github.com/pulp/pulpcore/issues/2069#issuecomment-1065199607)?
> It doesn't actually limit the memory to the desired amount and it's off by a lot. In all tests it was supposed to hold to 128 M, so if you had the OOM set to 140 in all cases your OOM would still occur.
That's expected with the current frontend, all the work to reduce the amount of memory used by the rpm plugin frontend isn't merged yet. Probably 128mb would still be too low, but it could probably stay under 400mb.
> It runs crazy crazy slow
It sounds like you're disabling batching entirely though, what if we didn't go quite that far? Dynamically sized batches I mean. But perhaps that's not very easy with the current architecture.
Looking at the PR, all of the queues are swapped with memory restricting ones, when it seems like really the only one that needs to be done that way is the first stage.
>We should add a debugging feature to produce memory usage graphs for every task run and dump them into a directory. This way users and developers can accurate record how their memory performance is doing. An adaptation of this PR can add this feature.
How much overhead is there for the memory tracking alone? Is that using the pympler method or just a general "how much memory is this process using" graph?
>The batch sizes are really what is important. In my case my memory restriction was reducing them to 0, and by default they are 1000, so what if we made this a configurable option on BaseRemote allowing users to specify a different default.
I'm uncomfortable making an implementation detail part of our public API like that. I could agree with putting it in `settings.py`
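A sketch of the `settings.py` route; the setting name is hypothetical:
```
# settings.py (hypothetical name):
SYNC_BATCH_SIZE = 500

# where batches are built, falling back to today's default of 1000 when unset:
from django.conf import settings

batch_size = getattr(settings, "SYNC_BATCH_SIZE", 1000)
```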
Also could you write up how you were using scalene as a knowledge sharing exercise? It's really useful to be able to reference things like that months from now :)
In the PR, only one queue is swapped with the memory restricting one, the first one just after first stage. It does disable batching of the entire pipeline though. I've kind of determined that if we memory throttle we need to also disable batching because throttling and using batching to wait for additional put() calls would likely deadlock. I think what this experiment does show is that changing the batch size does influence the memory usage numbers significantly, which is good.
I'm hesitant to try to dynamically change the batch size because very likely the algorithm is going to continue to reduce it because memory seems to still grow, just more slowly, and then you're in the super-slow no-batching situation again. I'd rather the user (or administrator) tell us and for all tasks or this specific task use this value and otherwise default to what it was doing before 1000.
The memory tracking was just periodic calls to the resource library, which I think just reads the values from the kernel. I ended up not even using pympler.
The scalene stuff was pretty easy: in my dev env I `pip install scalene` and then `pclean`. I `pstart` all the processes, but then I shut down the workers with `sudo systemctl stop pulpcore-worker@1 pulpcore-worker@2`. I then use the script below as the scalene entry point by calling it with: `/usr/local/lib/pulp/bin/scalene --html --outfile profiling_output.html scalene_pulpcore_worker.py`.
### scalene_pulpcore_worker.py
```
import logging
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pulpcore.app.settings")
django.setup()
from pulpcore.tasking.pulpcore_worker import NewPulpWorker # noqa: E402: module level not at top
_logger = logging.getLogger(__name__)
NewPulpWorker().run_forever()
```
Then run a task, but you'll see tracebacks after the tasking code completes, it's the RuntimeError [here](https://github.com/pulp/pulpcore/blob/e3421edb42d669d2f0cd91403803892f37710dd9/pulpcore/app/models/task.py#L217). Also you'll see scalene not provide any actual tasking code in its analysis. It does profile the startup of the worker nicely though.
@pulp/core FYI ^
Here's the story I have on recording the memory: https://github.com/pulp/pulpcore/issues/2329
Here's the story I wrote on making batch_size a BaseRemote param https://github.com/pulp/pulpcore/issues/2332
Maybe we're looking at this wrong.
Right now the abridged version of what we do is:
* parse the full metadata, put it in the pipeline
* try to find any that already exist, swap the content models
* save the ones whose internal state says they aren't saved yet
But the entire pipeline basically holds these fat, completely populated models objects from beginning to end.
What if we merge the `QueryExistingContents` and `ContentSaver` stages, promote them as far upward in the pipeline as we can, and then only hold on to the absolute minimum amount of data possible after those stages are finished? The amount of memory would then roughly scale with the contents of the first one or two queues, rather than the number of stages. Then the whole pipeline could stay full.
The pipeline also handles downloading, creating of remote artifacts, resolving content futures, and the searching+reading of `Artifact` objects from the db too.
It does allow a lot of objects to be loaded into the pipeline: with 1000 in each queue, plus maybe 500 in the batch iterator (holding objects within each stage), that's like 6K objects in the pipeline that I've seen.
Overall, reducing the number of items will reduce the memory footprint, but it will also slow down the sync with more RTTs to the database and possibly less parallel downloading. I think ultimately the batcher is in real control of that (500 items each), so what we could consider doing is reducing the queue size from 1000 to 500, because there isn't performance to be gained from allowing more items in the queues between the stages than each stage can handle in one batch. This would reduce the number of items by roughly 40% (quick guess). That would be Pareto efficient because runtime should be unaffected, yet the memory amount reduced.
Maybe we could benchmark a pre-post change after I finish my memory recording PR that needs revision. | 2022-03-23T11:13:42 |
|
pulp/pulpcore | 2,390 | pulp__pulpcore-2390 | [
"2069"
] | 272ef6139310fa9de1e6204d62e372226d2a5fe0 | diff --git a/pulpcore/plugin/stages/api.py b/pulpcore/plugin/stages/api.py
--- a/pulpcore/plugin/stages/api.py
+++ b/pulpcore/plugin/stages/api.py
@@ -179,12 +179,12 @@ def __str__(self):
return "[{id}] {name}".format(id=id(self), name=self.__class__.__name__)
-async def create_pipeline(stages, maxsize=1000):
+async def create_pipeline(stages, maxsize=1):
"""
A coroutine that builds a Stages API linear pipeline from the list `stages` and runs it.
Each stage is an instance of a class derived from :class:`pulpcore.plugin.stages.Stage` that
- implements the :meth:`run` coroutine. This coroutine reads asyncromously either from the
+ implements the :meth:`run` coroutine. This coroutine reads asynchronously either from the
`items()` iterator or the `batches()` iterator and outputs the items with `put()`. Here is an
example of the simplest stage that only passes data::
@@ -196,7 +196,7 @@ async def run(self):
Args:
stages (list of coroutines): A list of Stages API compatible coroutines.
maxsize (int): The maximum amount of items a queue between two stages should hold. Optional
- and defaults to 100.
+ and defaults to 1.
Returns:
A single coroutine that can be used to run, wait, or cancel the entire pipeline with.
| Reduce memory usage of the pipeline
Author: @bmbouter (bmbouter)
Redmine Issue: 9635, https://pulp.plan.io/issues/9635
---
## Motivation
It would be nice if users could specify a desired maximum amount of RAM to be used during sync. For example, a user can say I only want 1500 MB of RAM to be used max.
## What is already in place
The stages pipeline restricts memory usage by only allowing 1000 declarative content objects between each stage (so for 8-9 stages that's 8000-9000 declarative content objects). This happens [here](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L217).
Interestingly the docstring says this defaults to 100, but it seems to actually be 1000!
Also the stages perform batching, so they will only taking in a limited number of items (the batch size). That happens [with minsize](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L84).
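As a generic illustration of that back-pressure mechanism, here is a toy example of bounded `asyncio` queues (this is not pulpcore's actual Stage API; the `producer`/`consumer` names are made up for illustration):
```
import asyncio

async def producer(queue):
    for i in range(10):
        await queue.put(i)  # blocks once `maxsize` items are already waiting
    await queue.put(None)   # sentinel so the consumer knows to stop

async def consumer(queue):
    while (item := await queue.get()) is not None:
        await asyncio.sleep(0.1)  # stand-in for per-item work
        print(f"processed {item}")

async def main():
    # only 2 items may sit between the two stages at any time
    queue = asyncio.Queue(maxsize=2)
    await asyncio.gather(producer(queue), consumer(queue))

asyncio.run(main())
```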
## Why this isn't enough
These are count-based mechanisms and don't correspond to actual MB or GB of memory used. Content units vary a lot in how much memory each DeclarativeContent object takes up.
Another lesser problem is that it doesn't help plugin writers restrict their usage of memory in FirstStage.
## Idea
Add a new param called `max_mb` to the base Remote, which defaults to None. If specified, the user is specifying the desired maximum MB used by the syncing process.
Have the queues between the stages, and the batcher implementation, both check the total memory the current process is using and poll with asyncio.sleep() until it goes down. This should keep the maximum amount used by all objects roughly at that number.
## Details
Introduce a new `MBSizeQueue`, which is a wrapper around the `asyncio.Queue` used today. It will have the same `put()` call, but it will only wait if the amount of memory in use is greater than what the remote is configured for.
Then introduce the same memory checking feature in the batcher. I'm not completely sure this second part is needed though.
We have to be very careful not to deadlock with this feature. For example, we have to account for the base case where even a single item is larger than the memory desired. Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock.....
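A minimal sketch of what such an `MBSizeQueue` could look like (my illustration, not a committed design): it reads the current RSS from `/proc/self/statm`, so it is Linux-only, and it never waits while the queue is empty so that a single oversized item can still flow through instead of deadlocking.
```
import asyncio
import os

_PAGE_SIZE = os.sysconf("SC_PAGE_SIZE")

def _rss_mb():
    # current resident set size of this process, read from /proc (Linux-only)
    with open("/proc/self/statm") as f:
        resident_pages = int(f.read().split()[1])
    return resident_pages * _PAGE_SIZE / (1024 * 1024)

class MBSizeQueue(asyncio.Queue):
    """asyncio.Queue whose put() also waits while the process is over a MB limit."""

    def __init__(self, *args, max_mb=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.max_mb = max_mb

    async def put(self, item):
        if self.max_mb is not None:
            # Don't wait on an empty queue, otherwise one huge item could deadlock us.
            while _rss_mb() > self.max_mb and not self.empty():
                await asyncio.sleep(0.5)
        await super().put(item)
```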
| From: @dralley (dalley)
Date: 2021-12-13T16:54:22Z
---
> Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock.....
This is true but:
1) the metadata is very messed up - 13 million duplicate "files" are listed for that package.
2) the postgresql maximum insert size is 1gb - so a single content unit exceeding that is a hard limitation regardless of anything else we do. Luckily I think that would be much much less frequent than an entire batch exceeding that limit, which I don't think we've ever seen happen either (but still a theoretical issue).
@bmbouter I finally have a PR for the pulp_rpm plugin that reduces the frontend memory consumption to ~50 megabytes. Unfortunately the whole sync (for a large repo like RHEL7) is still using ~1.8gb
So this effort is necessary to make any further improvements.
@dralley great I'm hoping to PoC it this week. It was further down on my list of things to do, but now it's close to the top. I'll take as assigned.
[I implemented the version](https://github.com/pulp/pulpcore/compare/main...bmbouter:memory-reduction-PoC?expand=1) which would have a setting and if the memory in-use was over that setting it would disable batching on the queues and let the queue drain.
It did significantly limit memory usage, but the runtime slows down hugely. For example, consider a pulp_rpm sync of EL7 with `on_demand=True`, so no artifact downloading. Runtime-wise the regular one took 777 seconds, then 7671 seconds with the single-item pipeline. Here are two graphs of the memory usage. This was configured with `TASK_MEMORY_LIMIT = 128`, which put it into effect almost immediately for all tests run.
Note the y-axis is MB of the process and the x-axis is from every call to put() after first-stage. So by the time the first call occurs first stage has already allocated a lot of memory.
# RPM Regular, No Memory Restriction

# RPM With Memory Restriction

Here are some results for pulp_file which also had a roughly 10x slowdown.
# pulp_file Regular, No Memory Restriction

# pulp_file With Memory Restriction

@bmbouter What about some intermediate value, like a limit of 1.2gb?
So I don't think we should merge or pursue this experiment, mainly for two reasons.
1) It doesn't actually limit the memory to the desired amount and it's off by a lot. In all tests it was supposed to hold to 128 M, so if you had the OOM set to 140 in all cases your OOM would still occur.
2) It runs crazy crazy slow
Here are a few ideas though:
1) We should add a debugging feature to produce memory usage graphs for every task run and dump them into a directory. This way users and developers can accurately record how their memory performance is doing. An adaptation of this PR can add this feature.
2) The batch sizes are really what is important. In my case my memory restriction was reducing them to 0, and by default they are 1000, so what if we made this a configurable option on BaseRemote allowing users to specify a different default.
3) We should try to optimize first stage more using cpu and memory profilers, but this will have to be a plugin-by-plugin effort.
> @bmbouter What about some intermediate value, like a limit of 1.2gb?
It takes so long to run (like hours) with memory restriction I don't know what effective memory it would be using. What do you think about my next-steps proposals [here](https://github.com/pulp/pulpcore/issues/2069#issuecomment-1065199607)?
> It doesn't actually limit the memory to the desired amount and it's off by a lot. In all tests it was supposed to hold to 128 M, so if you had the OOM set to 140 in all cases your OOM would still occur.
That's expected with the current frontend, all the work to reduce the amount of memory used by the rpm plugin frontend isn't merged yet. Probably 128mb would still be too low, but it could probably stay under 400mb.
> It runs crazy crazy slow
It sounds like you're disabling batching entirely though, what if we didn't go quite that far? Dynamically sized batches I mean. But perhaps that's not very easy with the current architecture.
Looking at the PR, all of the queues are swapped with memory restricting ones, when it seems like really the only one that needs to be done that way is the first stage.
>We should add a debugging feature to produce memory usage graphs for every task run and dump them into a directory. This way users and developers can accurate record how their memory performance is doing. An adaptation of this PR can add this feature.
How much overhead is there for the memory tracking alone? Is that using the pympler method or just a general "how much memory is this process using" graph?
>The batch sizes are really what is important. In my case my memory restriction was reducing them to 0, and by default they are 1000, so what if we made this a configurable option on BaseRemote allowing users to specify a different default.
I'm uncomfortable making an implementation detail part of our public API like that. I could agree with putting it in `settings.py`
Also could you write up how you were using scalene as a knowledge sharing exercise? It's really useful to be able to reference things like that months from now :)
In the PR, only one queue is swapped with the memory restricting one, the first one just after first stage. It does disable batching of the entire pipeline though. I've kind of determined that if we memory throttle we need to also disable batching because throttling and using batching to wait for additional put() calls would likely deadlock. I think what this experiment does show is that changing the batch size does influence the memory usage numbers significantly, which is good.
I'm hesitant to try to dynamically change the batch size because very likely the algorithm is going to continue to reduce it because memory seems to still grow, just more slowly, and then you're in the super-slow no-batching situation again. I'd rather the user (or administrator) tell us and for all tasks or this specific task use this value and otherwise default to what it was doing before 1000.
The memory tracking was just periodic calls to the resource library, which I think just reads the values from the kernel. I ended up not even using pympler.
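For reference, a sampling call of that sort with the stdlib `resource` module looks roughly like this (my sketch; note that `ru_maxrss` is the peak resident set size and is reported in kilobytes on Linux):
```
import resource

def peak_rss_mb():
    # peak resident set size of this process; kilobytes on Linux
    return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024

print(f"peak RSS so far: {peak_rss_mb():.1f} MB")
```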
The scalene stuff was pretty easy: in my dev env I `pip install scalene` and then `pclean`. I `pstart` all the processes, but then I shut down the workers with `sudo systemctl stop pulpcore-worker@1 pulpcore-worker@2`. I then use the script below as the scalene entry point by calling it with: `/usr/local/lib/pulp/bin/scalene --html --outfile profiling_output.html scalene_pulpcore_worker.py`.
### scalene_pulpcore_worker.py
```
import logging
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pulpcore.app.settings")
django.setup()
from pulpcore.tasking.pulpcore_worker import NewPulpWorker # noqa: E402: module level not at top
_logger = logging.getLogger(__name__)
NewPulpWorker().run_forever()
```
Then run a task, but you'll see tracebacks after the tasking code completes, it's the RuntimeError [here](https://github.com/pulp/pulpcore/blob/e3421edb42d669d2f0cd91403803892f37710dd9/pulpcore/app/models/task.py#L217). Also you'll see scalene not provide any actual tasking code in its analysis. It does profile the startup of the worker nicely though.
@pulp/core FYI ^
Here's the story I have on recording the memory: https://github.com/pulp/pulpcore/issues/2329
Here's the story I wrote on making batch_size a BaseRemote param https://github.com/pulp/pulpcore/issues/2332
Maybe we're looking at this wrong.
Right now the abridged version of what we do is:
* parse the full metadata, put it in the pipeline
* try to find any that already exist, swap the content models
* save the ones whose internal state says they aren't saved yet
But the entire pipeline basically holds these fat, completely populated model objects from beginning to end.
What if we merge the `QueryExistingContents` and `ContentSaver` stages, promote them as far upwards in the pipeline as we can, and then hold on to only the absolute minimum amount of data after those stages are finished? That way the amount of memory would roughly scale with the contents of the first one or two queues, rather than with the number of stages. Then the whole pipeline could stay full.
The pipeline also handles downloading, creation of remote artifacts, resolving content futures, and the searching and reading of `Artifact` objects from the db.
It does allow a lot of objects to be loaded into the pipeline: with 1000 in each queue, plus maybe 500 in the batch iterator (holding objects within each stage), that's something like 6K objects in the pipeline that I've seen.
Overall, reducing the number of items will reduce the memory footprint, but it will also slow down the sync with more RTTs to the database and possibly less parallel downloading. I think ultimately the batcher is really in control of that (500 items each), so what we could consider doing is reducing the queue size from 1000 to 500, because there is no performance gained from allowing more items in the queues between the stages than each stage can handle in one batch. This would reduce the number of items by roughly 40% (quick guess). That would be Pareto efficient because runtime should be unaffected, yet the memory usage would be reduced.
Maybe we could benchmark a pre-post change after I finish my memory recording PR that needs revision. | 2022-03-23T11:13:43 |
|
pulp/pulpcore | 2,391 | pulp__pulpcore-2391 | [
"2069"
] | c862cf4057f0d99b02e94a78aaedcf9eba723675 | diff --git a/pulpcore/plugin/stages/api.py b/pulpcore/plugin/stages/api.py
--- a/pulpcore/plugin/stages/api.py
+++ b/pulpcore/plugin/stages/api.py
@@ -179,12 +179,12 @@ def __str__(self):
return "[{id}] {name}".format(id=id(self), name=self.__class__.__name__)
-async def create_pipeline(stages, maxsize=1000):
+async def create_pipeline(stages, maxsize=1):
"""
A coroutine that builds a Stages API linear pipeline from the list `stages` and runs it.
Each stage is an instance of a class derived from :class:`pulpcore.plugin.stages.Stage` that
- implements the :meth:`run` coroutine. This coroutine reads asyncromously either from the
+ implements the :meth:`run` coroutine. This coroutine reads asynchronously either from the
`items()` iterator or the `batches()` iterator and outputs the items with `put()`. Here is an
example of the simplest stage that only passes data::
@@ -196,7 +196,7 @@ async def run(self):
Args:
stages (list of coroutines): A list of Stages API compatible coroutines.
maxsize (int): The maximum amount of items a queue between two stages should hold. Optional
- and defaults to 100.
+ and defaults to 1.
Returns:
A single coroutine that can be used to run, wait, or cancel the entire pipeline with.
| Reduce memory usage of the pipeline
Author: @bmbouter (bmbouter)
Redmine Issue: 9635, https://pulp.plan.io/issues/9635
---
## Motivation
It would be nice if users could specify a desired maximum amount of RAM to be used during sync. For example, a user can say I only want 1500 MB of RAM to be used max.
## What is already in place
The stages pipeline restricts memory usage by only allowing 1000 declarative content objects between each stage (so for 8-9 stages that's 8000-9000 declarative content objects). This happens [here](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L217).
Interestingly, the docstring says this defaults to 100, but it seems to actually be 1000!
Also, the stages perform batching, so they will only take in a limited number of items (the batch size). That happens [with minsize](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L84).
## Why this isn't enough
These are count-based mechanisms and don't correspond to actual MB or GB of memory used. Content units vary a lot in how much memory each DeclarativeContent object takes up.
Another lesser problem is that it doesn't help plugin writers restrict their usage of memory in FirstStage.
## Idea
Add a new param called `max_mb` to the base Remote, which defaults to None. If specified, the user is specifying the desired maximum MB used by the syncing process.
Have the queues between the stages, and the batcher implementation, both check the total memory the current process is using and poll with asyncio.sleep() until it goes down. This should keep the maximum amount used by all objects roughly at that number.
## Details
Introduce a new `MBSizeQueue`, which is a wrapper around the `asyncio.Queue` used today. It will have the same `put()` call, but it will only wait if the amount of memory in use is greater than what the remote is configured for.
Then introduce the same memory checking feature in the batcher. I'm not completely sure this second part is needed though.
We have to be very careful not to deadlock with this feature. For example, we have to account for the base case where even a single item is larger than the memory desired. Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock.....
| From: @dralley (dalley)
Date: 2021-12-13T16:54:22Z
---
> Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock.....
This is true but:
1) the metadata is very messed up - 13 million duplicate "files" are listed for that package.
2) the postgresql maximum insert size is 1gb - so a single content unit exceeding that is a hard limitation regardless of anything else we do. Luckily I think that would be much much less frequent than an entire batch exceeding that limit, which I don't think we've ever seen happen either (but still a theoretical issue).
@bmbouter I finally have a PR for the pulp_rpm plugin that reduces the frontend memory consumption to ~50 megabytes. Unfortunately the whole sync (for a large repo like RHEL7) is still using ~1.8gb
So this effort is necessary to make any further improvements.
@dralley great I'm hoping to PoC it this week. It was further down on my list of things to do, but now it's close to the top. I'll take as assigned.
[I implemented the version](https://github.com/pulp/pulpcore/compare/main...bmbouter:memory-reduction-PoC?expand=1) which would have a setting and if the memory in-use was over that setting it would disable batching on the queues and let the queue drain.
It did significantly limit memory usage, but the runtime slows down hugely. For example, consider a pulp_rpm sync of EL7 with `on_demand=True`, so no artifact downloading. Runtime-wise the regular one took 777 seconds, then 7671 seconds with the single-item pipeline. Here are two graphs of the memory usage. This was configured with `TASK_MEMORY_LIMIT = 128`, which put it into effect almost immediately for all tests run.
Note the y-axis is MB of the process and the x-axis is from every call to put() after first-stage. So by the time the first call occurs first stage has already allocated a lot of memory.
# RPM Regular, No Memory Restriction

# RPM With Memory Restriction

Here are some results for pulp_file which also had a roughly 10x slowdown.
# pulp_file Regular, No Memory Restriction

# pulp_file With Memory Restriction

@bmbouter What about some intermediate value, like a limit of 1.2gb?
So I don't think we should merge or pursue this experiment, mainly for two reasons.
1) It doesn't actually limit the memory to the desired amount and it's off by a lot. In all tests it was supposed to hold to 128 M, so if you had the OOM set to 140 in all cases your OOM would still occur.
2) It runs crazy crazy slow
Here are a few ideas though:
1) We should add a debugging feature to produce memory usage graphs for every task run and dump them into a directory. This way users and developers can accurately record how their memory performance is doing. An adaptation of this PR can add this feature.
2) The batch sizes are really what is important. In my case my memory restriction was reducing them to 0, and by default they are 1000, so what if we made this a configurable option on BaseRemote allowing users to specify a different default.
3) We should try to optimize first stage more using cpu and memory profilers, but this will have to be a plugin-by-plugin effort.
> @bmbouter What about some intermediate value, like a limit of 1.2gb?
It takes so long to run (like hours) with memory restriction I don't know what effective memory it would be using. What do you think about my next-steps proposals [here](https://github.com/pulp/pulpcore/issues/2069#issuecomment-1065199607)?
> It doesn't actually limit the memory to the desired amount and it's off by a lot. In all tests it was supposed to hold to 128 M, so if you had the OOM set to 140 in all cases your OOM would still occur.
That's expected with the current frontend, all the work to reduce the amount of memory used by the rpm plugin frontend isn't merged yet. Probably 128mb would still be too low, but it could probably stay under 400mb.
> It runs crazy crazy slow
It sounds like you're disabling batching entirely though, what if we didn't go quite that far? Dynamically sized batches I mean. But perhaps that's not very easy with the current architecture.
Looking at the PR, all of the queues are swapped with memory restricting ones, when it seems like really the only one that needs to be done that way is the first stage.
>We should add a debugging feature to produce memory usage graphs for every task run and dump them into a directory. This way users and developers can accurate record how their memory performance is doing. An adaptation of this PR can add this feature.
How much overhead is there for the memory tracking alone? Is that using the pympler method or just a general "how much memory is this process using" graph?
>The batch sizes are really what is important. In my case my memory restriction was reducing them to 0, and by default they are 1000, so what if we made this a configurable option on BaseRemote allowing users to specify a different default.
I'm uncomfortable making an implementation detail part of our public API like that. I could agree with putting it in `settings.py`
Also could you write up how you were using scalene as a knowledge sharing exercise? It's really useful to be able to reference things like that months from now :)
In the PR, only one queue is swapped with the memory restricting one, the first one just after first stage. It does disable batching of the entire pipeline though. I've kind of determined that if we memory throttle we need to also disable batching because throttling and using batching to wait for additional put() calls would likely deadlock. I think what this experiment does show is that changing the batch size does influence the memory usage numbers significantly, which is good.
I'm hesitant to try to dynamically change the batch size because very likely the algorithm is going to continue to reduce it because memory seems to still grow, just more slowly, and then you're in the super-slow no-batching situation again. I'd rather the user (or administrator) tell us and for all tasks or this specific task use this value and otherwise default to what it was doing before 1000.
The memory tracking was just periodic calls to the resource library, which I think just reads the values from the kernel. I ended up not even using pympler.
The scalene stuff was pretty easy: in my dev env I `pip install scalene` and then `pclean`. I `pstart` all the processes, but then I shut down the workers with `sudo systemctl stop pulpcore-worker@1 pulpcore-worker@2`. I then use the script below as the scalene entry point by calling it with: `/usr/local/lib/pulp/bin/scalene --html --outfile profiling_output.html scalene_pulpcore_worker.py`.
### scalene_pulpcore_worker.py
```
import logging
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pulpcore.app.settings")
django.setup()
from pulpcore.tasking.pulpcore_worker import NewPulpWorker # noqa: E402: module level not at top
_logger = logging.getLogger(__name__)
NewPulpWorker().run_forever()
```
Then run a task, but you'll see tracebacks after the tasking code completes, it's the RuntimeError [here](https://github.com/pulp/pulpcore/blob/e3421edb42d669d2f0cd91403803892f37710dd9/pulpcore/app/models/task.py#L217). Also you'll see scalene not provide any actual tasking code in its analysis. It does profile the startup of the worker nicely though.
@pulp/core FYI ^
Here's the story I have on recording the memory: https://github.com/pulp/pulpcore/issues/2329
Here's the story I wrote on making batch_size a BaseRemote param https://github.com/pulp/pulpcore/issues/2332
Maybe we're looking at this wrong.
Right now the abridged version of what we do is:
* parse the full metadata, put it in the pipeline
* try to find any that already exist, swap the content models
* save the ones whose internal state says they aren't saved yet
But the entire pipeline basically holds these fat, completely populated model objects from beginning to end.
What if we merge the `QueryExistingContents` and `ContentSaver` stages, promote them as far upwards in the pipeline as we can, and then hold on to only the absolute minimum amount of data after those stages are finished? That way the amount of memory would roughly scale with the contents of the first one or two queues, rather than with the number of stages. Then the whole pipeline could stay full.
The pipeline also handles downloading, creation of remote artifacts, resolving content futures, and the searching and reading of `Artifact` objects from the db.
It does allow a lot of objects to be loaded into the pipeline: with 1000 in each queue, plus maybe 500 in the batch iterator (holding objects within each stage), that's something like 6K objects in the pipeline that I've seen.
Overall, reducing the number of items will reduce the memory footprint, but it will also slow down the sync with more RTTs to the database and possibly less parallel downloading. I think ultimately the batcher is really in control of that (500 items each), so what we could consider doing is reducing the queue size from 1000 to 500, because there is no performance gained from allowing more items in the queues between the stages than each stage can handle in one batch. This would reduce the number of items by roughly 40% (quick guess). That would be Pareto efficient because runtime should be unaffected, yet the memory usage would be reduced.
Maybe we could benchmark a pre-post change after I finish my memory recording PR that needs revision. | 2022-03-23T11:13:57 |
|
pulp/pulpcore | 2,392 | pulp__pulpcore-2392 | [
"2381"
] | c862cf4057f0d99b02e94a78aaedcf9eba723675 | diff --git a/pulpcore/plugin/stages/artifact_stages.py b/pulpcore/plugin/stages/artifact_stages.py
--- a/pulpcore/plugin/stages/artifact_stages.py
+++ b/pulpcore/plugin/stages/artifact_stages.py
@@ -284,12 +284,17 @@ def _handle_remote_artifacts(self, batch):
"""
remotes_present = set()
for d_content in batch:
+ # The fix described here solves two sepearate bugs:
+ #
# If the attribute is set in a previous batch on the very first item in this batch, the
# rest of the items in this batch will not get the attribute set during prefetch.
# https://code.djangoproject.com/ticket/32089
+
+ # https://code.djangoproject.com/ticket/33596
+ # If the content was pre-fetched previously, remove that cached data, which could be out
+ # of date.
if hasattr(d_content.content, "_remote_artifact_saver_cas"):
delattr(d_content.content, "_remote_artifact_saver_cas")
-
for d_artifact in d_content.d_artifacts:
if d_artifact.remote:
remotes_present.add(d_artifact.remote)
| Duplicate Key Exception Raised during Sync related to duplicate RemoteArtifact objects
**Version**
pulpcore 3.18 at 3e5b68377088616c9ec55884f614b00c4a5dfc4e and pulp_container at d801f93ee4f50b08bd9e8c699be668e2ed5732c1
**Describe the bug**
Sync with pulp_container until you receive the duplicate RemoteArtifact exception:
```
pulp [bf9ddb0e6acf4ade98840d4890e01b6e]: pulpcore.tasking.pulpcore_worker:INFO: Task 24e40894-bd11-446e-bccb-08de4bbfdce4 failed (duplicate key value violates unique constraint "core_remoteartifact_content_artifact_id_remote_id_be64c19e_uniq"
DETAIL: Key (content_artifact_id, remote_id)=(6225848a-55ca-4635-862c-e77db4bc98b7, c60e2039-a95e-4480-bbf7-329158ae4b17) already exists.
)
pulp [bf9ddb0e6acf4ade98840d4890e01b6e]: pulpcore.tasking.pulpcore_worker:INFO: File "/home/vagrant/devel/pulpcore/pulpcore/tasking/pulpcore_worker.py", line 442, in _perform_task
result = func(*args, **kwargs)
File "/home/vagrant/devel/pulp_container/pulp_container/app/tasks/synchronize.py", line 44, in synchronize
return dv.create()
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/declarative_version.py", line 161, in create
loop.run_until_complete(pipeline)
File "/usr/lib64/python3.10/asyncio/base_events.py", line 641, in run_until_complete
return future.result()
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/api.py", line 225, in create_pipeline
await asyncio.gather(*futures)
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/api.py", line 43, in __call__
await self.run()
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/artifact_stages.py", line 279, in run
await self._handle_remote_artifacts(batch)
File "/home/vagrant/devel/pulpcore/pulpcore/plugin/stages/artifact_stages.py", line 399, in _handle_remote_artifacts
await sync_to_async(RemoteArtifact.objects.bulk_create)(ras_to_create_ordered)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/asgiref/sync.py", line 414, in __call__
ret = await asyncio.wait_for(future, timeout=None)
File "/usr/lib64/python3.10/asyncio/tasks.py", line 408, in wait_for
return await fut
File "/usr/lib64/python3.10/concurrent/futures/thread.py", line 58, in run
result = self.fn(*self.args, **self.kwargs)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/asgiref/sync.py", line 455, in thread_handler
return func(*args, **kwargs)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py", line 502, in bulk_create
returned_columns = self._batched_insert(
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py", line 1287, in _batched_insert
inserted_rows.extend(self._insert(
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py", line 1270, in _insert
return query.get_compiler(using=using).execute_sql(returning_fields)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/sql/compiler.py", line 1416, in execute_sql
cursor.execute(sql, params)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 66, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 75, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 79, in _execute
with self.db.wrap_database_errors:
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/utils.py", line 90, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
```
**To Reproduce**
Run the pulp_container sync `pytest -v pulp_container/tests/functional/ -k test_sync` over and over until you receive an exception and the sync fails.
**Expected behavior**
For the sync to not fail on a duplicate key error
**Additional context**
It's easier to reproduce with the refactored pulp_container sync pipeline contained in d801f93ee4f50b08bd9e8c699be668e2ed5732c1
| The reason is that the prefetcher provides caching, see this bug: https://code.djangoproject.com/ticket/33596. I learned from @dkliban on Matrix that the last time we had this problem we used a solution [like this](https://github.com/pulp/pulpcore/commit/8dcdcd2dfb6d1f217e9725db9bbead1eef86c079). I'm going to try to put that into a PR now. | 2022-03-23T11:38:58 |
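For illustration, the pitfall and the workaround look roughly like this (a sketch based on the patch above and the Django ticket; `contentartifact_set` is assumed to be the reverse accessor here, and `_remote_artifact_saver_cas` is the `to_attr` used in the patch):
```
from django.db.models import Prefetch, prefetch_related_objects

def prefetch_batch(content_units):
    for unit in content_units:
        # A unit seen in an earlier batch may carry a stale cached prefetch
        # (https://code.djangoproject.com/ticket/33596), so drop it first.
        if hasattr(unit, "_remote_artifact_saver_cas"):
            delattr(unit, "_remote_artifact_saver_cas")
    prefetch_related_objects(
        content_units,
        Prefetch("contentartifact_set", to_attr="_remote_artifact_saver_cas"),
    )
```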
|
pulp/pulpcore | 2,408 | pulp__pulpcore-2408 | [
"2403"
] | 286ee72235c6f15221981e5e9e4de592837ed58a | diff --git a/pulpcore/app/migrations/0040_set_admin_is_staff.py b/pulpcore/app/migrations/0040_set_admin_is_staff.py
--- a/pulpcore/app/migrations/0040_set_admin_is_staff.py
+++ b/pulpcore/app/migrations/0040_set_admin_is_staff.py
@@ -1,5 +1,6 @@
# Generated by Django 2.2.13 on 2020-07-01 21:29
+from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import migrations
@@ -19,8 +20,9 @@ class Migration(migrations.Migration):
dependencies = [
('core', '0039_change_download_concurrency'),
+ migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
- migrations.RunPython(allow_admin_as_staff),
+ migrations.RunPython(allow_admin_as_staff, reverse_code=migrations.RunPython.noop, elidable=True),
]
| Migration 0040_set_admin_is_staff.py is missing dependency on user model
| This may have been triggered by the removal of the django admin app, which in turn led to reshuffled migration execution. | 2022-03-24T12:20:21 |
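For context, a generic data-migration skeleton showing why the dependency matters (my illustration, not the actual pulpcore migration body): without `migrations.swappable_dependency(settings.AUTH_USER_MODEL)`, Django is free to schedule the migration before the user table exists.
```
from django.conf import settings
from django.db import migrations

def allow_admin_as_staff(apps, schema_editor):
    # illustrative body: use the historical model and mark the default admin as staff
    User = apps.get_model(settings.AUTH_USER_MODEL)
    User.objects.filter(username="admin").update(is_staff=True)

class Migration(migrations.Migration):
    dependencies = [
        ("core", "0039_change_download_concurrency"),
        # ensures the (possibly swapped) user model's migrations run first
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.RunPython(allow_admin_as_staff, reverse_code=migrations.RunPython.noop),
    ]
```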
|
pulp/pulpcore | 2,411 | pulp__pulpcore-2411 | [
"2403"
] | 4d396142a4177ed3532490604a6664b383ae3234 | diff --git a/pulpcore/app/migrations/0040_set_admin_is_staff.py b/pulpcore/app/migrations/0040_set_admin_is_staff.py
--- a/pulpcore/app/migrations/0040_set_admin_is_staff.py
+++ b/pulpcore/app/migrations/0040_set_admin_is_staff.py
@@ -1,5 +1,6 @@
# Generated by Django 2.2.13 on 2020-07-01 21:29
+from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import migrations
@@ -19,8 +20,9 @@ class Migration(migrations.Migration):
dependencies = [
('core', '0039_change_download_concurrency'),
+ migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
- migrations.RunPython(allow_admin_as_staff),
+ migrations.RunPython(allow_admin_as_staff, reverse_code=migrations.RunPython.noop, elidable=True),
]
| Migration 0040_set_admin_is_staff.py is missing dependency on user model
| This may have been triggered by the removal of the django admin app, which in turn led to reshuffled migration execution. | 2022-03-24T13:26:32 |
|
pulp/pulpcore | 2,412 | pulp__pulpcore-2412 | [
"2403"
] | 970ea8838a4038f3eb170ddb944046b57b54eb37 | diff --git a/pulpcore/app/migrations/0040_set_admin_is_staff.py b/pulpcore/app/migrations/0040_set_admin_is_staff.py
--- a/pulpcore/app/migrations/0040_set_admin_is_staff.py
+++ b/pulpcore/app/migrations/0040_set_admin_is_staff.py
@@ -1,5 +1,6 @@
# Generated by Django 2.2.13 on 2020-07-01 21:29
+from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import migrations
@@ -19,8 +20,9 @@ class Migration(migrations.Migration):
dependencies = [
('core', '0039_change_download_concurrency'),
+ migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
- migrations.RunPython(allow_admin_as_staff),
+ migrations.RunPython(allow_admin_as_staff, reverse_code=migrations.RunPython.noop, elidable=True),
]
| Migration 0040_set_admin_is_staff.py is missing dependency on user model
| This may have been triggered by the removal of the django admin app, which in turn led to reshuffled migration execution. | 2022-03-24T13:26:48 |
|
pulp/pulpcore | 2,421 | pulp__pulpcore-2421 | [
"2420"
] | f0681fea93371c78164a00b688bea40b65093112 | diff --git a/pulpcore/plugin/stages/artifact_stages.py b/pulpcore/plugin/stages/artifact_stages.py
--- a/pulpcore/plugin/stages/artifact_stages.py
+++ b/pulpcore/plugin/stages/artifact_stages.py
@@ -242,12 +242,13 @@ async def run(self):
if d_artifact.artifact._state.adding and not d_artifact.deferred_download:
d_artifact.artifact.file = str(d_artifact.artifact.file)
da_to_save.append(d_artifact)
+ da_to_save_ordered = sorted(da_to_save, key=lambda x: x.artifact.sha256)
if da_to_save:
for d_artifact, artifact in zip(
- da_to_save,
+ da_to_save_ordered,
await sync_to_async(Artifact.objects.bulk_get_or_create)(
- d_artifact.artifact for d_artifact in da_to_save
+ d_artifact.artifact for d_artifact in da_to_save_ordered
),
):
d_artifact.artifact = artifact
| bulk_create() deadlock
**Version**
3.14.12 (reproducible in main)
**Describe the bug**
ArtifactSaver can deadlock on bulk_get_or_create() call in the presence of high concurrency and overlapping content.
Deadlock will be some variant of the following:
```
Exception ignored in thread started by: <function bulk_doit at 0x7f878f388dc0>
Traceback (most recent call last):
File "<ipython-input-1-297483773c63>", line 29, in bulk_doit
File "/home/vagrant/devel/pulpcore/pulpcore/app/models/content.py", line 87, in bulk_get_or_create
return super().bulk_create(objs, batch_size=batch_size)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/query.py", line 502, in bulk_create
returned_columns = self._batched_insert(
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/query.py", line 1287, in _batched_insert
inserted_rows.extend(self._insert(
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/query.py", line 1270, in _insert
return query.get_compiler(using=using).execute_sql(returning_fields)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/sql/compiler.py", line 1416, in execute_sql
cursor.execute(sql, params)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 66, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 75, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/utils.py", line 90, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
django.db.utils.OperationalError: deadlock detected
DETAIL: Process 20326 waits for ShareLock on transaction 4661315; blocked by process 20320.
Process 20320 waits for ShareLock on transaction 4661333; blocked by process 20326.
HINT: See server log for query details.
CONTEXT: while inserting index tuple (1221,10) in relation "core_artifact_pkey"
```
See https://bugzilla.redhat.com/show_bug.cgi?id=2062526 for more details.
**To Reproduce**
Steps to reproduce the behavior:
Reproducing "in the wild" is difficult. The following steps will reliably hit the deadlock:
1) Start with a clean pulp database (at a minimum, none of the Artifacts created at Step 5, below, should exist in the db)
2) make sure you have at least 10 pulpcore-workers up. This command will do the trick:
```
sudo systemctl start pulpcore-worker@1 pulpcore-worker@2 \
pulpcore-worker@3 pulpcore-worker@4 pulpcore-worker@5 pulpcore-worker@6 \
pulpcore-worker@7 pulpcore-worker@8 pulpcore-worker@9 pulpcore-worker@10
```
3) Create "a lot" of Artifacts - 10K is a good choice.
4) Arrange to sort those artifacts by sha256 (the collision point) ascending, descending, and mixed.
5) Arrange to bulk_get_or_create() those three sets, in independent threads, at least 4-5 threads per sort.
You can accomplish this with the following script in pulpcore-manager shell:
```
from pulpcore.app.models.content import Artifact
from hashlib import sha256
import _thread
artifacts = []
print(">>>BUILDING ARTIFACTS...")
for i in range(9999):
filename = f'/tmp/{i:06d}.txt'
with open(filename, "w") as f:
f.write(filename)
with open(filename, "rb") as f:
sum256 = sha256(f.read()).hexdigest()
attrs = {"file": filename, "sha256": sum256, "size": i}
artifacts.append(Artifact (**attrs))
print(">>> SORT ASC...")
artifacts_asc = sorted(artifacts, key=lambda a: a.sha256)
print(">>> SORT DSC...")
artifacts_dsc = sorted(artifacts, key=lambda a: a.sha256, reverse=True)
print(">>> SHUFFLE...")
artifacts_mix = []
for i in range(9999):
artifacts_mix.append(artifacts_asc[i])
artifacts_mix.append(artifacts_dsc[i])
def bulk_doit(art_list):
print(">>> ENTER...")
Artifact.objects.bulk_get_or_create(art_list)
print(">>> EXIT...")
print(">>> STARTING LOOP...")
for i in range(5):
_thread.start_new_thread(bulk_doit, (artifacts_asc,))
_thread.start_new_thread(bulk_doit, (artifacts_dsc,))
_thread.start_new_thread(bulk_doit, (artifacts_mix,))
```
**Expected behavior**
All the threads complete without error.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2062526
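The patch above avoids this by giving every worker the same insert order: when two transactions insert overlapping rows in the same order, the later one simply waits for the earlier one instead of each holding a row/index lock the other still needs. Condensed to a sketch (reusing the `Artifact` model and `bulk_get_or_create()` from the reproducer above):
```
from pulpcore.app.models.content import Artifact

def save_artifacts(artifacts):
    # sort by the unique natural key so concurrent workers acquire locks in the same order
    ordered = sorted(artifacts, key=lambda a: a.sha256)
    return Artifact.objects.bulk_get_or_create(ordered)
```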
| 2022-03-24T20:11:01 |
||
pulp/pulpcore | 2,425 | pulp__pulpcore-2425 | [
"2420"
] | 78f1c2655a0297ef59da1d1cae9e92d507fb0745 | diff --git a/pulpcore/plugin/stages/artifact_stages.py b/pulpcore/plugin/stages/artifact_stages.py
--- a/pulpcore/plugin/stages/artifact_stages.py
+++ b/pulpcore/plugin/stages/artifact_stages.py
@@ -242,12 +242,13 @@ async def run(self):
if d_artifact.artifact._state.adding and not d_artifact.deferred_download:
d_artifact.artifact.file = str(d_artifact.artifact.file)
da_to_save.append(d_artifact)
+ da_to_save_ordered = sorted(da_to_save, key=lambda x: x.artifact.sha256)
if da_to_save:
for d_artifact, artifact in zip(
- da_to_save,
+ da_to_save_ordered,
await sync_to_async(Artifact.objects.bulk_get_or_create)(
- d_artifact.artifact for d_artifact in da_to_save
+ d_artifact.artifact for d_artifact in da_to_save_ordered
),
):
d_artifact.artifact = artifact
| bulk_create() deadlock
**Version**
3.14.12 (reproducible in main)
**Describe the bug**
ArtifactSaver can deadlock on bulk_get_or_create() call in the presence of high concurrency and overlapping content.
Deadlock will be some variant of the following:
```
Exception ignored in thread started by: <function bulk_doit at 0x7f878f388dc0>
Traceback (most recent call last):
File "<ipython-input-1-297483773c63>", line 29, in bulk_doit
File "/home/vagrant/devel/pulpcore/pulpcore/app/models/content.py", line 87, in bulk_get_or_create
return super().bulk_create(objs, batch_size=batch_size)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/query.py", line 502, in bulk_create
returned_columns = self._batched_insert(
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/query.py", line 1287, in _batched_insert
inserted_rows.extend(self._insert(
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/query.py", line 1270, in _insert
return query.get_compiler(using=using).execute_sql(returning_fields)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/sql/compiler.py", line 1416, in execute_sql
cursor.execute(sql, params)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 66, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 75, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/utils.py", line 90, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
django.db.utils.OperationalError: deadlock detected
DETAIL: Process 20326 waits for ShareLock on transaction 4661315; blocked by process 20320.
Process 20320 waits for ShareLock on transaction 4661333; blocked by process 20326.
HINT: See server log for query details.
CONTEXT: while inserting index tuple (1221,10) in relation "core_artifact_pkey"
```
See https://bugzilla.redhat.com/show_bug.cgi?id=2062526 for more details.
**To Reproduce**
Steps to reproduce the behavior:
Reproducing "in the wild" is difficult. The following steps will reliably hit the deadlock:
1) Start with a clean pulp database (at a minimum, none of the Artifacts created at Step 5, below, should exist in the db)
2) make sure you have at least 10 pulpcore-workers up. This command will do the trick:
```
sudo systemctl start pulpcore-worker@1 pulpcore-worker@2 \
pulpcore-worker@3 pulpcore-worker@4 pulpcore-worker@5 pulpcore-worker@6 \
pulpcore-worker@7 pulpcore-worker@8 pulpcore-worker@9 pulpcore-worker@10
```
3) Create "a lot" of Artifacts - 10K is a good choice.
4) Arrange to sort those artifacts by sha256 (the collision point) ascending, descending, and mixed.
5) Arrange to bulk_get_or_create() those three sets, in independent threads, at least 4-5 threads per sort.
You can accomplish this with the following script in pulpcore-manager shell:
```
from pulpcore.app.models.content import Artifact
from hashlib import sha256
import _thread
artifacts = []
print(">>>BUILDING ARTIFACTS...")
for i in range(9999):
filename = f'/tmp/{i:06d}.txt'
with open(filename, "w") as f:
f.write(filename)
with open(filename, "rb") as f:
sum256 = sha256(f.read()).hexdigest()
attrs = {"file": filename, "sha256": sum256, "size": i}
artifacts.append(Artifact (**attrs))
print(">>> SORT ASC...")
artifacts_asc = sorted(artifacts, key=lambda a: a.sha256)
print(">>> SORT DSC...")
artifacts_dsc = sorted(artifacts, key=lambda a: a.sha256, reverse=True)
print(">>> SHUFFLE...")
artifacts_mix = []
for i in range(9999):
artifacts_mix.append(artifacts_asc[i])
artifacts_mix.append(artifacts_dsc[i])
def bulk_doit(art_list):
print(">>> ENTER...")
Artifact.objects.bulk_get_or_create(art_list)
print(">>> EXIT...")
print(">>> STARTING LOOP...")
for i in range(5):
_thread.start_new_thread(bulk_doit, (artifacts_asc,))
_thread.start_new_thread(bulk_doit, (artifacts_dsc,))
_thread.start_new_thread(bulk_doit, (artifacts_mix,))
```
**Expected behavior**
All the threads complete without error.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2062526
| 2022-03-25T10:59:35 |
||
pulp/pulpcore | 2,426 | pulp__pulpcore-2426 | [
"2420"
] | a39bd6b2c65a03a81fece5d4da8d0527c7af6d5b | diff --git a/pulpcore/plugin/stages/artifact_stages.py b/pulpcore/plugin/stages/artifact_stages.py
--- a/pulpcore/plugin/stages/artifact_stages.py
+++ b/pulpcore/plugin/stages/artifact_stages.py
@@ -236,12 +236,13 @@ async def run(self):
if d_artifact.artifact._state.adding and not d_artifact.deferred_download:
d_artifact.artifact.file = str(d_artifact.artifact.file)
da_to_save.append(d_artifact)
+ da_to_save_ordered = sorted(da_to_save, key=lambda x: x.artifact.sha256)
if da_to_save:
for d_artifact, artifact in zip(
- da_to_save,
+ da_to_save_ordered,
await sync_to_async(Artifact.objects.bulk_get_or_create)(
- d_artifact.artifact for d_artifact in da_to_save
+ d_artifact.artifact for d_artifact in da_to_save_ordered
),
):
d_artifact.artifact = artifact
| bulk_create() deadlock
**Version**
3.14.12 (reproducible in main)
**Describe the bug**
ArtifactSaver can deadlock on bulk_get_or_create() call in the presence of high concurrency and overlapping content.
Deadlock will be some variant of the following:
```
Exception ignored in thread started by: <function bulk_doit at 0x7f878f388dc0>
Traceback (most recent call last):
File "<ipython-input-1-297483773c63>", line 29, in bulk_doit
File "/home/vagrant/devel/pulpcore/pulpcore/app/models/content.py", line 87, in bulk_get_or_create
return super().bulk_create(objs, batch_size=batch_size)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/query.py", line 502, in bulk_create
returned_columns = self._batched_insert(
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/query.py", line 1287, in _batched_insert
inserted_rows.extend(self._insert(
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/query.py", line 1270, in _insert
return query.get_compiler(using=using).execute_sql(returning_fields)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/sql/compiler.py", line 1416, in execute_sql
cursor.execute(sql, params)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 66, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 75, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/utils.py", line 90, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
django.db.utils.OperationalError: deadlock detected
DETAIL: Process 20326 waits for ShareLock on transaction 4661315; blocked by process 20320.
Process 20320 waits for ShareLock on transaction 4661333; blocked by process 20326.
HINT: See server log for query details.
CONTEXT: while inserting index tuple (1221,10) in relation "core_artifact_pkey"
```
See https://bugzilla.redhat.com/show_bug.cgi?id=2062526 for more details.
**To Reproduce**
Steps to reproduce the behavior:
Reproducing "in the wild" is difficult. The following steps will reliably hit the deadlock:
1) Start with a clean pulp database (at a minimum, none of the Artifacts created at Step 5, below, should exist in the db)
2) make sure you have at least 10 pulpcore-workers up. This command will do the trick:
```
sudo systemctl start pulpcore-worker@1 pulpcore-worker@2 \
pulpcore-worker@3 pulpcore-worker@4 pulpcore-worker@5 pulpcore-worker@6 \
pulpcore-worker@7 pulpcore-worker@8 pulpcore-worker@9 pulpcore-worker@10
```
3) Create "a lot" of Artifacts - 10K is a good choice.
4) Arrange to sort those artifacts by sha256 (the collision point) ascending, descending, and mixed.
5) Arrange to bulk_get_or_create() those three sets, in independent threads, at least 4-5 threads per sort.
You can accomplish this with the following script in pulpcore-manager shell:
```
from pulpcore.app.models.content import Artifact
from hashlib import sha256
import _thread
artifacts = []
print(">>>BUILDING ARTIFACTS...")
for i in range(9999):
filename = f'/tmp/{i:06d}.txt'
with open(filename, "w") as f:
f.write(filename)
with open(filename, "rb") as f:
sum256 = sha256(f.read()).hexdigest()
attrs = {"file": filename, "sha256": sum256, "size": i}
artifacts.append(Artifact (**attrs))
print(">>> SORT ASC...")
artifacts_asc = sorted(artifacts, key=lambda a: a.sha256)
print(">>> SORT DSC...")
artifacts_dsc = sorted(artifacts, key=lambda a: a.sha256, reverse=True)
print(">>> SHUFFLE...")
artifacts_mix = []
for i in range(9999):
artifacts_mix.append(artifacts_asc[i])
artifacts_mix.append(artifacts_dsc[i])
def bulk_doit(art_list):
print(">>> ENTER...")
Artifact.objects.bulk_get_or_create(art_list)
print(">>> EXIT...")
print(">>> STARTING LOOP...")
for i in range(5):
_thread.start_new_thread(bulk_doit, (artifacts_asc,))
_thread.start_new_thread(bulk_doit, (artifacts_dsc,))
_thread.start_new_thread(bulk_doit, (artifacts_mix,))
```
**Expected behavior**
All the threads complete without error.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2062526
| 2022-03-25T10:59:48 |
||
pulp/pulpcore | 2,427 | pulp__pulpcore-2427 | [
"2420"
] | adff416828f624468ea75827d616787ffb782239 | diff --git a/pulpcore/plugin/stages/artifact_stages.py b/pulpcore/plugin/stages/artifact_stages.py
--- a/pulpcore/plugin/stages/artifact_stages.py
+++ b/pulpcore/plugin/stages/artifact_stages.py
@@ -242,12 +242,13 @@ async def run(self):
if d_artifact.artifact._state.adding and not d_artifact.deferred_download:
d_artifact.artifact.file = str(d_artifact.artifact.file)
da_to_save.append(d_artifact)
+ da_to_save_ordered = sorted(da_to_save, key=lambda x: x.artifact.sha256)
if da_to_save:
for d_artifact, artifact in zip(
- da_to_save,
+ da_to_save_ordered,
await sync_to_async(Artifact.objects.bulk_get_or_create)(
- d_artifact.artifact for d_artifact in da_to_save
+ d_artifact.artifact for d_artifact in da_to_save_ordered
),
):
d_artifact.artifact = artifact
| bulk_create() deadlock
**Version**
3.14.12 (reproducible in main)
**Describe the bug**
ArtifactSaver can deadlock on bulk_get_or_create() call in the presence of high concurrency and overlapping content.
Deadlock will be some variant of the following:
```
Exception ignored in thread started by: <function bulk_doit at 0x7f878f388dc0>
Traceback (most recent call last):
File "<ipython-input-1-297483773c63>", line 29, in bulk_doit
File "/home/vagrant/devel/pulpcore/pulpcore/app/models/content.py", line 87, in bulk_get_or_create
return super().bulk_create(objs, batch_size=batch_size)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/query.py", line 502, in bulk_create
returned_columns = self._batched_insert(
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/query.py", line 1287, in _batched_insert
inserted_rows.extend(self._insert(
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/query.py", line 1270, in _insert
return query.get_compiler(using=using).execute_sql(returning_fields)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/sql/compiler.py", line 1416, in execute_sql
cursor.execute(sql, params)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 66, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 75, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/utils.py", line 90, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
django.db.utils.OperationalError: deadlock detected
DETAIL: Process 20326 waits for ShareLock on transaction 4661315; blocked by process 20320.
Process 20320 waits for ShareLock on transaction 4661333; blocked by process 20326.
HINT: See server log for query details.
CONTEXT: while inserting index tuple (1221,10) in relation "core_artifact_pkey"
```
See https://bugzilla.redhat.com/show_bug.cgi?id=2062526 for more details.
**To Reproduce**
Steps to reproduce the behavior:
Reproducing "in the wild" is difficult. The following steps will reliably hit the deadlock:
1) Start with a clean pulp database (at a minimum, none of the Artifacts created at Step 5, below, should exist in the db)
2) make sure you have at least 10 pulpcore-workers up. This command will do the trick:
```
sudo systemctl start pulpcore-worker@1 pulpcore-worker@2 \
pulpcore-worker@3 pulpcore-worker@4 pulpcore-worker@5 pulpcore-worker@6 \
pulpcore-worker@7 pulpcore-worker@8 pulpcore-worker@9 pulpcore-worker@10
```
3) Create "a lot" of Artifacts - 10K is a good choice.
4) Arrange to sort those artifacts by sha256 (the collision point) ascending, descending, and mixed.
5) Arrange to bulk_get_or_create() those three sets, in independent threads, at least 4-5 threads per sort.
You can accomplish this with the following script in pulpcore-manager shell:
```
from pulpcore.app.models.content import Artifact
from hashlib import sha256
import _thread
artifacts = []
print(">>>BUILDING ARTIFACTS...")
for i in range(9999):
filename = f'/tmp/{i:06d}.txt'
with open(filename, "w") as f:
f.write(filename)
with open(filename, "rb") as f:
sum256 = sha256(f.read()).hexdigest()
attrs = {"file": filename, "sha256": sum256, "size": i}
artifacts.append(Artifact (**attrs))
print(">>> SORT ASC...")
artifacts_asc = sorted(artifacts, key=lambda a: a.sha256)
print(">>> SORT DSC...")
artifacts_dsc = sorted(artifacts, key=lambda a: a.sha256, reverse=True)
print(">>> SHUFFLE...")
artifacts_mix = []
for i in range(9999):
artifacts_mix.append(artifacts_asc[i])
artifacts_mix.append(artifacts_dsc[i])
def bulk_doit(art_list):
print(">>> ENTER...")
Artifact.objects.bulk_get_or_create(art_list)
print(">>> EXIT...")
print(">>> STARTING LOOP...")
for i in range(5):
_thread.start_new_thread(bulk_doit, (artifacts_asc,))
_thread.start_new_thread(bulk_doit, (artifacts_dsc,))
_thread.start_new_thread(bulk_doit, (artifacts_mix,))
```
**Expected behavior**
All the threads complete without error.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2062526
| 2022-03-25T11:00:08 |
||
pulp/pulpcore | 2,428 | pulp__pulpcore-2428 | [
"2420"
] | db8ea69ffeaf9ed7eaeede400a14a1ac324dc85a | diff --git a/pulpcore/plugin/stages/artifact_stages.py b/pulpcore/plugin/stages/artifact_stages.py
--- a/pulpcore/plugin/stages/artifact_stages.py
+++ b/pulpcore/plugin/stages/artifact_stages.py
@@ -242,12 +242,13 @@ async def run(self):
if d_artifact.artifact._state.adding and not d_artifact.deferred_download:
d_artifact.artifact.file = str(d_artifact.artifact.file)
da_to_save.append(d_artifact)
+ da_to_save_ordered = sorted(da_to_save, key=lambda x: x.artifact.sha256)
if da_to_save:
for d_artifact, artifact in zip(
- da_to_save,
+ da_to_save_ordered,
await sync_to_async(Artifact.objects.bulk_get_or_create)(
- d_artifact.artifact for d_artifact in da_to_save
+ d_artifact.artifact for d_artifact in da_to_save_ordered
),
):
d_artifact.artifact = artifact
| bulk_create() deadlock
**Version**
3.14.12 (reproduceable in main)
**Describe the bug**
ArtifactSaver can deadlock on bulk_get_or_create() call in the presence of high concurrency and overlapping content.
Deadlock will be some variant of the following:
```
Exception ignored in thread started by: <function bulk_doit at 0x7f878f388dc0>
Traceback (most recent call last):
File "<ipython-input-1-297483773c63>", line 29, in bulk_doit
File "/home/vagrant/devel/pulpcore/pulpcore/app/models/content.py", line 87, in bulk_get_or_create
return super().bulk_create(objs, batch_size=batch_size)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/query.py", line 502, in bulk_create
returned_columns = self._batched_insert(
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/query.py", line 1287, in _batched_insert
inserted_rows.extend(self._insert(
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/query.py", line 1270, in _insert
return query.get_compiler(using=using).execute_sql(returning_fields)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/sql/compiler.py", line 1416, in execute_sql
cursor.execute(sql, params)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 66, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 75, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/utils.py", line 90, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
django.db.utils.OperationalError: deadlock detected
DETAIL: Process 20326 waits for ShareLock on transaction 4661315; blocked by process 20320.
Process 20320 waits for ShareLock on transaction 4661333; blocked by process 20326.
HINT: See server log for query details.
CONTEXT: while inserting index tuple (1221,10) in relation "core_artifact_pkey"
```
See https://bugzilla.redhat.com/show_bug.cgi?id=2062526 for more details.
**To Reproduce**
Steps to reproduce the behavior:
Reproducing "in the wild" is difficuilt. The follwoing steps will reliably hit the deadlock:
1) Start with a clean pulp database (at a minimum, none of the Artifacts created at Step 5, below, should exist in the db)
2) make sure you have at least 10 pulpcore-workers up. This command will do the trick:
```
sudo systemctl start pulpcore-worker@1 pulpcore-worker@2 \
pulpcore-worker@3 pulpcore-worker@4 pulpcore-worker@5 pulpcore-worker@6 \
pulpcore-worker@7 pulpcore-worker@8 pulpcore-worker@9 pulpcore-worker@10
```
3) Create "a lot" of Artifacts - 10K is a good choice.
4) Arrange to sort those artifacts by sha256 (the collision point) ascending, descending, and mixed.
5) Arrange to bulk_get_or_create() those three sets, in independent threads, at least 4-5 threads per sort.
You can accomplish this with the following script in pulpcore-manager shell:
```
from pulpcore.app.models.content import Artifact
from hashlib import sha256
import _thread
artifacts = []
print(">>>BUILDING ARTIFACTS...")
for i in range(9999):
filename = f'/tmp/{i:06d}.txt'
with open(filename, "w") as f:
f.write(filename)
with open(filename, "rb") as f:
sum256 = sha256(f.read()).hexdigest()
attrs = {"file": filename, "sha256": sum256, "size": i}
artifacts.append(Artifact (**attrs))
print(">>> SORT ASC...")
artifacts_asc = sorted(artifacts, key=lambda a: a.sha256)
print(">>> SORT DSC...")
artifacts_dsc = sorted(artifacts, key=lambda a: a.sha256, reverse=True)
print(">>> SHUFFLE...")
artifacts_mix = []
for i in range(9999):
artifacts_mix.append(artifacts_asc[i])
artifacts_mix.append(artifacts_dsc[i])
def bulk_doit(art_list):
print(">>> ENTER...")
Artifact.objects.bulk_get_or_create(art_list)
print(">>> EXIT...")
print(">>> STARTING LOOP...")
for i in range(5):
_thread.start_new_thread(bulk_doit, (artifacts_asc,))
_thread.start_new_thread(bulk_doit, (artifacts_dsc,))
_thread.start_new_thread(bulk_doit, (artifacts_mix,))
```
**Expected behavior**
All the threads complete without error.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2062526
| 2022-03-25T11:00:26 |
||
pulp/pulpcore | 2,434 | pulp__pulpcore-2434 | [
"2420"
] | 719e52ca0067aa6f07d63d17930d97019c296829 | diff --git a/pulpcore/plugin/stages/artifact_stages.py b/pulpcore/plugin/stages/artifact_stages.py
--- a/pulpcore/plugin/stages/artifact_stages.py
+++ b/pulpcore/plugin/stages/artifact_stages.py
@@ -233,12 +233,13 @@ async def run(self):
if d_artifact.artifact._state.adding and not d_artifact.deferred_download:
d_artifact.artifact.file = str(d_artifact.artifact.file)
da_to_save.append(d_artifact)
+ da_to_save_ordered = sorted(da_to_save, key=lambda x: x.artifact.sha256)
if da_to_save:
for d_artifact, artifact in zip(
- da_to_save,
+ da_to_save_ordered,
Artifact.objects.bulk_get_or_create(
- d_artifact.artifact for d_artifact in da_to_save
+ d_artifact.artifact for d_artifact in da_to_save_ordered
),
):
d_artifact.artifact = artifact
| bulk_create() deadlock
**Version**
3.14.12 (reproduceable in main)
**Describe the bug**
ArtifactSaver can deadlock on bulk_get_or_create() call in the presence of high concurrency and overlapping content.
Deadlock will be some variant of the following:
```
Exception ignored in thread started by: <function bulk_doit at 0x7f878f388dc0>
Traceback (most recent call last):
File "<ipython-input-1-297483773c63>", line 29, in bulk_doit
File "/home/vagrant/devel/pulpcore/pulpcore/app/models/content.py", line 87, in bulk_get_or_create
return super().bulk_create(objs, batch_size=batch_size)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/query.py", line 502, in bulk_create
returned_columns = self._batched_insert(
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/query.py", line 1287, in _batched_insert
inserted_rows.extend(self._insert(
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/query.py", line 1270, in _insert
return query.get_compiler(using=using).execute_sql(returning_fields)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/models/sql/compiler.py", line 1416, in execute_sql
cursor.execute(sql, params)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 66, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 75, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/utils.py", line 90, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/usr/local/lib/pulp/lib64/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
django.db.utils.OperationalError: deadlock detected
DETAIL: Process 20326 waits for ShareLock on transaction 4661315; blocked by process 20320.
Process 20320 waits for ShareLock on transaction 4661333; blocked by process 20326.
HINT: See server log for query details.
CONTEXT: while inserting index tuple (1221,10) in relation "core_artifact_pkey"
```
See https://bugzilla.redhat.com/show_bug.cgi?id=2062526 for more details.
**To Reproduce**
Steps to reproduce the behavior:
Reproducing "in the wild" is difficuilt. The follwoing steps will reliably hit the deadlock:
1) Start with a clean pulp database (at a minimum, none of the Artifacts created at Step 5, below, should exist in the db)
2) make sure you have at least 10 pulpcore-workers up. This command will do the trick:
```
sudo systemctl start pulpcore-worker@1 pulpcore-worker@2 \
pulpcore-worker@3 pulpcore-worker@4 pulpcore-worker@5 pulpcore-worker@6 \
pulpcore-worker@7 pulpcore-worker@8 pulpcore-worker@9 pulpcore-worker@10
```
3) Create "a lot" of Artifacts - 10K is a good choice.
4) Arrange to sort those artifacts by sha256 (the collision point) ascending, descending, and mixed.
5) Arrange to bulk_get_or_create() those three sets, in independent threads, at least 4-5 threads per sort.
You can accomplish this with the following script in pulpcore-manager shell:
```
from pulpcore.app.models.content import Artifact
from hashlib import sha256
import _thread
artifacts = []
print(">>>BUILDING ARTIFACTS...")
for i in range(9999):
filename = f'/tmp/{i:06d}.txt'
with open(filename, "w") as f:
f.write(filename)
with open(filename, "rb") as f:
sum256 = sha256(f.read()).hexdigest()
attrs = {"file": filename, "sha256": sum256, "size": i}
artifacts.append(Artifact (**attrs))
print(">>> SORT ASC...")
artifacts_asc = sorted(artifacts, key=lambda a: a.sha256)
print(">>> SORT DSC...")
artifacts_dsc = sorted(artifacts, key=lambda a: a.sha256, reverse=True)
print(">>> SHUFFLE...")
artifacts_mix = []
for i in range(9999):
artifacts_mix.append(artifacts_asc[i])
artifacts_mix.append(artifacts_dsc[i])
def bulk_doit(art_list):
print(">>> ENTER...")
Artifact.objects.bulk_get_or_create(art_list)
print(">>> EXIT...")
print(">>> STARTING LOOP...")
for i in range(5):
_thread.start_new_thread(bulk_doit, (artifacts_asc,))
_thread.start_new_thread(bulk_doit, (artifacts_dsc,))
_thread.start_new_thread(bulk_doit, (artifacts_mix,))
```
**Expected behavior**
All the threads complete without error.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2062526
| 2022-03-25T14:00:54 |
||
pulp/pulpcore | 2,453 | pulp__pulpcore-2453 | [
"1968"
] | 406b33c0b367ff951f8c20f52c3c7e26b23cc7a7 | diff --git a/pulpcore/app/settings.py b/pulpcore/app/settings.py
--- a/pulpcore/app/settings.py
+++ b/pulpcore/app/settings.py
@@ -297,9 +297,62 @@
# HERE STARTS DYNACONF EXTENSION LOAD (Keep at the very bottom of settings.py)
# Read more at https://dynaconf.readthedocs.io/en/latest/guides/django.html
-import dynaconf # noqa
+from dynaconf import DjangoDynaconf, Validator # noqa
+
+# Validators
+content_origin_validator = Validator(
+ "CONTENT_ORIGIN",
+ must_exist=True,
+ messages={
+ "must_exist_true": _(
+ "CONTENT_ORIGIN is a required setting but it was not configured. This may be caused "
+ "by invalid read permissions of the settings file. Note that CONTENT_ORIGIN is set by "
+ "the installer automatically."
+ )
+ },
+)
+
+cache_enabled_validator = Validator("CACHE_ENABLED", eq=True)
+redis_url_validator = Validator("REDIS_URL", must_exist=True, when=cache_enabled_validator)
+redis_host_validator = Validator("REDIS_HOST", must_exist=True, when=cache_enabled_validator)
+redis_port_validator = Validator("REDIS_PORT", must_exist=True, when=cache_enabled_validator)
+cache_validator = redis_url_validator | (redis_host_validator & redis_port_validator)
+cache_validator.messages["combined"] = _(
+ "CACHE_ENABLED is enabled but it requires to have REDIS configured. Please check "
+ "https://docs.pulpproject.org/pulpcore/configuration/settings.html#redis-settings "
+ "for more information."
+)
+
+sha256_validator = Validator(
+ "ALLOWED_CONTENT_CHECKSUMS",
+ cont="sha256",
+ messages={
+ "operations": "ALLOWED_CONTENT_CHECKSUMS MUST contain 'sha256' - Pulp's "
+ "content addressable storage relies on sha256 to identify entities."
+ },
+)
+
+unknown_algs_validator = Validator(
+ "ALLOWED_CONTENT_CHECKSUMS",
+ condition=lambda x: len(set(x).difference(constants.ALL_KNOWN_CONTENT_CHECKSUMS)) == 0,
+ messages={
+ "condition": _(
+ "ALLOWED_CONTENT_CHECKSUMS may only contain algorithms known to pulp - see "
+ "constants.ALL_KNOWN_CONTENT_CHECKSUMS for the allowed list."
+ )
+ },
+)
+
+api_root_validator = Validator(
+ "API_ROOT",
+ condition=lambda x: x.startswith("/") and x.endswith("/"),
+ messages={
+ "condition": _("The API_ROOT must start and end with a '/', currently it is '{value}'")
+ },
+)
+
-settings = dynaconf.DjangoDynaconf(
+settings = DjangoDynaconf(
__name__,
GLOBAL_ENV_FOR_DYNACONF="PULP",
ENV_SWITCHER_FOR_DYNACONF="PULP_ENV",
@@ -308,34 +361,18 @@
],
ENVVAR_FOR_DYNACONF="PULP_SETTINGS",
load_dotenv=False,
+ validators=[
+ content_origin_validator,
+ cache_validator,
+ sha256_validator,
+ unknown_algs_validator,
+ api_root_validator,
+ ],
)
# HERE ENDS DYNACONF EXTENSION LOAD (No more code below this line)
_logger = getLogger(__name__)
-# Post-dyna-conf check if user provided a redis connection when enabling cache
-try:
- if CACHE_ENABLED:
- REDIS_URL or (REDIS_HOST and REDIS_PORT)
-except NameError:
- raise ImproperlyConfigured(
- _(
- "CACHE_ENABLED is enabled but it requires to have REDIS configured. Please check "
- "https://docs.pulpproject.org/pulpcore/configuration/settings.html#redis-settings "
- "for more information."
- )
- )
-
-try:
- CONTENT_ORIGIN
-except NameError:
- raise ImproperlyConfigured(
- _(
- "CONTENT_ORIGIN is a required setting but it was not configured. This may be caused "
- "by invalid read permissions of the settings file. Note that CONTENT_ORIGIN is set by "
- "the installer automatically."
- )
- )
if not (
Path(sys.argv[0]).name == "sphinx-build"
@@ -351,25 +388,6 @@
)
)
-# Check legality of ALLOWED_CONTENT_CHECKSUMS post-dynaconf-load, in case it has been overridden
-# in a site-specific location (eg, in /etc/pulp/settings.py)
-if "sha256" not in ALLOWED_CONTENT_CHECKSUMS:
- raise ImproperlyConfigured(
- _(
- "ALLOWED_CONTENT_CHECKSUMS MUST contain 'sha256' - Pulp's content-storage-addressing "
- "relies on sha256 to identify entities."
- )
- )
-
-unknown_algs = set(ALLOWED_CONTENT_CHECKSUMS).difference(constants.ALL_KNOWN_CONTENT_CHECKSUMS)
-if unknown_algs:
- raise ImproperlyConfigured(
- _(
- "ALLOWED_CONTENT_CHECKSUMS may only contain algorithms known to pulp - see "
- "constants.ALL_KNOWN_CONTENT_CHECKSUMS for the allowed list. Unknown algorithms "
- "provided: {}".format(unknown_algs)
- )
- )
FORBIDDEN_CHECKSUMS = set(constants.ALL_KNOWN_CONTENT_CHECKSUMS).difference(
ALLOWED_CONTENT_CHECKSUMS
@@ -433,15 +451,6 @@
finally:
connection.close()
-
-if not API_ROOT.startswith("/"):
- i8ln_msg = _("The API_ROOT must start with a '/', currently it is '{API_ROOT}'")
- raise ImproperlyConfigured(i8ln_msg.format(API_ROOT=API_ROOT))
-
-if not API_ROOT.endswith("/"):
- i8ln_msg = _("The API_ROOT must end with a '/', currently it is '{API_ROOT}'")
- raise ImproperlyConfigured(i8ln_msg.format(API_ROOT=API_ROOT))
-
settings.set("V3_API_ROOT", settings.API_ROOT + "api/v3/") # Not user configurable
settings.set(
"V3_API_ROOT_NO_FRONT_SLASH", settings.V3_API_ROOT.lstrip("/")
| diff --git a/pulpcore/tests/unit/test_settings.py b/pulpcore/tests/unit/test_settings.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/tests/unit/test_settings.py
@@ -0,0 +1,59 @@
+from django.test import TestCase
+from django.conf import settings
+from dynaconf.validator import ValidationError
+
+
+class SettingsTestCase(TestCase):
+ def test_content_origin(self):
+ """Test validation error is raised when CONTENT_ORIGIN is missing."""
+ # See https://github.com/rochacbruno/dynaconf/issues/731
+ # keep needs to be True in order to copy all the current settings already initialized
+ msettings = settings.from_env("development", keep=True, validators=settings.validators)
+ # force needs to be True in order to remove CONTENT_ORIGIN since keep makes it a default
+ msettings.unset("CONTENT_ORIGIN", force=True)
+ with self.assertRaises(ValidationError):
+ msettings.validators.validate()
+
+ def test_cache_enabled(self):
+ """Test that when CACHE_ENABLED is set REDIS_URL or REDIS_HOST & REDIS_PORT."""
+ msettings = settings.from_env("development", keep=True, validators=settings.validators)
+ msettings.set("CACHE_ENABLED", True)
+ msettings.unset("REDIS_URL", force=True)
+ msettings.unset("REDIS_HOST", force=True)
+ msettings.unset("REDIS_PORT", force=True)
+ with self.assertRaises(ValidationError):
+ msettings.validators.validate()
+
+ msettings.set("REDIS_HOST", "localhost")
+ with self.assertRaises(ValidationError):
+ msettings.validators.validate()
+
+ msettings.unset("REDIS_HOST", force=True)
+ msettings.set("REDIS_PORT", 8000)
+ with self.assertRaises(ValidationError):
+ msettings.validators.validate()
+
+ def test_allowed_content_checksums(self):
+ """Test that removing 'sha256' from ALLOWED_CONTENT_CHECKSUMS raises ValidationError."""
+ msettings = settings.from_env("development", keep=True, validators=settings.validators)
+ msettings.set("ALLOWED_CONTENT_CHECKSUMS", ["sha224", "sha512"])
+ with self.assertRaises(ValidationError):
+ msettings.validators.validate()
+
+ def test_unknown_content_checksums(self):
+ """Test that providing invalid checksum for ALLOWED_CONTENT_CHECKSUMS fails."""
+ msettings = settings.from_env("development", keep=True, validators=settings.validators)
+ msettings.set("ALLOWED_CONTENT_CHECKSUMS", ["aaa"])
+ with self.assertRaises(ValidationError):
+ msettings.validators.validate()
+
+ def test_api_root(self):
+ """Test that API_ROOT validation checks for beginning and ending '/'."""
+ msettings = settings.from_env("development", keep=True, validators=settings.validators)
+ msettings.set("API_ROOT", "/hi/there")
+ with self.assertRaises(ValidationError):
+ msettings.validators.validate()
+
+ msettings.set("API_ROOT", "hi/there/")
+ with self.assertRaises(ValidationError):
+ msettings.validators.validate()
| AttributeError: 'Settings' object has no attribute 'CONTENT_ORIGIN'
Author: daviddavis (daviddavis)
Redmine Issue: 8235, https://pulp.plan.io/issues/8235
---
Reproducer:
1. Have pulpcore and pulp_ansible installed
2. Unset `CONTENT_ORIGIN` in your settings file.
Observe that you get an ugly AttributeError instead of [the user-friendly error that pulpcore raises](https://github.com/pulp/pulpcore/blob/f8a8c64bb28cbe3908720ea56f417312a4389862/pulpcore/app/settings.py#L279-L281).
This happens because pulpcore loads the plugin settings before checking whether CONTENT_ORIGIN is defined.
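The patch above replaces this late check with dynaconf validators that run while the settings are loaded. A minimal sketch of the same pattern — the setting name is real, the message wording here is illustrative:
```
from dynaconf import Validator

# The real message text lives in settings.py; this wording is illustrative.
content_origin_validator = Validator(
    "CONTENT_ORIGIN",
    must_exist=True,
    messages={"must_exist_true": "CONTENT_ORIGIN is a required setting but it was not configured."},
)
# Passed to DjangoDynaconf(..., validators=[content_origin_validator]) in
# settings.py, so a missing CONTENT_ORIGIN now fails fast with a readable
# dynaconf ValidationError instead of a later AttributeError from plugin code.
```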
| 2022-03-25T21:26:24 |
|
pulp/pulpcore | 2,475 | pulp__pulpcore-2475 | [
"1912"
] | d936ce5bb052973133336157fbeb2a1c24f08024 | diff --git a/pulpcore/app/viewsets/publication.py b/pulpcore/app/viewsets/publication.py
--- a/pulpcore/app/viewsets/publication.py
+++ b/pulpcore/app/viewsets/publication.py
@@ -11,6 +11,7 @@
ContentRedirectContentGuard,
Distribution,
Publication,
+ Repository,
Content,
)
from pulpcore.app.serializers import (
@@ -55,7 +56,23 @@ def filter(self, qs, value):
return qs.with_content([content.pk])
+class RepositoryThroughVersionFilter(Filter):
+ def filter(self, qs, value):
+ if value is None:
+ # user didn't supply a value
+ return qs
+
+ if not value:
+ raise serializers.ValidationError(
+ detail=_("No value supplied for {name} filter").format(name=self.field_name)
+ )
+
+ repository = NamedModelViewSet.get_resource(value, Repository)
+ return qs.filter(repository_version__repository=repository)
+
+
class PublicationFilter(BaseFilterSet):
+ repository = RepositoryThroughVersionFilter(help_text=_("Repository referenced by HREF"))
repository_version = RepositoryVersionFilter()
pulp_created = IsoDateTimeFilter()
content = PublicationContentFilter()
| As a user, I would like to be able to filter all publications for a given repository
Author: wibbit (wibbit)
Redmine Issue: 7036, https://pulp.plan.io/issues/7036
---
As a user, I would like to be able to filter all publications for a given repository.
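With the `repository` filter added by the patch above, a client can pass a repository HREF directly when listing publications. A rough usage sketch — the host, credentials, endpoint path, and HREF below are placeholders:
```
import requests

BASE_URL = "https://pulp.example.com"  # placeholder
REPO_HREF = "/pulp/api/v3/repositories/file/file/<uuid>/"  # placeholder HREF

response = requests.get(
    BASE_URL + "/pulp/api/v3/publications/file/file/",
    params={"repository": REPO_HREF},  # filter name added by the patch above
    auth=("admin", "password"),  # placeholder credentials
)
response.raise_for_status()
for publication in response.json()["results"]:
    print(publication["pulp_href"])
```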
| I believe this is not yet implemented because when I look in [the API docs](https://docs.pulpproject.org/pulpcore/restapi.html#tag/Publications:-File) I see filtering by `repository_version`, but not `repository`. | 2022-04-04T09:37:47 |
|
pulp/pulpcore | 2,478 | pulp__pulpcore-2478 | [
"2430"
] | 406b33c0b367ff951f8c20f52c3c7e26b23cc7a7 | diff --git a/pulpcore/plugin/stages/content_stages.py b/pulpcore/plugin/stages/content_stages.py
--- a/pulpcore/plugin/stages/content_stages.py
+++ b/pulpcore/plugin/stages/content_stages.py
@@ -155,13 +155,39 @@ def process_batch():
content_artifact.artifact = to_update_ca_artifact[key]
to_update_ca_bulk.append(content_artifact)
- # Sort the lists we're about to do bulk updates/creates on.
- # We know to_update_ca_bulk entries already are in the DB, so we can enforce
- # order just using pulp_id.
- to_update_ca_bulk.sort(key=lambda x: x.pulp_id)
- content_artifact_bulk.sort(key=lambda x: ContentArtifact.sort_key(x))
+ # to_update_ca_bulk are the CAs that we know are already persisted.
+ # We need to update their artifact_ids, and wish to do it in bulk to
+ # avoid hundreds of round-trips to the database.
+ #
+ # To avoid deadlocks in high-concurrency environments with overlapping
+ # content, we need to update the rows in some defined order. Unfortunately,
+ # postgres doesn't support order-on-update - but it *does* support ordering
+ # on select-for-update. So, we select-for-update, in pulp_id order, the
+ # rows we're about to update as one db-call, and then do the update in a
+ # second.
+ ids = [k.pulp_id for k in to_update_ca_bulk]
+ with transaction.atomic():
+ # "len()" forces the QA to be evaluated. Using exist() or count() won't
+ # work for us - Django is smart enough to either not-order, or even
+ # not-emit, a select-for-update in these cases.
+ #
+ # To maximize performance, we make sure to only ask for pulp_ids, and
+ # avoid instantiating a python-object for the affected CAs by using
+ # values_list()
+ len(
+ ContentArtifact.objects.filter(pulp_id__in=ids)
+ .only("pulp_id")
+ .order_by("pulp_id")
+ .select_for_update()
+ .values_list()
+ )
+ ContentArtifact.objects.bulk_update(to_update_ca_bulk, ["artifact"])
- ContentArtifact.objects.bulk_update(to_update_ca_bulk, ["artifact"])
+ # To avoid a similar deadlock issue when calling get_or_create, we sort the
+ # "new" CAs to make sure inserts happen in a defined order. Since we can't
+ # trust the pulp_id (by the time we go to create a CA, it may already exist,
+ # and be replaced by the 'real' one), we sort by their "natural key".
+ content_artifact_bulk.sort(key=lambda x: ContentArtifact.sort_key(x))
ContentArtifact.objects.bulk_get_or_create(content_artifact_bulk)
self._post_save(batch)
| bulk_update() in content-stages can cause (very rare) deadlock
**Version**
3.14
**Describe the bug**
In high-concurrency environments, with overlapping content, calling bulk_update() can cause a deadlock. Specifically, this call:
https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/content_stages.py#L158-L164
Ordering the list-to-be-updated does not, alas, protect us - because Postgres doesn't guarantee order when doing an update like this.
**To Reproduce**
We have only seen this "in the wild" once, syncing 8-10 repos with similar content at the same time with 10 workers available.
**Expected behavior**
Don't deadlock.
**Additional context**
This is the traceback from the initial description for
https://bugzilla.redhat.com/show_bug.cgi?id=2062526
We fixed the deadlock noted in https://bugzilla.redhat.com/show_bug.cgi?id=2062526#c2 under #2420
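Postgres has no ORDER BY for UPDATE, so the patch above first takes the row locks in a defined order with a SELECT ... FOR UPDATE and only then runs the bulk update. A stripped-down sketch of that pattern (model and field names follow the patch):
```
from django.db import transaction
from pulpcore.plugin.models import ContentArtifact

def update_artifacts_in_order(to_update_ca_bulk):
    ids = [ca.pulp_id for ca in to_update_ca_bulk]
    with transaction.atomic():
        # len() forces the locking query to run; the rows are now locked in
        # pulp_id order, so the unordered bulk_update below cannot deadlock
        # with another transaction doing the same thing.
        len(
            ContentArtifact.objects.filter(pulp_id__in=ids)
            .order_by("pulp_id")
            .select_for_update()
            .values_list("pulp_id")
        )
        ContentArtifact.objects.bulk_update(to_update_ca_bulk, ["artifact"])
```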
| 2022-04-04T19:05:17 |
||
pulp/pulpcore | 2,485 | pulp__pulpcore-2485 | [
"2215"
] | 334d9f4c953f1ee02c8df22df0a487a8a31f38d7 | diff --git a/pulpcore/app/tasks/purge.py b/pulpcore/app/tasks/purge.py
--- a/pulpcore/app/tasks/purge.py
+++ b/pulpcore/app/tasks/purge.py
@@ -5,6 +5,48 @@
Task,
)
from pulpcore.app.role_util import get_objects_for_user
+from pulpcore.constants import TASK_STATES
+
+# Delete 1K at a time - better to use less memory, and take a little longer, with a utility
+# function like this.
+DELETE_LIMIT = 1000
+# Key that delete() returns for Tasks
+TASK_KEY = "core.Task"
+
+
+def _details_reporting(current_reports, current_details, totals_pb):
+ """
+ Create and update progress-reports for each detail-key returned from a delete() call.
+
+ We don't know how many entities will be deleted via cascade-delete until we're all done.
+
+ The function has one special case: we know how many Tasks we're expecting to delete right
+ from the beginning. Therefore, we "assume" that the key `core.Task` has been pre-seeded
+ with a ProgressReport whose total is correct, in advance, and therefore don't update
+ total for that key.
+
+ Args:
+ current_reports (dict): key:ProgressReport to record into
+ Returns:
+ updated current_reports
+ """
+ entity_count = 0
+ for key, curr_detail in current_details.items():
+ entity_count += current_details[key]
+ if key in current_reports:
+ current_reports[key].increase_by(curr_detail)
+ else:
+ pb = ProgressReport(
+ message=_("Purged task-objects of type {}".format(key)),
+ code="purge.tasks.key.{}".format(key),
+ total=None,
+ done=curr_detail,
+ )
+ pb.save()
+ current_reports[key] = pb
+ # Update/save totals once
+ totals_pb.increase_by(entity_count)
+ return current_reports
def purge(finished_before, states):
@@ -26,25 +68,48 @@ def purge(finished_before, states):
"""
current_user = get_current_authenticated_user()
- qs = Task.objects.filter(finished_at__lt=finished_before, state__in=states)
- units_deleted, details = get_objects_for_user(current_user, "core.delete_task", qs=qs).delete()
-
+ # Tasks, prior to the specified date, in the specified state, owned by the current-user
+ tasks_qs = Task.objects.filter(finished_at__lt=finished_before, state__in=states)
+ candidate_qs = get_objects_for_user(current_user, "core.delete_task", qs=tasks_qs)
+ delete_qs = get_objects_for_user(current_user, "core.delete_task", qs=tasks_qs[:DELETE_LIMIT])
# Progress bar reporting total-units
- progress_bar = ProgressReport(
- message=_("Purged task-objects total"),
- total=units_deleted,
+ totals_pb = ProgressReport(
+ message=_("Purged task-related-objects total"),
+ total=None,
code="purge.tasks.total",
- done=units_deleted,
- state="completed",
+ done=0,
+ )
+ totals_pb.save()
+ # Dictionary to hold progress-reports by delete-details-key
+ details_reports = {}
+
+ # Figure out how many Tasks owned by the current user we're about to delete
+ expected_total = candidate_qs.count()
+ # Build and save a progress-report for that detail
+ pb = ProgressReport(
+ message=_("Purged task-objects of type {}".format(TASK_KEY)),
+ total=expected_total,
+ code="purge.tasks.key.{}".format(TASK_KEY),
+ done=0,
)
- progress_bar.save()
- # This loop reports back the specific entities deleted and the number removed
- for key in details:
- progress_bar = ProgressReport(
- message=_("Purged task-objects of type {}".format(key)),
- total=details[key],
- code="purge.tasks.key.{}".format(key),
- done=details[key],
- state="completed",
- )
- progress_bar.save()
+ pb.save()
+ details_reports[TASK_KEY] = pb
+
+ # Our delete-query is going to deal with "the first DELETE_LIMIT tasks that match our
+ # criteria", looping until we've deleted everything that fits our parameters
+ units_deleted, details = delete_qs.delete()
+ # Until our query returns "No tasks deleted", add results into totals and Do It Again
+ while units_deleted > 0:
+ _details_reporting(details_reports, details, totals_pb)
+ units_deleted, details = delete_qs.delete()
+
+ # Complete the progress-reports for the specific entities deleted
+ for key, pb in details_reports.items():
+ pb.total = pb.done
+ pb.state = TASK_STATES.COMPLETED
+ pb.save()
+
+ # Complete the totals-ProgressReport
+ totals_pb.total = totals_pb.done
+ totals_pb.state = TASK_STATES.COMPLETED
+ totals_pb.save()
| Task purging endpoint loads every task in memory
**Version**
`pulpcore-3.17.3`
`pulp_rpm-3.17.3`
`pulp_ansible-0.12.0`
`pulp_container-2.10.0`
`pulp_deb-2.17.0`
`pulp_file-1.10.1`
`pulp_python-3.6.0`
**Describe the bug**
We have a large number of finished Pulp tasks (around 1.5 million). We tried to purge them with [`pulp/api/v3/tasks/purge`](https://docs.pulpproject.org/pulpcore/restapi.html#operation/tasks_purge) and quickly realized that the purge task tries to load the whole finished queue (all 1.5 million finished tasks) into memory, which causes it to be killed after consuming 15 GB of memory.
**To Reproduce**
Steps to reproduce the behavior:
1. Create a large number of Pulp tasks (more than 1 million will be enough).
2. Run [`pulp/api/v3/tasks/purge`](https://docs.pulpproject.org/pulpcore/restapi.html#operation/tasks_purge) with `finished_before` set to a future date.
3. Your server will run out of memory and the task won't complete.
**Expected behavior**
The task-purging API endpoint does not load the entire finished queue into memory.
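The patch above achieves this by deleting through a bounded queryset in a loop rather than materializing every task. A generic sketch of that batched-delete pattern — the real task additionally restricts the queryset to tasks the requesting user may delete (get_objects_for_user) and records ProgressReports for everything removed:
```
from pulpcore.app.models import Task

DELETE_LIMIT = 1000  # batch size chosen in the patch above

def purge_batched(finished_before, states):
    qs = Task.objects.filter(finished_at__lt=finished_before, state__in=states)
    while True:
        # Pull only the primary keys of the next batch to keep memory flat.
        batch = list(qs.values_list("pk", flat=True)[:DELETE_LIMIT])
        if not batch:
            break
        Task.objects.filter(pk__in=batch).delete()
```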
| Related: https://github.com/pulp/pulpcore/issues/5048 | 2022-04-06T08:44:54 |
|
pulp/pulpcore | 2,486 | pulp__pulpcore-2486 | [
"2215"
] | e4d0b51180550ba27ba2cde56656dc350824fb87 | diff --git a/pulpcore/app/tasks/purge.py b/pulpcore/app/tasks/purge.py
--- a/pulpcore/app/tasks/purge.py
+++ b/pulpcore/app/tasks/purge.py
@@ -5,6 +5,48 @@
Task,
)
from pulpcore.app.role_util import get_objects_for_user
+from pulpcore.constants import TASK_STATES
+
+# Delete 1K at a time - better to use less memory, and take a little longer, with a utility
+# function like this.
+DELETE_LIMIT = 1000
+# Key that delete() returns for Tasks
+TASK_KEY = "core.Task"
+
+
+def _details_reporting(current_reports, current_details, totals_pb):
+ """
+ Create and update progress-reports for each detail-key returned from a delete() call.
+
+ We don't know how many entities will be deleted via cascade-delete until we're all done.
+
+ The function has one special case: we know how many Tasks we're expecting to delete right
+ from the beginning. Therefore, we "assume" that the key `core.Task` has been pre-seeded
+ with a ProgressReport whose total is correct, in advance, and therefore don't update
+ total for that key.
+
+ Args:
+ current_reports (dict): key:ProgressReport to record into
+ Returns:
+ updated current_reports
+ """
+ entity_count = 0
+ for key, curr_detail in current_details.items():
+ entity_count += current_details[key]
+ if key in current_reports:
+ current_reports[key].increase_by(curr_detail)
+ else:
+ pb = ProgressReport(
+ message=_("Purged task-objects of type {}".format(key)),
+ code="purge.tasks.key.{}".format(key),
+ total=None,
+ done=curr_detail,
+ )
+ pb.save()
+ current_reports[key] = pb
+ # Update/save totals once
+ totals_pb.increase_by(entity_count)
+ return current_reports
def purge(finished_before, states):
@@ -26,25 +68,48 @@ def purge(finished_before, states):
"""
current_user = get_current_authenticated_user()
- qs = Task.objects.filter(finished_at__lt=finished_before, state__in=states)
- units_deleted, details = get_objects_for_user(current_user, "core.delete_task", qs=qs).delete()
-
+ # Tasks, prior to the specified date, in the specified state, owned by the current-user
+ tasks_qs = Task.objects.filter(finished_at__lt=finished_before, state__in=states)
+ candidate_qs = get_objects_for_user(current_user, "core.delete_task", qs=tasks_qs)
+ delete_qs = get_objects_for_user(current_user, "core.delete_task", qs=tasks_qs[:DELETE_LIMIT])
# Progress bar reporting total-units
- progress_bar = ProgressReport(
- message=_("Purged task-objects total"),
- total=units_deleted,
+ totals_pb = ProgressReport(
+ message=_("Purged task-related-objects total"),
+ total=None,
code="purge.tasks.total",
- done=units_deleted,
- state="completed",
+ done=0,
+ )
+ totals_pb.save()
+ # Dictionary to hold progress-reports by delete-details-key
+ details_reports = {}
+
+ # Figure out how many Tasks owned by the current user we're about to delete
+ expected_total = candidate_qs.count()
+ # Build and save a progress-report for that detail
+ pb = ProgressReport(
+ message=_("Purged task-objects of type {}".format(TASK_KEY)),
+ total=expected_total,
+ code="purge.tasks.key.{}".format(TASK_KEY),
+ done=0,
)
- progress_bar.save()
- # This loop reports back the specific entities deleted and the number removed
- for key in details:
- progress_bar = ProgressReport(
- message=_("Purged task-objects of type {}".format(key)),
- total=details[key],
- code="purge.tasks.key.{}".format(key),
- done=details[key],
- state="completed",
- )
- progress_bar.save()
+ pb.save()
+ details_reports[TASK_KEY] = pb
+
+ # Our delete-query is going to deal with "the first DELETE_LIMIT tasks that match our
+ # criteria", looping until we've deleted everything that fits our parameters
+ units_deleted, details = delete_qs.delete()
+ # Until our query returns "No tasks deleted", add results into totals and Do It Again
+ while units_deleted > 0:
+ _details_reporting(details_reports, details, totals_pb)
+ units_deleted, details = delete_qs.delete()
+
+ # Complete the progress-reports for the specific entities deleted
+ for key, pb in details_reports.items():
+ pb.total = pb.done
+ pb.state = TASK_STATES.COMPLETED
+ pb.save()
+
+ # Complete the totals-ProgressReport
+ totals_pb.total = totals_pb.done
+ totals_pb.state = TASK_STATES.COMPLETED
+ totals_pb.save()
| Task purging endpoint loads every task in memory
**Version**
`pulpcore-3.17.3`
`pulp_rpm-3.17.3`
`pulp_ansible-0.12.0`
`pulp_container-2.10.0`
`pulp_deb-2.17.0`
`pulp_file-1.10.1`
`pulp_python-3.6.0`
**Describe the bug**
We have a large number of finished Pulp tasks (around 1.5 million). We tried to purge them with [`pulp/api/v3/tasks/purge`](https://docs.pulpproject.org/pulpcore/restapi.html#operation/tasks_purge) and quickly realized that the purge task tries to load the whole finished queue (all 1.5 million finished tasks) into memory, which causes it to be killed after consuming 15 GB of memory.
**To Reproduce**
Steps to reproduce the behavior:
1. Create a large number of Pulp tasks (more than 1 million will be enough).
2. Run [`pulp/api/v3/tasks/purge`](https://docs.pulpproject.org/pulpcore/restapi.html#operation/tasks_purge) with `finished_before` set to a future date.
3. Your server will run out of memory and the task won't complete.
**Expected behavior**
The task-purging API endpoint does not load the entire finished queue into memory.
| Related: https://github.com/pulp/pulpcore/issues/5048 | 2022-04-06T08:45:32 |
|
pulp/pulpcore | 2,489 | pulp__pulpcore-2489 | [
"2430"
] | 4730e3603a5a585b241fdc471d411d3a38574524 | diff --git a/pulpcore/plugin/stages/content_stages.py b/pulpcore/plugin/stages/content_stages.py
--- a/pulpcore/plugin/stages/content_stages.py
+++ b/pulpcore/plugin/stages/content_stages.py
@@ -155,13 +155,39 @@ def process_batch():
content_artifact.artifact = to_update_ca_artifact[key]
to_update_ca_bulk.append(content_artifact)
- # Sort the lists we're about to do bulk updates/creates on.
- # We know to_update_ca_bulk entries already are in the DB, so we can enforce
- # order just using pulp_id.
- to_update_ca_bulk.sort(key=lambda x: x.pulp_id)
- content_artifact_bulk.sort(key=lambda x: ContentArtifact.sort_key(x))
+ # to_update_ca_bulk are the CAs that we know are already persisted.
+ # We need to update their artifact_ids, and wish to do it in bulk to
+ # avoid hundreds of round-trips to the database.
+ #
+ # To avoid deadlocks in high-concurrency environments with overlapping
+ # content, we need to update the rows in some defined order. Unfortunately,
+ # postgres doesn't support order-on-update - but it *does* support ordering
+ # on select-for-update. So, we select-for-update, in pulp_id order, the
+ # rows we're about to update as one db-call, and then do the update in a
+ # second.
+ ids = [k.pulp_id for k in to_update_ca_bulk]
+ with transaction.atomic():
+ # "len()" forces the QA to be evaluated. Using exist() or count() won't
+ # work for us - Django is smart enough to either not-order, or even
+ # not-emit, a select-for-update in these cases.
+ #
+ # To maximize performance, we make sure to only ask for pulp_ids, and
+ # avoid instantiating a python-object for the affected CAs by using
+ # values_list()
+ len(
+ ContentArtifact.objects.filter(pulp_id__in=ids)
+ .only("pulp_id")
+ .order_by("pulp_id")
+ .select_for_update()
+ .values_list()
+ )
+ ContentArtifact.objects.bulk_update(to_update_ca_bulk, ["artifact"])
- ContentArtifact.objects.bulk_update(to_update_ca_bulk, ["artifact"])
+ # To avoid a similar deadlock issue when calling get_or_create, we sort the
+ # "new" CAs to make sure inserts happen in a defined order. Since we can't
+ # trust the pulp_id (by the time we go to create a CA, it may already exist,
+ # and be replaced by the 'real' one), we sort by their "natural key".
+ content_artifact_bulk.sort(key=lambda x: ContentArtifact.sort_key(x))
ContentArtifact.objects.bulk_get_or_create(content_artifact_bulk)
self._post_save(batch)
| bulk_update() in content-stages can cause (very rare) deadlock
**Version**
3.14
**Describe the bug**
In high-concurrency environments, with overlapping content, calling bulk_update() can cause a deadlock. Specifically, this call:
https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/content_stages.py#L158-L164
Ordering the list-to-be-updated does not, alas, protect us - because Postgres doesn't guarantee order when doing an update like this.
**To Reproduce**
We have only seen this "in the wild" once, syncing 8-10 repos with similar content at the same time with 10 workers available.
**Expected behavior**
Don't deadlock.
**Additional context**
This is the traceback from the initial description for
https://bugzilla.redhat.com/show_bug.cgi?id=2062526
We fixed the deadlock noted in https://bugzilla.redhat.com/show_bug.cgi?id=2062526#c2 under #2420
| 2022-04-06T16:56:06 |
||
pulp/pulpcore | 2,490 | pulp__pulpcore-2490 | [
"2430"
] | b0a8d1bd5443e1b47af9c085c05931cb9515c9c0 | diff --git a/pulpcore/plugin/stages/content_stages.py b/pulpcore/plugin/stages/content_stages.py
--- a/pulpcore/plugin/stages/content_stages.py
+++ b/pulpcore/plugin/stages/content_stages.py
@@ -155,13 +155,39 @@ def process_batch():
content_artifact.artifact = to_update_ca_artifact[key]
to_update_ca_bulk.append(content_artifact)
- # Sort the lists we're about to do bulk updates/creates on.
- # We know to_update_ca_bulk entries already are in the DB, so we can enforce
- # order just using pulp_id.
- to_update_ca_bulk.sort(key=lambda x: x.pulp_id)
- content_artifact_bulk.sort(key=lambda x: ContentArtifact.sort_key(x))
+ # to_update_ca_bulk are the CAs that we know are already persisted.
+ # We need to update their artifact_ids, and wish to do it in bulk to
+ # avoid hundreds of round-trips to the database.
+ #
+ # To avoid deadlocks in high-concurrency environments with overlapping
+ # content, we need to update the rows in some defined order. Unfortunately,
+ # postgres doesn't support order-on-update - but it *does* support ordering
+ # on select-for-update. So, we select-for-update, in pulp_id order, the
+ # rows we're about to update as one db-call, and then do the update in a
+ # second.
+ ids = [k.pulp_id for k in to_update_ca_bulk]
+ with transaction.atomic():
+ # "len()" forces the QA to be evaluated. Using exist() or count() won't
+ # work for us - Django is smart enough to either not-order, or even
+ # not-emit, a select-for-update in these cases.
+ #
+ # To maximize performance, we make sure to only ask for pulp_ids, and
+ # avoid instantiating a python-object for the affected CAs by using
+ # values_list()
+ len(
+ ContentArtifact.objects.filter(pulp_id__in=ids)
+ .only("pulp_id")
+ .order_by("pulp_id")
+ .select_for_update()
+ .values_list()
+ )
+ ContentArtifact.objects.bulk_update(to_update_ca_bulk, ["artifact"])
- ContentArtifact.objects.bulk_update(to_update_ca_bulk, ["artifact"])
+ # To avoid a similar deadlock issue when calling get_or_create, we sort the
+ # "new" CAs to make sure inserts happen in a defined order. Since we can't
+ # trust the pulp_id (by the time we go to create a CA, it may already exist,
+ # and be replaced by the 'real' one), we sort by their "natural key".
+ content_artifact_bulk.sort(key=lambda x: ContentArtifact.sort_key(x))
ContentArtifact.objects.bulk_get_or_create(content_artifact_bulk)
self._post_save(batch)
| bulk_update() in content-stages can cause (very rare) deadlock
**Version**
3.14
**Describe the bug**
In high-concurrency environments, with overlapping content, calling bulk_update() can cause a deadlock. Specifically, this call:
https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/content_stages.py#L158-L164
Ordering the list-to-be-updated does not, alas, protect us - because Postgres doesn't guarantee order when doing an update like this.
**To Reproduce**
We have only seen this "in the wild" once, syncing 8-10 repos with similar content at the same time with 10 workers available.
**Expected behavior**
Don't deadlock.
**Additional context**
This is the traceback from the initial description for
https://bugzilla.redhat.com/show_bug.cgi?id=2062526
We fixed the deadlock noted in https://bugzilla.redhat.com/show_bug.cgi?id=2062526#c2 under #2420
| 2022-04-06T16:56:52 |
||
pulp/pulpcore | 2,491 | pulp__pulpcore-2491 | [
"2430"
] | 3e9046da857827f889ba6833de79b3d2285af2a4 | diff --git a/pulpcore/plugin/stages/content_stages.py b/pulpcore/plugin/stages/content_stages.py
--- a/pulpcore/plugin/stages/content_stages.py
+++ b/pulpcore/plugin/stages/content_stages.py
@@ -155,13 +155,39 @@ def process_batch():
content_artifact.artifact = to_update_ca_artifact[key]
to_update_ca_bulk.append(content_artifact)
- # Sort the lists we're about to do bulk updates/creates on.
- # We know to_update_ca_bulk entries already are in the DB, so we can enforce
- # order just using pulp_id.
- to_update_ca_bulk.sort(key=lambda x: x.pulp_id)
- content_artifact_bulk.sort(key=lambda x: ContentArtifact.sort_key(x))
+ # to_update_ca_bulk are the CAs that we know are already persisted.
+ # We need to update their artifact_ids, and wish to do it in bulk to
+ # avoid hundreds of round-trips to the database.
+ #
+ # To avoid deadlocks in high-concurrency environments with overlapping
+ # content, we need to update the rows in some defined order. Unfortunately,
+ # postgres doesn't support order-on-update - but it *does* support ordering
+ # on select-for-update. So, we select-for-update, in pulp_id order, the
+ # rows we're about to update as one db-call, and then do the update in a
+ # second.
+ ids = [k.pulp_id for k in to_update_ca_bulk]
+ with transaction.atomic():
+ # "len()" forces the QA to be evaluated. Using exist() or count() won't
+ # work for us - Django is smart enough to either not-order, or even
+ # not-emit, a select-for-update in these cases.
+ #
+ # To maximize performance, we make sure to only ask for pulp_ids, and
+ # avoid instantiating a python-object for the affected CAs by using
+ # values_list()
+ len(
+ ContentArtifact.objects.filter(pulp_id__in=ids)
+ .only("pulp_id")
+ .order_by("pulp_id")
+ .select_for_update()
+ .values_list()
+ )
+ ContentArtifact.objects.bulk_update(to_update_ca_bulk, ["artifact"])
- ContentArtifact.objects.bulk_update(to_update_ca_bulk, ["artifact"])
+ # To avoid a similar deadlock issue when calling get_or_create, we sort the
+ # "new" CAs to make sure inserts happen in a defined order. Since we can't
+ # trust the pulp_id (by the time we go to create a CA, it may already exist,
+ # and be replaced by the 'real' one), we sort by their "natural key".
+ content_artifact_bulk.sort(key=lambda x: ContentArtifact.sort_key(x))
ContentArtifact.objects.bulk_get_or_create(content_artifact_bulk)
self._post_save(batch)
| bulk_update() in content-stages can cause (very rare) deadlock
**Version**
3.14
**Describe the bug**
In high-concurrency environments, with overlapping content, calling bulk_update() can cause a deadlock. Specifically, this call:
https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/content_stages.py#L158-L164
Ordering the list-to-be-updated does not, alas, protect us - because Postgres doesn't guarantee order when doing an update like this.
**To Reproduce**
We have only seen this "in the wild" once, syncing 8-10 repos with similar content at the same time with 10 workers available.
**Expected behavior**
Don't deadlock.
**Additional context**
This is the traceback from the initial description for
https://bugzilla.redhat.com/show_bug.cgi?id=2062526
We fixed the deadlock noted in https://bugzilla.redhat.com/show_bug.cgi?id=2062526#c2 under #2420
| 2022-04-06T16:57:12 |
||
pulp/pulpcore | 2,492 | pulp__pulpcore-2492 | [
"2430"
] | a863843d4ef7000e38ce74fe71d034d50bfea64a | diff --git a/pulpcore/plugin/stages/content_stages.py b/pulpcore/plugin/stages/content_stages.py
--- a/pulpcore/plugin/stages/content_stages.py
+++ b/pulpcore/plugin/stages/content_stages.py
@@ -152,12 +152,40 @@ async def run(self):
# Maybe remove dict elements after to reduce memory?
content_artifact.artifact = to_update_ca_artifact[key]
to_update_ca_bulk.append(content_artifact)
- # Sort the lists we're about to do bulk updates/creates on.
- # We know to_update_ca_bulk entries already are in the DB, so we can enforce
- # order just using pulp_id.
- to_update_ca_bulk.sort(key=lambda x: x.pulp_id)
+
+ # to_update_ca_bulk are the CAs that we know are already persisted.
+ # We need to update their artifact_ids, and wish to do it in bulk to
+ # avoid hundreds of round-trips to the database.
+ #
+ # To avoid deadlocks in high-concurrency environments with overlapping
+ # content, we need to update the rows in some defined order. Unfortunately,
+ # postgres doesn't support order-on-update - but it *does* support ordering
+ # on select-for-update. So, we select-for-update, in pulp_id order, the
+ # rows we're about to update as one db-call, and then do the update in a
+ # second.
+ ids = [k.pulp_id for k in to_update_ca_bulk]
+ with transaction.atomic():
+ # "len()" forces the QA to be evaluated. Using exist() or count() won't
+ # work for us - Django is smart enough to either not-order, or even
+ # not-emit, a select-for-update in these cases.
+ #
+ # To maximize performance, we make sure to only ask for pulp_ids, and
+ # avoid instantiating a python-object for the affected CAs by using
+ # values_list()
+ len(
+ ContentArtifact.objects.filter(pulp_id__in=ids)
+ .only("pulp_id")
+ .order_by("pulp_id")
+ .select_for_update()
+ .values_list()
+ )
+ ContentArtifact.objects.bulk_update(to_update_ca_bulk, ["artifact"])
+
+ # To avoid a similar deadlock issue when calling get_or_create, we sort the
+ # "new" CAs to make sure inserts happen in a defined order. Since we can't
+ # trust the pulp_id (by the time we go to create a CA, it may already exist,
+ # and be replaced by the 'real' one), we sort by their "natural key".
content_artifact_bulk.sort(key=lambda x: ContentArtifact.sort_key(x))
- ContentArtifact.objects.bulk_update(to_update_ca_bulk, ["artifact"])
ContentArtifact.objects.bulk_get_or_create(content_artifact_bulk)
await self._post_save(batch)
for declarative_content in batch:
| bulk_update() in content-stages can cause (very rare) deadlock
**Version**
3.14
**Describe the bug**
In high-concurrency environments, with overlapping content, calling bulk_update() can cause a deadlock. Specifically, this call:
https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/content_stages.py#L158-L164
Ordering the list-to-be-updated does not, alas, protect us - because Postgres doesn't guarantee order when doing an update like this.
**To Reproduce**
We have only seen this "in the wild" once, syncing 8-10 repos with similar content at the same time with 10 workers available.
**Expected behavior**
Don't deadlock.
**Additional context**
This is the traceback from the initial description for
https://bugzilla.redhat.com/show_bug.cgi?id=2062526
We fixed the deadlock noted in https://bugzilla.redhat.com/show_bug.cgi?id=2062526#c2 under #2420
| 2022-04-06T20:47:18 |
||
pulp/pulpcore | 2,496 | pulp__pulpcore-2496 | [
"2495"
] | 0238741f707c4f2c7d702e2ac221c1f73df6cd60 | diff --git a/pulpcore/app/settings.py b/pulpcore/app/settings.py
--- a/pulpcore/app/settings.py
+++ b/pulpcore/app/settings.py
@@ -238,7 +238,7 @@
},
}
-DRF_ACCESS_POLICY = {"reusable_conditions": "pulpcore.app.global_access_conditions"}
+DRF_ACCESS_POLICY = {"reusable_conditions": ["pulpcore.app.global_access_conditions"]}
CONTENT_PATH_PREFIX = "/pulp/content/"
CONTENT_APP_TTL = 30
| Implement chainloading of plugins reusable app conditions
Currently, plugins need to subclass the access policies to be able to use additional conditions in access policies. This is incompatible with the desire to configure the access policy for Pulp globally. The issue can be circumvented by injecting plugin modules in this list:
https://rsinger86.github.io/drf-access-policy/reusable_conditions.html
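With the setting turned into a list, a plugin could ship its own condition module and append it via settings merging. A hypothetical sketch — the module path, function name, and the `(request, view, action)` signature are assumptions modeled on pulpcore's own `global_access_conditions`, not documented API:
```
# Hypothetical module, e.g. pulp_ansible/app/global_access_conditions.py
def is_superuser(request, view, action):
    # Illustrative condition only; real plugin conditions would typically
    # consult pulpcore's role/permission helpers instead.
    return request.user.is_superuser

# The plugin's settings.py then appends the module using the dynaconf merge
# syntax discussed in the replies below, e.g.:
# DRF_ACCESS_POLICY__reusable_conditions = "@merge pulp_ansible.app.global_access_conditions"
```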
| I meant ap as in AccessPolicy. XD
@rochacbruno can we get some advice on the dynaconf way to accomplish this? The setting is `DRF_ACCESS_POLICY` which is defined [here](https://github.com/pulp/pulpcore/blob/main/pulpcore/app/settings.py#L241) with the value:
`DRF_ACCESS_POLICY = {"reusable_conditions": "pulpcore.app.global_access_conditions"}`
drf-access-policy now supports this type of value instead, so let's assume we'll change the settings.py to be:
`DRF_ACCESS_POLICY = {"reusable_conditions": ["pulpcore.app.global_access_conditions"]}`
Then we want apps, e.g. pulp_ansible to additively provide its settings. For example pulp_ansible has its settings [here](https://github.com/pulp/pulp_ansible/blob/main/pulp_ansible/app/settings.py).
Is this just as simple as using the merge option? I looked through here https://www.dynaconf.com/merging/ but I'm looking for the python version of a merge. Say we wanted to add "pulp_ansible.app.global_access_conditions" as the additional string, we would want the layered option to look like:
`DRF_ACCESS_POLICY = {"reusable_conditions": ["pulpcore.app.global_access_conditions", "pulp_ansible.app.global_access_conditions"]}`
@bmbouter all of that works on python files (same way on any filetype or envvar)
Option 1
```py
DRF_ACCESS_POLICY__reusable_conditions = "@merge pulp_ansible.app.global_access_conditions"
```
> If you want to avoid duplicates use `@merge_unique`
Option 2
```py
DRF_ACCESS_POLICY__reusable_conditions = ["dynaconf_merge", "pulp_ansible.app.global_access_conditions"]
```
> If you want to avoid duplication use `dynaconf_merge_unique`
Option 3
```py
DRF_ACCESS_POLICY = {
"dynaconf_merge": True,
"reusable_conditions": ["pulp_ansible.app.global_access_conditions"]
}
```
Right now dynaconf can only perform **append** to lists, there is yet no way to perform **insert** | 2022-04-08T10:15:04 |
|
pulp/pulpcore | 2,497 | pulp__pulpcore-2497 | [
"2269"
] | 4cdba449ad8cb7b3ab1a2e0f59f90be830554302 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -91,7 +91,12 @@ def _import_file(fpath, resource_class, retry=False):
def _check_versions(version_json):
- """Compare the export version_json to the installed components."""
+ """
+ Compare the export version_json to the installed components.
+
+ An upstream whose db-metadata doesn't match the downstream won't import successfully; check
+ for compatibility and raise a ValidationError if incompatible versions are found.
+ """
error_messages = []
for component in version_json:
try:
@@ -101,10 +106,13 @@ def _check_versions(version_json):
_("Export uses {} which is not installed.").format(component["component"])
)
else:
- if version != component["version"]:
+ # Check that versions are compatible. Currently, "compatible" is defined as "same X.Y".
+ # Versions are strings that generally look like "X.Y.Z" or "X.Y.Z.dev"; we check that
+ # first two places are the same.
+ if version.split(".")[:2] != component["version"].split(".")[:2]:
error_messages.append(
_(
- "Export version {export_ver} of {component} does not match "
+ "Export version {export_ver} of {component} incompatible with "
"installed version {ver}."
).format(
export_ver=component["version"],
@@ -113,8 +121,8 @@ def _check_versions(version_json):
)
)
- if error_messages:
- raise ValidationError((" ".join(error_messages)))
+ if error_messages:
+ raise ValidationError((" ".join(error_messages)))
def import_repository_version(importer_pk, destination_repo_pk, source_repo_name, tar_path):
| diff --git a/pulpcore/tests/unit/test_import_checks.py b/pulpcore/tests/unit/test_import_checks.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/tests/unit/test_import_checks.py
@@ -0,0 +1,28 @@
+from unittest.mock import patch
+
+from django.test import TestCase
+from rest_framework.serializers import ValidationError
+
+from pulpcore.app.tasks.importer import _check_versions
+
+
+class TestObject:
+ version = "1.2.3" # Every component is vers 1.2.3
+
+
+class TestCheckVersions(TestCase):
+ @patch("pulpcore.app.tasks.importer.get_distribution", return_value=TestObject())
+ def test_vers_check(self, mock_get_distribution):
+ export_json = [{"component": "xyz", "version": "1.2.3"}]
+ _check_versions(export_json)
+
+ export_json = [{"component": "xy", "version": "1.2"}]
+ _check_versions(export_json)
+
+ export_json = [{"component": "x_noty_z", "version": "1.4.3"}]
+ with self.assertRaises(ValidationError):
+ _check_versions(export_json)
+
+ export_json = [{"component": "notx_y_z", "version": "2.2.3"}]
+ with self.assertRaises(ValidationError):
+ _check_versions(export_json)
| Version Matching for Import/Export too stringent
**Is your feature request related to a problem? Please describe.**
When importing previously exported content, Pulp runs a check on the pulpcore, pulp_rpm, and other plugin versions of both the export archive and the currently installed system. For example, if my exporter is using Pulp 3.16.1 and the import server is on 3.16.2, I'd get an error like
`Export version 3.16.1 of pulpcore does not match installed version 3.16.2`
While checking compatibility is a good thing, we need to be mindful of the fact that customers in disconnected environments will often have different minor releases of Pulp. Expecting the z-stream to also match will make it unwieldy and very hard for us to control the environment.
Location of the check -> https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/importer.py#L93-L118
As described in https://hackmd.io/HLptudH9R6S4PCmm8nRmmg?view#Open-Questions
- [DONE] How will backwards incompatible changes in the exported data format be handled over time? e.g. I exported this data a loooong time ago and now the system I'm importing into expects a newer, different format?
- idea 1: Tie the data format to the exported system's pulpcore version number. Put that version number into the exported data somehow. Then have systems importing know the oldest version they support and refuse to import older exports.
- idea 2: Same idea as (1) except use its own numbering scheme, probably semver based
- **Current Status**: versions.json included in export, with core and plugin versions for all
plugins involved in the export. Import checks for **exact match** to versions.json on import,
and errors if there are diffs.
We definitely need more flexibility on this.
**Describe the solution you'd like**
Discuss various approaches to solve this in both short term and long term.
- Only require match of x.y releases and not x.y.z when checking compatibility (not sure that is sufficient).
- As described in the design docs use a unique numbering/version scheme that changes only when 2 versions are incompatible. That way if there are no breaking changes between 3.16-3.18 the version will be identical and hence compatible
**Additional context**
Further discussion is pending on this topic. Pulpcore needs to be careful not to make the check too stringent.
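The patch above takes the first option and compares only the X.Y portion of each component version. A standalone sketch of that comparison rule, using plain strings instead of the installed-distribution lookup:
```
def compatible(installed: str, exported: str) -> bool:
    # "3.16.2" and "3.16.1" agree on ["3", "16"] and are compatible;
    # "3.17.0" versus "3.16.1" is not.
    return installed.split(".")[:2] == exported.split(".")[:2]

assert compatible("3.16.2", "3.16.1")
assert not compatible("3.17.0", "3.16.1")
```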
| https://bugzilla.redhat.com/show_bug.cgi?id=2067301 | 2022-04-08T13:54:38 |
pulp/pulpcore | 2,498 | pulp__pulpcore-2498 | [
"2070"
] | 215165ebc0ae595ab54f3db5bef039d46f33e8e0 | diff --git a/pulpcore/app/settings.py b/pulpcore/app/settings.py
--- a/pulpcore/app/settings.py
+++ b/pulpcore/app/settings.py
@@ -375,7 +375,8 @@
if not (
- Path(sys.argv[0]).name == "sphinx-build"
+ Path(sys.argv[0]).name == "pytest"
+ or Path(sys.argv[0]).name == "sphinx-build"
or (len(sys.argv) >= 2 and sys.argv[1] == "collectstatic")
):
try:
| diff --git a/pulpcore/tests/conftest.py b/pulpcore/tests/functional/conftest.py
similarity index 100%
rename from pulpcore/tests/conftest.py
rename to pulpcore/tests/functional/conftest.py
diff --git a/pulpcore/tests/conftest_pulp_file.py b/pulpcore/tests/functional/conftest_pulp_file.py
similarity index 100%
rename from pulpcore/tests/conftest_pulp_file.py
rename to pulpcore/tests/functional/conftest_pulp_file.py
diff --git a/pulpcore/tests/fixtures/basic/1.iso b/pulpcore/tests/functional/fixtures/basic/1.iso
similarity index 100%
rename from pulpcore/tests/fixtures/basic/1.iso
rename to pulpcore/tests/functional/fixtures/basic/1.iso
diff --git a/pulpcore/tests/fixtures/basic/2.iso b/pulpcore/tests/functional/fixtures/basic/2.iso
similarity index 100%
rename from pulpcore/tests/fixtures/basic/2.iso
rename to pulpcore/tests/functional/fixtures/basic/2.iso
diff --git a/pulpcore/tests/fixtures/basic/3.iso b/pulpcore/tests/functional/fixtures/basic/3.iso
similarity index 100%
rename from pulpcore/tests/fixtures/basic/3.iso
rename to pulpcore/tests/functional/fixtures/basic/3.iso
diff --git a/pulpcore/tests/fixtures/basic/PULP_MANIFEST b/pulpcore/tests/functional/fixtures/basic/PULP_MANIFEST
similarity index 100%
rename from pulpcore/tests/fixtures/basic/PULP_MANIFEST
rename to pulpcore/tests/functional/fixtures/basic/PULP_MANIFEST
diff --git a/pulpcore/tests/unit/serializers/test_content.py b/pulpcore/tests/unit/serializers/test_content.py
deleted file mode 100644
--- a/pulpcore/tests/unit/serializers/test_content.py
+++ /dev/null
@@ -1,37 +0,0 @@
-from unittest import TestCase
-
-import mock
-from pulpcore.app.models import Artifact
-from pulpcore.app.serializers import ArtifactSerializer
-from pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS
-from rest_framework import serializers
-
-
-class TestArtifactSerializer(TestCase):
- def test_validate_file_checksum(self):
- mock_file = mock.MagicMock(size=42)
- mock_file.hashers.__getitem__.return_value.hexdigest.return_value = "asdf"
-
- data = {"file": mock_file}
- serializer = ArtifactSerializer(data=data)
- self.assertTrue(serializer.is_valid())
- new_data = serializer.validated_data
- self.assertEqual(new_data["file"], mock_file)
- self.assertEqual(new_data["size"], 42)
- for csum in Artifact.DIGEST_FIELDS:
- self.assertEqual(new_data[csum], "asdf")
-
- for csum in ALL_KNOWN_CONTENT_CHECKSUMS.difference(Artifact.DIGEST_FIELDS):
- self.assertFalse(csum in new_data, f"Found forbidden checksum {csum}")
-
- # This part of the test will only fire if the system-under-test has forbidden
- # use of 'md5'
- if "md5" not in Artifact.DIGEST_FIELDS:
- data = {"file": mock_file, "md5": "asdf"}
- with self.assertRaises(serializers.ValidationError) as cm: # noqa
- serializer.validate(data)
-
- def test_emtpy_data(self):
- data = {}
- serializer = ArtifactSerializer(data=data)
- self.assertFalse(serializer.is_valid())
diff --git a/pulpcore/tests/unit/serializers/test_repository.py b/pulpcore/tests/unit/serializers/test_repository.py
--- a/pulpcore/tests/unit/serializers/test_repository.py
+++ b/pulpcore/tests/unit/serializers/test_repository.py
@@ -4,9 +4,7 @@
import mock
from rest_framework import serializers
-from pulpcore.app.models import Distribution
from pulpcore.app.serializers import (
- DistributionSerializer,
PublicationSerializer,
RemoteSerializer,
)
@@ -15,38 +13,6 @@
class TestRemoteSerializer(TestCase):
minimal_data = {"name": "test", "url": "http://whatever"}
- def test_minimal_data(self):
- data = {}
- data.update(self.minimal_data)
- serializer = RemoteSerializer(data=data)
- serializer.is_valid(raise_exception=True)
-
- def test_validate_proxy(self):
- data = {"proxy_url": "http://whatever"}
- data.update(self.minimal_data)
- serializer = RemoteSerializer(data=data)
- serializer.is_valid(raise_exception=True)
-
- def test_validate_proxy_invalid(self):
- data = {"proxy_url": "http://user:pass@whatever"}
- data.update(self.minimal_data)
- serializer = RemoteSerializer(data=data)
- with self.assertRaises(serializers.ValidationError):
- serializer.is_valid(raise_exception=True)
-
- def test_validate_proxy_creds(self):
- data = {"proxy_url": "http://whatever", "proxy_username": "user", "proxy_password": "pass"}
- data.update(self.minimal_data)
- serializer = RemoteSerializer(data=data)
- serializer.is_valid(raise_exception=True)
-
- def test_validate_proxy_creds_invalid(self):
- data = {"proxy_url": "http://whatever", "proxy_username": "user"}
- data.update(self.minimal_data)
- serializer = RemoteSerializer(data=data)
- with self.assertRaises(serializers.ValidationError):
- serializer.is_valid(raise_exception=True)
-
def test_validate_proxy_creds_update(self):
Remote = SimpleNamespace(
proxy_url="http://whatever",
@@ -115,58 +81,3 @@ def test_validate_repository_version_only_unknown_field(self):
serializer = PublicationSerializer(data=data)
with self.assertRaises(serializers.ValidationError):
serializer.validate(data)
-
-
-class TestDistributionPath(TestCase):
- def test_overlap(self):
- Distribution.objects.create(base_path="foo/bar", name="foobar")
- overlap_errors = {"base_path": ["Overlaps with existing distribution 'foobar'"]}
-
- # test that the new distribution cannot be nested in an existing path
- data = {"name": "foobarbaz", "base_path": "foo/bar/baz"}
- serializer = DistributionSerializer(data=data)
- self.assertFalse(serializer.is_valid())
- self.assertDictEqual(overlap_errors, serializer.errors)
-
- # test that the new distribution cannot nest an existing path
- data = {"name": "foo", "base_path": "foo"}
- serializer = DistributionSerializer(data=data)
- self.assertFalse(serializer.is_valid())
- self.assertDictEqual(overlap_errors, serializer.errors)
-
- def test_no_overlap(self):
- Distribution.objects.create(base_path="fu/bar", name="fubar")
-
- # different path
- data = {"name": "fufu", "base_path": "fubar"}
- serializer = DistributionSerializer(data=data)
- self.assertTrue(serializer.is_valid())
- self.assertDictEqual({}, serializer.errors)
-
- # common base path but different path
- data = {"name": "fufu", "base_path": "fu/baz"}
- serializer = DistributionSerializer(data=data)
- self.assertTrue(serializer.is_valid())
- self.assertDictEqual({}, serializer.errors)
-
- def test_slashes(self):
- overlap_errors = {"base_path": ["Relative path cannot begin or end with slashes."]}
-
- data = {"name": "fefe", "base_path": "fefe/"}
- serializer = DistributionSerializer(data=data)
- self.assertFalse(serializer.is_valid())
- self.assertDictEqual(overlap_errors, serializer.errors)
-
- data = {"name": "fefe", "base_path": "/fefe/foo"}
- serializer = DistributionSerializer(data=data)
- self.assertFalse(serializer.is_valid())
- self.assertDictEqual(overlap_errors, serializer.errors)
-
- def test_uniqueness(self):
- Distribution.objects.create(base_path="fizz/buzz", name="fizzbuzz")
- data = {"name": "feefee", "base_path": "fizz/buzz"}
- overlap_errors = {"base_path": ["This field must be unique."]}
-
- serializer = DistributionSerializer(data=data)
- self.assertFalse(serializer.is_valid())
- self.assertDictEqual(overlap_errors, serializer.errors)
| As a developer, I can have pytest run the unit tests
Author: @bmbouter (bmbouter)
Redmine Issue: 9643, https://pulp.plan.io/issues/9643
---
As part of the testing effort, it would be nice to have pytest run the unit tests in addition to our functional tests.
| 2022-04-08T19:32:29 |
|
pulp/pulpcore | 2,507 | pulp__pulpcore-2507 | [
"2506"
] | aa6742c6b2e6384ab4a46c7987be1a1c1494f359 | diff --git a/pulpcore/tasking/pulpcore_worker.py b/pulpcore/tasking/pulpcore_worker.py
--- a/pulpcore/tasking/pulpcore_worker.py
+++ b/pulpcore/tasking/pulpcore_worker.py
@@ -275,21 +275,24 @@ def sleep(self):
"""Wait for signals on the wakeup channel while heart beating."""
_logger.debug(_("Worker %s entering sleep state."), self.name)
+ wakeup = False
while not self.shutdown_requested:
+ # Handle all notifications before sleeping in `select`
+ while connection.connection.notifies:
+ item = connection.connection.notifies.pop(0)
+ if item.channel == "pulp_worker_wakeup":
+ _logger.debug(_("Worker %s received wakeup call."), self.name)
+ wakeup = True
+ # ignore all other notifications
+ if wakeup:
+ break
+
r, w, x = select.select(
[self.sentinel, connection.connection], [], [], self.heartbeat_period
)
self.beat()
if connection.connection in r:
connection.connection.poll()
- if any(
- (
- item.channel == "pulp_worker_wakeup"
- for item in connection.connection.notifies
- )
- ):
- connection.connection.notifies.clear()
- break
if self.sentinel in r:
os.read(self.sentinel, 256)
@@ -307,6 +310,17 @@ def supervise_task(self, task):
task_process = Process(target=_perform_task, args=(task.pk, task_working_dir_rel_path))
task_process.start()
while True:
+ # Handle all notifications before sleeping in `select`
+ while connection.connection.notifies:
+ item = connection.connection.notifies.pop(0)
+ if item.channel == "pulp_worker_cancel" and item.payload == str(task.pk):
+ _logger.info(_("Received signal to cancel current task %s."), task.pk)
+ os.kill(task_process.pid, signal.SIGUSR1)
+ cancel_state = TASK_STATES.CANCELED
+ # ignore all other notifications
+ if cancel_state:
+ break
+
r, w, x = select.select(
[self.sentinel, connection.connection, task_process.sentinel],
[],
@@ -316,17 +330,6 @@ def supervise_task(self, task):
self.beat()
if connection.connection in r:
connection.connection.poll()
- if any(
- (
- item.channel == "pulp_worker_cancel" and item.payload == str(task.pk)
- for item in connection.connection.notifies
- )
- ):
- connection.connection.notifies.clear()
- _logger.info(_("Received signal to cancel current task %s."), task.pk)
- os.kill(task_process.pid, signal.SIGUSR1)
- cancel_state = TASK_STATES.CANCELED
- break
if task_process.sentinel in r:
if not task_process.is_alive():
break
| All workers idle while a task is pending
**Version**
At least 3.14.14 used in katello
**Describe the bug**
Workers can all enter the sleep state and miss the wakeup notification, leading to pending tasks while all workers idle. Dispatching a new task unfreezes the system again.
**To Reproduce**
https://bugzilla.redhat.com/show_bug.cgi?id=2050440
**Expected behavior**
At least one worker is expected to pick up the next pending task.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2050440
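For reference, here is a standalone sketch of the drain-before-select pattern the attached patch uses to avoid the race. The DSN is a placeholder and the channel name mirrors the worker code; this is an illustration, not the actual worker loop:
```
import select
import psycopg2
import psycopg2.extensions

conn = psycopg2.connect("dbname=pulp")  # placeholder DSN
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
conn.cursor().execute("LISTEN pulp_worker_wakeup")

def sleep_until_wakeup(timeout=5.0):
    """Block until a wakeup notification arrives, without losing queued ones."""
    while True:
        # Drain notifications that were queued while we were busy; a plain
        # select() would never wake up for them because they already sit on
        # conn.notifies rather than on the socket.
        while conn.notifies:
            note = conn.notifies.pop(0)
            if note.channel == "pulp_worker_wakeup":
                return
        readable, _, _ = select.select([conn], [], [], timeout)
        if conn in readable:
            conn.poll()  # move newly arrived notifications onto conn.notifies
```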
| 2022-04-11T13:01:38 |
||
pulp/pulpcore | 2,517 | pulp__pulpcore-2517 | [
"2087"
] | c32b0befa8164f239f7d514b2deabd348fbe6ec8 | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -108,6 +108,7 @@ def export_artifacts(export, artifacts):
with tempfile.NamedTemporaryFile(dir=temp_dir) as temp_file:
temp_file.write(artifact.file.read())
temp_file.flush()
+ artifact.file.close()
export.tarfile.add(temp_file.name, dest)
else:
export.tarfile.add(artifact.file.path, dest)
diff --git a/pulpcore/app/tasks/upload.py b/pulpcore/app/tasks/upload.py
--- a/pulpcore/app/tasks/upload.py
+++ b/pulpcore/app/tasks/upload.py
@@ -29,6 +29,7 @@ def commit(upload_id, sha256):
with NamedTemporaryFile("ab") as temp_file:
for chunk in chunks:
temp_file.write(chunk.file.read())
+ chunk.file.close()
temp_file.flush()
file = files.PulpTemporaryUploadedFile.from_file(File(open(temp_file.name, "rb")))
| Uploading large files fails on the "Too many open files" error
Author: @lubosmj (lmjachky)
Redmine Issue: 9634, https://pulp.plan.io/issues/9634
---
Steps to reproduce the behaviour:
```
(pulp) [vagrant@pulp3-source-fedora34 pulp_ostree]$ truncate -s 3G gentoo_root.img
(pulp) [vagrant@pulp3-source-fedora34 pulp_ostree]$ pulp artifact upload --file gentoo_root.img
Uploading file gentoo_root.img
................[truncated the number of dots]...................Upload complete. Creating artifact.
Started background task /pulp/api/v3/tasks/2d3cf569-2d5c-449d-9ac6-7a53ab5f3a30/
........Error: Task /pulp/api/v3/tasks/2d3cf569-2d5c-449d-9ac6-7a53ab5f3a30/ failed: '[Errno 24] Too many open files: '/var/lib/pulp/media/upload/ec2d5c92-57e9-4191-ae95-ff4b9ec94353''
(pulp) [vagrant@pulp3-source-fedora34 pulp_ostree]$ ls -la gentoo_root.img
-rw-r--r--. 1 vagrant vagrant 3221225472 Dec 13 11:32 gentoo_root.img
```
Traceback:
```
Task 2d3cf569-2d5c-449d-9ac6-7a53ab5f3a30 failed ([Errno 24] Too many open files: '/var/lib/pulp/media/upload/ec2d5c92-57e9-4191-ae95-ff4b9ec94353')
pulp [3a3a9ea662994f609eea7d43ac8f30aa]: pulpcore.tasking.pulpcore_worker:INFO: File "/home/vagrant/devel/pulpcore/pulpcore/tasking/pulpcore_worker.py", line 362, in _perform_task
result = func(*args, **kwargs)
File "/home/vagrant/devel/pulpcore/pulpcore/app/tasks/upload.py", line 31, in commit
temp_file.write(chunk.file.read())
File "/usr/local/lib/pulp/lib64/python3.9/site-packages/django/core/files/utils.py", line 42, in <lambda>
read = property(lambda self: self.file.read)
File "/usr/local/lib/pulp/lib64/python3.9/site-packages/django/db/models/fields/files.py", line 45, in _get_file
self._file = self.storage.open(self.name, 'rb')
File "/usr/local/lib/pulp/lib64/python3.9/site-packages/django/core/files/storage.py", line 38, in open
return self._open(name, mode)
File "/usr/local/lib/pulp/lib64/python3.9/site-packages/django/core/files/storage.py", line 238, in _open
return File(open(self.path(name), mode))
```
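A condensed sketch of the leak and the fix applied in the attached patch: reading `chunk.file` lazily opens the underlying file through the storage backend and keeps it open, so each chunk has to be closed explicitly once its bytes are copied out. Here `chunk.file` stands in for a Django `FieldFile`; the function is illustrative:
```
from tempfile import NamedTemporaryFile

def assemble_chunks(chunks):
    """Concatenate upload chunks into one temp file without leaking descriptors."""
    written = 0
    with NamedTemporaryFile("ab") as temp_file:
        for chunk in chunks:
            data = chunk.file.read()
            chunk.file.close()  # without this, every chunk keeps an fd open until EMFILE
            written += temp_file.write(data)
        temp_file.flush()
    return written
```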
| 2022-04-12T04:17:28 |
||
pulp/pulpcore | 2,518 | pulp__pulpcore-2518 | [
"2087"
] | 8433b5729c5d64da40d4f3733972130d9cad4516 | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -83,6 +83,7 @@ def export_artifacts(export, artifacts):
with tempfile.NamedTemporaryFile(dir=temp_dir) as temp_file:
temp_file.write(artifact.file.read())
temp_file.flush()
+ artifact.file.close()
export.tarfile.add(temp_file.name, dest)
else:
export.tarfile.add(artifact.file.path, dest)
diff --git a/pulpcore/app/tasks/upload.py b/pulpcore/app/tasks/upload.py
--- a/pulpcore/app/tasks/upload.py
+++ b/pulpcore/app/tasks/upload.py
@@ -29,6 +29,7 @@ def commit(upload_id, sha256):
with NamedTemporaryFile("ab") as temp_file:
for chunk in chunks:
temp_file.write(chunk.file.read())
+ chunk.file.close()
temp_file.flush()
file = files.PulpTemporaryUploadedFile.from_file(File(open(temp_file.name, "rb")))
| Uploading large files fails on the "Too many open files" error
Author: @lubosmj (lmjachky)
Redmine Issue: 9634, https://pulp.plan.io/issues/9634
---
Steps to reproduce the behaviour:
```
(pulp) [vagrant@pulp3-source-fedora34 pulp_ostree]$ truncate -s 3G gentoo_root.img
(pulp) [vagrant@pulp3-source-fedora34 pulp_ostree]$ pulp artifact upload --file gentoo_root.img
Uploading file gentoo_root.img
................[truncated the number of dots]...................Upload complete. Creating artifact.
Started background task /pulp/api/v3/tasks/2d3cf569-2d5c-449d-9ac6-7a53ab5f3a30/
........Error: Task /pulp/api/v3/tasks/2d3cf569-2d5c-449d-9ac6-7a53ab5f3a30/ failed: '[Errno 24] Too many open files: '/var/lib/pulp/media/upload/ec2d5c92-57e9-4191-ae95-ff4b9ec94353''
(pulp) [vagrant@pulp3-source-fedora34 pulp_ostree]$ ls -la gentoo_root.img
-rw-r--r--. 1 vagrant vagrant 3221225472 Dec 13 11:32 gentoo_root.img
```
Traceback:
```
Task 2d3cf569-2d5c-449d-9ac6-7a53ab5f3a30 failed ([Errno 24] Too many open files: '/var/lib/pulp/media/upload/ec2d5c92-57e9-4191-ae95-ff4b9ec94353')
pulp [3a3a9ea662994f609eea7d43ac8f30aa]: pulpcore.tasking.pulpcore_worker:INFO: File "/home/vagrant/devel/pulpcore/pulpcore/tasking/pulpcore_worker.py", line 362, in _perform_task
result = func(*args, **kwargs)
File "/home/vagrant/devel/pulpcore/pulpcore/app/tasks/upload.py", line 31, in commit
temp_file.write(chunk.file.read())
File "/usr/local/lib/pulp/lib64/python3.9/site-packages/django/core/files/utils.py", line 42, in <lambda>
read = property(lambda self: self.file.read)
File "/usr/local/lib/pulp/lib64/python3.9/site-packages/django/db/models/fields/files.py", line 45, in _get_file
self._file = self.storage.open(self.name, 'rb')
File "/usr/local/lib/pulp/lib64/python3.9/site-packages/django/core/files/storage.py", line 38, in open
return self._open(name, mode)
File "/usr/local/lib/pulp/lib64/python3.9/site-packages/django/core/files/storage.py", line 238, in _open
return File(open(self.path(name), mode))
```
| 2022-04-12T04:18:29 |
||
pulp/pulpcore | 2,519 | pulp__pulpcore-2519 | [
"2506"
] | a5f06a7444b92827ddcc2be97b9ef6f863f61158 | diff --git a/pulpcore/tasking/pulpcore_worker.py b/pulpcore/tasking/pulpcore_worker.py
--- a/pulpcore/tasking/pulpcore_worker.py
+++ b/pulpcore/tasking/pulpcore_worker.py
@@ -239,21 +239,24 @@ def sleep(self):
"""Wait for signals on the wakeup channel while heart beating."""
_logger.debug(_("Worker %s entering sleep state."), self.name)
+ wakeup = False
while not self.shutdown_requested:
+ # Handle all notifications before sleeping in `select`
+ while connection.connection.notifies:
+ item = connection.connection.notifies.pop(0)
+ if item.channel == "pulp_worker_wakeup":
+ _logger.debug(_("Worker %s received wakeup call."), self.name)
+ wakeup = True
+ # ignore all other notifications
+ if wakeup:
+ break
+
r, w, x = select.select(
[self.sentinel, connection.connection], [], [], self.heartbeat_period
)
self.beat()
if connection.connection in r:
connection.connection.poll()
- if any(
- (
- item.channel == "pulp_worker_wakeup"
- for item in connection.connection.notifies
- )
- ):
- connection.connection.notifies.clear()
- break
if self.sentinel in r:
os.read(self.sentinel, 256)
@@ -271,6 +274,17 @@ def supervise_task(self, task):
task_process = Process(target=_perform_task, args=(task.pk, task_working_dir_rel_path))
task_process.start()
while True:
+ # Handle all notifications before sleeping in `select`
+ while connection.connection.notifies:
+ item = connection.connection.notifies.pop(0)
+ if item.channel == "pulp_worker_cancel" and item.payload == str(task.pk):
+ _logger.info(_("Received signal to cancel current task %s."), task.pk)
+ os.kill(task_process.pid, signal.SIGUSR1)
+ cancel_state = TASK_STATES.CANCELED
+ # ignore all other notifications
+ if cancel_state:
+ break
+
r, w, x = select.select(
[self.sentinel, connection.connection, task_process.sentinel],
[],
@@ -280,17 +294,6 @@ def supervise_task(self, task):
self.beat()
if connection.connection in r:
connection.connection.poll()
- if any(
- (
- item.channel == "pulp_worker_cancel" and item.payload == str(task.pk)
- for item in connection.connection.notifies
- )
- ):
- connection.connection.notifies.clear()
- _logger.info(_("Received signal to cancel current task %s."), task.pk)
- os.kill(task_process.pid, signal.SIGUSR1)
- cancel_state = TASK_STATES.CANCELED
- break
if task_process.sentinel in r:
if not task_process.is_alive():
break
| All workers idle while a task is pending
**Version**
At least 3.14.14 used in katello
**Describe the bug**
Workers can all enter the sleep state and miss the wakeup notification, leading to pending tasks while all workers idle. Dispatching a new task unfreezes the system again.
**To Reproduce**
https://bugzilla.redhat.com/show_bug.cgi?id=2050440
**Expected behavior**
At least one worker is expected to pick up the next pending task.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2050440
| 2022-04-12T13:39:55 |
||
pulp/pulpcore | 2,520 | pulp__pulpcore-2520 | [
"2506"
] | 0bc3cc97bda16f57e0ab9db55d23ad827df92f4a | diff --git a/pulpcore/tasking/pulpcore_worker.py b/pulpcore/tasking/pulpcore_worker.py
--- a/pulpcore/tasking/pulpcore_worker.py
+++ b/pulpcore/tasking/pulpcore_worker.py
@@ -262,21 +262,24 @@ def sleep(self):
"""Wait for signals on the wakeup channel while heart beating."""
_logger.debug(_("Worker %s entering sleep state."), self.name)
+ wakeup = False
while not self.shutdown_requested:
+ # Handle all notifications before sleeping in `select`
+ while connection.connection.notifies:
+ item = connection.connection.notifies.pop(0)
+ if item.channel == "pulp_worker_wakeup":
+ _logger.debug(_("Worker %s received wakeup call."), self.name)
+ wakeup = True
+ # ignore all other notifications
+ if wakeup:
+ break
+
r, w, x = select.select(
[self.sentinel, connection.connection], [], [], self.heartbeat_period
)
self.beat()
if connection.connection in r:
connection.connection.poll()
- if any(
- (
- item.channel == "pulp_worker_wakeup"
- for item in connection.connection.notifies
- )
- ):
- connection.connection.notifies.clear()
- break
if self.sentinel in r:
os.read(self.sentinel, 256)
@@ -294,6 +297,17 @@ def supervise_task(self, task):
task_process = Process(target=_perform_task, args=(task.pk, task_working_dir_rel_path))
task_process.start()
while True:
+ # Handle all notifications before sleeping in `select`
+ while connection.connection.notifies:
+ item = connection.connection.notifies.pop(0)
+ if item.channel == "pulp_worker_cancel" and item.payload == str(task.pk):
+ _logger.info(_("Received signal to cancel current task %s."), task.pk)
+ os.kill(task_process.pid, signal.SIGUSR1)
+ cancel_state = TASK_STATES.CANCELED
+ # ignore all other notifications
+ if cancel_state:
+ break
+
r, w, x = select.select(
[self.sentinel, connection.connection, task_process.sentinel],
[],
@@ -303,17 +317,6 @@ def supervise_task(self, task):
self.beat()
if connection.connection in r:
connection.connection.poll()
- if any(
- (
- item.channel == "pulp_worker_cancel" and item.payload == str(task.pk)
- for item in connection.connection.notifies
- )
- ):
- connection.connection.notifies.clear()
- _logger.info(_("Received signal to cancel current task %s."), task.pk)
- os.kill(task_process.pid, signal.SIGUSR1)
- cancel_state = TASK_STATES.CANCELED
- break
if task_process.sentinel in r:
if not task_process.is_alive():
break
| All workers idle while a task is pending
**Version**
At least 3.14.14 used in katello
**Describe the bug**
Workers can all enter the sleep state and miss the wakeup notification, leading to pending tasks while all workers idle. Dispatching a new task unfreezes the system again.
**To Reproduce**
https://bugzilla.redhat.com/show_bug.cgi?id=2050440
**Expected behavior**
At least one worker is expected to pick up the next pending task.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2050440
| 2022-04-12T13:40:37 |
||
pulp/pulpcore | 2,521 | pulp__pulpcore-2521 | [
"2506"
] | 7e3274f0c8b61e487569e03c2331bb8d5ae5972c | diff --git a/pulpcore/tasking/pulpcore_worker.py b/pulpcore/tasking/pulpcore_worker.py
--- a/pulpcore/tasking/pulpcore_worker.py
+++ b/pulpcore/tasking/pulpcore_worker.py
@@ -247,26 +247,26 @@ def sleep(self):
"""Wait for signals on the wakeup channel while heart beating."""
_logger.debug(_("Worker %s entering sleep state."), self.name)
- # Subscribe to "pulp_worker_wakeup"
- self.cursor.execute("LISTEN pulp_worker_wakeup")
+ wakeup = False
while not self.shutdown_requested:
+ # Handle all notifications before sleeping in `select`
+ while connection.connection.notifies:
+ item = connection.connection.notifies.pop(0)
+ if item.channel == "pulp_worker_wakeup":
+ _logger.debug(_("Worker %s received wakeup call."), self.name)
+ wakeup = True
+ # ignore all other notifications
+ if wakeup:
+ break
+
r, w, x = select.select(
[self.sentinel, connection.connection], [], [], self.heartbeat_period
)
self.beat()
if connection.connection in r:
connection.connection.poll()
- if any(
- (
- item.channel == "pulp_worker_wakeup"
- for item in connection.connection.notifies
- )
- ):
- connection.connection.notifies.clear()
- break
if self.sentinel in r:
os.read(self.sentinel, 256)
- self.cursor.execute("UNLISTEN pulp_worker_wakeup")
def supervise_task(self, task):
"""Call and supervise the task process while heart beating.
@@ -274,7 +274,6 @@ def supervise_task(self, task):
This function must only be called while holding the lock for that task."""
self.task_grace_timeout = TASK_GRACE_INTERVAL
- self.cursor.execute("LISTEN pulp_worker_cancel")
task.worker = self.worker
task.save(update_fields=["worker"])
cancel_state = None
@@ -283,6 +282,17 @@ def supervise_task(self, task):
task_process = Process(target=_perform_task, args=(task.pk, task_working_dir_rel_path))
task_process.start()
while True:
+ # Handle all notifications before sleeping in `select`
+ while connection.connection.notifies:
+ item = connection.connection.notifies.pop(0)
+ if item.channel == "pulp_worker_cancel" and item.payload == str(task.pk):
+ _logger.info(_("Received signal to cancel current task %s."), task.pk)
+ os.kill(task_process.pid, signal.SIGUSR1)
+ cancel_state = TASK_STATES.CANCELED
+ # ignore all other notifications
+ if cancel_state:
+ break
+
r, w, x = select.select(
[self.sentinel, connection.connection, task_process.sentinel],
[],
@@ -292,17 +302,6 @@ def supervise_task(self, task):
self.beat()
if connection.connection in r:
connection.connection.poll()
- if any(
- (
- item.channel == "pulp_worker_cancel" and item.payload == str(task.pk)
- for item in connection.connection.notifies
- )
- ):
- connection.connection.notifies.clear()
- _logger.info(_("Received signal to cancel current task %s."), task.pk)
- os.kill(task_process.pid, signal.SIGUSR1)
- cancel_state = TASK_STATES.CANCELED
- break
if task_process.sentinel in r:
if not task_process.is_alive():
break
@@ -324,17 +323,21 @@ def supervise_task(self, task):
self.cancel_abandoned_task(task, cancel_state, cancel_reason)
if task.reserved_resources_record:
self.notify_workers()
- self.cursor.execute("UNLISTEN pulp_worker_cancel")
def run_forever(self):
with WorkerDirectory(self.name):
signal.signal(signal.SIGINT, self._signal_handler)
signal.signal(signal.SIGTERM, self._signal_handler)
+ # Subscribe to pgsql channels
+ self.cursor.execute("LISTEN pulp_worker_wakeup")
+ self.cursor.execute("LISTEN pulp_worker_cancel")
while not self.shutdown_requested:
for task in self.iter_tasks():
self.supervise_task(task)
if not self.shutdown_requested:
self.sleep()
+ self.cursor.execute("UNLISTEN pulp_worker_cancel")
+ self.cursor.execute("UNLISTEN pulp_worker_wakeup")
self.shutdown()
| All workers idle while a task is pending
**Version**
At least 3.14.14 used in katello
**Describe the bug**
Workers can all enter the sleep state and miss the wakeup notification, leading to pending tasks while all workers idle. Dispatching a new task unfreezes the system again.
**To Reproduce**
https://bugzilla.redhat.com/show_bug.cgi?id=2050440
**Expected behavior**
At least one worker is expected to pick up the next pending task.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2050440
| 2022-04-12T14:43:02 |
||
pulp/pulpcore | 2,522 | pulp__pulpcore-2522 | [
"2506"
] | 677273d4da94b42fea9eedebdc9297338d51221b | diff --git a/pulpcore/tasking/pulpcore_worker.py b/pulpcore/tasking/pulpcore_worker.py
--- a/pulpcore/tasking/pulpcore_worker.py
+++ b/pulpcore/tasking/pulpcore_worker.py
@@ -102,26 +102,39 @@ def beat(self):
def notify_workers(self):
self.cursor.execute("NOTIFY pulp_worker_wakeup")
- def cancel_abandoned_task(self, task):
+ def cancel_abandoned_task(self, task, final_state, reason=None):
"""Cancel and clean up an abandoned task.
- This function must only be called while holding the lock for that task.
+ This function must only be called while holding the lock for that task. It is a no-op if
+ the task is neither in "running" nor "canceling" state.
Return ``True`` if the task was actually canceled, ``False`` otherwise.
"""
# A task is considered abandoned when in running state, but no worker holds its lock
- _logger.info(_("Cleaning up and canceling Task %s"), task.pk)
Task.objects.filter(pk=task.pk, state=TASK_STATES.RUNNING).update(
state=TASK_STATES.CANCELING
)
task.refresh_from_db()
if task.state == TASK_STATES.CANCELING:
+ if reason:
+ _logger.info(
+ _("Cleaning up task %s and marking as %s. Reason: %s"),
+ task.pk,
+ final_state,
+ reason,
+ )
+ else:
+ _logger.info(_("Cleaning up task %s and marking as %s."), task.pk, final_state)
_delete_incomplete_resources(task)
if task.reserved_resources_record:
self.notify_workers()
- Task.objects.filter(pk=task.pk, state=TASK_STATES.CANCELING).update(
- state=TASK_STATES.CANCELED
- )
+ task_data = {
+ "state": final_state,
+ "finished_at": timezone.now(),
+ }
+ if reason:
+ task_data["error"] = {"reason": reason}
+ Task.objects.filter(pk=task.pk, state=TASK_STATES.CANCELING).update(**task_data)
return True
return False
@@ -160,7 +173,9 @@ def iter_tasks(self):
continue
if task.state in [TASK_STATES.RUNNING, TASK_STATES.CANCELING]:
# A running task without a lock must be abandoned
- if self.cancel_abandoned_task(task):
+ if self.cancel_abandoned_task(
+ task, TASK_STATES.FAILED, "Worker has gone missing."
+ ):
# Continue looking for the next task
# without considering this tasks resources
# as we just released them
@@ -205,26 +220,26 @@ def sleep(self):
"""Wait for signals on the wakeup channel while heart beating."""
_logger.debug(_("Worker %s entering sleep state."), self.name)
- # Subscribe to "pulp_worker_wakeup"
- self.cursor.execute("LISTEN pulp_worker_wakeup")
+ wakeup = False
while not self.shutdown_requested:
+ # Handle all notifications before sleeping in `select`
+ while connection.connection.notifies:
+ item = connection.connection.notifies.pop(0)
+ if item.channel == "pulp_worker_wakeup":
+ _logger.debug(_("Worker %s received wakeup call."), self.name)
+ wakeup = True
+ # ignore all other notifications
+ if wakeup:
+ break
+
r, w, x = select.select(
[self.sentinel, connection.connection], [], [], self.heartbeat_period
)
self.beat()
if connection.connection in r:
connection.connection.poll()
- if any(
- (
- item.channel == "pulp_worker_wakeup"
- for item in connection.connection.notifies
- )
- ):
- connection.connection.notifies.clear()
- break
if self.sentinel in r:
os.read(self.sentinel, 256)
- self.cursor.execute("UNLISTEN pulp_worker_wakeup")
def supervise_task(self, task):
"""Call and supervise the task process while heart beating.
@@ -232,13 +247,25 @@ def supervise_task(self, task):
This function must only be called while holding the lock for that task."""
self.task_grace_timeout = TASK_GRACE_INTERVAL
- self.cursor.execute("LISTEN pulp_worker_cancel")
task.worker = self.worker
task.save(update_fields=["worker"])
+ cancel_state = None
+ cancel_reason = None
with TemporaryDirectory(dir=".") as task_working_dir_rel_path:
task_process = Process(target=_perform_task, args=(task.pk, task_working_dir_rel_path))
task_process.start()
while True:
+ # Handle all notifications before sleeping in `select`
+ while connection.connection.notifies:
+ item = connection.connection.notifies.pop(0)
+ if item.channel == "pulp_worker_cancel" and item.payload == str(task.pk):
+ _logger.info(_("Received signal to cancel current task %s."), task.pk)
+ os.kill(task_process.pid, signal.SIGUSR1)
+ cancel_state = TASK_STATES.CANCELED
+ # ignore all other notifications
+ if cancel_state:
+ break
+
r, w, x = select.select(
[self.sentinel, connection.connection, task_process.sentinel],
[],
@@ -248,16 +275,6 @@ def supervise_task(self, task):
self.beat()
if connection.connection in r:
connection.connection.poll()
- if any(
- (
- item.channel == "pulp_worker_cancel" and item.payload == str(task.pk)
- for item in connection.connection.notifies
- )
- ):
- connection.connection.notifies.clear()
- _logger.info(_("Received signal to cancel current task %s."), task.pk)
- os.kill(task_process.pid, signal.SIGUSR1)
- break
if task_process.sentinel in r:
if not task_process.is_alive():
break
@@ -271,23 +288,29 @@ def supervise_task(self, task):
else:
_logger.info(_("Aborting current task %s due to worker shutdown."), task.pk)
os.kill(task_process.pid, signal.SIGUSR1)
- task_process.join()
- self.cancel_abandoned_task(task)
+ cancel_state = TASK_STATES.FAILED
+ cancel_reason = "Aborted during worker shutdown."
break
task_process.join()
+ if cancel_state:
+ self.cancel_abandoned_task(task, cancel_state, cancel_reason)
if task.reserved_resources_record:
self.notify_workers()
- self.cursor.execute("UNLISTEN pulp_worker_cancel")
def run_forever(self):
with WorkerDirectory(self.name):
signal.signal(signal.SIGINT, self._signal_handler)
signal.signal(signal.SIGTERM, self._signal_handler)
+ # Subscribe to pgsql channels
+ self.cursor.execute("LISTEN pulp_worker_wakeup")
+ self.cursor.execute("LISTEN pulp_worker_cancel")
while not self.shutdown_requested:
for task in self.iter_tasks():
self.supervise_task(task)
if not self.shutdown_requested:
self.sleep()
+ self.cursor.execute("UNLISTEN pulp_worker_cancel")
+ self.cursor.execute("UNLISTEN pulp_worker_wakeup")
self.shutdown()
| All workers idle while a task is pending
**Version**
At least 3.14.14 used in katello
**Describe the bug**
Workers can all enter the sleep state and miss the wakeup notification, leading to pending tasks while all workers idle. Dispatching a new task unfreezes the system again.
**To Reproduce**
https://bugzilla.redhat.com/show_bug.cgi?id=2050440
**Expected behavior**
At least one worker is expected to pick up the next pending task.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2050440
| 2022-04-12T14:49:14 |
||
pulp/pulpcore | 2,523 | pulp__pulpcore-2523 | [
"2506"
] | 4d0e5e4974c0d23f6cc2634b1f2ad7f14d9c1e0b | diff --git a/pulpcore/tasking/pulpcore_worker.py b/pulpcore/tasking/pulpcore_worker.py
--- a/pulpcore/tasking/pulpcore_worker.py
+++ b/pulpcore/tasking/pulpcore_worker.py
@@ -185,21 +185,24 @@ def sleep(self):
"""Wait for signals on the wakeup channel while heart beating."""
_logger.debug(_("Worker %s entering sleep state."), self.name)
+ wakeup = False
while not self.shutdown_requested:
+ # Handle all notifications before sleeping in `select`
+ while connection.connection.notifies:
+ item = connection.connection.notifies.pop(0)
+ if item.channel == "pulp_worker_wakeup":
+ _logger.debug(_("Worker %s received wakeup call."), self.name)
+ wakeup = True
+ # ignore all other notifications
+ if wakeup:
+ break
+
r, w, x = select.select(
[self.sentinel, connection.connection], [], [], self.heartbeat_period
)
self.beat()
if connection.connection in r:
connection.connection.poll()
- if any(
- (
- item.channel == "pulp_worker_wakeup"
- for item in connection.connection.notifies
- )
- ):
- connection.connection.notifies.clear()
- break
if self.sentinel in r:
os.read(self.sentinel, 256)
@@ -217,6 +220,17 @@ def supervise_task(self, task):
task_process = Process(target=_perform_task, args=(task.pk, task_working_dir_rel_path))
task_process.start()
while True:
+ # Handle all notifications before sleeping in `select`
+ while connection.connection.notifies:
+ item = connection.connection.notifies.pop(0)
+ if item.channel == "pulp_worker_cancel" and item.payload == str(task.pk):
+ _logger.info(_("Received signal to cancel current task %s."), task.pk)
+ os.kill(task_process.pid, signal.SIGUSR1)
+ cancel_state = TASK_STATES.CANCELED
+ # ignore all other notifications
+ if cancel_state:
+ break
+
r, w, x = select.select(
[self.sentinel, connection.connection, task_process.sentinel],
[],
@@ -226,17 +240,6 @@ def supervise_task(self, task):
self.beat()
if connection.connection in r:
connection.connection.poll()
- if any(
- (
- item.channel == "pulp_worker_cancel" and item.payload == str(task.pk)
- for item in connection.connection.notifies
- )
- ):
- connection.connection.notifies.clear()
- _logger.info(_("Received signal to cancel current task."), task.pk)
- os.kill(task_process.pid, signal.SIGUSR1)
- cancel_state = TASK_STATES.CANCELED
- break
if task_process.sentinel in r:
if not task_process.is_alive():
break
| All workers idle while a task is pending
**Version**
At least 3.14.14 used in katello
**Describe the bug**
Workers can all enter the sleep state and miss the wakeup notification, leading to pending tasks while all workers idle. Dispatching a new task unfreezes the system again.
**To Reproduce**
https://bugzilla.redhat.com/show_bug.cgi?id=2050440
**Expected behavior**
At least one worker is expected to pick up the next pending task.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2050440
| 2022-04-12T14:54:31 |
||
pulp/pulpcore | 2,524 | pulp__pulpcore-2524 | [
"2269"
] | e02fdb5a738da21b3987809b41ea3d6e257116ea | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -91,7 +91,12 @@ def _import_file(fpath, resource_class, retry=False):
def _check_versions(version_json):
- """Compare the export version_json to the installed components."""
+ """
+ Compare the export version_json to the installed components.
+
+ An upstream whose db-metadata doesn't match the downstream won't import successfully; check
+ for compatibility and raise a ValidationError if incompatible versions are found.
+ """
error_messages = []
for component in version_json:
try:
@@ -101,10 +106,13 @@ def _check_versions(version_json):
_("Export uses {} which is not installed.").format(component["component"])
)
else:
- if version != component["version"]:
+ # Check that versions are compatible. Currently, "compatible" is defined as "same X.Y".
+ # Versions are strings that generally look like "X.Y.Z" or "X.Y.Z.dev"; we check that
+ # first two places are the same.
+ if version.split(".")[:2] != component["version"].split(".")[:2]:
error_messages.append(
_(
- "Export version {export_ver} of {component} does not match "
+ "Export version {export_ver} of {component} incompatible with "
"installed version {ver}."
).format(
export_ver=component["version"],
@@ -113,8 +121,8 @@ def _check_versions(version_json):
)
)
- if error_messages:
- raise ValidationError((" ".join(error_messages)))
+ if error_messages:
+ raise ValidationError((" ".join(error_messages)))
def import_repository_version(importer_pk, destination_repo_pk, source_repo_name, tar_path):
| diff --git a/pulpcore/tests/unit/test_import_checks.py b/pulpcore/tests/unit/test_import_checks.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/tests/unit/test_import_checks.py
@@ -0,0 +1,28 @@
+from unittest.mock import patch
+
+from django.test import TestCase
+from rest_framework.serializers import ValidationError
+
+from pulpcore.app.tasks.importer import _check_versions
+
+
+class TestObject:
+ version = "1.2.3" # Every component is vers 1.2.3
+
+
+class TestCheckVersions(TestCase):
+ @patch("pulpcore.app.tasks.importer.get_distribution", return_value=TestObject())
+ def test_vers_check(self, mock_get_distribution):
+ export_json = [{"component": "xyz", "version": "1.2.3"}]
+ _check_versions(export_json)
+
+ export_json = [{"component": "xy", "version": "1.2"}]
+ _check_versions(export_json)
+
+ export_json = [{"component": "x_noty_z", "version": "1.4.3"}]
+ with self.assertRaises(ValidationError):
+ _check_versions(export_json)
+
+ export_json = [{"component": "notx_y_z", "version": "2.2.3"}]
+ with self.assertRaises(ValidationError):
+ _check_versions(export_json)
| Version Matching for Import/Export too stringent
**Is your feature request related to a problem? Please describe.**
When importing previously exported content, pulp runs a check on the pulpcore, pulp_rpm, and other plugin versions of both the export archive and the currently installed system. For example, if my exporter is using pulp 3.16.1 and the import server 3.16.2, I'd get an error like
`Export version 3.16.1 of pulpcore does not match installed version 3.16.2`
While checking compatibility is a good thing, we need to be mindful of the fact that customers in disconnected environments will often have different minor releases of pulp. Expecting the z-stream to also match makes it unwieldy and very hard for us to control the environment.
Location of the check -> https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/importer.py#L93-L118
As described in https://hackmd.io/HLptudH9R6S4PCmm8nRmmg?view#Open-Questions
- [DONE] How will backwards incompatible changes in the exported data format be handled over time? e.g. I exported this data a loooong time ago and now the system I'm importing into expects a newer, different format?
- idea 1: Tie the data format to the exported system's pulpcore version number. Put that version number into the exported data somehow. Then have systems importing know the oldest version they support and refuse to import older exports.
- idea 2: Same idea as (1) except use its own numbering scheme, probably semver based
- **Current Status**: versions.json included in export, with core and plugin versions for all
plugins involved in the export. Import checks for **exact match** to versions.json on import,
and errors if there are diffs.
We definitely need more flexibility on this.
**Describe the solution you'd like**
Discuss various approaches to solve this in both short term and long term.
- Only require match of x.y releases and not x.y.z when checking compatibility (not sure that is sufficient).
- As described in the design docs use a unique numbering/version scheme that changes only when 2 versions are incompatible. That way if there are no breaking changes between 3.16-3.18 the version will be identical and hence compatible
**Additional context**
Further discussion is pending on this topic. Pulpcore needs to be careful not to make the check too stringent.
| https://bugzilla.redhat.com/show_bug.cgi?id=2067301 | 2022-04-12T17:23:30 |
pulp/pulpcore | 2,525 | pulp__pulpcore-2525 | [
"2269"
] | 7e3274f0c8b61e487569e03c2331bb8d5ae5972c | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -91,7 +91,12 @@ def _import_file(fpath, resource_class, retry=False):
def _check_versions(version_json):
- """Compare the export version_json to the installed components."""
+ """
+ Compare the export version_json to the installed components.
+
+ An upstream whose db-metadata doesn't match the downstream won't import successfully; check
+ for compatibility and raise a ValidationError if incompatible versions are found.
+ """
error_messages = []
for component in version_json:
try:
@@ -101,10 +106,13 @@ def _check_versions(version_json):
_("Export uses {} which is not installed.").format(component["component"])
)
else:
- if version != component["version"]:
+ # Check that versions are compatible. Currently, "compatible" is defined as "same X.Y".
+ # Versions are strings that generally look like "X.Y.Z" or "X.Y.Z.dev"; we check that
+ # first two places are the same.
+ if version.split(".")[:2] != component["version"].split(".")[:2]:
error_messages.append(
_(
- "Export version {export_ver} of {component} does not match "
+ "Export version {export_ver} of {component} incompatible with "
"installed version {ver}."
).format(
export_ver=component["version"],
@@ -113,8 +121,8 @@ def _check_versions(version_json):
)
)
- if error_messages:
- raise ValidationError((" ".join(error_messages)))
+ if error_messages:
+ raise ValidationError((" ".join(error_messages)))
def import_repository_version(importer_pk, destination_repo_pk, source_repo_name, tar_path):
| diff --git a/pulpcore/tests/unit/test_import_checks.py b/pulpcore/tests/unit/test_import_checks.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/tests/unit/test_import_checks.py
@@ -0,0 +1,28 @@
+from unittest.mock import patch
+
+from django.test import TestCase
+from rest_framework.serializers import ValidationError
+
+from pulpcore.app.tasks.importer import _check_versions
+
+
+class TestObject:
+ version = "1.2.3" # Every component is vers 1.2.3
+
+
+class TestCheckVersions(TestCase):
+ @patch("pulpcore.app.tasks.importer.get_distribution", return_value=TestObject())
+ def test_vers_check(self, mock_get_distribution):
+ export_json = [{"component": "xyz", "version": "1.2.3"}]
+ _check_versions(export_json)
+
+ export_json = [{"component": "xy", "version": "1.2"}]
+ _check_versions(export_json)
+
+ export_json = [{"component": "x_noty_z", "version": "1.4.3"}]
+ with self.assertRaises(ValidationError):
+ _check_versions(export_json)
+
+ export_json = [{"component": "notx_y_z", "version": "2.2.3"}]
+ with self.assertRaises(ValidationError):
+ _check_versions(export_json)
| Version Matching for Import/Export too stringent
**Is your feature request related to a problem? Please describe.**
When importing previously exported content, pulp runs a check on the pulpcore, pulp_rpm, and other plugin versions of both the export archive and the currently installed system. For example, if my exporter is using pulp 3.16.1 and the import server 3.16.2, I'd get an error like
`Export version 3.16.1 of pulpcore does not match installed version 3.16.2`
While checking compatibility is a good thing, we need to be mindful of the fact that customers in disconnected environments will often have different minor releases of pulp. Expecting the z-stream to also match makes it unwieldy and very hard for us to control the environment.
Location of the check -> https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/importer.py#L93-L118
As described in https://hackmd.io/HLptudH9R6S4PCmm8nRmmg?view#Open-Questions
- [DONE] How will backwards incompatible changes in the exported data format be handled over time? e.g. I exported this data a loooong time ago and now the system I'm importing into expects a newer, different format?
- idea 1: Tie the data format to the exported system's pulpcore version number. Put that version number into the exported data somehow. Then have systems importing know the oldest version they support and refuse to import older exports.
- idea 2: Same idea as (1) except use its own numbering scheme, probably semver based
- **Current Status**: versions.json included in export, with core and plugin versions for all
plugins involved in the export. Import checks for **exact match** to versions.json on import,
and errors if there are diffs.
We definitely need more flexibility on this.
**Describe the solution you'd like**
Discuss various approaches to solve this in both short term and long term.
- Only require match of x.y releases and not x.y.z when checking compatibility (not sure that is sufficient).
- As described in the design docs use a unique numbering/version scheme that changes only when 2 versions are incompatible. That way if there are no breaking changes between 3.16-3.18 the version will be identical and hence compatible
**Additional context**
Further discussion is pending on this topic. Pulpcore needs to be careful not to make the check too stringent.
| https://bugzilla.redhat.com/show_bug.cgi?id=2067301 | 2022-04-12T17:24:07 |
pulp/pulpcore | 2,527 | pulp__pulpcore-2527 | [
"2269"
] | 0cd6acf6dada8d364156293b118d1396f45e947c | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -91,7 +91,12 @@ def _import_file(fpath, resource_class, retry=False):
def _check_versions(version_json):
- """Compare the export version_json to the installed components."""
+ """
+ Compare the export version_json to the installed components.
+
+ An upstream whose db-metadata doesn't match the downstream won't import successfully; check
+ for compatibility and raise a ValidationError if incompatible versions are found.
+ """
error_messages = []
for component in version_json:
try:
@@ -101,10 +106,13 @@ def _check_versions(version_json):
_("Export uses {} which is not installed.").format(component["component"])
)
else:
- if version != component["version"]:
+ # Check that versions are compatible. Currently, "compatible" is defined as "same X.Y".
+ # Versions are strings that generally look like "X.Y.Z" or "X.Y.Z.dev"; we check that
+ # first two places are the same.
+ if version.split(".")[:2] != component["version"].split(".")[:2]:
error_messages.append(
_(
- "Export version {export_ver} of {component} does not match "
+ "Export version {export_ver} of {component} incompatible with "
"installed version {ver}."
).format(
export_ver=component["version"],
@@ -113,8 +121,8 @@ def _check_versions(version_json):
)
)
- if error_messages:
- raise ValidationError((" ".join(error_messages)))
+ if error_messages:
+ raise ValidationError((" ".join(error_messages)))
def import_repository_version(importer_pk, destination_repo_pk, source_repo_name, tar_path):
| diff --git a/pulpcore/tests/unit/test_import_checks.py b/pulpcore/tests/unit/test_import_checks.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/tests/unit/test_import_checks.py
@@ -0,0 +1,28 @@
+from unittest.mock import patch
+
+from django.test import TestCase
+from rest_framework.serializers import ValidationError
+
+from pulpcore.app.tasks.importer import _check_versions
+
+
+class TestObject:
+ version = "1.2.3" # Every component is vers 1.2.3
+
+
+class TestCheckVersions(TestCase):
+ @patch("pulpcore.app.tasks.importer.get_distribution", return_value=TestObject())
+ def test_vers_check(self, mock_get_distribution):
+ export_json = [{"component": "xyz", "version": "1.2.3"}]
+ _check_versions(export_json)
+
+ export_json = [{"component": "xy", "version": "1.2"}]
+ _check_versions(export_json)
+
+ export_json = [{"component": "x_noty_z", "version": "1.4.3"}]
+ with self.assertRaises(ValidationError):
+ _check_versions(export_json)
+
+ export_json = [{"component": "notx_y_z", "version": "2.2.3"}]
+ with self.assertRaises(ValidationError):
+ _check_versions(export_json)
| Version Matching for Import/Export too stringent
**Is your feature request related to a problem? Please describe.**
When importing previously exported content, pulp runs a check on the pulpcore, pulp_rpm, and other plugin versions of both the export archive and the currently installed system. For example, if my exporter is using pulp 3.16.1 and the import server 3.16.2, I'd get an error like
`Export version 3.16.1 of pulpcore does not match installed version 3.16.2`
While checking compatibility is a good thing, we need to be mindful of the fact that customers in disconnected environments will often have different minor releases of pulp. Expecting the z-stream to also match makes it unwieldy and very hard for us to control the environment.
Location of the check -> https://github.com/pulp/pulpcore/blob/main/pulpcore/app/tasks/importer.py#L93-L118
As described in https://hackmd.io/HLptudH9R6S4PCmm8nRmmg?view#Open-Questions
- [DONE] How will backwards incompatible changes in the exported data format be handled over time? e.g. I exported this data a loooong time ago and now the system I'm importing into expects a newer, different format?
- idea 1: Tie the data format to the exported system's pulpcore version number. Put that version number into the exported data somehow. Then have systems importing know the oldest version they support and refuse to import older exports.
- idea 2: Same idea as (1) except use its own numbering scheme, probably semver based
- **Current Status**: versions.json included in export, with core and plugin versions for all
plugins involved in the export. Import checks for **exact match** to versions.json on import,
and errors if there are diffs.
We definitely need more flexibility on this.
**Describe the solution you'd like**
Discuss various approaches to solve this in both short term and long term.
- Only require match of x.y releases and not x.y.z when checking compatibility (not sure that is sufficient).
- As described in the design docs use a unique numbering/version scheme that changes only when 2 versions are incompatible. That way if there are no breaking changes between 3.16-3.18 the version will be identical and hence compatible
**Additional context**
Further discussion is pending on this topic. Pulpcore needs to be careful not to make the check too stringent.
| https://bugzilla.redhat.com/show_bug.cgi?id=2067301 | 2022-04-12T17:24:28 |
pulp/pulpcore | 2,558 | pulp__pulpcore-2558 | [
"2557"
] | 5a2df085dd404206c5d84a74f697d1242022379f | diff --git a/pulpcore/plugin/stages/models.py b/pulpcore/plugin/stages/models.py
--- a/pulpcore/plugin/stages/models.py
+++ b/pulpcore/plugin/stages/models.py
@@ -136,7 +136,7 @@ def does_batch(self):
"""Whether this content is being awaited on and must therefore not wait forever in batches.
When overwritten in subclasses, a `True` value must never be turned into `False`.
"""
- return not self._resolved and self._future is None
+ return self._resolved or self._future is None
async def resolution(self):
"""Coroutine that waits for the content to be saved to database.
| Implementation of does_batch in DeclarativeContent is wrong
**Describe the bug**
`does_batch` is supposed to decide whether content can be waited on or must be evaluated asap. It produces `False` unnecessarily often, leading to performance issues.
**Additional context**
This is suspected to be at least one of the reasons for: https://github.com/pulp/pulp_deb/issues/466
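A minimal sketch of the intended truth table; the attribute names mirror `DeclarativeContent`, and the buggy expression is kept in a comment for contrast:
```
class Declarative:
    def __init__(self, resolved=False, future=None):
        self._resolved = resolved
        self._future = future

    @property
    def does_batch(self):
        # Buggy form: `not self._resolved and self._future is None`
        # It returns False for content that is already resolved, so batches
        # are flushed far more often than necessary.
        return self._resolved or self._future is None

assert Declarative(resolved=True).does_batch        # already resolved: may batch
assert Declarative().does_batch                     # nothing awaits it: may batch
assert not Declarative(future=object()).does_batch  # awaited: must not wait in a batch
```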
| 2022-04-19T12:38:35 |
||
pulp/pulpcore | 2,560 | pulp__pulpcore-2560 | [
"2559"
] | 97e26b2aec34b0c745ba4519f60096ccd02c99a7 | diff --git a/pulpcore/app/access_policy.py b/pulpcore/app/access_policy.py
--- a/pulpcore/app/access_policy.py
+++ b/pulpcore/app/access_policy.py
@@ -1,6 +1,6 @@
from rest_access_policy import AccessPolicy
+from rest_framework.exceptions import APIException
-from pulpcore.app.loggers import deprecation_logger
from pulpcore.app.models import AccessPolicy as AccessPolicyModel
from pulpcore.app.util import get_view_urlpattern, get_viewset_for_model
from pulpcore.app.role_util import get_objects_for_user
@@ -48,19 +48,16 @@ def handle_creation_hooks(cls, obj):
access_policy = cls.get_access_policy(viewset)
if access_policy and access_policy.creation_hooks is not None:
for creation_hook in access_policy.creation_hooks:
- function = obj.REGISTERED_CREATION_HOOKS.get(creation_hook["function"])
- if function is not None:
- kwargs = creation_hook.get("parameters") or {}
- function(**kwargs)
- else:
- # Old interface deprecated for removal in 3.20
- function = getattr(obj, creation_hook["function"])
- deprecation_logger.warn(
- "Calling unregistered creation hooks from the access policy is deprecated"
- " and may be removed with pulpcore 3.20."
- f"[hook={creation_hook}, viewset={access_policy.viewset_name}]."
+ hook_name = creation_hook["function"]
+ try:
+ function = obj.REGISTERED_CREATION_HOOKS[hook_name]
+ except KeyError:
+ raise APIException(
+ f"Creation hook '{hook_name}' was not registered for this view set."
)
- function(creation_hook.get("permissions"), creation_hook.get("parameters"))
+
+ kwargs = creation_hook.get("parameters") or {}
+ function(**kwargs)
def scope_queryset(self, view, qs):
"""
diff --git a/pulpcore/app/apps.py b/pulpcore/app/apps.py
--- a/pulpcore/app/apps.py
+++ b/pulpcore/app/apps.py
@@ -197,25 +197,6 @@ def ready(self):
post_migrate.connect(_delete_anon_user, sender=self, dispatch_uid="delete_anon_identifier")
-def _rename_permissions_assignment_workaround(access_policy, viewset):
- from pulpcore.app.loggers import deprecation_logger
-
- if "permissions_assignment" in access_policy:
- deprecation_logger.warn(
- f"In AccessPolicy for {viewset}, "
- "the field 'permissions_assignment' has been renamed to 'creation_hooks'. This "
- "workaround may be removed with pulpcore 3.20."
- )
- access_policy["creation_hooks"] = access_policy.pop("permissions_assignment")
- if "creation_hooks" in access_policy:
- if any(hook.get("permissions") is not None for hook in access_policy["creation_hooks"]):
- deprecation_logger.warn(
- f"In AccessPolicy for {viewset}, "
- "the 'permissions' field in 'creation_hooks' is deprecated and may be removed "
- "with pulpcore 3.20. Use the 'parameters' field instead."
- )
-
-
def _populate_access_policies(sender, apps, verbosity, **kwargs):
from pulpcore.app.util import get_view_urlpattern
@@ -231,7 +212,6 @@ def _populate_access_policies(sender, apps, verbosity, **kwargs):
access_policy = getattr(viewset, "DEFAULT_ACCESS_POLICY", None)
if access_policy is not None:
viewset_name = get_view_urlpattern(viewset)
- _rename_permissions_assignment_workaround(access_policy, viewset)
db_access_policy, created = AccessPolicy.objects.get_or_create(
viewset_name=viewset_name, defaults=access_policy
)
| Remove deprecated creation_hooks interface for 3.20
| 2022-04-19T18:12:47 |
||
pulp/pulpcore | 2,564 | pulp__pulpcore-2564 | [
"2557"
] | f3afe501b532cc8a52eab0949e524aef56161e10 | diff --git a/pulpcore/plugin/stages/models.py b/pulpcore/plugin/stages/models.py
--- a/pulpcore/plugin/stages/models.py
+++ b/pulpcore/plugin/stages/models.py
@@ -136,7 +136,7 @@ def does_batch(self):
"""Whether this content is being awaited on and must therefore not wait forever in batches.
When overwritten in subclasses, a `True` value must never be turned into `False`.
"""
- return not self._resolved and self._future is None
+ return self._resolved or self._future is None
async def resolution(self):
"""Coroutine that waits for the content to be saved to database.
| Implementation of does_batch in DeclarativeContent is wrong
**Describe the bug**
`does_batch` is supposed to decide whether content can be waited on or must be evaluated asap. It produces `False` unnecessarily often, leading to performance issues.
**Additional context**
This is suspected to be at least one of the reasons for: https://github.com/pulp/pulp_deb/issues/466
| 2022-04-20T01:56:08 |
||
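The one-expression change in the patch above is easiest to read as a truth table. The `_Stub` class below is an illustrative stand-in for `DeclarativeContent`, reduced to the two attributes the property inspects; only the two return expressions are taken from the patch.

```python
# Stand-in for DeclarativeContent, reduced to the attributes does_batch() reads.


class _Stub:
    def __init__(self, resolved, future):
        self._resolved = resolved  # content already resolved/saved
        self._future = future      # non-None while a stage awaits resolution()

    def does_batch_old(self):
        return not self._resolved and self._future is None

    def does_batch_new(self):
        return self._resolved or self._future is None


for resolved in (False, True):
    for future in (None, object()):
        stub = _Stub(resolved, future)
        print(
            f"resolved={resolved!s:5} awaited={future is not None!s:5} "
            f"old={stub.does_batch_old()!s:5} new={stub.does_batch_new()!s:5}"
        )

# Only the (unresolved, awaited) case should return False and force a batch flush.
# The old expression also returned False for already-resolved content, which is the
# "False unnecessarily often" behavior described in the issue.
```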
pulp/pulpcore | 2,565 | pulp__pulpcore-2565 | [
"2557"
] | 0d9e9022fdc6a35b8418848a526435416e65c899 | diff --git a/pulpcore/plugin/stages/models.py b/pulpcore/plugin/stages/models.py
--- a/pulpcore/plugin/stages/models.py
+++ b/pulpcore/plugin/stages/models.py
@@ -136,7 +136,7 @@ def does_batch(self):
"""Whether this content is being awaited on and must therefore not wait forever in batches.
When overwritten in subclasses, a `True` value must never be turned into `False`.
"""
- return not self._resolved and self._future is None
+ return self._resolved or self._future is None
async def resolution(self):
"""Coroutine that waits for the content to be saved to database.
| Implementation of does_batch in DeclarativeContent is wrong
**Describe the bug**
`does_batch` is supposed to decide whether content can be waited on or must be evaluated as soon as possible. It produces `False` unnecessarily often, leading to performance issues.
**Additional context**
This is suspected to be at least one of the reasons for: https://github.com/pulp/pulp_deb/issues/466
| 2022-04-20T01:56:25 |
||
pulp/pulpcore | 2,566 | pulp__pulpcore-2566 | [
"2557"
] | 97a3d5412d093e7b2bc8e35a465c659b1c72606d | diff --git a/pulpcore/plugin/stages/models.py b/pulpcore/plugin/stages/models.py
--- a/pulpcore/plugin/stages/models.py
+++ b/pulpcore/plugin/stages/models.py
@@ -136,7 +136,7 @@ def does_batch(self):
"""Whether this content is being awaited on and must therefore not wait forever in batches.
When overwritten in subclasses, a `True` value must never be turned into `False`.
"""
- return not self._resolved and self._future is None
+ return self._resolved or self._future is None
async def resolution(self):
"""Coroutine that waits for the content to be saved to database.
| Implementation of does_batch in DeclarativeContent is wrong
**Describe the bug**
`does_batch` is supposed to decide whether content can be waited on or must be evaluated as soon as possible. It produces `False` unnecessarily often, leading to performance issues.
**Additional context**
This is suspected to be at least one of the reasons for: https://github.com/pulp/pulp_deb/issues/466
| 2022-04-20T01:57:04 |
||
pulp/pulpcore | 2,567 | pulp__pulpcore-2567 | [
"2557"
] | a420544bb7b7cbf4ac5ea69781593c88fe3cd91d | diff --git a/pulpcore/plugin/stages/models.py b/pulpcore/plugin/stages/models.py
--- a/pulpcore/plugin/stages/models.py
+++ b/pulpcore/plugin/stages/models.py
@@ -136,7 +136,7 @@ def does_batch(self):
"""Whether this content is being awaited on and must therefore not wait forever in batches.
When overwritten in subclasses, a `True` value must never be turned into `False`.
"""
- return not self._resolved and self._future is None
+ return self._resolved or self._future is None
async def resolution(self):
"""Coroutine that waits for the content to be saved to database.
| Implementation of does_batch in DeclarativeContent is wrong
**Describe the bug**
`does_batch` is supposed to decide whether content can be waited on or must be evaluated as soon as possible. It produces `False` unnecessarily often, leading to performance issues.
**Additional context**
This is suspected to be at least one of the reasons for: https://github.com/pulp/pulp_deb/issues/466
| 2022-04-20T01:57:18 |
||
pulp/pulpcore | 2,568 | pulp__pulpcore-2568 | [
"2556"
] | 11cf32d395c7f897fc0cfc020cbdce626a9095e6 | diff --git a/pulpcore/plugin/constants.py b/pulpcore/plugin/constants.py
--- a/pulpcore/plugin/constants.py
+++ b/pulpcore/plugin/constants.py
@@ -6,16 +6,3 @@
TASK_CHOICES,
TASK_FINAL_STATES,
)
-
-
-@property
-def API_ROOT():
- from django.conf import settings
- from pulpcore.app.loggers import deprecation_logger
-
- deprecation_logger.warn(
- "The API_ROOT constant has been deprecated and turned into a setting. Please use "
- "`settings.V3_API_ROOT_NO_FRONT_SLASH` instead. This symbol will be deleted with pulpcore "
- "3.20."
- )
- return settings.V3_API_ROOT_NO_FRONT_SLASH
| Remove pulpcore.plugin.constants.API_ROOT
This constant is deprecated and is intended to be removed in 3.20.
| 2022-04-20T18:48:43 |
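The body of the removed property already names its replacement, so the migration for plugin code is mechanical. A minimal sketch, assuming it runs inside a configured Pulp/Django process where `django.conf.settings` is available; the `v3_api_path` helper is hypothetical:

```python
# Before (deprecated, removed by this patch):
#     from pulpcore.plugin.constants import API_ROOT

# After: read the value from Django settings instead.
from django.conf import settings


def v3_api_path(suffix):
    """Join a suffix onto the v3 API root (no leading slash), e.g. for building URLs."""
    return settings.V3_API_ROOT_NO_FRONT_SLASH + suffix
```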