repo (string, 856 distinct values) | pull_number (int64, 3 to 127k) | instance_id (string, lengths 12 to 58) | issue_numbers (sequence, lengths 1 to 5) | base_commit (string, length 40) | patch (string, lengths 67 to 1.54M) | test_patch (string, lengths 0 to 107M) | problem_statement (string, lengths 3 to 307k) | hints_text (string, lengths 0 to 908k) | created_at (timestamp[s])
---|---|---|---|---|---|---|---|---|---
zostera/django-bootstrap4 | 163 | zostera__django-bootstrap4-163 | [
"113"
] | 7a7f3c25efa26e04055ce9fb4829be448f5a27a9 | diff --git a/src/bootstrap4/components.py b/src/bootstrap4/components.py
--- a/src/bootstrap4/components.py
+++ b/src/bootstrap4/components.py
@@ -21,6 +21,6 @@ def render_alert(content, alert_type=None, dismissible=True):
render_tag(
"div",
attrs={"class": " ".join(css_classes), "role": "alert"},
- content=button_placeholder + text_value(content),
+ content=mark_safe(button_placeholder) + text_value(content),
).replace(button_placeholder, button)
)
| diff --git a/tests/test_components.py b/tests/test_components.py
--- a/tests/test_components.py
+++ b/tests/test_components.py
@@ -1,10 +1,11 @@
from django.test import TestCase
+from django.utils.safestring import mark_safe
from bootstrap4.components import render_alert
class ComponentsTest(TestCase):
- def test_render_alert_with_type(self):
+ def test_render_alert_without_type(self):
alert = render_alert("content")
self.assertEqual(
alert,
@@ -13,3 +14,35 @@ def test_render_alert_with_type(self):
+ 'aria-label="close">'
+ "×</button>content</div>",
)
+
+ def test_render_alert_with_type(self):
+ alert = render_alert("content", alert_type="danger")
+ self.assertEqual(
+ alert,
+ '<div class="alert alert-danger alert-dismissible" role="alert">'
+ + '<button type="button" class="close" data-dismiss="alert" '
+ + 'aria-label="close">'
+ + "×</button>content</div>",
+ )
+
+ def test_render_alert_with_safe_content(self):
+ msg = mark_safe('This is <a href="https://example.com" class="alert-link">a safe link</a>!')
+ alert = render_alert(msg)
+ self.assertEqual(
+ alert,
+ '<div class="alert alert-info alert-dismissible" role="alert">'
+ + '<button type="button" class="close" data-dismiss="alert" '
+ + 'aria-label="close">'
+ + '×</button>This is <a href="https://example.com" class="alert-link">a safe link</a>!</div>',
+ )
+
+ def test_render_alert_with_unsafe_content(self):
+ msg = "This is <b>unsafe</b>!"
+ alert = render_alert(msg)
+ self.assertEqual(
+ alert,
+ '<div class="alert alert-info alert-dismissible" role="alert">'
+ + '<button type="button" class="close" data-dismiss="alert" '
+ + 'aria-label="close">'
+ + "×</button>This is <b>unsafe</b>!</div>",
+ )
| Putting a link in bootstrap_alert
How do I put a link in `bootstrap_alert`?
```
msg=mark_safe('Please visit our forum site <a href="talk.edgle.com" class="alert-link">Edgle Talk</a> as well!')
render_alert(msg)
'<div class="alert alert-info alert-dismissable"><button type="button" class="close" data-dismiss="alert" aria-hidden="true">×</button>Please visit our forum site <a href="talk.edgle.com" class="alert-link">Edgle Talk</a> as well!</div>'
```
As seen, HTML elements are escaped due to the following chain of calls:
bootstrap_alert -> render_alert -> render_tag -> format_html -> conditional_escape
| The problem is here, in `render_alert` at line 26:
`content=button_placeholder + text_value(content),`
str + SafeText => str
And it would be solved if '\__\__BUTTON\__\__' was marked safe (line 22). | 2019-08-09T10:03:15 |
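The hint above pins the bug on string concatenation. A minimal sketch of that mechanism (plain Django utilities; the placeholder name here is illustrative, not the library's actual constant) shows why `str + SafeText` drops the safe marker and why wrapping the placeholder in `mark_safe`, as the patch does, fixes it:

```python
# Requires Django; no settings configuration is needed for these utilities.
from django.utils.html import conditional_escape
from django.utils.safestring import SafeString, mark_safe

button_placeholder = "BUTTON_PLACEHOLDER"  # plain str, as in the pre-patch code
content = mark_safe('<a href="https://example.com">link</a>')  # SafeString from the caller

broken = button_placeholder + content            # str + SafeString => plain str
print(isinstance(broken, SafeString))            # False: the "safe" marker is lost
print(conditional_escape(broken))                # the <a> tag gets escaped -> the reported bug

fixed = mark_safe(button_placeholder) + content  # SafeString + SafeString => SafeString
print(isinstance(fixed, SafeString))             # True
print(conditional_escape(fixed))                 # left unescaped, so the link renders
```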
zostera/django-bootstrap4 | 186 | zostera__django-bootstrap4-186 | [
"468"
] | 83c56c6dd1aacef95d50f597f6780bf27c12c0bb | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,7 @@
author_email="[email protected]",
url="https://github.com/zostera/django-bootstrap4",
license="BSD-3-Clause",
- use_scm_version=True,
+ use_scm_version={"write_to": "src/bootstrap4/_version.py",},
setup_requires=["setuptools_scm"],
packages=find_packages(where="src"),
package_dir={"": "src"},
diff --git a/src/bootstrap4/__init__.py b/src/bootstrap4/__init__.py
--- a/src/bootstrap4/__init__.py
+++ b/src/bootstrap4/__init__.py
@@ -0,0 +1,10 @@
+try:
+ from _version import version
+except ImportError:
+ try:
+ from setuptools_scm import get_version
+
+ version = get_version()
+ except ImportError:
+ version = "???"
+__version__ = version
| Bump setuptools from 60.10.0 to 62.3.1
Bumps [setuptools](https://github.com/pypa/setuptools) from 60.10.0 to 62.3.1.
<details>
<summary>Changelog</summary>
<p><em>Sourced from <a href="https://github.com/pypa/setuptools/blob/main/CHANGES.rst">setuptools's changelog</a>.</em></p>
<blockquote>
<h2>v62.3.1</h2>
<p>Misc
^^^^</p>
<ul>
<li><a href="https://github-redirect.dependabot.com/pypa/setuptools/issues/3320">#3320</a>: Fixed typo which causes <code>namespace_packages</code> to raise an error instead of
warning.</li>
</ul>
<h2>v62.3.0</h2>
<p>Deprecations
^^^^^^^^^^^^</p>
<ul>
<li>
<p><a href="https://github-redirect.dependabot.com/pypa/setuptools/issues/3262">#3262</a>: Formally added deprecation messages for <code>namespace_packages</code>.
The methodology that uses <code>pkg_resources</code> and <code>namespace_packages</code> for
creating namespaces was already discouraged by the :doc:<code>setuptools docs </userguide/package_discovery></code> and the
:doc:<code>Python Packaging User Guide <PyPUG:guides/packaging-namespace-packages></code>,
therefore this change just make the deprecation more official.
Users can consider migrating to native/implicit namespaces (as introduced in
:pep:<code>420</code>).</p>
</li>
<li>
<p><a href="https://github-redirect.dependabot.com/pypa/setuptools/issues/3308">#3308</a>: Relying on <code>include_package_data</code> to ensure sub-packages are automatically
added to the build wheel distribution (as "data") is now considered a
deprecated practice.</p>
<p>This behaviour was controversial and caused inconsistencies (<a href="https://github-redirect.dependabot.com/pypa/setuptools/issues/3260">#3260</a>).</p>
<p>Instead, projects are encouraged to properly configure <code>packages</code> or use
discovery tools. General information can be found in :doc:<code>userguide/package_discovery</code>.</p>
</li>
</ul>
<p>Changes
^^^^^^^</p>
<ul>
<li><a href="https://github-redirect.dependabot.com/pypa/setuptools/issues/1806">#1806</a>: Allowed recursive globs (<code>**</code>) in <code>package_data</code>. -- by :user:<code>nullableVoidPtr</code></li>
<li><a href="https://github-redirect.dependabot.com/pypa/setuptools/issues/3206">#3206</a>: Fixed behaviour when both <code>install_requires</code> (in <code>setup.py</code>) and
<code>dependencies</code> (in <code>pyproject.toml</code>) are specified.
The configuration in <code>pyproject.toml</code> will take precedence over <code>setup.py</code>
(in accordance with PEP 621). A warning was added to inform users.</li>
</ul>
<p>Documentation changes
^^^^^^^^^^^^^^^^^^^^^</p>
<ul>
<li><a href="https://github-redirect.dependabot.com/pypa/setuptools/issues/3307">#3307</a>: Added introduction to references/keywords
Added deprecation tags to test kwargs
Moved userguide/keywords to deprecated section
Clarified in deprecated doc what keywords came from distutils and which were added or changed by setuptools</li>
</ul>
<p>Misc
^^^^</p>
<!-- raw HTML omitted -->
</blockquote>
<p>... (truncated)</p>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/pypa/setuptools/commit/b4164438c8927b9f7593bd543475dd3010d7b3b1"><code>b416443</code></a> Bump version: 62.3.0 → 62.3.1</li>
<li><a href="https://github.com/pypa/setuptools/commit/287040fab8bda70752704ba40563d9ca4c1cb8d2"><code>287040f</code></a> Add news fragment</li>
<li><a href="https://github.com/pypa/setuptools/commit/93dd5091e82c9a093041df44260f49910c047abe"><code>93dd509</code></a> Remove comma</li>
<li><a href="https://github.com/pypa/setuptools/commit/1b7642be56c767bf064c2d0ab1bbd16f8ca3be38"><code>1b7642b</code></a> Bump version: 62.2.0 → 62.3.0</li>
<li><a href="https://github.com/pypa/setuptools/commit/73bc126cd05b25a24e2a3e7ad347a245e980b673"><code>73bc126</code></a> Add deprecation messages for <code>namespace_packages</code> (<a href="https://github-redirect.dependabot.com/pypa/setuptools/issues/3262">#3262</a>)</li>
<li><a href="https://github.com/pypa/setuptools/commit/42d940f04e6d39e999ad184d4a6b5062957f4ae6"><code>42d940f</code></a> Update vendored <code>pyparsing</code> to fix problems with 3.11 (<a href="https://github-redirect.dependabot.com/pypa/setuptools/issues/3276">#3276</a>)</li>
<li><a href="https://github.com/pypa/setuptools/commit/e66ee6206feb49fe4bdec417c84d601d6b4bbf72"><code>e66ee62</code></a> Warn about deprecation of behaviour that considers modules/packages as data w...</li>
<li><a href="https://github.com/pypa/setuptools/commit/aec2215be6711c850339484cd7f47d542a6c06a1"><code>aec2215</code></a> Remove residual dependencies from setup.py when dependencies are set in pypro...</li>
<li><a href="https://github.com/pypa/setuptools/commit/fd072dcf4dfcf5c737139c9f7546952ecd9f9767"><code>fd072dc</code></a> Allow recursive globs for package_data (<a href="https://github-redirect.dependabot.com/pypa/setuptools/issues/3309">#3309</a>)</li>
<li><a href="https://github.com/pypa/setuptools/commit/269f3acbf4e3a2f717fb37dc23ff48bf36b8bc2a"><code>269f3ac</code></a> Add deprecation notices to docs</li>
<li>Additional commits viewable in <a href="https://github.com/pypa/setuptools/compare/v60.10.0...v62.3.1">compare view</a></li>
</ul>
</details>
<br />
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
<details>
<summary>Dependabot commands and options</summary>
<br />
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
</details>
| 2019-12-09T19:37:30 |
||
zostera/django-bootstrap4 | 191 | zostera__django-bootstrap4-191 | [
"188"
] | 225c7d3d666ad0e9112f0968e32a94c3ffce5e25 | diff --git a/src/bootstrap4/__init__.py b/src/bootstrap4/__init__.py
--- a/src/bootstrap4/__init__.py
+++ b/src/bootstrap4/__init__.py
@@ -1,5 +1,5 @@
try:
- from _version import version
+ from ._version import version
except ImportError:
try:
from setuptools_scm import get_version
| diff --git a/tests/test_version.py b/tests/test_version.py
new file mode 100644
--- /dev/null
+++ b/tests/test_version.py
@@ -0,0 +1,12 @@
+from django.test import TestCase
+
+
+class VersionTest(TestCase):
+ """Test presence of package version."""
+
+ def test_version(self):
+ import bootstrap4
+
+ version = bootstrap4.__version__
+ version_parts = version.split(".")
+ self.assertTrue(len(version_parts) >= 3)
| Building docs locally gives ImportError
The `make docs` command raises `ImportError`.
```
WARNING: autodoc: failed to import function 'templatetags.bootstrap4.bootstrap_form' from module 'bootstrap4'; the following exception was raised:
Traceback (most recent call last):
File "/Users/dylan/Projects/django-bootstrap4/src/bootstrap4/__init__.py", line 2, in <module>
from _version import version
ModuleNotFoundError: No module named '_version'
```
| 2019-12-12T12:12:08 |
|
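Taken together, the patch in the row above (PR 191, which makes the import relative) and the one before it (PR 186, which added `write_to` and the fallback) leave `src/bootstrap4/__init__.py` reading roughly as follows; the fallback covers source checkouts where setuptools_scm has not yet written `_version.py`:

```python
# src/bootstrap4/__init__.py after both patches: prefer the generated _version.py,
# fall back to setuptools_scm at import time, and finally to a sentinel value.
try:
    from ._version import version
except ImportError:
    try:
        from setuptools_scm import get_version

        version = get_version()
    except ImportError:
        version = "???"
__version__ = version
```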
zostera/django-bootstrap4 | 217 | zostera__django-bootstrap4-217 | [
"215"
] | e67f4766aacbbd6f5f0422be70b6b36babde98f0 | diff --git a/src/bootstrap4/renderers.py b/src/bootstrap4/renderers.py
--- a/src/bootstrap4/renderers.py
+++ b/src/bootstrap4/renderers.py
@@ -338,7 +338,10 @@ def list_to_class(self, html, klass):
if enclosing_div:
for label in enclosing_div.find_all("label"):
label.attrs["class"] = label.attrs.get("class", []) + ["form-check-label"]
- label.input.attrs["class"] = label.input.attrs.get("class", []) + ["form-check-input"]
+ try:
+ label.input.attrs["class"] = label.input.attrs.get("class", []) + ["form-check-input"]
+ except AttributeError:
+ pass
return str(soup)
def add_checkbox_label(self, html):
| 'NoneType' object has no attribute 'attrs' when upgrading to 2.0.0
I just tried upgrading my project to django-bootstrap 2.0.0, and encountered the following error when loading websites.
```
Traceback (most recent call last):
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/core/handlers/exception.py", line 34, in inner
response = get_response(request)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/core/handlers/base.py", line 145, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/core/handlers/base.py", line 143, in _get_response
response = response.render()
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/template/response.py", line 105, in render
self.content = self.rendered_content
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/template/response.py", line 83, in rendered_content
return template.render(context, self._request)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/template/backends/django.py", line 61, in render
return self.template.render(context)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/template/base.py", line 171, in render
return self._render(context)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/test/utils.py", line 95, in instrumented_test_render
return self.nodelist.render(context)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/template/base.py", line 936, in render
bit = node.render_annotated(context)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/template/base.py", line 903, in render_annotated
return self.render(context)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/template/loader_tags.py", line 150, in render
return compiled_parent._render(context)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/test/utils.py", line 95, in instrumented_test_render
return self.nodelist.render(context)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/template/base.py", line 936, in render
bit = node.render_annotated(context)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/template/base.py", line 903, in render_annotated
return self.render(context)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/template/loader_tags.py", line 150, in render
return compiled_parent._render(context)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/test/utils.py", line 95, in instrumented_test_render
return self.nodelist.render(context)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/template/base.py", line 936, in render
bit = node.render_annotated(context)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/template/base.py", line 903, in render_annotated
return self.render(context)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/template/loader_tags.py", line 62, in render
result = block.nodelist.render(context)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/template/base.py", line 936, in render
bit = node.render_annotated(context)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/template/base.py", line 903, in render_annotated
return self.render(context)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/template/loader_tags.py", line 62, in render
result = block.nodelist.render(context)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/template/base.py", line 936, in render
bit = node.render_annotated(context)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/template/base.py", line 903, in render_annotated
return self.render(context)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/django/template/library.py", line 192, in render
output = self.func(*resolved_args, **resolved_kwargs)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/bootstrap4/templatetags/bootstrap4.py", line 627, in bootstrap_field
return render_field(*args, **kwargs)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/bootstrap4/forms.py", line 39, in render_field
return renderer_cls(field, **kwargs).render()
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/bootstrap4/renderers.py", line 84, in render
return mark_safe(self._render())
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/bootstrap4/renderers.py", line 515, in _render
html = self.post_widget_render(html)
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/bootstrap4/renderers.py", line 363, in post_widget_render
html = self.list_to_class(html, "checkbox")
File "/home/rixx/.local/share/virtualenvs/pretalx/lib/python3.8/site-packages/bootstrap4/renderers.py", line 341, in list_to_class
label.input.attrs["class"] = label.input.attrs.get("class", []) + ["form-check-input"]
AttributeError: 'NoneType' object has no attribute 'attrs'
```
This is a direct regression to #181, where django-bootstrap4 assumes that all inputs have to be inside labels (which is not idiomatic or at least not required in HTML).
| Thanks. Apparently there is no test on this regression. Will look into that.
The major version bump to 2 is because of the drop in support for Python and Django versions. You should be fine with `django-bootstrap4<2`. | 2020-06-02T14:44:19 |
|
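A small, hedged sketch of the failure mode behind the traceback above: when the `<input>` is a sibling of its `<label>` rather than nested inside it (valid HTML, as the reporter notes), BeautifulSoup's `label.input` is `None`, which is exactly what the try/except in the patch guards against. Assumes beautifulsoup4 is installed; the markup is invented for illustration.

```python
# label.input is BeautifulSoup shorthand for "first <input> descendant of the <label>";
# with a sibling <input> there is no such descendant, so it returns None.
from bs4 import BeautifulSoup

html = '<div class="form-check"><input type="checkbox" id="opt"><label for="opt">Option</label></div>'
label = BeautifulSoup(html, "html.parser").find("label")

print(label.input)  # None
try:
    label.input.attrs["class"] = label.input.attrs.get("class", []) + ["form-check-input"]
except AttributeError as exc:
    print(exc)  # 'NoneType' object has no attribute 'attrs' -- the error in the report
```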
zostera/django-bootstrap4 | 219 | zostera__django-bootstrap4-219 | [
"220"
] | fe054a046548f73ec013a232ebbe066c75411182 | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -19,7 +19,8 @@
# The short X.Y version.
version = ".".join(release.split(".")[:2])
-extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode"]
+extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode", "m2r"]
+source_suffix = [".rst", ".md"]
pygments_style = "sphinx"
htmlhelp_basename = f"{PROJECT_NAME}-doc"
| Use Markdown
The README, HISTORY, CONTRIBUTING and AUTHORS files are written in RST. Markdown is easier to read and easier to write, and is usually the standard for files like these. Let's use Markdown for these files.
Also, rename HISTORY to CHANGELOG, which is a more recognizable name.
| 2020-06-05T09:08:25 |
||
prowler-cloud/prowler | 2,272 | prowler-cloud__prowler-2272 | [
"2271"
] | 8d577b872fd26aac2a3e3f3d28dd5f05105e5453 | diff --git a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.py b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.py
--- a/prowler/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.py
+++ b/prowler/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled.py
@@ -13,11 +13,17 @@ def execute(self):
report.resource_id = log_group.name
report.resource_arn = log_group.arn
report.resource_tags = log_group.tags
- if log_group.retention_days < specific_retention_days:
+ if (
+ log_group.never_expire is False
+ and log_group.retention_days < specific_retention_days
+ ):
report.status = "FAIL"
report.status_extended = f"Log Group {log_group.name} has less than {specific_retention_days} days retention period ({log_group.retention_days} days)."
else:
report.status = "PASS"
- report.status_extended = f"Log Group {log_group.name} comply with {specific_retention_days} days retention period since it has {log_group.retention_days} days."
+ if log_group.never_expire is True:
+ report.status_extended = f"Log Group {log_group.name} comply with {specific_retention_days} days retention period since it never expires."
+ else:
+ report.status_extended = f"Log Group {log_group.name} comply with {specific_retention_days} days retention period since it has {log_group.retention_days} days."
findings.append(report)
return findings
diff --git a/prowler/providers/aws/services/cloudwatch/cloudwatch_service.py b/prowler/providers/aws/services/cloudwatch/cloudwatch_service.py
--- a/prowler/providers/aws/services/cloudwatch/cloudwatch_service.py
+++ b/prowler/providers/aws/services/cloudwatch/cloudwatch_service.py
@@ -152,17 +152,18 @@ def __describe_log_groups__(self, regional_client):
if not self.audit_resources or (
is_resource_filtered(log_group["arn"], self.audit_resources)
):
- kms = None
- retention_days = 0
- if "kmsKeyId" in log_group:
- kms = log_group["kmsKeyId"]
- if "retentionInDays" in log_group:
- retention_days = log_group["retentionInDays"]
+ never_expire = False
+ kms = log_group.get("kmsKeyId")
+ retention_days = log_group.get("retentionInDays")
+ if not retention_days:
+ never_expire = True
+ retention_days = 9999
self.log_groups.append(
LogGroup(
arn=log_group["arn"],
name=log_group["logGroupName"],
retention_days=retention_days,
+ never_expire=never_expire,
kms_id=kms,
region=regional_client.region,
)
@@ -240,6 +241,7 @@ class LogGroup(BaseModel):
arn: str
name: str
retention_days: int
+ never_expire: bool
kms_id: Optional[str]
region: str
log_streams: dict[
| diff --git a/tests/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled_test.py b/tests/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled_test.py
--- a/tests/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled_test.py
+++ b/tests/providers/aws/services/cloudwatch/cloudwatch_log_group_retention_policy_specific_days_enabled/cloudwatch_log_group_retention_policy_specific_days_enabled_test.py
@@ -66,7 +66,7 @@ def test_cloudwatch_no_log_groups(self):
assert len(result) == 0
@mock_logs
- def test_cloudwatch_log_group_without_retention_days(self):
+ def test_cloudwatch_log_group_without_retention_days_never_expires(self):
# Generate Logs Client
logs_client = client("logs", region_name=AWS_REGION)
# Request Logs group
@@ -103,12 +103,17 @@ def test_cloudwatch_log_group_without_retention_days(self):
result = check.execute()
assert len(result) == 1
- assert result[0].status == "FAIL"
+ assert result[0].status == "PASS"
assert (
result[0].status_extended
- == "Log Group test has less than 365 days retention period (0 days)."
+ == "Log Group test comply with 365 days retention period since it never expires."
)
assert result[0].resource_id == "test"
+ assert (
+ result[0].resource_arn
+ == f"arn:aws:logs:{AWS_REGION}:{AWS_ACCOUNT_NUMBER}:log-group:test"
+ )
+ assert result[0].region == AWS_REGION
@mock_logs
def test_cloudwatch_log_group_with_compliant_retention_days(self):
@@ -155,6 +160,11 @@ def test_cloudwatch_log_group_with_compliant_retention_days(self):
== "Log Group test comply with 365 days retention period since it has 400 days."
)
assert result[0].resource_id == "test"
+ assert (
+ result[0].resource_arn
+ == f"arn:aws:logs:{AWS_REGION}:{AWS_ACCOUNT_NUMBER}:log-group:test"
+ )
+ assert result[0].region == AWS_REGION
@mock_logs
def test_cloudwatch_log_group_with_no_compliant_retention_days(self):
@@ -201,3 +211,8 @@ def test_cloudwatch_log_group_with_no_compliant_retention_days(self):
== "Log Group test has less than 365 days retention period (7 days)."
)
assert result[0].resource_id == "test"
+ assert (
+ result[0].resource_arn
+ == f"arn:aws:logs:{AWS_REGION}:{AWS_ACCOUNT_NUMBER}:log-group:test"
+ )
+ assert result[0].region == AWS_REGION
diff --git a/tests/providers/aws/services/cloudwatch/cloudwatch_service_test.py b/tests/providers/aws/services/cloudwatch/cloudwatch_service_test.py
--- a/tests/providers/aws/services/cloudwatch/cloudwatch_service_test.py
+++ b/tests/providers/aws/services/cloudwatch/cloudwatch_service_test.py
@@ -195,6 +195,34 @@ def test__describe_log_groups__(self):
assert logs.log_groups[0].name == "/log-group/test"
assert logs.log_groups[0].retention_days == 400
assert logs.log_groups[0].kms_id == "test_kms_id"
+ assert not logs.log_groups[0].never_expire
+ assert logs.log_groups[0].region == AWS_REGION
+ assert logs.log_groups[0].tags == [
+ {"tag_key_1": "tag_value_1", "tag_key_2": "tag_value_2"}
+ ]
+
+ @mock_logs
+ def test__describe_log_groups__never_expire(self):
+ # Logs client for this test class
+ logs_client = client("logs", region_name=AWS_REGION)
+ logs_client.create_log_group(
+ logGroupName="/log-group/test",
+ kmsKeyId="test_kms_id",
+ tags={"tag_key_1": "tag_value_1", "tag_key_2": "tag_value_2"},
+ )
+
+ audit_info = self.set_mocked_audit_info()
+ logs = Logs(audit_info)
+ assert len(logs.log_groups) == 1
+ assert (
+ logs.log_groups[0].arn
+ == f"arn:aws:logs:{AWS_REGION}:{AWS_ACCOUNT_NUMBER}:log-group:/log-group/test"
+ )
+ assert logs.log_groups[0].name == "/log-group/test"
+ assert logs.log_groups[0].never_expire
+ # Since it never expires we don't use the retention_days
+ assert logs.log_groups[0].retention_days == 9999
+ assert logs.log_groups[0].kms_id == "test_kms_id"
assert logs.log_groups[0].region == AWS_REGION
assert logs.log_groups[0].tags == [
{"tag_key_1": "tag_value_1", "tag_key_2": "tag_value_2"}
| [Bug]: cloudwatch_log_group_retention_policy_specific_days_enabled not measuring "never expire"
### Steps to Reproduce
When log groups in CloudWatch are set to "never expire", the mentioned check sees them as having "0 days" of retention.
### Expected behavior
Check should be able to detect the "never expire" log group retention setting
### Actual Result with Screenshots or Logs


### How did you install Prowler?
From pip package (pip install prowler)
### Environment Resource
Workstation
### OS used
WSL2 on Windows 11
### Prowler version
Prowler 3.4.0 (it is the latest version, yay!)
### Pip version
pip 22.0.2 from /usr/lib/python3/dist-packages/pip (python 3.10)
### Context
_No response_
| Hi @blaargh, we are going to review this and we'll get back to you.
Thanks for using Prowler! | 2023-04-25T08:18:26 |
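The fix above hinges on an API detail worth spelling out: CloudWatch Logs omits `retentionInDays` entirely for log groups set to "Never expire", so reading the missing key as 0 days makes them fail a minimum-retention check. A small boto3 sketch (assumes configured credentials and region) that mirrors the `never_expire` flag introduced in the patch:

```python
# Distinguish "never expire" (no retentionInDays key) from an explicit retention period.
import boto3

logs = boto3.client("logs")
for page in logs.get_paginator("describe_log_groups").paginate():
    for group in page["logGroups"]:
        retention = group.get("retentionInDays")  # absent => never expires
        if retention is None:
            print(f"{group['logGroupName']}: never expires (meets any minimum retention)")
        else:
            print(f"{group['logGroupName']}: {retention} days")
```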
prowler-cloud/prowler | 2,282 | prowler-cloud__prowler-2282 | [
"2278"
] | 7adcf5ca462bfeb8feafe8850fcce3f5e1fd88ae | diff --git a/prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py b/prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py
--- a/prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py
+++ b/prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py
@@ -12,7 +12,7 @@ def execute(self):
report.resource_arn = topic.arn
report.resource_tags = topic.tags
report.status = "PASS"
- report.status_extended = f"SNS topic {topic.name} without public access"
+ report.status_extended = f"SNS topic {topic.name} is not publicly accesible"
if topic.policy:
for statement in topic.policy["Statement"]:
# Only check allow statements
@@ -31,11 +31,11 @@ def execute(self):
if "Condition" not in statement:
report.status = "FAIL"
report.status_extended = (
- f"SNS topic {topic.name} policy with public access"
+ f"SNS topic {topic.name} is publicly accesible"
)
else:
- report.status = "FAIL"
- report.status_extended = f"SNS topic {topic.name} policy with public access but has a Condition"
+ report.status = "PASS"
+ report.status_extended = f"SNS topic {topic.name} is publicly accesible but has a Condition that could filter it"
findings.append(report)
| diff --git a/tests/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible_test.py b/tests/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible_test.py
--- a/tests/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible_test.py
+++ b/tests/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible_test.py
@@ -1,4 +1,3 @@
-from re import search
from unittest import mock
from uuid import uuid4
@@ -61,7 +60,7 @@ def test_no_topics(self):
result = check.execute()
assert len(result) == 0
- def test_topics_not_public(self):
+ def test_topic_not_public(self):
sns_client = mock.MagicMock
sns_client.topics = []
sns_client.topics.append(
@@ -84,11 +83,16 @@ def test_topics_not_public(self):
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
- assert search("without public access", result[0].status_extended)
+ assert (
+ result[0].status_extended
+ == f"SNS topic {topic_name} is not publicly accesible"
+ )
assert result[0].resource_id == topic_name
assert result[0].resource_arn == topic_arn
+ assert result[0].region == AWS_REGION
+ assert result[0].resource_tags == []
- def test_topics_no_policy(self):
+ def test_topic_no_policy(self):
sns_client = mock.MagicMock
sns_client.topics = []
sns_client.topics.append(
@@ -106,11 +110,16 @@ def test_topics_no_policy(self):
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
- assert search("without public access", result[0].status_extended)
+ assert (
+ result[0].status_extended
+ == f"SNS topic {topic_name} is not publicly accesible"
+ )
assert result[0].resource_id == topic_name
assert result[0].resource_arn == topic_arn
+ assert result[0].region == AWS_REGION
+ assert result[0].resource_tags == []
- def test_topics_public_with_condition(self):
+ def test_topic_public_with_condition(self):
sns_client = mock.MagicMock
sns_client.topics = []
sns_client.topics.append(
@@ -132,12 +141,17 @@ def test_topics_public_with_condition(self):
check = sns_topics_not_publicly_accessible()
result = check.execute()
assert len(result) == 1
- assert result[0].status == "FAIL"
- assert search("but has a Condition", result[0].status_extended)
+ assert result[0].status == "PASS"
+ assert (
+ result[0].status_extended
+ == f"SNS topic {topic_name} is publicly accesible but has a Condition that could filter it"
+ )
assert result[0].resource_id == topic_name
assert result[0].resource_arn == topic_arn
+ assert result[0].region == AWS_REGION
+ assert result[0].resource_tags == []
- def test_topics_no_key(self):
+ def test_topic_public(self):
sns_client = mock.MagicMock
sns_client.topics = []
sns_client.topics.append(
@@ -160,6 +174,11 @@ def test_topics_no_key(self):
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
- assert search("with public access", result[0].status_extended)
+ assert (
+ result[0].status_extended
+ == f"SNS topic {topic_name} is publicly accesible"
+ )
assert result[0].resource_id == topic_name
assert result[0].resource_arn == topic_arn
+ assert result[0].region == AWS_REGION
+ assert result[0].resource_tags == []
| [Bug]: False positives on some checks?
### Steps to Reproduce
Hi,
it looks like some checks produce false positives (they are tagged as warning because I've allowlisted them):
```
Check ID: ec2_ebs_snapshots_encrypted - ec2 [medium]
WARNING eu-central-1: EBS Snapshot snap-112 is unencrypted.
WARNING eu-central-1: EBS Snapshot snap-113 is encrypted. <<<<
```
```
Check ID: iam_policy_allows_privilege_escalation - iam [high]
WARNING eu-central-1: Custom Policy arn:aws:iam::112:policy/aws_admin_access does not allow privilege escalation
```
Are you maybe simply overring the status (also "PASS") by WARNING in case of an allowlist match?
Another type of issue but more like a question:
_sns_topics_not_publicly_accessible_ triggers with
` WARNING eu-central-1: SNS topic cloudwatch-pagerduty-alarms-ec2-state-changes policy with public access but has a Condition`
which is (from the User's perspective) a false positive as well because we have a condition, which prowler cannot evaluate?
### Expected behavior
none
### Actual Result with Screenshots or Logs
none
### How did you install Prowler?
Cloning the repository from github.com (git clone)
### Environment Resource
locally
### OS used
Linux
### Prowler version
3.4.1
### Pip version
none
### Context
_No response_
| Hi @HarryBo112, I'm going to answer each question down below:
> it looks like some checks produce false positives (they are tagged as warning because I've allowlisted them):
>
> ```
> Check ID: ec2_ebs_snapshots_encrypted - ec2 [medium]
> WARNING eu-central-1: EBS Snapshot snap-112 is unencrypted.
> WARNING eu-central-1: EBS Snapshot snap-113 is encrypted. <<<<
> ```
>
> ```
> Check ID: iam_policy_allows_privilege_escalation - iam [high]
> WARNING eu-central-1: Custom Policy arn:aws:iam::112:policy/aws_admin_access does not allow privilege escalation
> ```
>
> Are you maybe simply overring the status (also "PASS") by WARNING in case of an allowlist match?
That is right, the behaviour of the allowlist is just to add a `WARNING` instead of `INFO`, `PASS` or `FAIL` to any output format. Please, refer to the documentation here https://docs.prowler.cloud/en/latest/tutorials/allowlist/ to get more details and how the allowlist works.
>
> Another type of issue but more like a question:
>
> _sns_topics_not_publicly_accessible_ triggers with ` WARNING eu-central-1: SNS topic cloudwatch-pagerduty-alarms-ec2-state-changes policy with public access but has a Condition` which is (from the User's perspective) a false positive as well because we have a condition, which prowler cannot evaluate?
>
We're going to review this and we'll get back to you soon.
Thanks for using Prowler!
| 2023-04-26T10:13:42 |
prowler-cloud/prowler | 2,291 | prowler-cloud__prowler-2291 | [
"2276"
] | 9afe7408cdf152dc0f4aa4b0bd83ccc080085f2c | diff --git a/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py b/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py
--- a/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py
+++ b/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py
@@ -9,11 +9,13 @@ def execute(self):
report.status = "FAIL"
report.status_extended = "No Backup Plan Exist"
report.resource_arn = ""
- report.resource_id = "No Backups"
+ report.resource_id = "Backups"
report.region = backup_client.region
if backup_client.backup_plans:
report.status = "PASS"
- report.status_extended = f"At least one backup plan exists: { backup_client.backup_plans[0].name}"
+ report.status_extended = (
+ f"At least one backup plan exists: {backup_client.backup_plans[0].name}"
+ )
report.resource_arn = backup_client.backup_plans[0].arn
report.resource_id = backup_client.backup_plans[0].name
report.region = backup_client.backup_plans[0].region
diff --git a/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py b/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py
--- a/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py
+++ b/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py
@@ -5,18 +5,20 @@
class backup_reportplans_exist(Check):
def execute(self):
findings = []
- report = Check_Report_AWS(self.metadata())
- report.status = "FAIL"
- report.status_extended = "No Backup Report Plan Exist"
- report.resource_arn = ""
- report.resource_id = "No Backups"
- report.region = backup_client.region
- if backup_client.backup_report_plans:
- report.status = "PASS"
- report.status_extended = f"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}"
- report.resource_arn = backup_client.backup_report_plans[0].arn
- report.resource_id = backup_client.backup_report_plans[0].name
- report.region = backup_client.backup_report_plans[0].region
+ # We only check report plans if backup plans exist, reducing noise
+ if backup_client.backup_plans:
+ report = Check_Report_AWS(self.metadata())
+ report.status = "FAIL"
+ report.status_extended = "No Backup Report Plan Exist"
+ report.resource_arn = ""
+ report.resource_id = "Backups"
+ report.region = backup_client.region
+ if backup_client.backup_report_plans:
+ report.status = "PASS"
+ report.status_extended = f"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}"
+ report.resource_arn = backup_client.backup_report_plans[0].arn
+ report.resource_id = backup_client.backup_report_plans[0].name
+ report.region = backup_client.backup_report_plans[0].region
- findings.append(report)
+ findings.append(report)
return findings
diff --git a/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py b/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py
--- a/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py
+++ b/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py
@@ -9,7 +9,7 @@ def execute(self):
report.status = "FAIL"
report.status_extended = "No Backup Vault Exist"
report.resource_arn = ""
- report.resource_id = "No Backups"
+ report.resource_id = "Backups"
report.region = backup_client.region
if backup_client.backup_vaults:
report.status = "PASS"
| diff --git a/tests/providers/aws/services/backup/backup_plans_exist/backup_plans_exist_test.py b/tests/providers/aws/services/backup/backup_plans_exist/backup_plans_exist_test.py
--- a/tests/providers/aws/services/backup/backup_plans_exist/backup_plans_exist_test.py
+++ b/tests/providers/aws/services/backup/backup_plans_exist/backup_plans_exist_test.py
@@ -26,7 +26,7 @@ def test_no_backup_plans(self):
assert len(result) == 1
assert result[0].status == "FAIL"
assert result[0].status_extended == "No Backup Plan Exist"
- assert result[0].resource_id == "No Backups"
+ assert result[0].resource_id == "Backups"
assert result[0].resource_arn == ""
assert result[0].region == AWS_REGION
diff --git a/tests/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist_test.py b/tests/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist_test.py
--- a/tests/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist_test.py
+++ b/tests/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist_test.py
@@ -1,15 +1,47 @@
from datetime import datetime
from unittest import mock
-from prowler.providers.aws.services.backup.backup_service import BackupReportPlan
+from prowler.providers.aws.services.backup.backup_service import (
+ BackupPlan,
+ BackupReportPlan,
+)
AWS_REGION = "eu-west-1"
class Test_backup_reportplans_exist:
+ def test_no_backup_plans(self):
+ backup_client = mock.MagicMock
+ backup_client.region = AWS_REGION
+ backup_client.backup_plans = []
+ with mock.patch(
+ "prowler.providers.aws.services.backup.backup_service.Backup",
+ new=backup_client,
+ ):
+ # Test Check
+ from prowler.providers.aws.services.backup.backup_reportplans_exist.backup_reportplans_exist import (
+ backup_reportplans_exist,
+ )
+
+ check = backup_reportplans_exist()
+ result = check.execute()
+
+ assert len(result) == 0
+
def test_no_backup_report_plans(self):
backup_client = mock.MagicMock
backup_client.region = AWS_REGION
+ backup_client.backup_plans = [
+ BackupPlan(
+ arn="ARN",
+ id="MyBackupPlan",
+ region=AWS_REGION,
+ name="MyBackupPlan",
+ version_id="version_id",
+ last_execution_date=datetime(2015, 1, 1),
+ advanced_settings=[],
+ )
+ ]
backup_client.backup_report_plans = []
with mock.patch(
"prowler.providers.aws.services.backup.backup_service.Backup",
@@ -26,13 +58,24 @@ def test_no_backup_report_plans(self):
assert len(result) == 1
assert result[0].status == "FAIL"
assert result[0].status_extended == "No Backup Report Plan Exist"
- assert result[0].resource_id == "No Backups"
+ assert result[0].resource_id == "Backups"
assert result[0].resource_arn == ""
assert result[0].region == AWS_REGION
def test_one_backup_report_plan(self):
backup_client = mock.MagicMock
backup_client.region = AWS_REGION
+ backup_client.backup_plans = [
+ BackupPlan(
+ arn="ARN",
+ id="MyBackupPlan",
+ region=AWS_REGION,
+ name="MyBackupPlan",
+ version_id="version_id",
+ last_execution_date=datetime(2015, 1, 1),
+ advanced_settings=[],
+ )
+ ]
backup_client.backup_report_plans = [
BackupReportPlan(
arn="ARN",
diff --git a/tests/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist_test.py b/tests/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist_test.py
--- a/tests/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist_test.py
+++ b/tests/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist_test.py
@@ -25,7 +25,7 @@ def test_no_backup_vaults(self):
assert len(result) == 1
assert result[0].status == "FAIL"
assert result[0].status_extended == "No Backup Vault Exist"
- assert result[0].resource_id == "No Backups"
+ assert result[0].resource_id == "Backups"
assert result[0].resource_arn == ""
assert result[0].region == AWS_REGION
| [Bug]: backup_plans_exist and backup_reportplans_exist trigger in regions where not backups exist
### Steps to Reproduce
The mentioned checks are triggered even if no backups are present or configured.
### Expected behavior
When the check can't find a resource ID (it actually says "No Backups"), the check shouldn't trigger
### Actual Result with Screenshots or Logs

### How did you install Prowler?
From pip package (pip install prowler)
### Environment Resource
Workstation
### OS used
WSL2 under Windows 11
### Prowler version
Prowler 3.4.0 (it is the latest version, yay!)
### Pip version
pip 22.0.2 from /usr/lib/python3/dist-packages/pip (python 3.10)
### Context
_No response_
| Hi @blaargh, we are checking that. We will get back to you when we finish our analysis.
Thanks for using Prowler 🚀 | 2023-04-28T11:18:29 |
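The behavioural change above can be boiled down to a small control-flow sketch (not Prowler's exact code): in an account or region with no backup plans at all, the report-plan check now emits no finding instead of a FAIL with a placeholder resource.

```python
# The report-plan finding is only emitted when at least one backup plan exists.
def report_plan_findings(backup_plans: list, backup_report_plans: list) -> list:
    findings = []
    if backup_plans:  # without plans, stay silent instead of failing
        status = "PASS" if backup_report_plans else "FAIL"
        findings.append({"status": status, "resource_id": "Backups"})
    return findings


print(report_plan_findings([], []))                              # [] -- no noise in empty regions
print(report_plan_findings(["MyBackupPlan"], []))                # [{'status': 'FAIL', ...}]
print(report_plan_findings(["MyBackupPlan"], ["MyReportPlan"]))  # [{'status': 'PASS', ...}]
```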
prowler-cloud/prowler | 2,430 | prowler-cloud__prowler-2430 | [
"2425"
] | 298373742eb2a19fbcefbabf0ff2cdfcd04fa89e | diff --git a/prowler/providers/aws/services/cloudfront/cloudfront_service.py b/prowler/providers/aws/services/cloudfront/cloudfront_service.py
--- a/prowler/providers/aws/services/cloudfront/cloudfront_service.py
+++ b/prowler/providers/aws/services/cloudfront/cloudfront_service.py
@@ -1,4 +1,3 @@
-from dataclasses import dataclass
from enum import Enum
from typing import Optional
@@ -140,9 +139,8 @@ class GeoRestrictionType(Enum):
whitelist = "whitelist"
-@dataclass
-class DefaultCacheConfigBehaviour:
- realtime_log_config_arn: str
+class DefaultCacheConfigBehaviour(BaseModel):
+ realtime_log_config_arn: Optional[str]
viewer_protocol_policy: ViewerProtocolPolicy
field_level_encryption_id: str
| diff --git a/tests/providers/aws/services/cloudfront/cloudfront_service_test.py b/tests/providers/aws/services/cloudfront/cloudfront_service_test.py
--- a/tests/providers/aws/services/cloudfront/cloudfront_service_test.py
+++ b/tests/providers/aws/services/cloudfront/cloudfront_service_test.py
@@ -6,7 +6,10 @@
from moto.core import DEFAULT_ACCOUNT_ID
from prowler.providers.aws.lib.audit_info.models import AWS_Audit_Info
-from prowler.providers.aws.services.cloudfront.cloudfront_service import CloudFront
+from prowler.providers.aws.services.cloudfront.cloudfront_service import (
+ CloudFront,
+ ViewerProtocolPolicy,
+)
# Mock Test Region
AWS_REGION = "eu-west-1"
@@ -247,7 +250,7 @@ def test__list_distributions__complete(self):
cloudfront.distributions[
cloudfront_distribution_id
].default_cache_config.viewer_protocol_policy
- == "https-only"
+ == ViewerProtocolPolicy.https_only
)
assert (
cloudfront.distributions[
| [Bug]: cloudfront_distributions_https_enabled false positive
### Steps to Reproduce
Prowler reports cloudfront_distributions_https_enabled as failed when the CloudFront distribution has "Redirect HTTP to HTTPS" set as the viewer protocol policy on its behavior.
### Expected behavior
For the example attached, it should "PASS".
### Actual Result with Screenshots or Logs
```
{
"ETag": "XXXXXXXX",
"DistributionConfig": {
"CallerReference": "1561029664879",
"Aliases": {
"Quantity": 1,
"Items": [
xxxxxxxxx"
]
},
"DefaultRootObject": "",
"Origins": {
"Quantity": 1,
"Items": [
{
"Id": "xxxxxxxx",
"DomainName": "xxxxxxxx",
"OriginPath": "",
"CustomHeaders": {
"Quantity": 0
},
"S3OriginConfig": {
"OriginAccessIdentity": ""
},
"ConnectionAttempts": 3,
"ConnectionTimeout": 10,
"OriginShield": {
"Enabled": false
},
"OriginAccessControlId": ""
}
]
},
"OriginGroups": {
"Quantity": 0
},
"DefaultCacheBehavior": {
"TargetOriginId": "xxxxxxxx",
"TrustedSigners": {
"Enabled": false,
"Quantity": 0
},
"TrustedKeyGroups": {
"Enabled": false,
"Quantity": 0
},
"ViewerProtocolPolicy": "redirect-to-https",
"AllowedMethods": {
"Quantity": 2,
"Items": [
"HEAD",
"GET"
],
"CachedMethods": {
"Quantity": 2,
"Items": [
"HEAD",
"GET"
]
}
},
"SmoothStreaming": false,
"Compress": true,
"LambdaFunctionAssociations": {
"Quantity": 0
},
"FunctionAssociations": {
"Quantity": 0
},
"FieldLevelEncryptionId": "",
"ResponseHeadersPolicyId": "4dde66c4-bea6-48eb-9d5c-520b29617292",
"ForwardedValues": {
"QueryString": true,
"Cookies": {
"Forward": "none"
},
"Headers": {
"Quantity": 2,
"Items": [
"Origin",
"Referer"
]
},
"QueryStringCacheKeys": {
"Quantity": 0
}
},
"MinTTL": 0,
"DefaultTTL": 86400,
"MaxTTL": 31536000
},
"CacheBehaviors": {
"Quantity": 0
},
"CustomErrorResponses": {
"Quantity": 0
},
"Comment": "xxxxxxx",
"Logging": {
"Enabled": true,
"IncludeCookies": false,
"Bucket": "xxxxxxx",
"Prefix": "xxxxxxx"
},
"PriceClass": "PriceClass_100",
"Enabled": true,
"ViewerCertificate": {
"CloudFrontDefaultCertificate": false,
"ACMCertificateArn": "xxxxxxxx",
"SSLSupportMethod": "sni-only",
"MinimumProtocolVersion": "TLSv1.2_2021",
"Certificate": "xxxxxxxxxxxxx",
"CertificateSource": "acm"
},
"Restrictions": {
"GeoRestriction": {
"RestrictionType": "none",
"Quantity": 0
}
},
"WebACLId": "",
"HttpVersion": "http2and3",
"IsIPV6Enabled": true,
"ContinuousDeploymentPolicyId": "",
"Staging": false
}
}
```
### How did you install Prowler?
Docker (docker pull toniblyx/prowler)
### Environment Resource
7. Workstation
### OS used
5. Docker
### Prowler version
3.5.3
### Pip version
From official docker image
### Context
_No response_
| 2023-06-01T15:57:53 |
|
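The one-line model change above is easy to misread, so here is a hedged sketch of the mechanism (the enum members beyond `https_only` are assumed for illustration; Prowler's models are pydantic-based): a plain dataclass stores the raw `"redirect-to-https"` string returned by the API, so comparisons against the `ViewerProtocolPolicy` enum never match, while a pydantic `BaseModel` coerces the string into the enum member.

```python
# Why the dataclass produced a false positive: the annotation alone does not convert the value.
from dataclasses import dataclass
from enum import Enum

from pydantic import BaseModel


class ViewerProtocolPolicy(Enum):
    allow_all = "allow-all"
    redirect_to_https = "redirect-to-https"
    https_only = "https-only"


@dataclass
class BehaviourAsDataclass:
    viewer_protocol_policy: ViewerProtocolPolicy  # no validation or coercion happens here


class BehaviourAsModel(BaseModel):
    viewer_protocol_policy: ViewerProtocolPolicy  # pydantic converts the string to the enum


raw = "redirect-to-https"  # what the CloudFront API returns
print(BehaviourAsDataclass(viewer_protocol_policy=raw).viewer_protocol_policy
      == ViewerProtocolPolicy.redirect_to_https)  # False: still a plain str
print(BehaviourAsModel(viewer_protocol_policy=raw).viewer_protocol_policy
      == ViewerProtocolPolicy.redirect_to_https)  # True: coerced to the enum member
```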
prowler-cloud/prowler | 2,585 | prowler-cloud__prowler-2585 | [
"2584",
"2584"
] | f33b96861c22eefe79e78fdbfd6229a17dba1378 | diff --git a/prowler/providers/aws/lib/allowlist/allowlist.py b/prowler/providers/aws/lib/allowlist/allowlist.py
--- a/prowler/providers/aws/lib/allowlist/allowlist.py
+++ b/prowler/providers/aws/lib/allowlist/allowlist.py
@@ -126,8 +126,8 @@ def is_allowlisted(allowlist, audited_account, check, region, resource, tags):
# want to merge allowlisted checks from * to the other accounts check list
if "*" in allowlist["Accounts"]:
checks_multi_account = allowlist["Accounts"]["*"]["Checks"]
+ allowlisted_checks.update(checks_multi_account)
# Test if it is allowlisted
- allowlisted_checks.update(checks_multi_account)
if is_allowlisted_in_check(
allowlisted_checks,
audited_account,
| diff --git a/tests/providers/aws/lib/allowlist/allowlist_test.py b/tests/providers/aws/lib/allowlist/allowlist_test.py
--- a/tests/providers/aws/lib/allowlist/allowlist_test.py
+++ b/tests/providers/aws/lib/allowlist/allowlist_test.py
@@ -305,6 +305,30 @@ def test_is_allowlisted_all_and_single_account(self):
)
)
+ def test_is_allowlisted_single_account(self):
+ allowlist = {
+ "Accounts": {
+ AWS_ACCOUNT_NUMBER: {
+ "Checks": {
+ "check_test": {
+ "Regions": [AWS_REGION],
+ "Resources": ["prowler"],
+ }
+ }
+ }
+ }
+ }
+
+ assert is_allowlisted(
+ allowlist, AWS_ACCOUNT_NUMBER, "check_test", AWS_REGION, "prowler", ""
+ )
+
+ assert not (
+ is_allowlisted(
+ allowlist, AWS_ACCOUNT_NUMBER, "check_test", "us-east-2", "test", ""
+ )
+ )
+
def test_is_allowlisted_in_region(self):
# Allowlist example
allowlisted_regions = [AWS_REGION, "eu-west-1"]
| [Bug]: Unbound local variable `checks_multi_account`
### Steps to Reproduce
1. What command are you running:
`prowler aws -M html json -f us-west-2 -w allowlist.yaml --ignore-exit-code-3`
2. Cloud provider you are launching: AWS
3. Environment you have: single account
4. See error
### Expected behavior
Prowler analyzes the account and successfully returns a report.
### Actual Result with Screenshots or Logs
```
_ __ _ __ _____ _| | ___ _ __
| '_ \| '__/ _ \ \ /\ / / |/ _ \ '__|
| |_) | | | (_) \ V V /| | __/ |
| .__/|_| \___/ \_/\_/ |_|\___|_|v3.7.1
|_| the handy cloud security tool
Date: 2023-07-13 10:33:15
This report is being generated using credentials below:
AWS-CLI Profile: [default] AWS Filter Region: [us-west-2]
AWS Account: [************] UserId: [**************]
Caller Identity ARN: [arn:aws:sts::************:assumed-role/AWSReservedSSO_AdministratorAccess_*******/****@****.***]
Executing 284 checks, please wait...
2023-07-13 10:33:17,306 [File: allowlist.py:144] [Module: allowlist] CRITICAL: UnboundLocalError -- cannot access local variable 'checks_multi_account' where it is not associated with a value[130]
-> Scanning accessanalyzer service |⚠︎ | (!) 0/284 [0%] in 0.2s
```
### How did you install Prowler?
From pip package (pip install prowler)
### Environment Resource
We are seeing the error on multiple environments:
- CI/CD build node
- Local macOS workstation
### OS used
1. RHEL
2. macOS 13.4
### Prowler version
3.7.1
### Pip version
23.0.1
### Context
Prowler performs as expected in version `3.7.0`, and fails on `3.7.1`
We are using an `allowlist.yaml` file, with our account number specified instead of using a `*` for the account.
| 2023-07-13T18:10:59 |
|
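The fix above moves one line inside the `if`. A minimal reproduction (invented names, not Prowler's code) of why the pre-patch ordering raises `UnboundLocalError` for an allowlist that names a specific account but has no `"*"` entry:

```python
# Pre-patch shape: checks_multi_account is only bound when "*" is present,
# but update() ran unconditionally, so a single-account allowlist crashed.
def merge_wildcard_checks_buggy(accounts: dict) -> set:
    allowlisted_checks = set()
    if "*" in accounts:
        checks_multi_account = accounts["*"]
    allowlisted_checks.update(checks_multi_account)  # UnboundLocalError if "*" is missing
    return allowlisted_checks


def merge_wildcard_checks_fixed(accounts: dict) -> set:
    allowlisted_checks = set()
    if "*" in accounts:
        checks_multi_account = accounts["*"]
        allowlisted_checks.update(checks_multi_account)  # only touched when bound
    return allowlisted_checks


single_account_allowlist = {"123456789012": {"check_test"}}
print(merge_wildcard_checks_fixed(single_account_allowlist))  # set(): no wildcard entry, no crash
try:
    merge_wildcard_checks_buggy(single_account_allowlist)
except UnboundLocalError as exc:
    print(exc)  # cannot access local variable 'checks_multi_account' ...
```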
prowler-cloud/prowler | 2,611 | prowler-cloud__prowler-2611 | [
"2605"
] | 47a82560ead4b852e8b9c829b8375585d6c7910e | diff --git a/prowler/providers/aws/lib/policy_condition_parser/policy_condition_parser.py b/prowler/providers/aws/lib/policy_condition_parser/policy_condition_parser.py
--- a/prowler/providers/aws/lib/policy_condition_parser/policy_condition_parser.py
+++ b/prowler/providers/aws/lib/policy_condition_parser/policy_condition_parser.py
@@ -1,39 +1,45 @@
# takes a list of accounts and returns the valid ones
def is_account_only_allowed_in_condition(
condition_statement: dict, source_account: str
):
is_condition_valid = False
valid_condition_options = {
- "StringEquals": "aws:SourceAccount",
- "ArnLike": "aws:SourceArn",
- "ArnEquals": "aws:SourceArn",
+ "StringEquals": [
+ "aws:SourceAccount",
+ "s3:ResourceAccount",
+ "aws:PrincipalAccount",
+ ],
+ "StringLike": ["aws:SourceArn", "aws:PrincipalArn"],
+ "ArnLike": ["aws:SourceArn", "aws:PrincipalArn"],
+ "ArnEquals": ["aws:SourceArn", "aws:PrincipalArn"],
}
+
for condition_operator, condition_operator_key in valid_condition_options.items():
if condition_operator in condition_statement:
- if condition_operator_key in condition_statement[condition_operator]:
- # values are a list
- if isinstance(
- condition_statement[condition_operator][condition_operator_key],
- list,
- ):
- # if there is an arn/account without the source account -> we do not consider it safe
- # here by default we assume is true and look for false entries
- is_condition_valid = True
- for item in condition_statement[condition_operator][
- condition_operator_key
- ]:
- if source_account not in item:
- is_condition_valid = False
- break
- # value is a string
- elif isinstance(
- condition_statement[condition_operator][condition_operator_key], str
- ):
- if (
- source_account
- in condition_statement[condition_operator][
- condition_operator_key
- ]
+ for value in condition_operator_key:
+ if value in condition_statement[condition_operator]:
+ # values are a list
+ if isinstance(
+ condition_statement[condition_operator][value],
+ list,
):
+ # if there is an arn/account without the source account -> we do not consider it safe
+ # here by default we assume is true and look for false entries
is_condition_valid = True
+ for item in condition_statement[condition_operator][value]:
+ if source_account not in item:
+ is_condition_valid = False
+ break
+
+ # value is a string
+ elif isinstance(
+ condition_statement[condition_operator][value],
+ str,
+ ):
+ if (
+ source_account
+ in condition_statement[condition_operator][value]
+ ):
+ is_condition_valid = True
return is_condition_valid
diff --git a/prowler/providers/aws/services/vpc/vpc_endpoint_connections_trust_boundaries/vpc_endpoint_connections_trust_boundaries.py b/prowler/providers/aws/services/vpc/vpc_endpoint_connections_trust_boundaries/vpc_endpoint_connections_trust_boundaries.py
--- a/prowler/providers/aws/services/vpc/vpc_endpoint_connections_trust_boundaries/vpc_endpoint_connections_trust_boundaries.py
+++ b/prowler/providers/aws/services/vpc/vpc_endpoint_connections_trust_boundaries/vpc_endpoint_connections_trust_boundaries.py
@@ -1,5 +1,10 @@
+from re import compile
+
from prowler.config.config import get_config_var
from prowler.lib.check.models import Check, Check_Report_AWS
+from prowler.providers.aws.lib.policy_condition_parser.policy_condition_parser import (
+ is_account_only_allowed_in_condition,
+)
from prowler.providers.aws.services.vpc.vpc_client import vpc_client
@@ -8,52 +13,141 @@ def execute(self):
findings = []
# Get trusted account_ids from prowler.config.yaml
trusted_account_ids = get_config_var("trusted_account_ids")
+ # Always include the same account as trusted
+ trusted_account_ids.append(vpc_client.audited_account)
for endpoint in vpc_client.vpc_endpoints:
# Check VPC endpoint policy
if endpoint.policy_document:
+ access_from_trusted_accounts = True
for statement in endpoint.policy_document["Statement"]:
+ # If one policy allows access from a non-trusted account
+ if not access_from_trusted_accounts:
+ break
if "*" == statement["Principal"]:
report = Check_Report_AWS(self.metadata())
report.region = endpoint.region
- report.status = "FAIL"
- report.status_extended = f"VPC Endpoint {endpoint.id} in VPC {endpoint.vpc_id} has full access."
report.resource_id = endpoint.id
report.resource_arn = endpoint.arn
report.resource_tags = endpoint.tags
+
+ for account_id in trusted_account_ids:
+ if (
+ "Condition" in statement
+ and is_account_only_allowed_in_condition(
+ statement["Condition"], account_id
+ )
+ ):
+ access_from_trusted_accounts = True
+ else:
+ access_from_trusted_accounts = False
+ break
+
+ if (
+ not access_from_trusted_accounts
+ or len(trusted_account_ids) == 0
+ ):
+ access_from_trusted_accounts = False
+ report.status = "FAIL"
+ report.status_extended = f"VPC Endpoint {endpoint.id} in VPC {endpoint.vpc_id} can be accessed from non-trusted accounts."
+ else:
+ report.status = "PASS"
+ report.status_extended = f"VPC Endpoint {endpoint.id} in VPC {endpoint.vpc_id} can only be accessed from trusted accounts."
+
findings.append(report)
- break
+ if not access_from_trusted_accounts:
+ break
else:
- if type(statement["Principal"]["AWS"]) == str:
+ if isinstance(statement["Principal"]["AWS"], str):
principals = [statement["Principal"]["AWS"]]
else:
principals = statement["Principal"]["AWS"]
for principal_arn in principals:
- report = Check_Report_AWS(self.metadata())
- report.region = endpoint.region
if principal_arn == "*":
- report.status = "FAIL"
- report.status_extended = f"VPC Endpoint {endpoint.id} in VPC {endpoint.vpc_id} has full access."
+ report = Check_Report_AWS(self.metadata())
+ report.region = endpoint.region
report.resource_id = endpoint.id
report.resource_arn = endpoint.arn
report.resource_tags = endpoint.tags
+
+ for account_id in trusted_account_ids:
+ if (
+ "Condition" in statement
+ and is_account_only_allowed_in_condition(
+ statement["Condition"], account_id
+ )
+ ):
+ access_from_trusted_accounts = True
+ else:
+ access_from_trusted_accounts = False
+ break
+
+ if (
+ not access_from_trusted_accounts
+ or len(trusted_account_ids) == 0
+ ):
+ access_from_trusted_accounts = False
+ report.status = "FAIL"
+ report.status_extended = f"VPC Endpoint {endpoint.id} in VPC {endpoint.vpc_id} can be accessed from non-trusted accounts."
+ else:
+ report.status = "PASS"
+ report.status_extended = f"VPC Endpoint {endpoint.id} in VPC {endpoint.vpc_id} can only be accessed from trusted accounts."
+
+ findings.append(report)
+ if not access_from_trusted_accounts:
+ break
else:
- account_id = principal_arn.split(":")[4]
+ # Account ID can be an ARN or just a 12-digit string
+ pattern = compile(r"^[0-9]{12}$")
+ match = pattern.match(principal_arn)
+ if not match:
+ account_id = principal_arn.split(":")[4]
+ else:
+ account_id = match.string
if (
account_id in trusted_account_ids
or account_id in vpc_client.audited_account
):
+ report = Check_Report_AWS(self.metadata())
+ report.region = endpoint.region
report.status = "PASS"
report.status_extended = f"Found trusted account {account_id} in VPC Endpoint {endpoint.id} in VPC {endpoint.vpc_id}."
report.resource_id = endpoint.id
report.resource_arn = endpoint.arn
report.resource_tags = endpoint.tags
+ findings.append(report)
else:
- report.status = "FAIL"
- report.status_extended = f"Found untrusted account {account_id} in VPC Endpoint {endpoint.id} in VPC {endpoint.vpc_id}."
+ report = Check_Report_AWS(self.metadata())
+ report.region = endpoint.region
report.resource_id = endpoint.id
report.resource_arn = endpoint.arn
report.resource_tags = endpoint.tags
- findings.append(report)
+
+ for account_id in trusted_account_ids:
+ if (
+ "Condition" in statement
+ and is_account_only_allowed_in_condition(
+ statement["Condition"], account_id
+ )
+ ):
+ access_from_trusted_accounts = True
+ else:
+ access_from_trusted_accounts = False
+ break
+
+ if (
+ not access_from_trusted_accounts
+ or len(trusted_account_ids) == 0
+ ):
+ access_from_trusted_accounts = False
+ report.status = "FAIL"
+ report.status_extended = f"VPC Endpoint {endpoint.id} in VPC {endpoint.vpc_id} can be accessed from non-trusted accounts."
+ else:
+ report.status = "PASS"
+ report.status_extended = f"VPC Endpoint {endpoint.id} in VPC {endpoint.vpc_id} can only be accessed from trusted accounts."
+
+ findings.append(report)
+ if not access_from_trusted_accounts:
+ break
return findings
| diff --git a/tests/providers/aws/lib/policy_condition_parser/policy_condition_parser_test.py b/tests/providers/aws/lib/policy_condition_parser/policy_condition_parser_test.py
--- a/tests/providers/aws/lib/policy_condition_parser/policy_condition_parser_test.py
+++ b/tests/providers/aws/lib/policy_condition_parser/policy_condition_parser_test.py
@@ -6,19 +6,19 @@
class Test_policy_condition_parser:
- def test_condition_parser_string_equals_list(self):
+ def test_condition_parser_string_equals_aws_SourceAccount_list(self):
condition_statement = {"StringEquals": {"aws:SourceAccount": ["123456789012"]}}
assert is_account_only_allowed_in_condition(
condition_statement, AWS_ACCOUNT_NUMBER
)
- def test_condition_parser_string_equals_str(self):
+ def test_condition_parser_string_equals_aws_SourceAccount_str(self):
condition_statement = {"StringEquals": {"aws:SourceAccount": "123456789012"}}
assert is_account_only_allowed_in_condition(
condition_statement, AWS_ACCOUNT_NUMBER
)
- def test_condition_parser_string_equals_list_not_valid(self):
+ def test_condition_parser_string_equals_aws_SourceAccount_list_not_valid(self):
condition_statement = {
"StringEquals": {"aws:SourceAccount": ["123456789012", "111222333444"]}
}
@@ -26,13 +26,67 @@ def test_condition_parser_string_equals_list_not_valid(self):
condition_statement, AWS_ACCOUNT_NUMBER
)
- def test_condition_parser_string_equals_str_not_valid(self):
+ def test_condition_parser_string_equals_aws_SourceAccount_str_not_valid(self):
condition_statement = {"StringEquals": {"aws:SourceAccount": "111222333444"}}
assert not is_account_only_allowed_in_condition(
condition_statement, AWS_ACCOUNT_NUMBER
)
- def test_condition_parser_arnlike_list(self):
+ def test_condition_parser_string_equals_s3_ResourceAccount_list(self):
+ condition_statement = {"StringEquals": {"s3:ResourceAccount": ["123456789012"]}}
+ assert is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_s3_ResourceAccount_str(self):
+ condition_statement = {"StringEquals": {"s3:ResourceAccount": "123456789012"}}
+ assert is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_s3_ResourceAccount_list_not_valid(self):
+ condition_statement = {
+ "StringEquals": {"s3:ResourceAccount": ["123456789012", "111222333444"]}
+ }
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_s3_ResourceAccount_str_not_valid(self):
+ condition_statement = {"StringEquals": {"s3:ResourceAccount": "111222333444"}}
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_aws_PrincipalAccount_list(self):
+ condition_statement = {
+ "StringEquals": {"aws:PrincipalAccount": ["123456789012"]}
+ }
+ assert is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_aws_PrincipalAccount_str(self):
+ condition_statement = {"StringEquals": {"aws:PrincipalAccount": "123456789012"}}
+ assert is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_aws_PrincipalAccount_list_not_valid(self):
+ condition_statement = {
+ "StringEquals": {"aws:PrincipalAccount": ["123456789012", "111222333444"]}
+ }
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_aws_PrincipalAccount_str_not_valid(self):
+ condition_statement = {"StringEquals": {"aws:PrincipalAccount": "111222333444"}}
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_like_aws_SourceArn_list(self):
condition_statement = {
"ArnLike": {"aws:SourceArn": ["arn:aws:cloudtrail:*:123456789012:trail/*"]}
}
@@ -41,7 +95,7 @@ def test_condition_parser_arnlike_list(self):
condition_statement, AWS_ACCOUNT_NUMBER
)
- def test_condition_parser_arnlike_list_not_valid(self):
+ def test_condition_parser_arn_like_aws_SourceArn_list_not_valid(self):
condition_statement = {
"ArnLike": {
"aws:SourceArn": [
@@ -55,7 +109,7 @@ def test_condition_parser_arnlike_list_not_valid(self):
condition_statement, AWS_ACCOUNT_NUMBER
)
- def test_condition_parser_arnlike_str(self):
+ def test_condition_parser_arn_like_aws_SourceArn_str(self):
condition_statement = {
"ArnLike": {"aws:SourceArn": "arn:aws:cloudtrail:*:123456789012:trail/*"}
}
@@ -64,7 +118,7 @@ def test_condition_parser_arnlike_str(self):
condition_statement, AWS_ACCOUNT_NUMBER
)
- def test_condition_parser_arnlike_str_not_valid(self):
+ def test_condition_parser_arn_like_aws_SourceArn_str_not_valid(self):
condition_statement = {
"ArnLike": {"aws:SourceArn": "arn:aws:cloudtrail:*:111222333444:trail/*"}
}
@@ -73,7 +127,50 @@ def test_condition_parser_arnlike_str_not_valid(self):
condition_statement, AWS_ACCOUNT_NUMBER
)
- def test_condition_parser_arnequals_list(self):
+ def test_condition_parser_arn_like_aws_PrincipalArn_list(self):
+ condition_statement = {
+ "ArnLike": {
+ "aws:PrincipalArn": ["arn:aws:cloudtrail:*:123456789012:trail/*"]
+ }
+ }
+
+ assert is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_like_aws_PrincipalArn_list_not_valid(self):
+ condition_statement = {
+ "ArnLike": {
+ "aws:PrincipalArn": [
+ "arn:aws:cloudtrail:*:123456789012:trail/*",
+ "arn:aws:cloudtrail:*:111222333444:trail/*",
+ ]
+ }
+ }
+
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_like_aws_PrincipalArn_str(self):
+ condition_statement = {
+ "ArnLike": {"aws:PrincipalArn": "arn:aws:cloudtrail:*:123456789012:trail/*"}
+ }
+
+ assert is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_like_aws_PrincipalArn_str_not_valid(self):
+ condition_statement = {
+ "ArnLike": {"aws:PrincipalArn": "arn:aws:cloudtrail:*:111222333444:trail/*"}
+ }
+
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_equals_aws_SourceArn_list(self):
condition_statement = {
"ArnEquals": {
"aws:SourceArn": [
@@ -86,7 +183,7 @@ def test_condition_parser_arnequals_list(self):
condition_statement, AWS_ACCOUNT_NUMBER
)
- def test_condition_parser_arnequals_list_not_valid(self):
+ def test_condition_parser_arn_equals_aws_SourceArn_list_not_valid(self):
condition_statement = {
"ArnEquals": {
"aws:SourceArn": [
@@ -100,7 +197,7 @@ def test_condition_parser_arnequals_list_not_valid(self):
condition_statement, AWS_ACCOUNT_NUMBER
)
- def test_condition_parser_arnequals_str(self):
+ def test_condition_parser_arn_equals_aws_SourceArn_str(self):
condition_statement = {
"ArnEquals": {
"aws:SourceArn": "arn:aws:cloudtrail:eu-west-1:123456789012:trail/test"
@@ -111,9 +208,107 @@ def test_condition_parser_arnequals_str(self):
condition_statement, AWS_ACCOUNT_NUMBER
)
- def test_condition_parser_arnequals_str_not_valid(self):
+ def test_condition_parser_arn_equals_aws_SourceArn_str_not_valid(self):
+ condition_statement = {
+ "ArnEquals": {
+ "aws:SourceArn": "arn:aws:cloudtrail:eu-west-1:111222333444:trail/test"
+ }
+ }
+
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_equals_aws_PrincipalArn_list(self):
+ condition_statement = {
+ "ArnEquals": {
+ "aws:PrincipalArn": [
+ "arn:aws:cloudtrail:eu-west-1:123456789012:trail/test"
+ ]
+ }
+ }
+
+ assert is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_equals_aws_PrincipalArn_list_not_valid(self):
+ condition_statement = {
+ "ArnEquals": {
+ "aws:PrincipalArn": [
+ "arn:aws:cloudtrail:eu-west-1:123456789012:trail/test",
+ "arn:aws:cloudtrail:eu-west-1:111222333444:trail/test",
+ ]
+ }
+ }
+
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_equals_aws_PrincipalArn_str(self):
+ condition_statement = {
+ "ArnEquals": {
+ "aws:PrincipalArn": "arn:aws:cloudtrail:eu-west-1:123456789012:trail/test"
+ }
+ }
+
+ assert is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_equals_aws_PrincipalArn_str_not_valid(self):
condition_statement = {
"ArnEquals": {
+ "aws:PrincipalArn": "arn:aws:cloudtrail:eu-west-1:111222333444:trail/test"
+ }
+ }
+
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_aws_SourceArn_list(self):
+ condition_statement = {
+ "StringLike": {
+ "aws:SourceArn": [
+ "arn:aws:cloudtrail:eu-west-1:123456789012:trail/test"
+ ]
+ }
+ }
+
+ assert is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_aws_SourceArn_list_not_valid(self):
+ condition_statement = {
+ "StringLike": {
+ "aws:SourceArn": [
+ "arn:aws:cloudtrail:eu-west-1:123456789012:trail/test",
+ "arn:aws:cloudtrail:eu-west-1:111222333444:trail/test",
+ ]
+ }
+ }
+
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_aws_SourceArn_str(self):
+ condition_statement = {
+ "StringLike": {
+ "aws:SourceArn": "arn:aws:cloudtrail:eu-west-1:123456789012:trail/test"
+ }
+ }
+
+ assert is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_aws_SourceArn_str_not_valid(self):
+ condition_statement = {
+ "StringLike": {
"aws:SourceArn": "arn:aws:cloudtrail:eu-west-1:111222333444:trail/test"
}
}
@@ -121,3 +316,52 @@ def test_condition_parser_arnequals_str_not_valid(self):
assert not is_account_only_allowed_in_condition(
condition_statement, AWS_ACCOUNT_NUMBER
)
+
+ def test_condition_parser_string_like_aws_PrincipalArn_list(self):
+ condition_statement = {
+ "StringLike": {
+ "aws:PrincipalArn": [
+ "arn:aws:cloudtrail:eu-west-1:123456789012:trail/test"
+ ]
+ }
+ }
+
+ assert is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_aws_PrincipalArn_list_not_valid(self):
+ condition_statement = {
+ "StringLike": {
+ "aws:PrincipalArn": [
+ "arn:aws:cloudtrail:eu-west-1:123456789012:trail/test",
+ "arn:aws:cloudtrail:eu-west-1:111222333444:trail/test",
+ ]
+ }
+ }
+
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_aws_PrincipalArn_str(self):
+ condition_statement = {
+ "StringLike": {
+ "aws:PrincipalArn": "arn:aws:cloudtrail:eu-west-1:123456789012:trail/test"
+ }
+ }
+
+ assert is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_aws_PrincipalArn_str_not_valid(self):
+ condition_statement = {
+ "StringLike": {
+ "aws:PrincipalArn": "arn:aws:cloudtrail:eu-west-1:111222333444:trail/test"
+ }
+ }
+
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, AWS_ACCOUNT_NUMBER
+ )
diff --git a/tests/providers/aws/services/vpc/vpc_endpoint_connections_trust_boundaries/vpc_endpoint_connections_trust_boundaries_test.py b/tests/providers/aws/services/vpc/vpc_endpoint_connections_trust_boundaries/vpc_endpoint_connections_trust_boundaries_test.py
--- a/tests/providers/aws/services/vpc/vpc_endpoint_connections_trust_boundaries/vpc_endpoint_connections_trust_boundaries_test.py
+++ b/tests/providers/aws/services/vpc/vpc_endpoint_connections_trust_boundaries/vpc_endpoint_connections_trust_boundaries_test.py
@@ -8,11 +8,13 @@
AWS_REGION = "us-east-1"
AWS_ACCOUNT_NUMBER = "123456789012"
+TRUSTED_AWS_ACCOUNT_NUMBER = "111122223333"
+NON_TRUSTED_AWS_ACCOUNT_NUMBER = "000011112222"
def mock_get_config_var(config_var):
if config_var == "trusted_account_ids":
- return ["123456789010"]
+ return [TRUSTED_AWS_ACCOUNT_NUMBER]
return []
@@ -117,7 +119,7 @@ def test_vpc_endpoint_with_full_access(self):
assert result[0].status == "FAIL"
assert (
result[0].status_extended
- == f"VPC Endpoint {vpc_endpoint['VpcEndpoint']['VpcEndpointId']} in VPC {vpc['VpcId']} has full access."
+ == f"VPC Endpoint {vpc_endpoint['VpcEndpoint']['VpcEndpointId']} in VPC {vpc['VpcId']} can be accessed from non-trusted accounts."
)
assert (
result[0].resource_id
@@ -126,7 +128,7 @@ def test_vpc_endpoint_with_full_access(self):
assert result[0].region == AWS_REGION
@mock_ec2
- def test_vpc_endpoint_with_trusted_account(self):
+ def test_vpc_endpoint_with_trusted_account_arn(self):
# Create VPC Mocked Resources
ec2_client = client("ec2", region_name=AWS_REGION)
@@ -143,7 +145,67 @@ def test_vpc_endpoint_with_trusted_account(self):
"Statement": [
{
"Effect": "Allow",
- "Principal": {"AWS": "arn:aws:iam::123456789012:root"},
+ "Principal": {
+ "AWS": f"arn:aws:iam::{AWS_ACCOUNT_NUMBER}:root"
+ },
+ "Action": "*",
+ "Resource": "*",
+ }
+ ]
+ }
+ ),
+ )
+ from prowler.providers.aws.services.vpc.vpc_service import VPC
+
+ current_audit_info = self.set_mocked_audit_info()
+
+ with mock.patch(
+ "prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
+ new=current_audit_info,
+ ):
+ with mock.patch(
+ "prowler.providers.aws.services.vpc.vpc_endpoint_connections_trust_boundaries.vpc_endpoint_connections_trust_boundaries.vpc_client",
+ new=VPC(current_audit_info),
+ ):
+ # Test Check
+ from prowler.providers.aws.services.vpc.vpc_endpoint_connections_trust_boundaries.vpc_endpoint_connections_trust_boundaries import (
+ vpc_endpoint_connections_trust_boundaries,
+ )
+
+ check = vpc_endpoint_connections_trust_boundaries()
+ result = check.execute()
+
+ assert len(result) == 1
+ assert result[0].status == "PASS"
+ assert (
+ result[0].status_extended
+ == f"Found trusted account {AWS_ACCOUNT_NUMBER} in VPC Endpoint {vpc_endpoint['VpcEndpoint']['VpcEndpointId']} in VPC {vpc['VpcId']}."
+ )
+ assert (
+ result[0].resource_id
+ == vpc_endpoint["VpcEndpoint"]["VpcEndpointId"]
+ )
+ assert result[0].region == AWS_REGION
+
+ @mock_ec2
+ def test_vpc_endpoint_with_trusted_account_id(self):
+ # Create VPC Mocked Resources
+ ec2_client = client("ec2", region_name=AWS_REGION)
+
+ vpc = ec2_client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
+
+ route_table = ec2_client.create_route_table(VpcId=vpc["VpcId"])["RouteTable"]
+ vpc_endpoint = ec2_client.create_vpc_endpoint(
+ VpcId=vpc["VpcId"],
+ ServiceName="com.amazonaws.us-east-1.s3",
+ RouteTableIds=[route_table["RouteTableId"]],
+ VpcEndpointType="Gateway",
+ PolicyDocument=json.dumps(
+ {
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {"AWS": AWS_ACCOUNT_NUMBER},
"Action": "*",
"Resource": "*",
}
@@ -201,7 +263,9 @@ def test_vpc_endpoint_with_untrusted_account(self):
"Statement": [
{
"Effect": "Allow",
- "Principal": {"AWS": "arn:aws:iam::123456789010:root"},
+ "Principal": {
+ "AWS": f"arn:aws:iam::{NON_TRUSTED_AWS_ACCOUNT_NUMBER}:root"
+ },
"Action": "*",
"Resource": "*",
}
@@ -234,13 +298,77 @@ def test_vpc_endpoint_with_untrusted_account(self):
assert result[0].status == "FAIL"
assert (
result[0].status_extended
- == f"Found untrusted account 123456789010 in VPC Endpoint {vpc_endpoint['VpcEndpoint']['VpcEndpointId']} in VPC {vpc['VpcId']}."
+ == f"VPC Endpoint {vpc_endpoint['VpcEndpoint']['VpcEndpointId']} in VPC {vpc['VpcId']} can be accessed from non-trusted accounts."
)
assert (
result[0].resource_id
== vpc_endpoint["VpcEndpoint"]["VpcEndpointId"]
)
+ @mock_ec2
+ def test_vpc_endpoint_with_config_trusted_account_with_arn(self):
+ # Create VPC Mocked Resources
+ ec2_client = client("ec2", region_name=AWS_REGION)
+
+ vpc = ec2_client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
+
+ route_table = ec2_client.create_route_table(VpcId=vpc["VpcId"])["RouteTable"]
+ vpc_endpoint = ec2_client.create_vpc_endpoint(
+ VpcId=vpc["VpcId"],
+ ServiceName="com.amazonaws.us-east-1.s3",
+ RouteTableIds=[route_table["RouteTableId"]],
+ VpcEndpointType="Gateway",
+ PolicyDocument=json.dumps(
+ {
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": f"arn:aws:iam::{TRUSTED_AWS_ACCOUNT_NUMBER}:root"
+ },
+ "Action": "*",
+ "Resource": "*",
+ }
+ ]
+ }
+ ),
+ )
+ from prowler.providers.aws.services.vpc.vpc_service import VPC
+
+ current_audit_info = self.set_mocked_audit_info()
+
+ with mock.patch(
+ "prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
+ new=current_audit_info,
+ ):
+ with mock.patch(
+ "prowler.providers.aws.services.vpc.vpc_endpoint_connections_trust_boundaries.vpc_endpoint_connections_trust_boundaries.vpc_client",
+ new=VPC(current_audit_info),
+ ):
+ with mock.patch(
+ "prowler.providers.aws.services.vpc.vpc_endpoint_connections_trust_boundaries.vpc_endpoint_connections_trust_boundaries.get_config_var",
+ new=mock_get_config_var,
+ ):
+ # Test Check
+ from prowler.providers.aws.services.vpc.vpc_endpoint_connections_trust_boundaries.vpc_endpoint_connections_trust_boundaries import (
+ vpc_endpoint_connections_trust_boundaries,
+ )
+
+ check = vpc_endpoint_connections_trust_boundaries()
+ result = check.execute()
+
+ assert len(result) == 1
+ assert result[0].status == "PASS"
+ assert (
+ result[0].status_extended
+ == f"Found trusted account {TRUSTED_AWS_ACCOUNT_NUMBER} in VPC Endpoint {vpc_endpoint['VpcEndpoint']['VpcEndpointId']} in VPC {vpc['VpcId']}."
+ )
+ assert (
+ result[0].resource_id
+ == vpc_endpoint["VpcEndpoint"]["VpcEndpointId"]
+ )
+ assert result[0].region == AWS_REGION
+
@mock_ec2
def test_vpc_endpoint_with_config_trusted_account(self):
# Create VPC Mocked Resources
@@ -259,7 +387,7 @@ def test_vpc_endpoint_with_config_trusted_account(self):
"Statement": [
{
"Effect": "Allow",
- "Principal": {"AWS": "arn:aws:iam::123456789010:root"},
+ "Principal": {"AWS": [TRUSTED_AWS_ACCOUNT_NUMBER]},
"Action": "*",
"Resource": "*",
}
@@ -295,10 +423,261 @@ def test_vpc_endpoint_with_config_trusted_account(self):
assert result[0].status == "PASS"
assert (
result[0].status_extended
- == f"Found trusted account 123456789010 in VPC Endpoint {vpc_endpoint['VpcEndpoint']['VpcEndpointId']} in VPC {vpc['VpcId']}."
+ == f"Found trusted account {TRUSTED_AWS_ACCOUNT_NUMBER} in VPC Endpoint {vpc_endpoint['VpcEndpoint']['VpcEndpointId']} in VPC {vpc['VpcId']}."
)
assert (
result[0].resource_id
== vpc_endpoint["VpcEndpoint"]["VpcEndpointId"]
)
assert result[0].region == AWS_REGION
+
+ @mock_ec2
+ def test_vpc_endpoint_with_two_account_ids_one_trusted_one_not(self):
+ # Create VPC Mocked Resources
+ ec2_client = client("ec2", region_name=AWS_REGION)
+
+ vpc = ec2_client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
+
+ route_table = ec2_client.create_route_table(VpcId=vpc["VpcId"])["RouteTable"]
+ vpc_endpoint = ec2_client.create_vpc_endpoint(
+ VpcId=vpc["VpcId"],
+ ServiceName="com.amazonaws.us-east-1.s3",
+ RouteTableIds=[route_table["RouteTableId"]],
+ VpcEndpointType="Gateway",
+ PolicyDocument=json.dumps(
+ {
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": [
+ NON_TRUSTED_AWS_ACCOUNT_NUMBER,
+ TRUSTED_AWS_ACCOUNT_NUMBER,
+ ]
+ },
+ "Action": "*",
+ "Resource": "*",
+ }
+ ]
+ }
+ ),
+ )
+ from prowler.providers.aws.services.vpc.vpc_service import VPC
+
+ current_audit_info = self.set_mocked_audit_info()
+
+ with mock.patch(
+ "prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
+ new=current_audit_info,
+ ):
+ with mock.patch(
+ "prowler.providers.aws.services.vpc.vpc_endpoint_connections_trust_boundaries.vpc_endpoint_connections_trust_boundaries.vpc_client",
+ new=VPC(current_audit_info),
+ ):
+ # Test Check
+ from prowler.providers.aws.services.vpc.vpc_endpoint_connections_trust_boundaries.vpc_endpoint_connections_trust_boundaries import (
+ vpc_endpoint_connections_trust_boundaries,
+ )
+
+ check = vpc_endpoint_connections_trust_boundaries()
+ result = check.execute()
+
+ assert len(result) == 1
+ assert result[0].status == "FAIL"
+ assert (
+ result[0].status_extended
+ == f"VPC Endpoint {vpc_endpoint['VpcEndpoint']['VpcEndpointId']} in VPC {vpc['VpcId']} can be accessed from non-trusted accounts."
+ )
+ assert (
+ result[0].resource_id
+ == vpc_endpoint["VpcEndpoint"]["VpcEndpointId"]
+ )
+ assert result[0].region == AWS_REGION
+
+ @mock_ec2
+ def test_vpc_endpoint_with_aws_principal_all(self):
+ # Create VPC Mocked Resources
+ ec2_client = client("ec2", region_name=AWS_REGION)
+
+ vpc = ec2_client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
+
+ route_table = ec2_client.create_route_table(VpcId=vpc["VpcId"])["RouteTable"]
+ vpc_endpoint = ec2_client.create_vpc_endpoint(
+ VpcId=vpc["VpcId"],
+ ServiceName="com.amazonaws.us-east-1.s3",
+ RouteTableIds=[route_table["RouteTableId"]],
+ VpcEndpointType="Gateway",
+ PolicyDocument=json.dumps(
+ {
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {"AWS": "*"},
+ "Action": "*",
+ "Resource": "*",
+ }
+ ]
+ }
+ ),
+ )
+ from prowler.providers.aws.services.vpc.vpc_service import VPC
+
+ current_audit_info = self.set_mocked_audit_info()
+
+ with mock.patch(
+ "prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
+ new=current_audit_info,
+ ):
+ with mock.patch(
+ "prowler.providers.aws.services.vpc.vpc_endpoint_connections_trust_boundaries.vpc_endpoint_connections_trust_boundaries.vpc_client",
+ new=VPC(current_audit_info),
+ ):
+ # Test Check
+ from prowler.providers.aws.services.vpc.vpc_endpoint_connections_trust_boundaries.vpc_endpoint_connections_trust_boundaries import (
+ vpc_endpoint_connections_trust_boundaries,
+ )
+
+ check = vpc_endpoint_connections_trust_boundaries()
+ result = check.execute()
+
+ assert len(result) == 1
+ assert result[0].status == "FAIL"
+ assert (
+ result[0].status_extended
+ == f"VPC Endpoint {vpc_endpoint['VpcEndpoint']['VpcEndpointId']} in VPC {vpc['VpcId']} can be accessed from non-trusted accounts."
+ )
+ assert (
+ result[0].resource_id
+ == vpc_endpoint["VpcEndpoint"]["VpcEndpointId"]
+ )
+ assert result[0].region == AWS_REGION
+
+ @mock_ec2
+ def test_vpc_endpoint_with_aws_principal_all_but_restricted_condition_with_SourceAccount(
+ self,
+ ):
+ # Create VPC Mocked Resources
+ ec2_client = client("ec2", region_name=AWS_REGION)
+
+ vpc = ec2_client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
+
+ route_table = ec2_client.create_route_table(VpcId=vpc["VpcId"])["RouteTable"]
+ vpc_endpoint = ec2_client.create_vpc_endpoint(
+ VpcId=vpc["VpcId"],
+ ServiceName="com.amazonaws.us-east-1.s3",
+ RouteTableIds=[route_table["RouteTableId"]],
+ VpcEndpointType="Gateway",
+ PolicyDocument=json.dumps(
+ {
+ "Statement": [
+ {
+ "Action": "*",
+ "Effect": "Allow",
+ "Principal": "*",
+ "Resource": "*",
+ "Condition": {
+ "StringEquals": {
+ "aws:SourceAccount": AWS_ACCOUNT_NUMBER
+ }
+ },
+ }
+ ]
+ }
+ ),
+ )
+ from prowler.providers.aws.services.vpc.vpc_service import VPC
+
+ current_audit_info = self.set_mocked_audit_info()
+
+ with mock.patch(
+ "prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
+ new=current_audit_info,
+ ):
+ with mock.patch(
+ "prowler.providers.aws.services.vpc.vpc_endpoint_connections_trust_boundaries.vpc_endpoint_connections_trust_boundaries.vpc_client",
+ new=VPC(current_audit_info),
+ ):
+ # Test Check
+ from prowler.providers.aws.services.vpc.vpc_endpoint_connections_trust_boundaries.vpc_endpoint_connections_trust_boundaries import (
+ vpc_endpoint_connections_trust_boundaries,
+ )
+
+ check = vpc_endpoint_connections_trust_boundaries()
+ result = check.execute()
+
+ assert len(result) == 1
+ assert result[0].status == "PASS"
+ assert (
+ result[0].status_extended
+ == f"VPC Endpoint {vpc_endpoint['VpcEndpoint']['VpcEndpointId']} in VPC {vpc['VpcId']} can only be accessed from trusted accounts."
+ )
+ assert (
+ result[0].resource_id
+ == vpc_endpoint["VpcEndpoint"]["VpcEndpointId"]
+ )
+ assert result[0].region == AWS_REGION
+
+ @mock_ec2
+ def test_vpc_endpoint_with_aws_principal_all_but_restricted_condition_with_PrincipalAccount(
+ self,
+ ):
+ # Create VPC Mocked Resources
+ ec2_client = client("ec2", region_name=AWS_REGION)
+
+ vpc = ec2_client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
+
+ route_table = ec2_client.create_route_table(VpcId=vpc["VpcId"])["RouteTable"]
+ vpc_endpoint = ec2_client.create_vpc_endpoint(
+ VpcId=vpc["VpcId"],
+ ServiceName="com.amazonaws.us-east-1.s3",
+ RouteTableIds=[route_table["RouteTableId"]],
+ VpcEndpointType="Gateway",
+ PolicyDocument=json.dumps(
+ {
+ "Statement": [
+ {
+ "Action": "*",
+ "Effect": "Allow",
+ "Principal": "*",
+ "Resource": "*",
+ "Condition": {
+ "StringEquals": {
+ "aws:PrincipalAccount": AWS_ACCOUNT_NUMBER
+ }
+ },
+ }
+ ]
+ }
+ ),
+ )
+ from prowler.providers.aws.services.vpc.vpc_service import VPC
+
+ current_audit_info = self.set_mocked_audit_info()
+
+ with mock.patch(
+ "prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
+ new=current_audit_info,
+ ):
+ with mock.patch(
+ "prowler.providers.aws.services.vpc.vpc_endpoint_connections_trust_boundaries.vpc_endpoint_connections_trust_boundaries.vpc_client",
+ new=VPC(current_audit_info),
+ ):
+ # Test Check
+ from prowler.providers.aws.services.vpc.vpc_endpoint_connections_trust_boundaries.vpc_endpoint_connections_trust_boundaries import (
+ vpc_endpoint_connections_trust_boundaries,
+ )
+
+ check = vpc_endpoint_connections_trust_boundaries()
+ result = check.execute()
+
+ assert len(result) == 1
+ assert result[0].status == "PASS"
+ assert (
+ result[0].status_extended
+ == f"VPC Endpoint {vpc_endpoint['VpcEndpoint']['VpcEndpointId']} in VPC {vpc['VpcId']} can only be accessed from trusted accounts."
+ )
+ assert (
+ result[0].resource_id
+ == vpc_endpoint["VpcEndpoint"]["VpcEndpointId"]
+ )
+ assert result[0].region == AWS_REGION
| [Bug]: AWS vpc_endpoint_connections_trust_boundaries errors on valid endpoint policy
### Steps to Reproduce
1. In an AWS account 123456789, configure an AWS VPC Private Endpoint (interface) with a valid policy that uses the account ID as the Principal. e.g:
```json
{
"Statement": [
{
"Action": "*",
"Effect": "Allow",
"Principal": {
"AWS": [
"123456789"
]
},
"Resource": "*"
}
]
}
```
2. Run prowler
### Expected behavior
Expect a clean bill of health from vpc_endpoint_connections_trust_boundaries because it has a principal that matches the account ID of the current account (Per https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-accounts -> just setting the account ID is a valid AWS account principal), which is included in trusted_account_ids without configuring it explicitly (see the comment at https://github.com/prowler-cloud/prowler/blob/master/prowler/config/config.yaml#L10)
### Actual Result with Screenshots or Logs
Instead, prowler errors out:
```
7/20/2023, 2:54:06 PM EDT | 2023-07-20 18:54:06,299 [File: check.py:399] [Module: check] ERROR: vpc_endpoint_connections_trust_boundaries -- IndexError[41]: list index out of range | prowler
7/20/2023, 2:54:06 PM EDT | Something went wrong in vpc_endpoint_connections_trust_boundaries, please use --log-level ERROR | prowler
```
This is due to https://github.com/prowler-cloud/prowler/blob/d2e34c42fd2a2f470e6e7fcc8c84da653ebd2d19/prowler/providers/aws/services/vpc/vpc_endpoint_connections_trust_boundaries/vpc_endpoint_connections_trust_boundaries.py#L41
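For illustration, a minimal sketch of that failure mode: splitting a bare account-ID principal on `:` does not produce enough fields (the 9-digit ID is the example from above):

```python
# Reproduces the IndexError from vpc_endpoint_connections_trust_boundaries.py#L41
principal = "123456789"  # Principal.AWS given as a bare account ID, not an ARN

try:
    account_id = principal.split(":")[4]  # ARNs have at least 6 ":"-separated fields
except IndexError as err:
    print(f"IndexError: {err}")  # list index out of range

# An ARN-style principal behaves as the check currently expects:
arn_principal = "arn:aws:iam::123456789:root"
print(arn_principal.split(":")[4])  # 123456789
```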
### How did you install Prowler?
From pip package (pip install prowler)
### Environment Resource
Any
### OS used
Any
### Prowler version
3.7.1
### Pip version
Any
### Context
https://github.com/prowler-cloud/prowler/blob/d2e34c42fd2a2f470e6e7fcc8c84da653ebd2d19/prowler/providers/aws/services/vpc/vpc_endpoint_connections_trust_boundaries/vpc_endpoint_connections_trust_boundaries.py#L41 should not assume that the contents of `statement["Principal"]["AWS"]` are a list of ARNs _or_ `*`. It also needs to account for a string consisting of an AWS account ID (per https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html)
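As a hedged sketch, one way to handle both principal forms before extracting the account ID (this mirrors the regex approach in the patch above; the helper name is made up):

```python
from re import compile

ACCOUNT_ID_PATTERN = compile(r"^[0-9]{12}$")  # a bare 12-digit account ID

def extract_account_id(principal: str) -> str:
    """Return the account ID from either a bare account ID or a full ARN."""
    if ACCOUNT_ID_PATTERN.match(principal):
        return principal
    # arn:partition:service:region:account-id:resource
    return principal.split(":")[4]

print(extract_account_id("123456789012"))                    # 123456789012
print(extract_account_id("arn:aws:iam::123456789012:root"))  # 123456789012
```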
This can be worked around by updating policies within AWS to have the full ARN of the root user, and VPC Endpoint Gateway policies seem to transform this automatically (in my limited testing) but VPC Endpoint Interface policies seem to save it as just the account id. Prowler should not error on a "legal" AWS policy. Thanks!
[edit]: Having ARNs in the `principal` key actually seems to break policies on endpoints, and the `condition` key should be used instead. See discussion below.
| Additionally, I'm working with AWS support troubleshooting some policies. Prowler is looking for a "Principal" to be set to restrict access in VPC Endpoint policies. AWS seems to recommend (both in documentation, and via a person in support) to limit things using "Condition" in policies instead.
See:
* "Example: Restrict access to buckets in a specific AWS account" on https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints-s3.html#bucket-policies-s3. -> They use a condition
* "Principals for gateway endpoints" on https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints-access.html#vpc-endpoint-policies-gateway. -> this says "With gateway endpoints, you must use the aws:PrincipalArn condition key to grant access to a principal."
It looks like Prowler _must_ use the `condition` key for Gateway endpoints (Amazon S3 and DynamoDB), where it is currently just looking in the `principal` key. From the documentation, I can't tell if this is a hard requirement for Interface endpoints (things other than S3 or DynamoDB) but I should have this worked out next week. | 2023-07-24T13:13:20 |
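For reference, a sketch of what a condition-restricted statement looks like to the updated parser; it assumes the patched Prowler is installed and the account ID is illustrative:

```python
from prowler.providers.aws.lib.policy_condition_parser.policy_condition_parser import (
    is_account_only_allowed_in_condition,
)

statement = {
    "Action": "*",
    "Effect": "Allow",
    "Principal": "*",
    "Resource": "*",
    # Access is restricted with a condition key rather than an ARN principal,
    # as AWS recommends for gateway endpoints.
    "Condition": {"StringEquals": {"aws:PrincipalAccount": "123456789012"}},
}

# True: every value in the condition belongs to the trusted/audited account.
print(is_account_only_allowed_in_condition(statement["Condition"], "123456789012"))
```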
prowler-cloud/prowler | 2,639 | prowler-cloud__prowler-2639 | [
"2632"
] | e08424d3a377e55118150b81c928d7be290816b7 | diff --git a/prowler/providers/aws/services/ec2/ec2_instance_secrets_user_data/ec2_instance_secrets_user_data.py b/prowler/providers/aws/services/ec2/ec2_instance_secrets_user_data/ec2_instance_secrets_user_data.py
--- a/prowler/providers/aws/services/ec2/ec2_instance_secrets_user_data/ec2_instance_secrets_user_data.py
+++ b/prowler/providers/aws/services/ec2/ec2_instance_secrets_user_data/ec2_instance_secrets_user_data.py
@@ -38,9 +38,19 @@ def execute(self):
with default_settings():
secrets.scan_file(temp_user_data_file.name)
- if secrets.json():
+ detect_secrets_output = secrets.json()
+ if detect_secrets_output:
+ secrets_string = ", ".join(
+ [
+ f"{secret['type']} on line {secret['line_number']}"
+ for secret in detect_secrets_output[
+ temp_user_data_file.name
+ ]
+ ]
+ )
report.status = "FAIL"
- report.status_extended = f"Potential secret found in EC2 instance {instance.id} User Data."
+ report.status_extended = f"Potential secret found in EC2 instance {instance.id} User Data -> {secrets_string}."
+
else:
report.status = "PASS"
report.status_extended = (
| diff --git a/tests/providers/aws/services/ec2/ec2_instance_secrets_user_data/ec2_instance_secrets_user_data_test.py b/tests/providers/aws/services/ec2/ec2_instance_secrets_user_data/ec2_instance_secrets_user_data_test.py
--- a/tests/providers/aws/services/ec2/ec2_instance_secrets_user_data/ec2_instance_secrets_user_data_test.py
+++ b/tests/providers/aws/services/ec2/ec2_instance_secrets_user_data/ec2_instance_secrets_user_data_test.py
@@ -101,6 +101,12 @@ def test_one_ec2_with_no_secrets(self):
== f"No secrets found in EC2 instance {instance.id} User Data."
)
assert result[0].resource_id == instance.id
+ assert (
+ result[0].resource_arn
+ == f"arn:{current_audit_info.audited_partition}:ec2:{AWS_REGION}:{current_audit_info.audited_account}:instance/{instance.id}"
+ )
+ assert result[0].resource_tags is None
+ assert result[0].region == AWS_REGION
@mock_ec2
def test_one_ec2_with_secrets(self):
@@ -134,13 +140,15 @@ def test_one_ec2_with_secrets(self):
assert result[0].status == "FAIL"
assert (
result[0].status_extended
- == f"Potential secret found in EC2 instance {instance.id} User Data."
+ == f"Potential secret found in EC2 instance {instance.id} User Data -> Secret Keyword on line 1."
)
assert result[0].resource_id == instance.id
assert (
result[0].resource_arn
== f"arn:{current_audit_info.audited_partition}:ec2:{AWS_REGION}:{current_audit_info.audited_account}:instance/{instance.id}"
)
+ assert result[0].resource_tags is None
+ assert result[0].region == AWS_REGION
@mock_ec2
def test_one_ec2_file_with_secrets(self):
@@ -177,13 +185,15 @@ def test_one_ec2_file_with_secrets(self):
assert result[0].status == "FAIL"
assert (
result[0].status_extended
- == f"Potential secret found in EC2 instance {instance.id} User Data."
+ == f"Potential secret found in EC2 instance {instance.id} User Data -> Secret Keyword on line 1, Hex High Entropy String on line 3, Secret Keyword on line 3, Secret Keyword on line 4."
)
assert result[0].resource_id == instance.id
assert (
result[0].resource_arn
== f"arn:{current_audit_info.audited_partition}:ec2:{AWS_REGION}:{current_audit_info.audited_account}:instance/{instance.id}"
)
+ assert result[0].resource_tags is None
+ assert result[0].region == AWS_REGION
@mock_ec2
def test_one_launch_configurations_without_user_data(self):
@@ -221,3 +231,5 @@ def test_one_launch_configurations_without_user_data(self):
result[0].resource_arn
== f"arn:{current_audit_info.audited_partition}:ec2:{AWS_REGION}:{current_audit_info.audited_account}:instance/{instance.id}"
)
+ assert result[0].resource_tags is None
+ assert result[0].region == AWS_REGION
| [Bug]: The check 'Potential secret found in EC2 instance * User Data.' does not include the line numbers where the secrets were found
### Steps to Reproduce
The check 'Potential secret found in EC2 instance * User Data.' does not show the line numbers, whereas 'Potential secret found in variables of ECS task definition' does. Why is it so?
The results of the check are frustrating without a precise pointer to the line: you do not know where exactly the scanner found the secret, or how many secrets were found.
The same issue arises if you need to troubleshoot the scanner.
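As a rough sketch, the detect-secrets output that the check already collects carries both the secret type and the line number, so the finding text could include them (the file contents and names below are made up; this mirrors what the ECS task definition check reports):

```python
import os
import tempfile

from detect_secrets import SecretsCollection
from detect_secrets.settings import default_settings

# Pretend this is the instance's decoded User Data.
user_data = 'export DB_PASSWORD="SuperSecret123"\n'

with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as f:
    f.write(user_data)
    temp_path = f.name

secrets = SecretsCollection()
with default_settings():
    secrets.scan_file(temp_path)

output = secrets.json()
if output:
    found = ", ".join(
        f"{secret['type']} on line {secret['line_number']}"
        for secret in output[temp_path]
    )
    print(f"Potential secret found in EC2 instance User Data -> {found}.")

os.remove(temp_path)
```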
### Expected behavior
The line numbers of the detected secrets are included in the finding description.
### Actual Result with Screenshots or Logs
-
### How did you install Prowler?
Docker (docker pull toniblyx/prowler)
### Environment Resource
Fargate
### OS used
--
### Prowler version
3
### Pip version
--
### Context
_No response_
| I updated the code for some of the checks to give the line number and secret type, but I must've missed this one. Thanks for pointing it out. | 2023-07-31T08:20:01
prowler-cloud/prowler | 2,655 | prowler-cloud__prowler-2655 | [
"2220"
] | 02f432238ede8936e87e16838df3439b79079337 | diff --git a/prowler/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation.py b/prowler/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation.py
--- a/prowler/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation.py
+++ b/prowler/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation.py
@@ -1,3 +1,5 @@
+from re import search
+
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.iam.iam_client import iam_client
@@ -11,63 +13,94 @@
# Does the tool handle Condition constraints? --> Not yet.
# Does the tool handle service control policy (SCP) restrictions? --> No, SCP are within Organizations AWS API.
+# Based on:
+# - https://bishopfox.com/blog/privilege-escalation-in-aws
+# - https://github.com/RhinoSecurityLabs/Security-Research/blob/master/tools/aws-pentest-tools/aws_escalate.py
+# - https://rhinosecuritylabs.com/aws/aws-privilege-escalation-methods-mitigation/
+
class iam_policy_allows_privilege_escalation(Check):
def execute(self) -> Check_Report_AWS:
- # Is necessary to include the "Action:*" for
- # each service that has a policy that could
- # allow for privilege escalation
- privilege_escalation_iam_actions = {
- "iam:AttachGroupPolicy",
- "iam:SetDefaultPolicyVersion2",
- "iam:AddUserToGroup",
- "iam:AttachRolePolicy",
- "iam:AttachUserPolicy",
- "iam:CreateAccessKey",
- "iam:CreatePolicyVersion",
- "iam:CreateLoginProfile",
- "iam:PassRole",
- "iam:PutGroupPolicy",
- "iam:PutRolePolicy",
- "iam:PutUserPolicy",
- "iam:SetDefaultPolicyVersion",
- "iam:UpdateAssumeRolePolicy",
- "iam:UpdateLoginProfile",
- "iam:*",
- "sts:AssumeRole",
- "sts:*",
- "ec2:RunInstances",
- "ec2:*",
- "lambda:CreateEventSourceMapping",
- "lambda:CreateFunction",
- "lambda:InvokeFunction",
- "lambda:UpdateFunctionCode",
- "lambda:*",
- "dynamodb:CreateTable",
- "dynamodb:PutItem",
- "dynamodb:*",
- "glue:CreateDevEndpoint",
- "glue:GetDevEndpoint",
- "glue:GetDevEndpoints",
- "glue:UpdateDevEndpoint",
- "glue:*",
- "cloudformation:CreateStack",
- "cloudformation:DescribeStacks",
- "cloudformation:*",
- "datapipeline:CreatePipeline",
- "datapipeline:PutPipelineDefinition",
- "datapipeline:ActivatePipeline",
- "datapipeline:*",
+ privilege_escalation_policies_combination = {
+ "CreatePolicyVersion": {"iam:CreatePolicyVersion"},
+ "SetDefaultPolicyVersion": {"iam:SetDefaultPolicyVersion"},
+ "iam:PassRole": {"iam:PassRole"},
+ "PassRole+EC2": {
+ "iam:PassRole",
+ "ec2:RunInstances",
+ },
+ "PassRole+CreateLambda+Invoke": {
+ "iam:PassRole",
+ "lambda:CreateFunction",
+ "lambda:InvokeFunction",
+ },
+ "PassRole+CreateLambda+ExistingDynamo": {
+ "iam:PassRole",
+ "lambda:CreateFunction",
+ "lambda:CreateEventSourceMapping",
+ },
+ "PassRole+CreateLambda+NewDynamo": {
+ "iam:PassRole",
+ "lambda:CreateFunction",
+ "lambda:CreateEventSourceMapping",
+ "dynamodb:CreateTable",
+ "dynamodb:PutItem",
+ },
+ "PassRole+GlueEndpoint": {
+ "iam:PassRole",
+ "glue:CreateDevEndpoint",
+ "glue:GetDevEndpoint",
+ },
+ "PassRole+GlueEndpoints": {
+ "iam:PassRole",
+ "glue:CreateDevEndpoint",
+ "glue:GetDevEndpoints",
+ },
+ "PassRole+CloudFormation": {
+ "cloudformation:CreateStack",
+ "cloudformation:DescribeStacks",
+ },
+ "PassRole+DataPipeline": {
+ "datapipeline:CreatePipeline",
+ "datapipeline:PutPipelineDefinition",
+ "datapipeline:ActivatePipeline",
+ },
+ "GlueUpdateDevEndpoint": {"glue:UpdateDevEndpoint"},
+ "GlueUpdateDevEndpoints": {"glue:UpdateDevEndpoint"},
+ "lambda:UpdateFunctionCode": {"lambda:UpdateFunctionCode"},
+ "iam:CreateAccessKey": {"iam:CreateAccessKey"},
+ "iam:CreateLoginProfile": {"iam:CreateLoginProfile"},
+ "iam:UpdateLoginProfile": {"iam:UpdateLoginProfile"},
+ "iam:AttachUserPolicy": {"iam:AttachUserPolicy"},
+ "iam:AttachGroupPolicy": {"iam:AttachGroupPolicy"},
+ "iam:AttachRolePolicy": {"iam:AttachRolePolicy"},
+ "AssumeRole+AttachRolePolicy": {"sts:AssumeRole", "iam:AttachRolePolicy"},
+ "iam:PutGroupPolicy": {"iam:PutGroupPolicy"},
+ "iam:PutRolePolicy": {"iam:PutRolePolicy"},
+ "AssumeRole+PutRolePolicy": {"sts:AssumeRole", "iam:PutRolePolicy"},
+ "iam:PutUserPolicy": {"iam:PutUserPolicy"},
+ "iam:AddUserToGroup": {"iam:AddUserToGroup"},
+ "iam:UpdateAssumeRolePolicy": {"iam:UpdateAssumeRolePolicy"},
+ "AssumeRole+UpdateAssumeRolePolicy": {
+ "sts:AssumeRole",
+ "iam:UpdateAssumeRolePolicy",
+ },
+ # TO-DO: We have to handle AssumeRole just if the resource is * and without conditions
+ # "sts:AssumeRole": {"sts:AssumeRole"},
}
+
findings = []
+
+ # Iterate over all the IAM "Customer Managed" policies
for policy in iam_client.policies:
- # Check only custom policies
if policy.type == "Custom":
report = Check_Report_AWS(self.metadata())
report.resource_id = policy.name
report.resource_arn = policy.arn
report.region = iam_client.region
report.resource_tags = policy.tags
+ report.status = "PASS"
+ report.status_extended = f"Custom Policy {report.resource_arn} does not allow privilege escalation"
# List of policy actions
allowed_actions = set()
@@ -85,42 +118,74 @@ def execute(self) -> Check_Report_AWS:
if statements["Effect"] == "Allow":
if "Action" in statements:
if type(statements["Action"]) is str:
- allowed_actions = {statements["Action"]}
+ allowed_actions.add(statements["Action"])
if type(statements["Action"]) is list:
- allowed_actions = set(statements["Action"])
+ allowed_actions.update(statements["Action"])
# Recover denied actions
if statements["Effect"] == "Deny":
if "Action" in statements:
if type(statements["Action"]) is str:
- denied_actions = {statements["Action"]}
+ denied_actions.add(statements["Action"])
if type(statements["Action"]) is list:
- denied_actions = set(statements["Action"])
+ denied_actions.update(statements["Action"])
if "NotAction" in statements:
if type(statements["NotAction"]) is str:
- denied_not_actions = {statements["NotAction"]}
+ denied_not_actions.add(statements["NotAction"])
if type(statements["NotAction"]) is list:
- denied_not_actions = set(statements["NotAction"])
-
- # First, we need to perform a left join with ALLOWED_ACTIONS and DENIED_ACTIONS
- left_actions = allowed_actions.difference(denied_actions)
- # Then, we need to find the DENIED_NOT_ACTIONS in LEFT_ACTIONS
- if denied_not_actions:
- privileged_actions = left_actions.intersection(denied_not_actions)
- # If there is no Denied Not Actions
- else:
- privileged_actions = left_actions
- # Finally, check if there is a privilege escalation action within this policy
- policy_privilege_escalation_actions = privileged_actions.intersection(
- privilege_escalation_iam_actions
- )
-
- if len(policy_privilege_escalation_actions) == 0:
- report.status = "PASS"
- report.status_extended = f"Custom Policy {report.resource_arn} does not allow privilege escalation"
- else:
- report.status = "FAIL"
- report.status_extended = f"Custom Policy {report.resource_arn} allows privilege escalation using the following actions: {policy_privilege_escalation_actions}"
+ denied_not_actions.update(statements["NotAction"])
+
+ # First, we need to perform a left join with ALLOWED_ACTIONS and DENIED_ACTIONS
+ left_actions = allowed_actions.difference(denied_actions)
+ # Then, we need to find the DENIED_NOT_ACTIONS in LEFT_ACTIONS
+ if denied_not_actions:
+ privileged_actions = left_actions.intersection(
+ denied_not_actions
+ )
+ # If there is no Denied Not Actions
+ else:
+ privileged_actions = left_actions
+
+ # Store all the action's combinations
+ policies_combination = set()
+
+ for values in privilege_escalation_policies_combination.values():
+ for val in values:
+ val_set = set()
+ val_set.add(val)
+ # Look for specific api:action
+ if privileged_actions.intersection(val_set) == val_set:
+ policies_combination.add(val)
+ # Look for api:*
+ else:
+ for permission in privileged_actions:
+ api = permission.split(":")[0]
+ api_action = permission.split(":")[1]
+
+ if api_action == "*":
+ if search(api, val):
+ policies_combination.add(val)
+
+ # Check all policies combinations and see if matchs with some combo key
+ combos = set()
+ for (
+ key,
+ values,
+ ) in privilege_escalation_policies_combination.items():
+ intersection = policies_combination.intersection(values)
+ if intersection == values:
+ combos.add(key)
+
+ if len(combos) != 0:
+ report.status = "FAIL"
+ policies_affected = ""
+ for key in combos:
+ policies_affected += (
+ str(privilege_escalation_policies_combination[key])
+ + " "
+ )
+
+ report.status_extended = f"Custom Policy {report.resource_arn} allows privilege escalation using the following actions: {policies_affected}".rstrip()
findings.append(report)
return findings
| diff --git a/tests/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation_test.py b/tests/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation_test.py
--- a/tests/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation_test.py
+++ b/tests/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation_test.py
@@ -1,4 +1,5 @@
from json import dumps
+from re import search
from unittest import mock
from boto3 import client, session
@@ -10,6 +11,75 @@
AWS_REGION = "us-east-1"
AWS_ACCOUNT_NUMBER = "123456789012"
+# Keep this up-to-date with the check's actions that allows for privilege escalation
+privilege_escalation_policies_combination = {
+ "CreatePolicyVersion": {"iam:CreatePolicyVersion"},
+ "SetDefaultPolicyVersion": {"iam:SetDefaultPolicyVersion"},
+ "iam:PassRole": {"iam:PassRole"},
+ "PassRole+EC2": {
+ "iam:PassRole",
+ "ec2:RunInstances",
+ },
+ "PassRole+CreateLambda+Invoke": {
+ "iam:PassRole",
+ "lambda:CreateFunction",
+ "lambda:InvokeFunction",
+ },
+ "PassRole+CreateLambda+ExistingDynamo": {
+ "iam:PassRole",
+ "lambda:CreateFunction",
+ "lambda:CreateEventSourceMapping",
+ },
+ "PassRole+CreateLambda+NewDynamo": {
+ "iam:PassRole",
+ "lambda:CreateFunction",
+ "lambda:CreateEventSourceMapping",
+ "dynamodb:CreateTable",
+ "dynamodb:PutItem",
+ },
+ "PassRole+GlueEndpoint": {
+ "iam:PassRole",
+ "glue:CreateDevEndpoint",
+ "glue:GetDevEndpoint",
+ },
+ "PassRole+GlueEndpoints": {
+ "iam:PassRole",
+ "glue:CreateDevEndpoint",
+ "glue:GetDevEndpoints",
+ },
+ "PassRole+CloudFormation": {
+ "cloudformation:CreateStack",
+ "cloudformation:DescribeStacks",
+ },
+ "PassRole+DataPipeline": {
+ "datapipeline:CreatePipeline",
+ "datapipeline:PutPipelineDefinition",
+ "datapipeline:ActivatePipeline",
+ },
+ "GlueUpdateDevEndpoint": {"glue:UpdateDevEndpoint"},
+ "GlueUpdateDevEndpoints": {"glue:UpdateDevEndpoint"},
+ "lambda:UpdateFunctionCode": {"lambda:UpdateFunctionCode"},
+ "iam:CreateAccessKey": {"iam:CreateAccessKey"},
+ "iam:CreateLoginProfile": {"iam:CreateLoginProfile"},
+ "iam:UpdateLoginProfile": {"iam:UpdateLoginProfile"},
+ "iam:AttachUserPolicy": {"iam:AttachUserPolicy"},
+ "iam:AttachGroupPolicy": {"iam:AttachGroupPolicy"},
+ "iam:AttachRolePolicy": {"iam:AttachRolePolicy"},
+ "AssumeRole+AttachRolePolicy": {"sts:AssumeRole", "iam:AttachRolePolicy"},
+ "iam:PutGroupPolicy": {"iam:PutGroupPolicy"},
+ "iam:PutRolePolicy": {"iam:PutRolePolicy"},
+ "AssumeRole+PutRolePolicy": {"sts:AssumeRole", "iam:PutRolePolicy"},
+ "iam:PutUserPolicy": {"iam:PutUserPolicy"},
+ "iam:AddUserToGroup": {"iam:AddUserToGroup"},
+ "iam:UpdateAssumeRolePolicy": {"iam:UpdateAssumeRolePolicy"},
+ "AssumeRole+UpdateAssumeRolePolicy": {
+ "sts:AssumeRole",
+ "iam:UpdateAssumeRolePolicy",
+ },
+ # TO-DO: We have to handle AssumeRole just if the resource is * and without conditions
+ # "sts:AssumeRole": {"sts:AssumeRole"},
+}
+
class Test_iam_policy_allows_privilege_escalation:
def set_mocked_audit_info(self):
@@ -43,14 +113,56 @@ def set_mocked_audit_info(self):
return audit_info
+ # @mock_iam
+ # def test_iam_policy_allows_privilege_escalation_sts(self):
+ # iam_client = client("iam", region_name=AWS_REGION)
+ # policy_name = "policy1"
+ # policy_document = {
+ # "Version": "2012-10-17",
+ # "Statement": [
+ # {"Effect": "Allow", "Action": "sts:*", "Resource": "*"},
+ # ],
+ # }
+ # policy_arn = iam_client.create_policy(
+ # PolicyName=policy_name, PolicyDocument=dumps(policy_document)
+ # )["Policy"]["Arn"]
+
+ # current_audit_info = self.set_mocked_audit_info()
+ # from prowler.providers.aws.services.iam.iam_service import IAM
+
+ # with mock.patch(
+ # "prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
+ # new=current_audit_info,
+ # ), mock.patch(
+ # "prowler.providers.aws.services.iam.iam_policy_allows_privilege_escalation.iam_policy_allows_privilege_escalation.iam_client",
+ # new=IAM(current_audit_info),
+ # ):
+ # # Test Check
+ # from prowler.providers.aws.services.iam.iam_policy_allows_privilege_escalation.iam_policy_allows_privilege_escalation import (
+ # iam_policy_allows_privilege_escalation,
+ # )
+
+ # check = iam_policy_allows_privilege_escalation()
+ # result = check.execute()
+ # assert len(result) == 1
+ # assert result[0].status == "FAIL"
+ # assert (
+ # result[0].status_extended
+ # == f"Custom Policy {policy_arn} allows privilege escalation using the following actions: {{'sts:AssumeRole'}}"
+ # )
+ # assert result[0].resource_id == policy_name
+ # assert result[0].resource_arn == policy_arn
+
@mock_iam
- def test_iam_policy_allows_privilege_escalation_sts(self):
+ def test_iam_policy_not_allows_privilege_escalation(self):
iam_client = client("iam", region_name=AWS_REGION)
policy_name = "policy1"
policy_document = {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Allow", "Action": "sts:*", "Resource": "*"},
+ {"Effect": "Deny", "Action": "sts:*", "Resource": "*"},
+ {"Effect": "Deny", "NotAction": "sts:*", "Resource": "*"},
],
}
policy_arn = iam_client.create_policy(
@@ -75,24 +187,28 @@ def test_iam_policy_allows_privilege_escalation_sts(self):
check = iam_policy_allows_privilege_escalation()
result = check.execute()
assert len(result) == 1
- assert result[0].status == "FAIL"
+ assert result[0].status == "PASS"
assert (
result[0].status_extended
- == f"Custom Policy {policy_arn} allows privilege escalation using the following actions: {{'sts:*'}}"
+ == f"Custom Policy {policy_arn} does not allow privilege escalation"
)
assert result[0].resource_id == policy_name
assert result[0].resource_arn == policy_arn
@mock_iam
- def test_iam_policy_not_allows_privilege_escalation(self):
+ def test_iam_policy_not_allows_privilege_escalation_glue_GetDevEndpoints(self):
iam_client = client("iam", region_name=AWS_REGION)
policy_name = "policy1"
policy_document = {
"Version": "2012-10-17",
"Statement": [
- {"Effect": "Allow", "Action": "sts:*", "Resource": "*"},
- {"Effect": "Deny", "Action": "sts:*", "Resource": "*"},
- {"Effect": "Deny", "NotAction": "sts:*", "Resource": "*"},
+ {"Effect": "Allow", "Action": "lambda:*", "Resource": "*"},
+ {"Effect": "Deny", "Action": "lambda:InvokeFunction", "Resource": "*"},
+ {
+ "Effect": "Deny",
+ "NotAction": "glue:GetDevEndpoints",
+ "Resource": "*",
+ },
],
}
policy_arn = iam_client.create_policy(
@@ -126,19 +242,30 @@ def test_iam_policy_not_allows_privilege_escalation(self):
assert result[0].resource_arn == policy_arn
@mock_iam
- def test_iam_policy_not_allows_privilege_escalation_glue_GetDevEndpoints(self):
+ def test_iam_policy_not_allows_privilege_escalation_dynamodb_PutItem(self):
iam_client = client("iam", region_name=AWS_REGION)
policy_name = "policy1"
policy_document = {
"Version": "2012-10-17",
"Statement": [
- {"Effect": "Allow", "Action": "lambda:*", "Resource": "*"},
- {"Effect": "Deny", "Action": "lambda:InvokeFunction", "Resource": "*"},
+ {
+ "Effect": "Allow",
+ "Action": [
+ "lambda:*",
+ "iam:PassRole",
+ "dynamodb:PutItem",
+ "cloudformation:CreateStack",
+ "cloudformation:DescribeStacks",
+ "ec2:RunInstances",
+ ],
+ "Resource": "*",
+ },
{
"Effect": "Deny",
- "NotAction": "glue:GetDevEndpoints",
+ "Action": ["lambda:InvokeFunction", "cloudformation:CreateStack"],
"Resource": "*",
},
+ {"Effect": "Deny", "NotAction": "dynamodb:PutItem", "Resource": "*"},
],
}
policy_arn = iam_client.create_policy(
@@ -172,7 +299,113 @@ def test_iam_policy_not_allows_privilege_escalation_glue_GetDevEndpoints(self):
assert result[0].resource_arn == policy_arn
@mock_iam
- def test_iam_policy_not_allows_privilege_escalation_dynamodb_PutItem(self):
+ def test_iam_policy_allows_privilege_escalation_iam_all_and_ec2_RunInstances(
+ self,
+ ):
+ iam_client = client("iam", region_name=AWS_REGION)
+ policy_name = "policy1"
+ policy_document = {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "iam:*",
+ ],
+ "Resource": "*",
+ },
+ {
+ "Effect": "Allow",
+ "Action": ["ec2:RunInstances"],
+ "Resource": "*",
+ },
+ ],
+ }
+ policy_arn = iam_client.create_policy(
+ PolicyName=policy_name, PolicyDocument=dumps(policy_document)
+ )["Policy"]["Arn"]
+
+ current_audit_info = self.set_mocked_audit_info()
+ from prowler.providers.aws.services.iam.iam_service import IAM
+
+ with mock.patch(
+ "prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
+ new=current_audit_info,
+ ), mock.patch(
+ "prowler.providers.aws.services.iam.iam_policy_allows_privilege_escalation.iam_policy_allows_privilege_escalation.iam_client",
+ new=IAM(current_audit_info),
+ ):
+ # Test Check
+ from prowler.providers.aws.services.iam.iam_policy_allows_privilege_escalation.iam_policy_allows_privilege_escalation import (
+ iam_policy_allows_privilege_escalation,
+ )
+
+ check = iam_policy_allows_privilege_escalation()
+ result = check.execute()
+ assert len(result) == 1
+ assert result[0].status == "FAIL"
+ assert result[0].resource_id == policy_name
+ assert result[0].resource_arn == policy_arn
+
+ assert search(
+ f"Custom Policy {policy_arn} allows privilege escalation using the following actions: ",
+ result[0].status_extended,
+ )
+ assert search("iam:PassRole", result[0].status_extended)
+ assert search("ec2:RunInstances", result[0].status_extended)
+
+ @mock_iam
+ def test_iam_policy_allows_privilege_escalation_iam_PassRole(
+ self,
+ ):
+ iam_client = client("iam", region_name=AWS_REGION)
+ policy_name = "policy1"
+ policy_document = {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": "iam:PassRole",
+ "Resource": f"arn:aws:iam::{AWS_ACCOUNT_NUMBER}:role/ecs",
+ }
+ ],
+ }
+ policy_arn = iam_client.create_policy(
+ PolicyName=policy_name, PolicyDocument=dumps(policy_document)
+ )["Policy"]["Arn"]
+
+ current_audit_info = self.set_mocked_audit_info()
+ from prowler.providers.aws.services.iam.iam_service import IAM
+
+ with mock.patch(
+ "prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
+ new=current_audit_info,
+ ), mock.patch(
+ "prowler.providers.aws.services.iam.iam_policy_allows_privilege_escalation.iam_policy_allows_privilege_escalation.iam_client",
+ new=IAM(current_audit_info),
+ ):
+ # Test Check
+ from prowler.providers.aws.services.iam.iam_policy_allows_privilege_escalation.iam_policy_allows_privilege_escalation import (
+ iam_policy_allows_privilege_escalation,
+ )
+
+ check = iam_policy_allows_privilege_escalation()
+ result = check.execute()
+ assert len(result) == 1
+ assert result[0].status == "FAIL"
+ assert result[0].resource_id == policy_name
+ assert result[0].resource_arn == policy_arn
+
+ assert search(
+ f"Custom Policy {policy_arn} allows privilege escalation using the following actions: ",
+ result[0].status_extended,
+ )
+ assert search("iam:PassRole", result[0].status_extended)
+
+ @mock_iam
+ def test_iam_policy_allows_privilege_escalation_two_combinations(
+ self,
+ ):
iam_client = client("iam", region_name=AWS_REGION)
policy_name = "policy1"
policy_document = {
@@ -181,21 +414,27 @@ def test_iam_policy_not_allows_privilege_escalation_dynamodb_PutItem(self):
{
"Effect": "Allow",
"Action": [
- "lambda:*",
"iam:PassRole",
- "dynamodb:PutItem",
- "cloudformation:CreateStack",
- "cloudformation:DescribeStacks",
- "ec2:RunInstances",
],
"Resource": "*",
},
{
- "Effect": "Deny",
- "Action": ["lambda:InvokeFunction", "cloudformation:CreateStack"],
+ "Effect": "Allow",
+ "Action": ["ec2:RunInstances"],
+ "Resource": "*",
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "lambda:CreateFunction",
+ ],
+ "Resource": "*",
+ },
+ {
+ "Effect": "Allow",
+ "Action": ["lambda:InvokeFunction"],
"Resource": "*",
},
- {"Effect": "Deny", "NotAction": "dynamodb:PutItem", "Resource": "*"},
],
}
policy_arn = iam_client.create_policy(
@@ -221,9 +460,311 @@ def test_iam_policy_not_allows_privilege_escalation_dynamodb_PutItem(self):
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
- assert (
- result[0].status_extended
- == f"Custom Policy {policy_arn} allows privilege escalation using the following actions: {{'dynamodb:PutItem'}}"
+ assert result[0].resource_id == policy_name
+ assert result[0].resource_arn == policy_arn
+
+ assert search(
+ f"Custom Policy {policy_arn} allows privilege escalation using the following actions: ",
+ result[0].status_extended,
)
+ assert search("iam:PassRole", result[0].status_extended)
+ assert search("lambda:InvokeFunction", result[0].status_extended)
+ assert search("lambda:CreateFunction", result[0].status_extended)
+ assert search("ec2:RunInstances", result[0].status_extended)
+
+ @mock_iam
+ def test_iam_policy_allows_privilege_escalation_iam_PassRole_and_other_actions(
+ self,
+ ):
+ iam_client = client("iam", region_name=AWS_REGION)
+ policy_name = "policy1"
+ policy_document = {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": "iam:PassRole",
+ "Resource": f"arn:aws:iam::{AWS_ACCOUNT_NUMBER}:role/ecs",
+ },
+ {
+ "Action": "account:GetAccountInformation",
+ "Effect": "Allow",
+ "Resource": "*",
+ },
+ ],
+ }
+ policy_arn = iam_client.create_policy(
+ PolicyName=policy_name, PolicyDocument=dumps(policy_document)
+ )["Policy"]["Arn"]
+
+ current_audit_info = self.set_mocked_audit_info()
+ from prowler.providers.aws.services.iam.iam_service import IAM
+
+ with mock.patch(
+ "prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
+ new=current_audit_info,
+ ), mock.patch(
+ "prowler.providers.aws.services.iam.iam_policy_allows_privilege_escalation.iam_policy_allows_privilege_escalation.iam_client",
+ new=IAM(current_audit_info),
+ ):
+ # Test Check
+ from prowler.providers.aws.services.iam.iam_policy_allows_privilege_escalation.iam_policy_allows_privilege_escalation import (
+ iam_policy_allows_privilege_escalation,
+ )
+
+ check = iam_policy_allows_privilege_escalation()
+ result = check.execute()
+ assert len(result) == 1
+ assert result[0].status == "FAIL"
assert result[0].resource_id == policy_name
assert result[0].resource_arn == policy_arn
+
+ assert search(
+ f"Custom Policy {policy_arn} allows privilege escalation using the following actions: ",
+ result[0].status_extended,
+ )
+ assert search("iam:PassRole", result[0].status_extended)
+
+ @mock_iam
+ def test_iam_policy_allows_privilege_escalation_policies_combination(
+ self,
+ ):
+ current_audit_info = self.set_mocked_audit_info()
+ iam_client = client("iam", region_name=AWS_REGION)
+ policy_name = "privileged_policy"
+ for values in privilege_escalation_policies_combination.values():
+ print(list(values))
+ # We create a new statement in each loop with the combinations required to allow the privilege escalation
+ policy_document = {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": list(values),
+ "Resource": "*",
+ },
+ ],
+ }
+ policy_arn = iam_client.create_policy(
+ PolicyName=policy_name, PolicyDocument=dumps(policy_document)
+ )["Policy"]["Arn"]
+
+ from prowler.providers.aws.services.iam.iam_service import IAM
+
+ with mock.patch(
+ "prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
+ new=current_audit_info,
+ ), mock.patch(
+ "prowler.providers.aws.services.iam.iam_policy_allows_privilege_escalation.iam_policy_allows_privilege_escalation.iam_client",
+ new=IAM(current_audit_info),
+ ):
+ # Test Check
+ from prowler.providers.aws.services.iam.iam_policy_allows_privilege_escalation.iam_policy_allows_privilege_escalation import (
+ iam_policy_allows_privilege_escalation,
+ )
+
+ check = iam_policy_allows_privilege_escalation()
+ result = check.execute()
+ assert len(result) == 1
+ assert result[0].status == "FAIL"
+ assert result[0].resource_id == policy_name
+ assert result[0].resource_arn == policy_arn
+
+ assert search(
+ f"Custom Policy {policy_arn} allows privilege escalation using the following actions: ",
+ result[0].status_extended,
+ )
+
+ # Check the actions that allow for privilege escalation
+ for action in values:
+ assert search(action, result[0].status_extended)
+
+ # Delete each IAM policy after the test
+ iam_client.delete_policy(PolicyArn=policy_arn)
+
+ @mock_iam
+ def test_iam_policy_allows_privilege_escalation_two_policies_one_good_one_bad(
+ self,
+ ):
+ current_audit_info = self.set_mocked_audit_info()
+ iam_client = client("iam", region_name=AWS_REGION)
+ policy_name_1 = "privileged_policy_1"
+ policy_document_1 = {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": ["ec2:RunInstances"],
+ "Resource": "*",
+ },
+ ],
+ }
+ policy_name_2 = "privileged_policy_2"
+ policy_document_2 = {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "iam:PassRole",
+ ],
+ "Resource": "*",
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "lambda:CreateFunction",
+ ],
+ "Resource": "*",
+ },
+ {
+ "Effect": "Allow",
+ "Action": ["lambda:InvokeFunction"],
+ "Resource": "*",
+ },
+ ],
+ }
+ policy_arn_1 = iam_client.create_policy(
+ PolicyName=policy_name_1, PolicyDocument=dumps(policy_document_1)
+ )["Policy"]["Arn"]
+
+ policy_arn_2 = iam_client.create_policy(
+ PolicyName=policy_name_2, PolicyDocument=dumps(policy_document_2)
+ )["Policy"]["Arn"]
+
+ from prowler.providers.aws.services.iam.iam_service import IAM
+
+ with mock.patch(
+ "prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
+ new=current_audit_info,
+ ), mock.patch(
+ "prowler.providers.aws.services.iam.iam_policy_allows_privilege_escalation.iam_policy_allows_privilege_escalation.iam_client",
+ new=IAM(current_audit_info),
+ ):
+ # Test Check
+ from prowler.providers.aws.services.iam.iam_policy_allows_privilege_escalation.iam_policy_allows_privilege_escalation import (
+ iam_policy_allows_privilege_escalation,
+ )
+
+ check = iam_policy_allows_privilege_escalation()
+ result = check.execute()
+ assert len(result) == 2
+ for finding in result:
+ if finding.resource_id == policy_name_1:
+ assert finding.status == "PASS"
+ assert finding.resource_arn == policy_arn_1
+ assert (
+ finding.status_extended
+ == f"Custom Policy {policy_arn_1} does not allow privilege escalation"
+ )
+
+ if finding.resource_id == policy_name_2:
+ assert finding.status == "FAIL"
+ assert finding.resource_arn == policy_arn_2
+
+ assert search(
+ f"Custom Policy {policy_arn_2} allows privilege escalation using the following actions: ",
+ finding.status_extended,
+ )
+ assert search("iam:PassRole", finding.status_extended)
+ assert search("lambda:InvokeFunction", finding.status_extended)
+ assert search("lambda:CreateFunction", finding.status_extended)
+
+ @mock_iam
+ def test_iam_policy_allows_privilege_escalation_two_bad_policies(
+ self,
+ ):
+ current_audit_info = self.set_mocked_audit_info()
+ iam_client = client("iam", region_name=AWS_REGION)
+ policy_name_1 = "privileged_policy_1"
+ policy_document_1 = {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "iam:PassRole",
+ ],
+ "Resource": "*",
+ },
+ {
+ "Effect": "Allow",
+ "Action": ["ec2:RunInstances"],
+ "Resource": "*",
+ },
+ ],
+ }
+ policy_name_2 = "privileged_policy_2"
+ policy_document_2 = {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "iam:PassRole",
+ ],
+ "Resource": "*",
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "lambda:CreateFunction",
+ ],
+ "Resource": "*",
+ },
+ {
+ "Effect": "Allow",
+ "Action": ["lambda:InvokeFunction"],
+ "Resource": "*",
+ },
+ ],
+ }
+ policy_arn_1 = iam_client.create_policy(
+ PolicyName=policy_name_1, PolicyDocument=dumps(policy_document_1)
+ )["Policy"]["Arn"]
+
+ policy_arn_2 = iam_client.create_policy(
+ PolicyName=policy_name_2, PolicyDocument=dumps(policy_document_2)
+ )["Policy"]["Arn"]
+
+ from prowler.providers.aws.services.iam.iam_service import IAM
+
+ with mock.patch(
+ "prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
+ new=current_audit_info,
+ ), mock.patch(
+ "prowler.providers.aws.services.iam.iam_policy_allows_privilege_escalation.iam_policy_allows_privilege_escalation.iam_client",
+ new=IAM(current_audit_info),
+ ):
+ # Test Check
+ from prowler.providers.aws.services.iam.iam_policy_allows_privilege_escalation.iam_policy_allows_privilege_escalation import (
+ iam_policy_allows_privilege_escalation,
+ )
+
+ check = iam_policy_allows_privilege_escalation()
+ result = check.execute()
+ assert len(result) == 2
+ for finding in result:
+ if finding.resource_id == policy_name_1:
+ assert finding.status == "FAIL"
+ assert finding.resource_arn == policy_arn_1
+
+ assert search(
+ f"Custom Policy {policy_arn_1} allows privilege escalation using the following actions: ",
+ finding.status_extended,
+ )
+
+ assert search("iam:PassRole", finding.status_extended)
+ assert search("ec2:RunInstances", finding.status_extended)
+
+ if finding.resource_id == policy_name_2:
+ assert finding.status == "FAIL"
+ assert finding.resource_arn == policy_arn_2
+
+ assert search(
+ f"Custom Policy {policy_arn_2} allows privilege escalation using the following actions: ",
+ finding.status_extended,
+ )
+ assert search("iam:PassRole", finding.status_extended)
+ assert search("lambda:InvokeFunction", finding.status_extended)
+ assert search("lambda:CreateFunction", finding.status_extended)
| AWS iam_policy_allows_privilege_escalation check are not accurate
### New feature motivation
Hi,
I noticed that the iam_policy_allows_privilege_escalation check is not accurate.
For example:
If glue:GetDevEndpoints is the only action granted, without iam:PassRole, privilege escalation can't be performed.
WDYT?
_Another issue from 2023/06/26_
Hi
Prowler alerts on the glue:GetDevEndpoint action alone as if it allowed performing privilege escalation.
I think it needs to check for the combination of glue:UpdateDevEndpoint together with glue:GetDevEndpoint.
Today Prowler raises a wrong privilege escalation alert for the GetDevEndpoint action on its own.
### Solution Proposed
Identify the combinations of permissions that together allow privilege escalation.
### Describe alternatives you've considered
NA
### Additional context
_No response_
| Hi @roisec, you're right, some actions don't allow for privilege escalation by themselves without some others. This check was built from https://bishopfox.com/blog/privilege-escalation-in-aws so maybe we can create some tuples within the policies list to map the actions that need to appear together in order to allow for privilege escalation.
We'll give this a thought.
Thanks for using Prowler!
A better solution, though it would require significantly more effort, would be to implement the graph-based approach pmapper uses
That is our idea: to implement it with a matrix, because a combination of several permissions is needed to allow privilege escalation.
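A minimal sketch of what such a combination matrix could look like (the technique labels are made up and the action sets are only illustrative, not Prowler's actual mapping):
```python
# Illustrative only: each escalation technique maps to the set of actions that must ALL be allowed.
privilege_escalation_combinations = {
    "PassRolePlusEC2RunInstances": {"iam:PassRole", "ec2:RunInstances"},
    "PassRolePlusLambda": {"iam:PassRole", "lambda:CreateFunction", "lambda:InvokeFunction"},
}


def escalation_actions(allowed_actions: set) -> set:
    """Return the actions that, combined, allow privilege escalation (empty set if none)."""
    flagged = set()
    for required_actions in privilege_escalation_combinations.values():
        if required_actions.issubset(allowed_actions):
            flagged |= required_actions
    return flagged


print(escalation_actions({"glue:GetDevEndpoint"}))  # set() -> a lone action no longer triggers a finding
print(escalation_actions({"iam:PassRole", "ec2:RunInstances"}))  # both actions reported together
```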
I assume that there is no ETA for a fix on this?
Hi @ernievd, we are working on it but we don't have an ETA. We will update this issue as soon as we have the fix. | 2023-08-01T16:01:37 |
prowler-cloud/prowler | 2,709 | prowler-cloud__prowler-2709 | [
"2708"
] | 474e39a4c9d1338e797b67a45240182ff6035eae | diff --git a/prowler/__main__.py b/prowler/__main__.py
--- a/prowler/__main__.py
+++ b/prowler/__main__.py
@@ -220,7 +220,9 @@ def prowler():
# Resolve previous fails of Security Hub
if provider == "aws" and args.security_hub and not args.skip_sh_update:
resolve_security_hub_previous_findings(
- args.output_directory, args.output_filename, audit_info
+ audit_output_options.output_directory,
+ audit_output_options.output_filename,
+ audit_info,
)
# Display summary table
| FileNotFoundError after version 3.8.1
### Discussed in https://github.com/prowler-cloud/prowler/discussions/2707
<div type='discussions-op-text'>
<sup>Originally posted by **cerontrustly** August 10, 2023</sup>
Hello guys!
After using version 3.8.1, Prowler stopped working for me, showing the following error:
`Traceback (most recent call last):
File "/home/prowler/.local/bin/prowler", line 8, in <module>
sys.exit(prowler())
File "/home/prowler/.local/lib/python3.9/site-packages/prowler/__main__.py", line 222, in prowler
resolve_security_hub_previous_findings(
File "/home/prowler/.local/lib/python3.9/site-packages/prowler/providers/aws/lib/security_hub/security_hub.py", line 66, in resolve_security_hub_previous_findings
with open(f"{output_directory}/{output_filename}{json_asff_file_suffix}") as f:
FileNotFoundError: [Errno 2] No such file or directory: '/home/prowler/output/None.asff.json'`
My command line looks like this:
`docker run -v /tmp:/prowler/tmp toniblyx/prowler:stable -f us-west-2 -M json-asff -S -z`
I think it can be related to [this](https://github.com/prowler-cloud/prowler/pull/2687) change.
Can somebody help me?
Thank you!</div>
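As a hedged note on the likely cause (an assumption based on the fix in this PR, which reads the filename from audit_output_options instead of args): when -F/--output-filename is not passed, args.output_filename stays None, so the ASFF path is built from the literal string "None":
```python
output_filename = None  # assumed value of args.output_filename when -F/--output-filename is not given
json_asff_file_suffix = ".asff.json"
print(f"{output_filename}{json_asff_file_suffix}")  # -> "None.asff.json", matching the path in the traceback
```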
| 2023-08-10T20:44:45 |
||
prowler-cloud/prowler | 2,726 | prowler-cloud__prowler-2726 | [
"2723"
] | 7ffb12268d99f9a7b7e1a5599412330ae9430509 | diff --git a/prowler/providers/aws/lib/policy_condition_parser/policy_condition_parser.py b/prowler/providers/aws/lib/policy_condition_parser/policy_condition_parser.py
--- a/prowler/providers/aws/lib/policy_condition_parser/policy_condition_parser.py
+++ b/prowler/providers/aws/lib/policy_condition_parser/policy_condition_parser.py
@@ -1,31 +1,52 @@
-# lista de cuentas y te devuelva las válidas
def is_account_only_allowed_in_condition(
condition_statement: dict, source_account: str
):
+ """
+ is_account_only_allowed_in_condition parses the IAM Condition policy block and returns True if the source_account passed as argument is within, False if not.
+
+ @param condition_statement: dict with an IAM Condition block, e.g.:
+ {
+ "StringLike": {
+ "AWS:SourceAccount": 111122223333
+ }
+ }
+
+ @param source_account: str with a 12-digit AWS Account number, e.g.: 111122223333
+ """
is_condition_valid = False
+
+ # The conditions must be defined in lowercase since the context key names are not case-sensitive.
+ # For example, including the aws:SourceAccount context key is equivalent to testing for AWS:SourceAccount
+ # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition.html
valid_condition_options = {
"StringEquals": [
- "aws:SourceAccount",
- "aws:SourceOwner",
- "s3:ResourceAccount",
- "aws:PrincipalAccount",
- "aws:ResourceAccount",
+ "aws:sourceaccount",
+ "aws:sourceowner",
+ "s3:resourceaccount",
+ "aws:principalaccount",
+ "aws:resourceaccount",
],
"StringLike": [
- "aws:SourceAccount",
- "aws:SourceOwner",
- "aws:SourceArn",
- "aws:PrincipalArn",
- "aws:ResourceAccount",
- "aws:PrincipalAccount",
+ "aws:sourceaccount",
+ "aws:sourceowner",
+ "aws:sourcearn",
+ "aws:principalarn",
+ "aws:resourceaccount",
+ "aws:principalaccount",
],
- "ArnLike": ["aws:SourceArn", "aws:PrincipalArn"],
- "ArnEquals": ["aws:SourceArn", "aws:PrincipalArn"],
+ "ArnLike": ["aws:sourcearn", "aws:principalarn"],
+ "ArnEquals": ["aws:sourcearn", "aws:principalarn"],
}
for condition_operator, condition_operator_key in valid_condition_options.items():
if condition_operator in condition_statement:
for value in condition_operator_key:
+ # We need to transform the condition_statement into lowercase
+ condition_statement[condition_operator] = {
+ k.lower(): v
+ for k, v in condition_statement[condition_operator].items()
+ }
+
if value in condition_statement[condition_operator]:
# values are a list
if isinstance(
| diff --git a/tests/providers/aws/lib/policy_condition_parser/policy_condition_parser_test.py b/tests/providers/aws/lib/policy_condition_parser/policy_condition_parser_test.py
--- a/tests/providers/aws/lib/policy_condition_parser/policy_condition_parser_test.py
+++ b/tests/providers/aws/lib/policy_condition_parser/policy_condition_parser_test.py
@@ -7,6 +7,7 @@
class Test_policy_condition_parser:
+ # Test lowercase context key name --> aws
def test_condition_parser_string_equals_aws_SourceAccount_list(self):
condition_statement = {
"StringEquals": {"aws:SourceAccount": [TRUSTED_AWS_ACCOUNT_NUMBER]}
@@ -633,3 +634,631 @@ def test_condition_parser_string_like_aws_ResourceAccount_str_not_valid(self):
assert not is_account_only_allowed_in_condition(
condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
)
+
+ # Test uppercase context key name --> AWS
+ def test_condition_parser_string_equals_AWS_SourceAccount_list(self):
+ condition_statement = {
+ "StringEquals": {"AWS:SourceAccount": [TRUSTED_AWS_ACCOUNT_NUMBER]}
+ }
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_AWS_SourceAccount_list_not_valid(self):
+ condition_statement = {
+ "StringEquals": {
+ "AWS:SourceAccount": [
+ TRUSTED_AWS_ACCOUNT_NUMBER,
+ NON_TRUSTED_AWS_ACCOUNT_NUMBER,
+ ]
+ }
+ }
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_AWS_SourceAccount_str(self):
+ condition_statement = {
+ "StringEquals": {"AWS:SourceAccount": TRUSTED_AWS_ACCOUNT_NUMBER}
+ }
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_AWS_SourceAccount_str_not_valid(self):
+ condition_statement = {
+ "StringEquals": {"AWS:SourceAccount": NON_TRUSTED_AWS_ACCOUNT_NUMBER}
+ }
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_SourceAccount_list(self):
+ condition_statement = {
+ "StringLike": {"AWS:SourceAccount": [TRUSTED_AWS_ACCOUNT_NUMBER]}
+ }
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_SourceAccount_list_not_valid(self):
+ condition_statement = {
+ "StringLike": {
+ "AWS:SourceAccount": [
+ TRUSTED_AWS_ACCOUNT_NUMBER,
+ NON_TRUSTED_AWS_ACCOUNT_NUMBER,
+ ]
+ }
+ }
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_SourceAccount_str(self):
+ condition_statement = {
+ "StringLike": {"AWS:SourceAccount": TRUSTED_AWS_ACCOUNT_NUMBER}
+ }
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_SourceAccount_str_not_valid(self):
+ condition_statement = {
+ "StringLike": {"AWS:SourceAccount": NON_TRUSTED_AWS_ACCOUNT_NUMBER}
+ }
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_AWS_SourceOwner_str(self):
+ condition_statement = {
+ "StringEquals": {"AWS:SourceOwner": TRUSTED_AWS_ACCOUNT_NUMBER}
+ }
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_AWS_SourceOwner_str_not_valid(self):
+ condition_statement = {
+ "StringEquals": {"AWS:SourceOwner": NON_TRUSTED_AWS_ACCOUNT_NUMBER}
+ }
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_AWS_SourceOwner_list(self):
+ condition_statement = {
+ "StringEquals": {"AWS:SourceOwner": [TRUSTED_AWS_ACCOUNT_NUMBER]}
+ }
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_AWS_SourceOwner_list_not_valid(self):
+ condition_statement = {
+ "StringEquals": {
+ "AWS:SourceOwner": [
+ TRUSTED_AWS_ACCOUNT_NUMBER,
+ NON_TRUSTED_AWS_ACCOUNT_NUMBER,
+ ]
+ }
+ }
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_SourceOwner_list(self):
+ condition_statement = {
+ "StringLike": {"AWS:SourceOwner": [TRUSTED_AWS_ACCOUNT_NUMBER]}
+ }
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_SourceOwner_list_not_valid(self):
+ condition_statement = {
+ "StringLike": {
+ "AWS:SourceOwner": [
+ TRUSTED_AWS_ACCOUNT_NUMBER,
+ NON_TRUSTED_AWS_ACCOUNT_NUMBER,
+ ]
+ }
+ }
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_SourceOwner_str(self):
+ condition_statement = {
+ "StringLike": {"AWS:SourceOwner": TRUSTED_AWS_ACCOUNT_NUMBER}
+ }
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_SourceOwner_str_not_valid(self):
+ condition_statement = {
+ "StringLike": {"AWS:SourceOwner": NON_TRUSTED_AWS_ACCOUNT_NUMBER}
+ }
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_S3_ResourceAccount_list(self):
+ condition_statement = {
+ "StringEquals": {"S3:ResourceAccount": [TRUSTED_AWS_ACCOUNT_NUMBER]}
+ }
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_S3_ResourceAccount_list_not_valid(self):
+ condition_statement = {
+ "StringEquals": {
+ "S3:ResourceAccount": [
+ TRUSTED_AWS_ACCOUNT_NUMBER,
+ NON_TRUSTED_AWS_ACCOUNT_NUMBER,
+ ]
+ }
+ }
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_S3_ResourceAccount_str(self):
+ condition_statement = {
+ "StringEquals": {"S3:ResourceAccount": TRUSTED_AWS_ACCOUNT_NUMBER}
+ }
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_S3_ResourceAccount_str_not_valid(self):
+ condition_statement = {
+ "StringEquals": {"S3:ResourceAccount": NON_TRUSTED_AWS_ACCOUNT_NUMBER}
+ }
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_AWS_PrincipalAccount_list(self):
+ condition_statement = {
+ "StringEquals": {"AWS:PrincipalAccount": [TRUSTED_AWS_ACCOUNT_NUMBER]}
+ }
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_AWS_PrincipalAccount_list_not_valid(self):
+ condition_statement = {
+ "StringEquals": {
+ "AWS:PrincipalAccount": [
+ TRUSTED_AWS_ACCOUNT_NUMBER,
+ NON_TRUSTED_AWS_ACCOUNT_NUMBER,
+ ]
+ }
+ }
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_AWS_PrincipalAccount_str(self):
+ condition_statement = {
+ "StringEquals": {"AWS:PrincipalAccount": TRUSTED_AWS_ACCOUNT_NUMBER}
+ }
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_AWS_PrincipalAccount_str_not_valid(self):
+ condition_statement = {
+ "StringEquals": {"AWS:PrincipalAccount": NON_TRUSTED_AWS_ACCOUNT_NUMBER}
+ }
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_PrincipalAccount_list(self):
+ condition_statement = {
+ "StringLike": {"AWS:PrincipalAccount": [TRUSTED_AWS_ACCOUNT_NUMBER]}
+ }
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_PrincipalAccount_list_not_valid(self):
+ condition_statement = {
+ "StringLike": {
+ "AWS:PrincipalAccount": [
+ TRUSTED_AWS_ACCOUNT_NUMBER,
+ NON_TRUSTED_AWS_ACCOUNT_NUMBER,
+ ]
+ }
+ }
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_PrincipalAccount_str(self):
+ condition_statement = {
+ "StringLike": {"AWS:PrincipalAccount": TRUSTED_AWS_ACCOUNT_NUMBER}
+ }
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_PrincipalAccount_str_not_valid(self):
+ condition_statement = {
+ "StringLike": {"AWS:PrincipalAccount": NON_TRUSTED_AWS_ACCOUNT_NUMBER}
+ }
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_like_AWS_SourceArn_list(self):
+ condition_statement = {
+ "ArnLike": {
+ "AWS:SourceArn": [
+ f"arn:aws:cloudtrail:*:{TRUSTED_AWS_ACCOUNT_NUMBER}:trail/*"
+ ]
+ }
+ }
+
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_like_AWS_SourceArn_list_not_valid(self):
+ condition_statement = {
+ "ArnLike": {
+ "AWS:SourceArn": [
+ f"arn:aws:cloudtrail:*:{TRUSTED_AWS_ACCOUNT_NUMBER}:trail/*",
+ f"arn:aws:cloudtrail:*:{NON_TRUSTED_AWS_ACCOUNT_NUMBER}:trail/*",
+ ]
+ }
+ }
+
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_like_AWS_SourceArn_str(self):
+ condition_statement = {
+ "ArnLike": {
+ "AWS:SourceArn": f"arn:aws:cloudtrail:*:{TRUSTED_AWS_ACCOUNT_NUMBER}:trail/*"
+ }
+ }
+
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_like_AWS_SourceArn_str_not_valid(self):
+ condition_statement = {
+ "ArnLike": {
+ "AWS:SourceArn": f"arn:aws:cloudtrail:*:{NON_TRUSTED_AWS_ACCOUNT_NUMBER}:trail/*"
+ }
+ }
+
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_like_AWS_PrincipalArn_list(self):
+ condition_statement = {
+ "ArnLike": {
+ "AWS:PrincipalArn": [
+ f"arn:aws:cloudtrail:*:{TRUSTED_AWS_ACCOUNT_NUMBER}:trail/*"
+ ]
+ }
+ }
+
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_like_AWS_PrincipalArn_list_not_valid(self):
+ condition_statement = {
+ "ArnLike": {
+ "AWS:PrincipalArn": [
+ f"arn:aws:cloudtrail:*:{TRUSTED_AWS_ACCOUNT_NUMBER}:trail/*",
+ f"arn:aws:cloudtrail:*:{NON_TRUSTED_AWS_ACCOUNT_NUMBER}:trail/*",
+ ]
+ }
+ }
+
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_like_AWS_PrincipalArn_str(self):
+ condition_statement = {
+ "ArnLike": {
+ "AWS:PrincipalArn": f"arn:aws:cloudtrail:*:{TRUSTED_AWS_ACCOUNT_NUMBER}:trail/*"
+ }
+ }
+
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_like_AWS_PrincipalArn_str_not_valid(self):
+ condition_statement = {
+ "ArnLike": {
+ "AWS:PrincipalArn": f"arn:aws:cloudtrail:*:{NON_TRUSTED_AWS_ACCOUNT_NUMBER}:trail/*"
+ }
+ }
+
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_equals_AWS_SourceArn_list(self):
+ condition_statement = {
+ "ArnEquals": {
+ "AWS:SourceArn": [
+ f"arn:aws:cloudtrail:eu-west-1:{TRUSTED_AWS_ACCOUNT_NUMBER}:trail/test"
+ ]
+ }
+ }
+
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_equals_AWS_SourceArn_list_not_valid(self):
+ condition_statement = {
+ "ArnEquals": {
+ "AWS:SourceArn": [
+ f"arn:aws:cloudtrail:eu-west-1:{TRUSTED_AWS_ACCOUNT_NUMBER}:trail/test",
+ f"arn:aws:cloudtrail:eu-west-1:{NON_TRUSTED_AWS_ACCOUNT_NUMBER}:trail/test",
+ ]
+ }
+ }
+
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_equals_AWS_SourceArn_str(self):
+ condition_statement = {
+ "ArnEquals": {
+ "AWS:SourceArn": f"arn:aws:cloudtrail:eu-west-1:{TRUSTED_AWS_ACCOUNT_NUMBER}:trail/test"
+ }
+ }
+
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_equals_AWS_SourceArn_str_not_valid(self):
+ condition_statement = {
+ "ArnEquals": {
+ "AWS:SourceArn": f"arn:aws:cloudtrail:eu-west-1:{NON_TRUSTED_AWS_ACCOUNT_NUMBER}:trail/test"
+ }
+ }
+
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_equals_AWS_PrincipalArn_list(self):
+ condition_statement = {
+ "ArnEquals": {
+ "AWS:PrincipalArn": [
+ f"arn:aws:cloudtrail:eu-west-1:{TRUSTED_AWS_ACCOUNT_NUMBER}:trail/test"
+ ]
+ }
+ }
+
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_equals_AWS_PrincipalArn_list_not_valid(self):
+ condition_statement = {
+ "ArnEquals": {
+ "AWS:PrincipalArn": [
+ f"arn:aws:cloudtrail:eu-west-1:{TRUSTED_AWS_ACCOUNT_NUMBER}:trail/test",
+ f"arn:aws:cloudtrail:eu-west-1:{NON_TRUSTED_AWS_ACCOUNT_NUMBER}:trail/test",
+ ]
+ }
+ }
+
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_equals_AWS_PrincipalArn_str(self):
+ condition_statement = {
+ "ArnEquals": {
+ "AWS:PrincipalArn": f"arn:aws:cloudtrail:eu-west-1:{TRUSTED_AWS_ACCOUNT_NUMBER}:trail/test"
+ }
+ }
+
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_arn_equals_AWS_PrincipalArn_str_not_valid(self):
+ condition_statement = {
+ "ArnEquals": {
+ "AWS:PrincipalArn": f"arn:aws:cloudtrail:eu-west-1:{NON_TRUSTED_AWS_ACCOUNT_NUMBER}:trail/test"
+ }
+ }
+
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_SourceArn_list(self):
+ condition_statement = {
+ "StringLike": {
+ "AWS:SourceArn": [
+ f"arn:aws:cloudtrail:eu-west-1:{TRUSTED_AWS_ACCOUNT_NUMBER}:trail/test"
+ ]
+ }
+ }
+
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_SourceArn_list_not_valid(self):
+ condition_statement = {
+ "StringLike": {
+ "AWS:SourceArn": [
+ f"arn:aws:cloudtrail:eu-west-1:{TRUSTED_AWS_ACCOUNT_NUMBER}:trail/test",
+ f"arn:aws:cloudtrail:eu-west-1:{NON_TRUSTED_AWS_ACCOUNT_NUMBER}:trail/test",
+ ]
+ }
+ }
+
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_SourceArn_str(self):
+ condition_statement = {
+ "StringLike": {
+ "AWS:SourceArn": f"arn:aws:cloudtrail:eu-west-1:{TRUSTED_AWS_ACCOUNT_NUMBER}:trail/test"
+ }
+ }
+
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_SourceArn_str_not_valid(self):
+ condition_statement = {
+ "StringLike": {
+ "AWS:SourceArn": f"arn:aws:cloudtrail:eu-west-1:{NON_TRUSTED_AWS_ACCOUNT_NUMBER}:trail/test"
+ }
+ }
+
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_PrincipalArn_list(self):
+ condition_statement = {
+ "StringLike": {
+ "AWS:PrincipalArn": [
+ f"arn:aws:cloudtrail:eu-west-1:{TRUSTED_AWS_ACCOUNT_NUMBER}:trail/test"
+ ]
+ }
+ }
+
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_PrincipalArn_list_not_valid(self):
+ condition_statement = {
+ "StringLike": {
+ "AWS:PrincipalArn": [
+ f"arn:aws:cloudtrail:eu-west-1:{TRUSTED_AWS_ACCOUNT_NUMBER}:trail/test",
+ f"arn:aws:cloudtrail:eu-west-1:{NON_TRUSTED_AWS_ACCOUNT_NUMBER}:trail/test",
+ ]
+ }
+ }
+
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_PrincipalArn_str(self):
+ condition_statement = {
+ "StringLike": {
+ "AWS:PrincipalArn": f"arn:aws:cloudtrail:eu-west-1:{TRUSTED_AWS_ACCOUNT_NUMBER}:trail/test"
+ }
+ }
+
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_PrincipalArn_str_not_valid(self):
+ condition_statement = {
+ "StringLike": {
+ "AWS:PrincipalArn": f"arn:aws:cloudtrail:eu-west-1:{NON_TRUSTED_AWS_ACCOUNT_NUMBER}:trail/test"
+ }
+ }
+
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_AWS_ResourceAccount_list(self):
+ condition_statement = {
+ "StringEquals": {"AWS:ResourceAccount": [TRUSTED_AWS_ACCOUNT_NUMBER]}
+ }
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_AWS_ResourceAccount_list_not_valid(self):
+ condition_statement = {
+ "StringEquals": {
+ "AWS:ResourceAccount": [
+ TRUSTED_AWS_ACCOUNT_NUMBER,
+ NON_TRUSTED_AWS_ACCOUNT_NUMBER,
+ ]
+ }
+ }
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_AWS_ResourceAccount_str(self):
+ condition_statement = {
+ "StringEquals": {"AWS:ResourceAccount": TRUSTED_AWS_ACCOUNT_NUMBER}
+ }
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_equals_AWS_ResourceAccount_str_not_valid(self):
+ condition_statement = {
+ "StringEquals": {"AWS:ResourceAccount": NON_TRUSTED_AWS_ACCOUNT_NUMBER}
+ }
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_ResourceAccount_list(self):
+ condition_statement = {
+ "StringLike": {"AWS:ResourceAccount": [TRUSTED_AWS_ACCOUNT_NUMBER]}
+ }
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_ResourceAccount_list_not_valid(self):
+ condition_statement = {
+ "StringLike": {
+ "AWS:ResourceAccount": [
+ TRUSTED_AWS_ACCOUNT_NUMBER,
+ NON_TRUSTED_AWS_ACCOUNT_NUMBER,
+ ]
+ }
+ }
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_ResourceAccount_str(self):
+ condition_statement = {
+ "StringLike": {"AWS:ResourceAccount": TRUSTED_AWS_ACCOUNT_NUMBER}
+ }
+ assert is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
+
+ def test_condition_parser_string_like_AWS_ResourceAccount_str_not_valid(self):
+ condition_statement = {
+ "StringLike": {"AWS:ResourceAccount": NON_TRUSTED_AWS_ACCOUNT_NUMBER}
+ }
+ assert not is_account_only_allowed_in_condition(
+ condition_statement, TRUSTED_AWS_ACCOUNT_NUMBER
+ )
| [Bug]: Context key names in conditions are case-insensitive
### Steps to Reproduce
As you can see here https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition.html, context key names in conditions are case-insensitive. Some default AWS policies use "AWS:xxxx" instead of "aws:xxxx", so Prowler fails to detect them. It can also happen with manually written policies, if someone puts "aws:sourceaccount" instead of "aws:SourceAccount". So is_account_only_allowed_in_condition must be case-insensitive to work.
I found it on an SNS default policy, which looks like this:
```
{
"Version": "2008-10-17",
"Id": "__default_policy_ID",
"Statement": [
{
"Sid": "__default_statement_ID",
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": [
"SNS:GetTopicAttributes",
"SNS:SetTopicAttributes",
"SNS:AddPermission",
"SNS:RemovePermission",
"SNS:DeleteTopic",
"SNS:Subscribe",
"SNS:ListSubscriptionsByTopic",
"SNS:Publish",
"SNS:Receive"
],
"Resource": "arn:aws:sns:eu-west-1:XXXXXXXX:config-topic",
"Condition": {
"StringEquals": {
"AWS:SourceOwner": "XXXXXXXX"
}
}
}
]
}
```
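A minimal sketch of a case-insensitive lookup for the condition block above (it mirrors the lowercase normalization applied in this PR's patch; the snippet itself is only illustrative):
```python
condition_block = {"StringEquals": {"AWS:SourceOwner": "123456789012"}}

# Context key names are case-insensitive, so normalize them to lowercase before matching.
normalized = {
    operator: {key.lower(): value for key, value in keys.items()}
    for operator, keys in condition_block.items()
}
print("aws:sourceowner" in normalized["StringEquals"])  # True -> the source-account condition is recognized
```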
### Expected behavior
It should pass when a condition like "AWS:SourceOwner" with a valid value is found.
### Actual Result with Screenshots or Logs
It can't correctly detect the condition, so the check fails.
╭────────────┬───────────┬───────────┬────────────┬────────┬──────────┬───────╮
│ Provider │ Service │ Status │ Critical │ High │ Medium │ Low │
├────────────┼───────────┼───────────┼────────────┼────────┼──────────┼───────┤
│ aws │ sns │ FAIL (13) │ 0 │ 13 │ 0 │ 0 │
╰────────────┴───────────┴───────────┴────────────┴────────┴──────────┴───────╯
### How did you install Prowler?
Cloning the repository from github.com (git clone)
### Environment Resource
8. SNS
### OS used
5. Ubuntu
### Prowler version
Prowler 3.8.2
### Pip version
pip 23.2.1
### Context
_No response_
| 2023-08-14T10:10:12 |
|
prowler-cloud/prowler | 2,736 | prowler-cloud__prowler-2736 | [
"2535"
] | 8f091e7548e98e292b98730ced785537c22c3261 | diff --git a/prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py b/prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py
--- a/prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py
+++ b/prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py
@@ -5,6 +5,12 @@
class ecr_repositories_scan_vulnerabilities_in_latest_image(Check):
def execute(self):
findings = []
+
+ # Get minimun severity to report
+ minimum_severity = ecr_client.audit_config.get(
+ "ecr_repository_vulnerability_minimum_severity", "MEDIUM"
+ )
+
for registry in ecr_client.registries.values():
for repository in registry.repositories:
# First check if the repository has images
@@ -27,8 +33,23 @@ def execute(self):
report.status_extended = (
f"ECR repository {repository.name} with scan status FAILED."
)
- elif image.scan_findings_status != "FAILED":
- if image.scan_findings_severity_count and (
+ elif (
+ image.scan_findings_status != "FAILED"
+ and image.scan_findings_severity_count
+ ):
+ if (
+ minimum_severity == "CRITICAL"
+ and image.scan_findings_severity_count.critical
+ ):
+ report.status = "FAIL"
+ report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}."
+ elif minimum_severity == "HIGH" and (
+ image.scan_findings_severity_count.critical
+ or image.scan_findings_severity_count.high
+ ):
+ report.status = "FAIL"
+ report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}, HIGH->{image.scan_findings_severity_count.high}."
+ elif minimum_severity == "MEDIUM" and (
image.scan_findings_severity_count.critical
or image.scan_findings_severity_count.high
or image.scan_findings_severity_count.medium
| diff --git a/tests/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image_test.py b/tests/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image_test.py
--- a/tests/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image_test.py
+++ b/tests/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image_test.py
@@ -1,5 +1,4 @@
from datetime import datetime
-from re import search
from unittest import mock
from prowler.providers.aws.services.ecr.ecr_service import (
@@ -16,6 +15,7 @@
repository_arn = (
f"arn:aws:ecr:eu-west-1:{AWS_ACCOUNT_NUMBER}:repository/{repository_name}"
)
+latest_tag = "test-tag"
repo_policy_public = {
"Version": "2012-10-17",
"Statement": [
@@ -118,7 +118,7 @@ def test_image_scaned_without_findings(self):
policy=repo_policy_public,
images_details=[
ImageDetails(
- latest_tag="test-tag",
+ latest_tag=latest_tag,
latest_digest="test-digest",
image_pushed_at=datetime(2023, 1, 1),
scan_findings_status="COMPLETE",
@@ -145,11 +145,69 @@ def test_image_scaned_without_findings(self):
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
- assert search("scanned without findings", result[0].status_extended)
+ assert (
+ result[0].status_extended
+ == f"ECR repository {repository_name} has imageTag {latest_tag} scanned without findings."
+ )
+ assert result[0].resource_id == repository_name
+ assert result[0].resource_arn == repository_arn
+
+ def test_image_scanned_with_findings_default_severity_MEDIUM(self):
+ ecr_client = mock.MagicMock
+ ecr_client.registries = {}
+ ecr_client.registries[AWS_REGION] = Registry(
+ id=AWS_ACCOUNT_NUMBER,
+ region=AWS_REGION,
+ scan_type="BASIC",
+ repositories=[
+ Repository(
+ name=repository_name,
+ arn=repository_arn,
+ region=AWS_REGION,
+ scan_on_push=True,
+ policy=repo_policy_public,
+ images_details=[
+ ImageDetails(
+ latest_tag=latest_tag,
+ latest_digest="test-digest",
+ image_pushed_at=datetime(2023, 1, 1),
+ scan_findings_status="COMPLETE",
+ scan_findings_severity_count=FindingSeverityCounts(
+ critical=12, high=34, medium=7
+ ),
+ )
+ ],
+ lifecycle_policy=None,
+ )
+ ],
+ rules=[],
+ )
+
+ # Set audit_config
+ ecr_client.audit_config = {
+ "ecr_repository_vulnerability_minimum_severity": "MEDIUM"
+ }
+
+ with mock.patch(
+ "prowler.providers.aws.services.ecr.ecr_service.ECR",
+ ecr_client,
+ ):
+ from prowler.providers.aws.services.ecr.ecr_repositories_scan_vulnerabilities_in_latest_image.ecr_repositories_scan_vulnerabilities_in_latest_image import (
+ ecr_repositories_scan_vulnerabilities_in_latest_image,
+ )
+
+ check = ecr_repositories_scan_vulnerabilities_in_latest_image()
+ result = check.execute()
+ assert len(result) == 1
+ assert result[0].status == "FAIL"
+ assert (
+ result[0].status_extended
+ == f"ECR repository {repository_name} has imageTag {latest_tag} scanned with findings: CRITICAL->{12}, HIGH->{34}, MEDIUM->{7}."
+ )
assert result[0].resource_id == repository_name
assert result[0].resource_arn == repository_arn
- def test_image_scanned_with_findings(self):
+ def test_image_scanned_with_findings_default_severity_HIGH(self):
ecr_client = mock.MagicMock
ecr_client.registries = {}
ecr_client.registries[AWS_REGION] = Registry(
@@ -165,7 +223,7 @@ def test_image_scanned_with_findings(self):
policy=repo_policy_public,
images_details=[
ImageDetails(
- latest_tag="test-tag",
+ latest_tag=latest_tag,
latest_digest="test-digest",
image_pushed_at=datetime(2023, 1, 1),
scan_findings_status="COMPLETE",
@@ -180,6 +238,66 @@ def test_image_scanned_with_findings(self):
rules=[],
)
+ # Set audit_config
+ ecr_client.audit_config = {
+ "ecr_repository_vulnerability_minimum_severity": "HIGH"
+ }
+
+ with mock.patch(
+ "prowler.providers.aws.services.ecr.ecr_service.ECR",
+ ecr_client,
+ ):
+ from prowler.providers.aws.services.ecr.ecr_repositories_scan_vulnerabilities_in_latest_image.ecr_repositories_scan_vulnerabilities_in_latest_image import (
+ ecr_repositories_scan_vulnerabilities_in_latest_image,
+ )
+
+ check = ecr_repositories_scan_vulnerabilities_in_latest_image()
+ result = check.execute()
+ assert len(result) == 1
+ assert result[0].status == "FAIL"
+ assert (
+ result[0].status_extended
+ == f"ECR repository {repository_name} has imageTag {latest_tag} scanned with findings: CRITICAL->{12}, HIGH->{34}."
+ )
+ assert result[0].resource_id == repository_name
+ assert result[0].resource_arn == repository_arn
+
+ def test_image_scanned_with_findings_default_severity_CRITICAL(self):
+ ecr_client = mock.MagicMock
+ ecr_client.registries = {}
+ ecr_client.registries[AWS_REGION] = Registry(
+ id=AWS_ACCOUNT_NUMBER,
+ region=AWS_REGION,
+ scan_type="BASIC",
+ repositories=[
+ Repository(
+ name=repository_name,
+ arn=repository_arn,
+ region=AWS_REGION,
+ scan_on_push=True,
+ policy=repo_policy_public,
+ images_details=[
+ ImageDetails(
+ latest_tag=latest_tag,
+ latest_digest="test-digest",
+ image_pushed_at=datetime(2023, 1, 1),
+ scan_findings_status="COMPLETE",
+ scan_findings_severity_count=FindingSeverityCounts(
+ critical=12, high=34, medium=7
+ ),
+ )
+ ],
+ lifecycle_policy=None,
+ )
+ ],
+ rules=[],
+ )
+
+ # Set audit_config
+ ecr_client.audit_config = {
+ "ecr_repository_vulnerability_minimum_severity": "CRITICAL"
+ }
+
with mock.patch(
"prowler.providers.aws.services.ecr.ecr_service.ECR",
ecr_client,
@@ -192,7 +310,122 @@ def test_image_scanned_with_findings(self):
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
- assert search("scanned with findings:", result[0].status_extended)
+ assert (
+ result[0].status_extended
+ == f"ECR repository {repository_name} has imageTag {latest_tag} scanned with findings: CRITICAL->{12}."
+ )
+ assert result[0].resource_id == repository_name
+ assert result[0].resource_arn == repository_arn
+
+ def test_image_scanned_without_CRITICAL_findings_default_severity_CRITICAL(self):
+ ecr_client = mock.MagicMock
+ ecr_client.registries = {}
+ ecr_client.registries[AWS_REGION] = Registry(
+ id=AWS_ACCOUNT_NUMBER,
+ region=AWS_REGION,
+ scan_type="BASIC",
+ repositories=[
+ Repository(
+ name=repository_name,
+ arn=repository_arn,
+ region=AWS_REGION,
+ scan_on_push=True,
+ policy=repo_policy_public,
+ images_details=[
+ ImageDetails(
+ latest_tag=latest_tag,
+ latest_digest="test-digest",
+ image_pushed_at=datetime(2023, 1, 1),
+ scan_findings_status="COMPLETE",
+ scan_findings_severity_count=FindingSeverityCounts(
+ critical=0, high=34, medium=7
+ ),
+ )
+ ],
+ lifecycle_policy=None,
+ )
+ ],
+ rules=[],
+ )
+
+ # Set audit_config
+ ecr_client.audit_config = {
+ "ecr_repository_vulnerability_minimum_severity": "CRITICAL"
+ }
+
+ with mock.patch(
+ "prowler.providers.aws.services.ecr.ecr_service.ECR",
+ ecr_client,
+ ):
+ from prowler.providers.aws.services.ecr.ecr_repositories_scan_vulnerabilities_in_latest_image.ecr_repositories_scan_vulnerabilities_in_latest_image import (
+ ecr_repositories_scan_vulnerabilities_in_latest_image,
+ )
+
+ check = ecr_repositories_scan_vulnerabilities_in_latest_image()
+ result = check.execute()
+ assert len(result) == 1
+ assert result[0].status == "PASS"
+ assert (
+ result[0].status_extended
+ == f"ECR repository {repository_name} has imageTag {latest_tag} scanned without findings."
+ )
+ assert result[0].resource_id == repository_name
+ assert result[0].resource_arn == repository_arn
+
+ def test_image_scanned_without_CRITICAL_and_HIGH_findings_default_severity_HIGH(
+ self,
+ ):
+ ecr_client = mock.MagicMock
+ ecr_client.registries = {}
+ ecr_client.registries[AWS_REGION] = Registry(
+ id=AWS_ACCOUNT_NUMBER,
+ region=AWS_REGION,
+ scan_type="BASIC",
+ repositories=[
+ Repository(
+ name=repository_name,
+ arn=repository_arn,
+ region=AWS_REGION,
+ scan_on_push=True,
+ policy=repo_policy_public,
+ images_details=[
+ ImageDetails(
+ latest_tag=latest_tag,
+ latest_digest="test-digest",
+ image_pushed_at=datetime(2023, 1, 1),
+ scan_findings_status="COMPLETE",
+ scan_findings_severity_count=FindingSeverityCounts(
+ critical=0, high=0, medium=7
+ ),
+ )
+ ],
+ lifecycle_policy=None,
+ )
+ ],
+ rules=[],
+ )
+
+ # Set audit_config
+ ecr_client.audit_config = {
+ "ecr_repository_vulnerability_minimum_severity": "HIGH"
+ }
+
+ with mock.patch(
+ "prowler.providers.aws.services.ecr.ecr_service.ECR",
+ ecr_client,
+ ):
+ from prowler.providers.aws.services.ecr.ecr_repositories_scan_vulnerabilities_in_latest_image.ecr_repositories_scan_vulnerabilities_in_latest_image import (
+ ecr_repositories_scan_vulnerabilities_in_latest_image,
+ )
+
+ check = ecr_repositories_scan_vulnerabilities_in_latest_image()
+ result = check.execute()
+ assert len(result) == 1
+ assert result[0].status == "PASS"
+ assert (
+ result[0].status_extended
+ == f"ECR repository {repository_name} has imageTag {latest_tag} scanned without findings."
+ )
assert result[0].resource_id == repository_name
assert result[0].resource_arn == repository_arn
@@ -212,7 +445,7 @@ def test_image_scanned_fail_scan(self):
policy=repo_policy_public,
images_details=[
ImageDetails(
- latest_tag="test-tag",
+ latest_tag=latest_tag,
latest_digest="test-digest",
image_pushed_at=datetime(2023, 1, 1),
scan_findings_status="FAILED",
@@ -239,7 +472,10 @@ def test_image_scanned_fail_scan(self):
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
- assert search("with scan status FAILED", result[0].status_extended)
+ assert (
+ result[0].status_extended
+ == f"ECR repository {repository_name} with scan status FAILED."
+ )
assert result[0].resource_id == repository_name
assert result[0].resource_arn == repository_arn
@@ -259,7 +495,7 @@ def test_image_not_scanned(self):
policy=repo_policy_public,
images_details=[
ImageDetails(
- latest_tag="test-tag",
+ latest_tag=latest_tag,
latest_digest="test-digest",
image_pushed_at=datetime(2023, 1, 1),
scan_findings_status="",
@@ -286,6 +522,9 @@ def test_image_not_scanned(self):
result = check.execute()
assert len(result) == 1
assert result[0].status == "FAIL"
- assert search("without a scan", result[0].status_extended)
+ assert (
+ result[0].status_extended
+ == f"ECR repository {repository_name} has imageTag {latest_tag} without a scan."
+ )
assert result[0].resource_id == repository_name
assert result[0].resource_arn == repository_arn
| ecr_repositories_scan_vulnerabilities_in_latest_image: Configure level
### New feature motivation
Hi, is it possible to configure the severity level at which the check should fail?
AWS tags some findings as medium which I might want to ignore, but of course I don't want to mute critical findings for the image.
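For reference, the change in this PR reads a config value named ecr_repository_vulnerability_minimum_severity (defaulting to MEDIUM); the snippet below is a rough sketch of how such a threshold could be applied, as an illustration rather than the exact implementation:
```python
SEVERITY_ORDER = ["MEDIUM", "HIGH", "CRITICAL"]  # assumed set of configurable levels


def image_fails(minimum_severity: str, counts: dict) -> bool:
    """Fail only if the image has findings at or above the configured minimum severity."""
    levels_to_check = SEVERITY_ORDER[SEVERITY_ORDER.index(minimum_severity):]
    return any(counts.get(level.lower(), 0) > 0 for level in levels_to_check)


print(image_fails("CRITICAL", {"critical": 0, "high": 34, "medium": 7}))  # False -> PASS, medium/high ignored
print(image_fails("MEDIUM", {"critical": 0, "high": 0, "medium": 7}))     # True  -> FAIL on medium findings
```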
### Solution Proposed
none
### Describe alternatives you've considered
none
### Additional context
_No response_
| Hi @HarryBo112, we will add a config variable to set the desired severity of the ECR image findings that you do not want to ignore. | 2023-08-16T08:34:53 |
prowler-cloud/prowler | 2,772 | prowler-cloud__prowler-2772 | [
"2770"
] | 957ffaabaefa332220a2fdd39e600172ef39bcbb | diff --git a/prowler/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation.py b/prowler/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation.py
--- a/prowler/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation.py
+++ b/prowler/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation.py
@@ -1,5 +1,3 @@
-from re import search
-
from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.iam.iam_client import iam_client
@@ -168,7 +166,8 @@ def execute(self) -> Check_Report_AWS:
action = api_action[1]
# Add permissions if the API is present
if action == "*":
- if search(api, val):
+ val_api = val.split(":")[0]
+ if api == val_api:
policies_combination.add(val)
# len() == 1, so *
| diff --git a/tests/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation_test.py b/tests/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation_test.py
--- a/tests/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation_test.py
+++ b/tests/providers/aws/services/iam/iam_policy_allows_privilege_escalation/iam_policy_allows_privilege_escalation_test.py
@@ -919,3 +919,61 @@ def test_iam_policy_allows_privilege_escalation_administrator_policy(
permissions
]:
assert search(permission, finding.status_extended)
+
+ @mock_iam
+ def test_iam_policy_not_allows_privilege_escalation_custom_policy(
+ self,
+ ):
+ current_audit_info = self.set_mocked_audit_info()
+ iam_client = client("iam", region_name=AWS_REGION)
+ policy_name_1 = "privileged_policy_1"
+ policy_document_1 = {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "",
+ "Effect": "Allow",
+ "Action": ["es:List*", "es:Get*", "es:Describe*"],
+ "Resource": "*",
+ },
+ {
+ "Sid": "",
+ "Effect": "Allow",
+ "Action": "es:*",
+ "Resource": f"arn:aws:es:{AWS_REGION}:{AWS_ACCOUNT_NUMBER}:domain/test/*",
+ },
+ ],
+ }
+
+ policy_arn_1 = iam_client.create_policy(
+ PolicyName=policy_name_1, PolicyDocument=dumps(policy_document_1)
+ )["Policy"]["Arn"]
+
+ from prowler.providers.aws.services.iam.iam_service import IAM
+
+ with mock.patch(
+ "prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
+ new=current_audit_info,
+ ), mock.patch(
+ "prowler.providers.aws.services.iam.iam_policy_allows_privilege_escalation.iam_policy_allows_privilege_escalation.iam_client",
+ new=IAM(current_audit_info),
+ ):
+ # Test Check
+ from prowler.providers.aws.services.iam.iam_policy_allows_privilege_escalation.iam_policy_allows_privilege_escalation import (
+ iam_policy_allows_privilege_escalation,
+ )
+
+ check = iam_policy_allows_privilege_escalation()
+ result = check.execute()
+ assert len(result) == 1
+ for finding in result:
+ if finding.resource_id == policy_name_1:
+ assert finding.status == "PASS"
+ assert finding.resource_id == policy_name_1
+ assert finding.resource_arn == policy_arn_1
+ assert finding.region == AWS_REGION
+ assert finding.resource_tags == []
+ assert (
+ finding.status_extended
+ == f"Custom Policy {policy_arn_1} does not allow privilege escalation."
+ )
| [Bug]: iam_policy_allows_privilege_escalation is raising false positives
### Steps to Reproduce
When looking at the scan results for "Ensure no Customer Managed IAM policies allow actions that may lead into Privilege Escalation", it looks like we are now getting bad results for it with Prowler version 3.8.2.
If I run a scan on an AWS IAM policy defined with the JSON below, I get the following failure:
_Custom Policy arn:aws:iam::xxxxxxxxxxx:policy/app-user-policy-hertzcp-pprd allows privilege escalation using the following actions: {'iam:CreateAccessKey'}_
Below is the AWS IAM policy JSON that was scanned; as you can see, the policy does not contain 'iam:CreateAccessKey':
```
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Action": [
"es:List*",
"es:Get*",
"es:Describe*"
],
"Resource": "*"
},
{
"Sid": "",
"Effect": "Allow",
"Action": "es:*",
"Resource": "arn:aws:es:us-west-2:xxxxxxxxxxxx:domain/g-clients-infra-pprd/*"
}
]
}
```
When I used Prowler version 3.4.1, it did not report this finding for the above policy.
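For reference, a minimal sketch of how a substring match can produce exactly this finding for an `es:*` policy, consistent with the fix in the patch above (illustrative Python only, not the actual Prowler check; the variable names are assumptions):
```
from re import search

policy_api = "es"                        # service prefix taken from the policy action "es:*"
escalation_perm = "iam:CreateAccessKey"  # escalation permission the check looks for

# Substring search: "es" is found inside "CreateAccessKey", so the permission
# is wrongly treated as granted by the policy.
print(bool(search(policy_api, escalation_perm)))    # True  -> false positive

# Exact comparison of service prefixes avoids the mismatch.
print(policy_api == escalation_perm.split(":")[0])  # False -> no finding
```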
Also -
Prowler version 3.8.2 now reports the below policy as passing for "Ensure no Customer Managed IAM policies allow actions that may lead into Privilege Escalation":
```
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Action": "sts:AssumeRole",
"Resource": "arn:aws:iam::*:role/xena-role"
}
]
}
```
When a scan was run using Prowler version 3.4.1, the above policy was reported as a failure for "Ensure no Customer Managed IAM policies allow actions that may lead into Privilege Escalation".
I believe this policy should still be reported as a failure, and that Prowler version 3.8.2 has a bug here.
### Expected behavior
The first policy should pass and the second policy should fail.
### Actual Result with Screenshots or Logs
[prowler-output-741743798098-20230823103540.csv](https://github.com/prowler-cloud/prowler/files/12421414/prowler-output-741743798098-20230823103540.csv)
### How did you install Prowler?
From pip package (pip install prowler)
### Environment Resource
EKS and ran the scan locally
### OS used
MacOS
### Prowler version
3.8.2
### Pip version
21.1.3
### Context
_No response_
| 2023-08-24T06:56:11 |
|
pulp/pulpcore | 6 | pulp__pulpcore-6 | [
"3044"
] | e212f35a1c4e92d2a1f371b648747c0e7417011a | diff --git a/pulpcore/app/tasks/__init__.py b/pulpcore/app/tasks/__init__.py
--- a/pulpcore/app/tasks/__init__.py
+++ b/pulpcore/app/tasks/__init__.py
@@ -1,3 +1,3 @@
-from pulpcore.app.tasks import base, repository # noqa
+from pulpcore.app.tasks import base, distribution, repository # noqa
from .orphan import orphan_cleanup # noqa
diff --git a/pulpcore/app/tasks/distribution.py b/pulpcore/app/tasks/distribution.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/app/tasks/distribution.py
@@ -0,0 +1,56 @@
+from django.core.exceptions import ObjectDoesNotExist
+
+from pulpcore.app.models import Distribution, CreatedResource
+from pulpcore.app.serializers import DistributionSerializer
+
+
+def create(*args, **kwargs):
+ """
+ Creates a :class:`~pulpcore.app.models.Distribution`
+
+ Raises:
+ ValidationError: If the DistributionSerializer is not valid
+ """
+ data = kwargs.pop('data', None)
+ serializer = DistributionSerializer(data=data)
+ serializer.is_valid(raise_exception=True)
+ serializer.save()
+ resource = CreatedResource(content_object=serializer.instance)
+ resource.save()
+
+
+def update(instance_id, *args, **kwargs):
+ """
+ Updates a :class:`~pulpcore.app.models.Distribution`
+
+ Args:
+ instance_id (int): The id of the distribution to be updated
+
+ Raises:
+ ValidationError: If the DistributionSerializer is not valid
+ """
+ data = kwargs.pop('data', None)
+ partial = kwargs.pop('partial', False)
+ instance = Distribution.objects.get(pk=instance_id)
+ serializer = DistributionSerializer(instance, data=data, partial=partial)
+ serializer.is_valid(raise_exception=True)
+ serializer.save()
+
+
+def delete(instance_id, *args, **kwargs):
+ """
+ Delete a :class:`~pulpcore.app.models.Distribution`
+
+ Args:
+ instance_id (int): The id of the Distribution to be deleted
+
+ Raises:
+ ObjectDoesNotExist: If the Distribution was already deleted
+ """
+ try:
+ instance = Distribution.objects.get(pk=instance_id)
+ except ObjectDoesNotExist:
+ # The object was already deleted, and we don't want an error thrown trying to delete again.
+ return
+ else:
+ instance.delete()
diff --git a/pulpcore/app/viewsets/publication.py b/pulpcore/app/viewsets/publication.py
--- a/pulpcore/app/viewsets/publication.py
+++ b/pulpcore/app/viewsets/publication.py
@@ -1,7 +1,12 @@
from django_filters.rest_framework import filters, DjangoFilterBackend
+from drf_yasg.utils import swagger_auto_schema
from rest_framework import mixins
from rest_framework.filters import OrderingFilter
+from pulpcore.app import tasks
+from pulpcore.app.response import OperationPostponedResponse
+from pulpcore.tasking.tasks import enqueue_with_reservation
+
from pulpcore.app.models import (
ContentGuard,
Distribution,
@@ -70,12 +75,72 @@ class Meta:
class DistributionViewSet(NamedModelViewSet,
- mixins.CreateModelMixin,
mixins.UpdateModelMixin,
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
mixins.DestroyModelMixin):
+ """
+ Provides read and list methods and also provides asynchronous CUD methods to dispatch tasks
+ with reservation that lock all Distributions preventing race conditions during base_path
+ checking.
+ """
endpoint_name = 'distributions'
queryset = Distribution.objects.all()
serializer_class = DistributionSerializer
filterset_class = DistributionFilter
+
+ @swagger_auto_schema(operation_description="Trigger an asynchronous create task",
+ responses={202: DistributionSerializer})
+ def create(self, request, *args, **kwargs):
+ """
+ Dispatches a task with reservation for creating a distribution.
+ """
+ serializer = self.get_serializer(data=request.data)
+ serializer.is_valid(raise_exception=True)
+ async_result = enqueue_with_reservation(
+ tasks.distribution.create,
+ "/api/v3/distributions/",
+ kwargs={'data': request.data}
+ )
+ return OperationPostponedResponse(async_result, request)
+
+ @swagger_auto_schema(operation_description="Trigger an asynchronous update task",
+ responses={202: DistributionSerializer})
+ def update(self, request, pk, *args, **kwargs):
+ """
+ Dispatches a task with reservation for updating a distribution.
+ """
+ partial = kwargs.pop('partial', False)
+ instance = self.get_object()
+ serializer = self.get_serializer(instance, data=request.data, partial=partial)
+ serializer.is_valid(raise_exception=True)
+ async_result = enqueue_with_reservation(
+ tasks.distribution.update,
+ "/api/v3/distributions/",
+ args=(pk,),
+ kwargs={'data': request.data, 'partial': partial}
+ )
+ return OperationPostponedResponse(async_result, request)
+
+ @swagger_auto_schema(operation_description="Trigger an asynchronous partial update task",
+ responses={202: DistributionSerializer})
+ def partial_update(self, request, *args, **kwargs):
+ """
+ Dispatches a task with reservation for partially updating a distribution.
+ """
+ kwargs['partial'] = True
+ return self.update(request, *args, **kwargs)
+
+ @swagger_auto_schema(operation_description="Trigger an asynchronous delete task",
+ responses={202: DistributionSerializer})
+ def delete(self, request, pk, *args, **kwargs):
+ """
+ Dispatches a task with reservation for deleting a distribution.
+ """
+ self.get_object()
+ async_result = enqueue_with_reservation(
+ tasks.distribution.delete,
+ "/api/v3/distributions/",
+ args=(pk,)
+ )
+ return OperationPostponedResponse(async_result, request)
| diff --git a/pulpcore/tests/functional/api/test_crud_distributions.py b/pulpcore/tests/functional/api/test_crud_distributions.py
--- a/pulpcore/tests/functional/api/test_crud_distributions.py
+++ b/pulpcore/tests/functional/api/test_crud_distributions.py
@@ -26,9 +26,12 @@ def setUpClass(cls):
def test_01_create_distribution(self):
"""Create a distribution."""
body = gen_distribution()
- type(self).distribution = self.client.post(
+ response_dict = self.client.post(
DISTRIBUTION_PATH, body
)
+ dist_task = self.client.get(response_dict['task'])
+ distribution_href = dist_task['created_resources'][0]
+ type(self).distribution = self.client.get(distribution_href)
for key, val in body.items():
with self.subTest(key=key):
self.assertEqual(self.distribution[key], val)
@@ -158,7 +161,10 @@ def setUpClass(cls):
cls.client = api.Client(cls.cfg, api.json_handler)
body = gen_distribution()
body['base_path'] = body['base_path'].replace('-', '/')
- cls.distribution = cls.client.post(DISTRIBUTION_PATH, body)
+ response_dict = cls.client.post(DISTRIBUTION_PATH, body)
+ dist_task = cls.client.get(response_dict['task'])
+ distribution_href = dist_task['created_resources'][0]
+ cls.distribution = cls.client.get(distribution_href)
@classmethod
def tearDownClass(cls):
diff --git a/pulpcore/tests/functional/api/using_plugin/test_auto_distribution.py b/pulpcore/tests/functional/api/using_plugin/test_auto_distribution.py
--- a/pulpcore/tests/functional/api/using_plugin/test_auto_distribution.py
+++ b/pulpcore/tests/functional/api/using_plugin/test_auto_distribution.py
@@ -94,7 +94,10 @@ def test_repo_auto_distribution(self):
body['repository'] = repo['_href']
body['publisher'] = publisher['_href']
- distribution = self.client.post(DISTRIBUTION_PATH, body)
+ response_dict = self.client.post(DISTRIBUTION_PATH, body)
+ dist_task = self.client.get(response_dict['task'])
+ distribution_href = dist_task['created_resources'][0]
+ distribution = self.client.get(distribution_href)
self.addCleanup(self.client.delete, distribution['_href'])
last_version_href = get_versions(repo)[-1]['_href']
@@ -172,16 +175,20 @@ def test_all(self):
body = gen_distribution()
body['publisher'] = publisher['_href']
body['repository'] = repo['_href']
- distribution = self.client.post(DISTRIBUTION_PATH, body)
+ response_dict = self.client.post(DISTRIBUTION_PATH, body)
+ dist_task = self.client.get(response_dict['task'])
+ distribution_href = dist_task['created_resources'][0]
+ distribution = self.client.get(distribution_href)
self.addCleanup(self.client.delete, distribution['_href'])
# Update the distribution.
self.try_update_distribution(distribution, publisher=None)
self.try_update_distribution(distribution, repository=None)
- distribution = self.client.patch(distribution['_href'], {
+ self.client.patch(distribution['_href'], {
'publisher': None,
'repository': None,
})
+ distribution = self.client.get(distribution['_href'])
self.assertIsNone(distribution['publisher'], distribution)
self.assertIsNone(distribution['repository'], distribution)
diff --git a/pulpcore/tests/functional/api/using_plugin/test_content_app.py b/pulpcore/tests/functional/api/using_plugin/test_content_app.py
--- a/pulpcore/tests/functional/api/using_plugin/test_content_app.py
+++ b/pulpcore/tests/functional/api/using_plugin/test_content_app.py
@@ -77,7 +77,10 @@ def test_content_app_returns_404(self):
body['repository'] = repo['_href']
body['publisher'] = publisher['_href']
- distribution = self.client.post(DISTRIBUTION_PATH, body)
+ response_dict = self.client.post(DISTRIBUTION_PATH, body)
+ dist_task = self.client.get(response_dict['task'])
+ distribution_href = dist_task['created_resources'][0]
+ distribution = self.client.get(distribution_href)
self.addCleanup(self.client.delete, distribution['_href'])
last_version_href = get_versions(repo)[-1]['_href']
diff --git a/pulpcore/tests/functional/api/using_plugin/test_content_promotion.py b/pulpcore/tests/functional/api/using_plugin/test_content_promotion.py
--- a/pulpcore/tests/functional/api/using_plugin/test_content_promotion.py
+++ b/pulpcore/tests/functional/api/using_plugin/test_content_promotion.py
@@ -74,7 +74,10 @@ def test_all(self):
for _ in range(2):
body = gen_distribution()
body['publication'] = publication['_href']
- distribution = client.post(DISTRIBUTION_PATH, body)
+ response_dict = client.post(DISTRIBUTION_PATH, body)
+ dist_task = client.get(response_dict['task'])
+ distribution_href = dist_task['created_resources'][0]
+ distribution = client.get(distribution_href)
distributions.append(distribution)
self.addCleanup(client.delete, distribution['_href'])
diff --git a/pulpcore/tests/functional/api/using_plugin/test_crd_publications.py b/pulpcore/tests/functional/api/using_plugin/test_crd_publications.py
--- a/pulpcore/tests/functional/api/using_plugin/test_crd_publications.py
+++ b/pulpcore/tests/functional/api/using_plugin/test_crd_publications.py
@@ -140,7 +140,10 @@ def test_05_read_publications(self):
"""Read a publication by its distribution."""
body = gen_distribution()
body['publication'] = self.publication['_href']
- distribution = self.client.post(DISTRIBUTION_PATH, body)
+ response_dict = self.client.post(DISTRIBUTION_PATH, body)
+ dist_task = self.client.get(response_dict['task'])
+ distribution_href = dist_task['created_resources'][0]
+ distribution = self.client.get(distribution_href)
self.addCleanup(self.client.delete, distribution['_href'])
self.publication.update(self.client.get(self.publication['_href']))
| Update CI files from 3.20 branch
[noissue]
| 2019-02-28T20:47:48 |
|
pulp/pulpcore | 12 | pulp__pulpcore-12 | [
"4415"
] | e205eb822a4d1e584558b8a1eb0654c1680a325b | diff --git a/pulpcore/app/apps.py b/pulpcore/app/apps.py
--- a/pulpcore/app/apps.py
+++ b/pulpcore/app/apps.py
@@ -137,4 +137,4 @@ class PulpAppConfig(PulpPluginAppConfig):
# The app label to be used when creating tables, registering models, referencing this app
# with manage.py, etc. This cannot contain a dot and must not conflict with the name of a
# package containing a Django app.
- label = 'pulp_app'
+ label = 'core'
diff --git a/pulpcore/app/migrations/0001_initial.py b/pulpcore/app/migrations/0001_initial.py
--- a/pulpcore/app/migrations/0001_initial.py
+++ b/pulpcore/app/migrations/0001_initial.py
@@ -1,4 +1,4 @@
-# Generated by Django 2.1.7 on 2019-03-07 15:19
+# Generated by Django 2.1.7 on 2019-03-11 09:42
from django.conf import settings
import django.core.validators
@@ -63,8 +63,8 @@ class Migration(migrations.Migration):
('_created', models.DateTimeField(auto_now_add=True)),
('_last_updated', models.DateTimeField(auto_now=True, null=True)),
('relative_path', models.CharField(max_length=255)),
- ('artifact', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='pulp_app.Artifact')),
- ('content', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pulp_app.Content')),
+ ('artifact', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='core.Artifact')),
+ ('content', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Content')),
],
bases=(models.Model, pulpcore.app.models.content.QueryMixin),
),
@@ -103,7 +103,7 @@ class Migration(migrations.Migration):
('_last_updated', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(db_index=True, max_length=255, unique=True)),
('base_path', models.CharField(max_length=255, unique=True)),
- ('content_guard', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='distributions', to='pulp_app.ContentGuard')),
+ ('content_guard', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='distributions', to='core.ContentGuard')),
],
options={
'default_related_name': 'distributions',
@@ -159,8 +159,8 @@ class Migration(migrations.Migration):
('_created', models.DateTimeField(auto_now_add=True)),
('_last_updated', models.DateTimeField(auto_now=True, null=True)),
('relative_path', models.CharField(max_length=255)),
- ('content_artifact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='published_artifact', to='pulp_app.ContentArtifact')),
- ('publication', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='published_artifact', to='pulp_app.Publication')),
+ ('content_artifact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='published_artifact', to='core.ContentArtifact')),
+ ('publication', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='published_artifact', to='core.Publication')),
],
options={
'default_related_name': 'published_artifact',
@@ -174,7 +174,7 @@ class Migration(migrations.Migration):
('_last_updated', models.DateTimeField(auto_now=True, null=True)),
('relative_path', models.CharField(max_length=255)),
('file', models.FileField(max_length=255, upload_to=pulpcore.app.models.publication.PublishedMetadata._storage_path)),
- ('publication', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='published_metadata', to='pulp_app.Publication')),
+ ('publication', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='published_metadata', to='core.Publication')),
],
options={
'default_related_name': 'published_metadata',
@@ -231,8 +231,8 @@ class Migration(migrations.Migration):
('sha256', models.CharField(max_length=64, null=True)),
('sha384', models.CharField(max_length=96, null=True)),
('sha512', models.CharField(max_length=128, null=True)),
- ('content_artifact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pulp_app.ContentArtifact')),
- ('remote', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pulp_app.Remote')),
+ ('content_artifact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.ContentArtifact')),
+ ('remote', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Remote')),
],
bases=(models.Model, pulpcore.app.models.content.QueryMixin),
),
@@ -256,8 +256,8 @@ class Migration(migrations.Migration):
('_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('_created', models.DateTimeField(auto_now_add=True)),
('_last_updated', models.DateTimeField(auto_now=True, null=True)),
- ('content', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='version_memberships', to='pulp_app.Content')),
- ('repository', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pulp_app.Repository')),
+ ('content', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='version_memberships', to='core.Content')),
+ ('repository', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Repository')),
],
),
migrations.CreateModel(
@@ -268,8 +268,8 @@ class Migration(migrations.Migration):
('_last_updated', models.DateTimeField(auto_now=True, null=True)),
('number', models.PositiveIntegerField(db_index=True)),
('complete', models.BooleanField(db_index=True, default=False)),
- ('base_version', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='versions', to='pulp_app.RepositoryVersion')),
- ('repository', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='versions', to='pulp_app.Repository')),
+ ('base_version', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='versions', to='core.RepositoryVersion')),
+ ('repository', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='versions', to='core.Repository')),
],
options={
'ordering': ('number',),
@@ -284,7 +284,7 @@ class Migration(migrations.Migration):
('count_type', models.CharField(choices=[('A', 'added'), ('P', 'present'), ('R', 'removed')], max_length=1)),
('content_type', models.TextField()),
('count', models.IntegerField()),
- ('repository_version', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='counts', to='pulp_app.RepositoryVersion')),
+ ('repository_version', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='counts', to='core.RepositoryVersion')),
],
),
migrations.CreateModel(
@@ -312,7 +312,7 @@ class Migration(migrations.Migration):
('finished_at', models.DateTimeField(null=True)),
('non_fatal_errors', pulpcore.app.fields.JSONField(default=list)),
('error', pulpcore.app.fields.JSONField(null=True)),
- ('parent', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='spawned_tasks', to='pulp_app.Task')),
+ ('parent', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='spawned_tasks', to='core.Task')),
],
options={
'abstract': False,
@@ -324,8 +324,8 @@ class Migration(migrations.Migration):
('_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('_created', models.DateTimeField(auto_now_add=True)),
('_last_updated', models.DateTimeField(auto_now=True, null=True)),
- ('resource', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pulp_app.ReservedResource')),
- ('task', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='pulp_app.Task')),
+ ('resource', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.ReservedResource')),
+ ('task', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='core.Task')),
],
options={
'abstract': False,
@@ -365,72 +365,72 @@ class Migration(migrations.Migration):
migrations.AddField(
model_name='task',
name='worker',
- field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='tasks', to='pulp_app.Worker'),
+ field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='tasks', to='core.Worker'),
),
migrations.AddField(
model_name='reservedresource',
name='tasks',
- field=models.ManyToManyField(related_name='reserved_resources', through='pulp_app.TaskReservedResource', to='pulp_app.Task'),
+ field=models.ManyToManyField(related_name='reserved_resources', through='core.TaskReservedResource', to='core.Task'),
),
migrations.AddField(
model_name='reservedresource',
name='worker',
- field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reservations', to='pulp_app.Worker'),
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reservations', to='core.Worker'),
),
migrations.AddField(
model_name='repositorycontent',
name='version_added',
- field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='added_memberships', to='pulp_app.RepositoryVersion'),
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='added_memberships', to='core.RepositoryVersion'),
),
migrations.AddField(
model_name='repositorycontent',
name='version_removed',
- field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='removed_memberships', to='pulp_app.RepositoryVersion'),
+ field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='removed_memberships', to='core.RepositoryVersion'),
),
migrations.AddField(
model_name='repository',
name='content',
- field=models.ManyToManyField(related_name='repositories', through='pulp_app.RepositoryContent', to='pulp_app.Content'),
+ field=models.ManyToManyField(related_name='repositories', through='core.RepositoryContent', to='core.Content'),
),
migrations.AddField(
model_name='publication',
name='publisher',
- field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='pulp_app.Publisher'),
+ field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Publisher'),
),
migrations.AddField(
model_name='publication',
name='repository_version',
- field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pulp_app.RepositoryVersion'),
+ field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.RepositoryVersion'),
),
migrations.AddField(
model_name='progressreport',
name='task',
- field=models.ForeignKey(default=pulpcore.app.models.task.Task.current, on_delete=django.db.models.deletion.CASCADE, related_name='progress_reports', to='pulp_app.Task'),
+ field=models.ForeignKey(default=pulpcore.app.models.task.Task.current, on_delete=django.db.models.deletion.CASCADE, related_name='progress_reports', to='core.Task'),
),
migrations.AddField(
model_name='distribution',
name='publication',
- field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='distributions', to='pulp_app.Publication'),
+ field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='distributions', to='core.Publication'),
),
migrations.AddField(
model_name='distribution',
name='publisher',
- field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='distributions', to='pulp_app.Publisher'),
+ field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='distributions', to='core.Publisher'),
),
migrations.AddField(
model_name='distribution',
name='repository',
- field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='distributions', to='pulp_app.Repository'),
+ field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='distributions', to='core.Repository'),
),
migrations.AddField(
model_name='createdresource',
name='task',
- field=models.ForeignKey(default=pulpcore.app.models.task.Task.current, on_delete=django.db.models.deletion.CASCADE, related_name='created_resources', to='pulp_app.Task'),
+ field=models.ForeignKey(default=pulpcore.app.models.task.Task.current, on_delete=django.db.models.deletion.CASCADE, related_name='created_resources', to='core.Task'),
),
migrations.AddField(
model_name='content',
name='_artifacts',
- field=models.ManyToManyField(through='pulp_app.ContentArtifact', to='pulp_app.Artifact'),
+ field=models.ManyToManyField(through='core.ContentArtifact', to='core.Artifact'),
),
migrations.CreateModel(
name='ProgressBar',
@@ -440,7 +440,7 @@ class Migration(migrations.Migration):
'proxy': True,
'indexes': [],
},
- bases=('pulp_app.progressreport',),
+ bases=('core.progressreport',),
),
migrations.CreateModel(
name='ProgressSpinner',
@@ -450,7 +450,7 @@ class Migration(migrations.Migration):
'proxy': True,
'indexes': [],
},
- bases=('pulp_app.progressreport',),
+ bases=('core.progressreport',),
),
migrations.AlterUniqueTogether(
name='repositoryversion',
@@ -458,7 +458,7 @@ class Migration(migrations.Migration):
),
migrations.AlterUniqueTogether(
name='repositorycontent',
- unique_together={('repository', 'content', 'version_added'), ('repository', 'content', 'version_removed')},
+ unique_together={('repository', 'content', 'version_removed'), ('repository', 'content', 'version_added')},
),
migrations.AlterUniqueTogether(
name='remoteartifact',
@@ -466,7 +466,7 @@ class Migration(migrations.Migration):
),
migrations.AlterUniqueTogether(
name='publishedmetadata',
- unique_together={('publication', 'file'), ('publication', 'relative_path')},
+ unique_together={('publication', 'relative_path'), ('publication', 'file')},
),
migrations.AlterUniqueTogether(
name='publishedartifact',
| [PR #3886/596a72e4 backport][3.16] Taught PulpImport to recover from previously-failed attempt.
**This is a backport of PR #3886 as merged into main (596a72e4a12541304dcb8e23f7e1d3c5d8a58390).**
If an expected-reassembled-filename exists, ignore chunk-info and proceed with that file.
Test-coverage will be added in pulp_file once this is in a released version of pulpcore.
fixes #3737.
[nocoverage]
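A hypothetical sketch of the described recovery behaviour (illustrative only; the function name and file handling are assumptions, not pulpcore code):
```
import os

def reassemble(chunk_paths, target_path):
    """Reuse an already-reassembled file left behind by a previously-failed attempt."""
    if os.path.exists(target_path):
        # The expected reassembled file already exists; ignore chunk-info
        # and proceed with that file.
        return target_path
    with open(target_path, "wb") as out:
        for chunk in chunk_paths:
            with open(chunk, "rb") as part:
                out.write(part.read())
    return target_path
```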
| 2019-03-01T12:33:41 |
||
pulp/pulpcore | 13 | pulp__pulpcore-13 | [
"4188",
"4188"
] | 795c8dbd1d561e5dc5da5ddc9fed939814267019 | diff --git a/containers/images/pulp-api/container-assets/wait_on_postgres.py b/containers/images/pulp-api/container-assets/wait_on_postgres.py
new file mode 100755
--- /dev/null
+++ b/containers/images/pulp-api/container-assets/wait_on_postgres.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+
+import sys
+import socket
+import time
+import os
+
+if __name__ == '__main__':
+
+ postgres_is_alive = False
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ tries = 0
+ print("Waiting on postgresql to start...")
+ while not postgres_is_alive and tries < 100:
+ tries += 1
+ try:
+ print("Checking postgres host %s" % os.environ['POSTGRES_SERVICE_HOST'])
+ s.connect((os.environ['POSTGRES_SERVICE_HOST'], 5432))
+ except socket.error:
+ time.sleep(3)
+ else:
+ postgres_is_alive = True
+
+ if postgres_is_alive:
+ print("Postgres started!")
+ sys.exit(0)
+ else:
+ print("Unable to reach postgres on port 5432")
+ sys.exit(1)
| [PR #4182/04738d77 backport][3.23] Made reclaim space task more tolerant in the face of artifact shared …
**This is a backport of PR #4182 as merged into main (04738d77a42d93f386294f0857230eed1983ba61).**
…between two content units.
closes #3610
| 2019-03-01T14:54:14 |
||
pulp/pulpcore | 17 | pulp__pulpcore-17 | [
"4391"
] | 14f4be9885a866171d48e12ae1dea2ecf670d466 | diff --git a/pulpcore/app/serializers/fields.py b/pulpcore/app/serializers/fields.py
--- a/pulpcore/app/serializers/fields.py
+++ b/pulpcore/app/serializers/fields.py
@@ -200,8 +200,12 @@ class BaseURLField(serializers.CharField):
"""
def to_representation(self, value):
- if settings.CONTENT_HOST:
- host = settings.CONTENT_HOST
- else:
- host = self.context['request'].get_host()
- return ''.join([host, settings.CONTENT_PATH_PREFIX, value])
+ base_path = value
+ host = settings.CONTENT_HOST or ''
+ prefix = settings.CONTENT_PATH_PREFIX or ''
+ return '/'.join(
+ (
+ host.strip('/'),
+ prefix.strip('/'),
+ base_path.lstrip('/')
+ ))
| Update CI files for branch 3.21
[noissue]
| 2019-03-04T17:56:51 |
||
pulp/pulpcore | 26 | pulp__pulpcore-26 | [
"4528"
] | 01f8cc86b6d0beb14910cfe732c24a0891a4d3d4 | diff --git a/pulpcore/app/migrations/0002_distribution_remote.py b/pulpcore/app/migrations/0002_distribution_remote.py
--- a/pulpcore/app/migrations/0002_distribution_remote.py
+++ b/pulpcore/app/migrations/0002_distribution_remote.py
@@ -7,13 +7,13 @@
class Migration(migrations.Migration):
dependencies = [
- ('pulp_app', '0001_initial'),
+ ('core', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='distribution',
name='remote',
- field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='distributions', to='pulp_app.Remote'),
+ field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='distributions', to='core.Remote'),
),
]
| 3.23 - Faster exports option
| 2019-03-11T14:55:50 |
||
pulp/pulpcore | 63 | pulp__pulpcore-63 | [
"4615"
] | 98bc7b2de12f3af71642147170e8c084eaf9c05d | diff --git a/pulpcore/app/serializers/repository.py b/pulpcore/app/serializers/repository.py
--- a/pulpcore/app/serializers/repository.py
+++ b/pulpcore/app/serializers/repository.py
@@ -267,10 +267,14 @@ def get_content_summary(self, obj):
The summary of contained content.
Returns:
- dict: {'added': {<_type>: {'count': <count>, 'href': <href>},
- 'removed': {<_type>: {'count': <count>, 'href': <href>},
- 'present': {<_type>: {'count': <count>, 'href': <href>},
- }
+ dict: The dictionary has the following format.::
+
+ {
+ 'added': {<_type>: {'count': <count>, 'href': <href>},
+ 'removed': {<_type>: {'count': <count>, 'href': <href>},
+ 'present': {<_type>: {'count': <count>, 'href': <href>},
+ }
+
"""
to_return = {'added': {}, 'removed': {}, 'present': {}}
for count_detail in obj.counts.all():
| [PR #4609/4c4d6aa0 backport][3.28] Works around a sync-time performance regression on PG12
**This is a backport of PR #4609 as merged into main (4c4d6aa0745b4938e2aeee8ffc28a0b0392cc9ef).**
closes #4591
We still don't really "know":
* Why this only seems to happen on PG12
* Why the original query change (https://github.com/pulp/pulpcore/pull/4275/files) triggered this
* Why it only seems to happen on capsule syncs (this is most mysterious)
| 2019-04-01T21:07:41 |
||
pulp/pulpcore | 94 | pulp__pulpcore-94 | [
"4678"
] | 7e43d21dbdd8d4dae603046687e21259eb0bbde3 | diff --git a/pulpcore/app/migrations/0001_initial.py b/pulpcore/app/migrations/0001_initial.py
--- a/pulpcore/app/migrations/0001_initial.py
+++ b/pulpcore/app/migrations/0001_initial.py
@@ -1,4 +1,4 @@
-# Generated by Django 2.1.7 on 2019-03-18 19:56
+# Generated by Django 2.2 on 2019-04-22 17:55
from django.conf import settings
import django.core.validators
@@ -19,8 +19,8 @@ class Migration(migrations.Migration):
initial = True
dependencies = [
- migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
+ migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
@@ -66,6 +66,9 @@ class Migration(migrations.Migration):
('artifact', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='core.Artifact')),
('content', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Content')),
],
+ options={
+ 'unique_together': {('content', 'relative_path')},
+ },
bases=(models.Model, pulpcore.app.models.content.QueryMixin),
),
migrations.CreateModel(
@@ -82,33 +85,6 @@ class Migration(migrations.Migration):
'abstract': False,
},
),
- migrations.CreateModel(
- name='CreatedResource',
- fields=[
- ('_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
- ('_created', models.DateTimeField(auto_now_add=True)),
- ('_last_updated', models.DateTimeField(auto_now=True, null=True)),
- ('object_id', models.UUIDField()),
- ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
- ],
- options={
- 'abstract': False,
- },
- ),
- migrations.CreateModel(
- name='Distribution',
- fields=[
- ('_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
- ('_created', models.DateTimeField(auto_now_add=True)),
- ('_last_updated', models.DateTimeField(auto_now=True, null=True)),
- ('name', models.CharField(db_index=True, max_length=255, unique=True)),
- ('base_path', models.CharField(max_length=255, unique=True)),
- ('content_guard', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='_distributions', to='core.ContentGuard')),
- ],
- options={
- 'default_related_name': '_distributions',
- },
- ),
migrations.CreateModel(
name='Exporter',
fields=[
@@ -123,63 +99,6 @@ class Migration(migrations.Migration):
'default_related_name': 'exporters',
},
),
- migrations.CreateModel(
- name='ProgressReport',
- fields=[
- ('_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
- ('_created', models.DateTimeField(auto_now_add=True)),
- ('_last_updated', models.DateTimeField(auto_now=True, null=True)),
- ('message', models.TextField()),
- ('state', models.TextField(choices=[('waiting', 'Waiting'), ('skipped', 'Skipped'), ('running', 'Running'), ('completed', 'Completed'), ('failed', 'Failed'), ('canceled', 'Canceled')], default='waiting')),
- ('total', models.IntegerField(null=True)),
- ('done', models.IntegerField(default=0)),
- ('suffix', models.TextField(default='')),
- ],
- options={
- 'abstract': False,
- },
- ),
- migrations.CreateModel(
- name='Publication',
- fields=[
- ('_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
- ('_created', models.DateTimeField(auto_now_add=True)),
- ('_last_updated', models.DateTimeField(auto_now=True, null=True)),
- ('complete', models.BooleanField(db_index=True, default=False)),
- ('pass_through', models.BooleanField(default=False)),
- ],
- options={
- 'abstract': False,
- },
- ),
- migrations.CreateModel(
- name='PublishedArtifact',
- fields=[
- ('_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
- ('_created', models.DateTimeField(auto_now_add=True)),
- ('_last_updated', models.DateTimeField(auto_now=True, null=True)),
- ('relative_path', models.CharField(max_length=255)),
- ('content_artifact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='published_artifact', to='core.ContentArtifact')),
- ('publication', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='published_artifact', to='core.Publication')),
- ],
- options={
- 'default_related_name': 'published_artifact',
- },
- ),
- migrations.CreateModel(
- name='PublishedMetadata',
- fields=[
- ('_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
- ('_created', models.DateTimeField(auto_now_add=True)),
- ('_last_updated', models.DateTimeField(auto_now=True, null=True)),
- ('relative_path', models.CharField(max_length=255)),
- ('file', models.FileField(max_length=255, upload_to=pulpcore.app.models.publication.PublishedMetadata._storage_path)),
- ('publication', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='published_metadata', to='core.Publication')),
- ],
- options={
- 'default_related_name': 'published_metadata',
- },
- ),
migrations.CreateModel(
name='Publisher',
fields=[
@@ -217,25 +136,6 @@ class Migration(migrations.Migration):
'default_related_name': 'remotes',
},
),
- migrations.CreateModel(
- name='RemoteArtifact',
- fields=[
- ('_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
- ('_created', models.DateTimeField(auto_now_add=True)),
- ('_last_updated', models.DateTimeField(auto_now=True, null=True)),
- ('url', models.TextField(validators=[django.core.validators.URLValidator])),
- ('size', models.IntegerField(null=True)),
- ('md5', models.CharField(max_length=32, null=True)),
- ('sha1', models.CharField(max_length=40, null=True)),
- ('sha224', models.CharField(max_length=56, null=True)),
- ('sha256', models.CharField(max_length=64, null=True)),
- ('sha384', models.CharField(max_length=96, null=True)),
- ('sha512', models.CharField(max_length=128, null=True)),
- ('content_artifact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.ContentArtifact')),
- ('remote', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Remote')),
- ],
- bases=(models.Model, pulpcore.app.models.content.QueryMixin),
- ),
migrations.CreateModel(
name='Repository',
fields=[
@@ -250,16 +150,6 @@ class Migration(migrations.Migration):
'verbose_name_plural': 'repositories',
},
),
- migrations.CreateModel(
- name='RepositoryContent',
- fields=[
- ('_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
- ('_created', models.DateTimeField(auto_now_add=True)),
- ('_last_updated', models.DateTimeField(auto_now=True, null=True)),
- ('content', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='version_memberships', to='core.Content')),
- ('repository', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Repository')),
- ],
- ),
migrations.CreateModel(
name='RepositoryVersion',
fields=[
@@ -275,18 +165,9 @@ class Migration(migrations.Migration):
'ordering': ('number',),
'get_latest_by': 'number',
'default_related_name': 'versions',
+ 'unique_together': {('repository', 'number')},
},
),
- migrations.CreateModel(
- name='RepositoryVersionContentDetails',
- fields=[
- ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
- ('count_type', models.CharField(choices=[('A', 'added'), ('P', 'present'), ('R', 'removed')], max_length=1)),
- ('content_type', models.TextField()),
- ('count', models.IntegerField()),
- ('repository_version', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='counts', to='core.RepositoryVersion')),
- ],
- ),
migrations.CreateModel(
name='ReservedResource',
fields=[
@@ -318,13 +199,15 @@ class Migration(migrations.Migration):
},
),
migrations.CreateModel(
- name='TaskReservedResource',
+ name='Worker',
fields=[
('_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('_created', models.DateTimeField(auto_now_add=True)),
('_last_updated', models.DateTimeField(auto_now=True, null=True)),
- ('resource', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.ReservedResource')),
- ('task', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='core.Task')),
+ ('name', models.CharField(db_index=True, max_length=255, unique=True)),
+ ('last_heartbeat', models.DateTimeField(auto_now=True)),
+ ('gracefully_stopped', models.BooleanField(default=False)),
+ ('cleaned_up', models.BooleanField(default=False)),
],
options={
'abstract': False,
@@ -347,15 +230,13 @@ class Migration(migrations.Migration):
},
),
migrations.CreateModel(
- name='Worker',
+ name='TaskReservedResource',
fields=[
('_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('_created', models.DateTimeField(auto_now_add=True)),
('_last_updated', models.DateTimeField(auto_now=True, null=True)),
- ('name', models.CharField(db_index=True, max_length=255, unique=True)),
- ('last_heartbeat', models.DateTimeField(auto_now=True)),
- ('gracefully_stopped', models.BooleanField(default=False)),
- ('cleaned_up', models.BooleanField(default=False)),
+ ('resource', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.ReservedResource')),
+ ('task', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='core.Task')),
],
options={
'abstract': False,
@@ -376,60 +257,100 @@ class Migration(migrations.Migration):
name='worker',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reservations', to='core.Worker'),
),
- migrations.AddField(
- model_name='repositorycontent',
- name='version_added',
- field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='added_memberships', to='core.RepositoryVersion'),
+ migrations.CreateModel(
+ name='RepositoryVersionContentDetails',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('count_type', models.CharField(choices=[('A', 'added'), ('P', 'present'), ('R', 'removed')], max_length=1)),
+ ('content_type', models.TextField()),
+ ('count', models.IntegerField()),
+ ('repository_version', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='counts', to='core.RepositoryVersion')),
+ ],
),
- migrations.AddField(
- model_name='repositorycontent',
- name='version_removed',
- field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='removed_memberships', to='core.RepositoryVersion'),
+ migrations.CreateModel(
+ name='RepositoryContent',
+ fields=[
+ ('_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
+ ('_created', models.DateTimeField(auto_now_add=True)),
+ ('_last_updated', models.DateTimeField(auto_now=True, null=True)),
+ ('content', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='version_memberships', to='core.Content')),
+ ('repository', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Repository')),
+ ('version_added', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='added_memberships', to='core.RepositoryVersion')),
+ ('version_removed', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='removed_memberships', to='core.RepositoryVersion')),
+ ],
+ options={
+ 'unique_together': {('repository', 'content', 'version_added'), ('repository', 'content', 'version_removed')},
+ },
),
migrations.AddField(
model_name='repository',
name='content',
field=models.ManyToManyField(related_name='repositories', through='core.RepositoryContent', to='core.Content'),
),
- migrations.AddField(
- model_name='publication',
- name='publisher',
- field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Publisher'),
- ),
- migrations.AddField(
- model_name='publication',
- name='repository_version',
- field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.RepositoryVersion'),
- ),
- migrations.AddField(
- model_name='progressreport',
- name='task',
- field=models.ForeignKey(default=pulpcore.app.models.task.Task.current, on_delete=django.db.models.deletion.CASCADE, related_name='progress_reports', to='core.Task'),
- ),
- migrations.AddField(
- model_name='distribution',
- name='publication',
- field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='_distributions', to='core.Publication'),
- ),
- migrations.AddField(
- model_name='distribution',
- name='publisher',
- field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='_distributions', to='core.Publisher'),
+ migrations.CreateModel(
+ name='Publication',
+ fields=[
+ ('_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
+ ('_created', models.DateTimeField(auto_now_add=True)),
+ ('_last_updated', models.DateTimeField(auto_now=True, null=True)),
+ ('_type', models.TextField(default=None)),
+ ('complete', models.BooleanField(db_index=True, default=False)),
+ ('pass_through', models.BooleanField(default=False)),
+ ('publisher', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Publisher')),
+ ('repository_version', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.RepositoryVersion')),
+ ],
+ options={
+ 'abstract': False,
+ },
),
- migrations.AddField(
- model_name='distribution',
- name='remote',
- field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='_distributions', to='core.Remote'),
+ migrations.CreateModel(
+ name='ProgressReport',
+ fields=[
+ ('_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
+ ('_created', models.DateTimeField(auto_now_add=True)),
+ ('_last_updated', models.DateTimeField(auto_now=True, null=True)),
+ ('message', models.TextField()),
+ ('state', models.TextField(choices=[('waiting', 'Waiting'), ('skipped', 'Skipped'), ('running', 'Running'), ('completed', 'Completed'), ('failed', 'Failed'), ('canceled', 'Canceled')], default='waiting')),
+ ('total', models.IntegerField(null=True)),
+ ('done', models.IntegerField(default=0)),
+ ('suffix', models.TextField(default='')),
+ ('task', models.ForeignKey(default=pulpcore.app.models.task.Task.current, on_delete=django.db.models.deletion.CASCADE, related_name='progress_reports', to='core.Task')),
+ ],
+ options={
+ 'abstract': False,
+ },
),
- migrations.AddField(
- model_name='distribution',
- name='repository',
- field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='_distributions', to='core.Repository'),
+ migrations.CreateModel(
+ name='Distribution',
+ fields=[
+ ('_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
+ ('_created', models.DateTimeField(auto_now_add=True)),
+ ('_last_updated', models.DateTimeField(auto_now=True, null=True)),
+ ('name', models.CharField(db_index=True, max_length=255, unique=True)),
+ ('base_path', models.CharField(max_length=255, unique=True)),
+ ('content_guard', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='_distributions', to='core.ContentGuard')),
+ ('publication', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='_distributions', to='core.Publication')),
+ ('publisher', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='_distributions', to='core.Publisher')),
+ ('remote', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='_distributions', to='core.Remote')),
+ ('repository', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='_distributions', to='core.Repository')),
+ ],
+ options={
+ 'default_related_name': '_distributions',
+ },
),
- migrations.AddField(
- model_name='createdresource',
- name='task',
- field=models.ForeignKey(default=pulpcore.app.models.task.Task.current, on_delete=django.db.models.deletion.CASCADE, related_name='created_resources', to='core.Task'),
+ migrations.CreateModel(
+ name='CreatedResource',
+ fields=[
+ ('_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
+ ('_created', models.DateTimeField(auto_now_add=True)),
+ ('_last_updated', models.DateTimeField(auto_now=True, null=True)),
+ ('object_id', models.UUIDField()),
+ ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
+ ('task', models.ForeignKey(default=pulpcore.app.models.task.Task.current, on_delete=django.db.models.deletion.CASCADE, related_name='created_resources', to='core.Task')),
+ ],
+ options={
+ 'abstract': False,
+ },
),
migrations.AddField(
model_name='content',
@@ -443,6 +364,7 @@ class Migration(migrations.Migration):
options={
'proxy': True,
'indexes': [],
+ 'constraints': [],
},
bases=('core.progressreport',),
),
@@ -453,31 +375,60 @@ class Migration(migrations.Migration):
options={
'proxy': True,
'indexes': [],
+ 'constraints': [],
},
bases=('core.progressreport',),
),
- migrations.AlterUniqueTogether(
- name='repositoryversion',
- unique_together={('repository', 'number')},
- ),
- migrations.AlterUniqueTogether(
- name='repositorycontent',
- unique_together={('repository', 'content', 'version_added'), ('repository', 'content', 'version_removed')},
- ),
- migrations.AlterUniqueTogether(
- name='remoteartifact',
- unique_together={('content_artifact', 'remote')},
- ),
- migrations.AlterUniqueTogether(
- name='publishedmetadata',
- unique_together={('publication', 'relative_path'), ('publication', 'file')},
+ migrations.CreateModel(
+ name='RemoteArtifact',
+ fields=[
+ ('_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
+ ('_created', models.DateTimeField(auto_now_add=True)),
+ ('_last_updated', models.DateTimeField(auto_now=True, null=True)),
+ ('url', models.TextField(validators=[django.core.validators.URLValidator])),
+ ('size', models.IntegerField(null=True)),
+ ('md5', models.CharField(max_length=32, null=True)),
+ ('sha1', models.CharField(max_length=40, null=True)),
+ ('sha224', models.CharField(max_length=56, null=True)),
+ ('sha256', models.CharField(max_length=64, null=True)),
+ ('sha384', models.CharField(max_length=96, null=True)),
+ ('sha512', models.CharField(max_length=128, null=True)),
+ ('content_artifact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.ContentArtifact')),
+ ('remote', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Remote')),
+ ],
+ options={
+ 'unique_together': {('content_artifact', 'remote')},
+ },
+ bases=(models.Model, pulpcore.app.models.content.QueryMixin),
),
- migrations.AlterUniqueTogether(
- name='publishedartifact',
- unique_together={('publication', 'content_artifact'), ('publication', 'relative_path')},
+ migrations.CreateModel(
+ name='PublishedMetadata',
+ fields=[
+ ('_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
+ ('_created', models.DateTimeField(auto_now_add=True)),
+ ('_last_updated', models.DateTimeField(auto_now=True, null=True)),
+ ('relative_path', models.CharField(max_length=255)),
+ ('file', models.FileField(max_length=255, upload_to=pulpcore.app.models.publication.PublishedMetadata._storage_path)),
+ ('publication', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='published_metadata', to='core.Publication')),
+ ],
+ options={
+ 'default_related_name': 'published_metadata',
+ 'unique_together': {('publication', 'relative_path'), ('publication', 'file')},
+ },
),
- migrations.AlterUniqueTogether(
- name='contentartifact',
- unique_together={('content', 'relative_path')},
+ migrations.CreateModel(
+ name='PublishedArtifact',
+ fields=[
+ ('_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
+ ('_created', models.DateTimeField(auto_now_add=True)),
+ ('_last_updated', models.DateTimeField(auto_now=True, null=True)),
+ ('relative_path', models.CharField(max_length=255)),
+ ('content_artifact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='published_artifact', to='core.ContentArtifact')),
+ ('publication', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='published_artifact', to='core.Publication')),
+ ],
+ options={
+ 'default_related_name': 'published_artifact',
+ 'unique_together': {('publication', 'relative_path'), ('publication', 'content_artifact')},
+ },
),
]
diff --git a/pulpcore/app/models/publication.py b/pulpcore/app/models/publication.py
--- a/pulpcore/app/models/publication.py
+++ b/pulpcore/app/models/publication.py
@@ -6,7 +6,7 @@
from .task import CreatedResource
-class Publication(Model):
+class Publication(MasterModel):
"""
A publication contains metadata and artifacts associated with content
contained within a RepositoryVersion.
@@ -40,6 +40,7 @@ class Publication(Model):
>>> ...
>>>
"""
+ TYPE = 'publication'
complete = models.BooleanField(db_index=True, default=False)
pass_through = models.BooleanField(default=False)
diff --git a/pulpcore/app/serializers/publication.py b/pulpcore/app/serializers/publication.py
--- a/pulpcore/app/serializers/publication.py
+++ b/pulpcore/app/serializers/publication.py
@@ -19,10 +19,8 @@
)
-class PublicationSerializer(ModelSerializer):
- _href = IdentityField(
- view_name='publications-detail'
- )
+class PublicationSerializer(MasterModelSerializer):
+ _href = DetailIdentityField()
publisher = DetailRelatedField(
help_text=_('The publisher that created this publication.'),
queryset=models.Publisher.objects.all()
@@ -42,8 +40,9 @@ class PublicationSerializer(ModelSerializer):
)
class Meta:
+ abstract = True
model = models.Publication
- fields = ModelSerializer.Meta.fields + (
+ fields = MasterModelSerializer.Meta.fields + (
'publisher',
'_distributions',
'repository_version',
@@ -96,11 +95,10 @@ class BaseDistributionSerializer(ModelSerializer):
queryset=models.ContentGuard.objects.all(),
allow_null=True
)
- publication = RelatedField(
+ publication = DetailRelatedField(
required=False,
help_text=_('The publication being served as defined by this distribution'),
queryset=models.Publication.objects.exclude(complete=False),
- view_name='publications-detail',
allow_null=True
)
repository = RelatedField(
| diff --git a/pulpcore/tests/functional/api/using_plugin/constants.py b/pulpcore/tests/functional/api/using_plugin/constants.py
--- a/pulpcore/tests/functional/api/using_plugin/constants.py
+++ b/pulpcore/tests/functional/api/using_plugin/constants.py
@@ -4,6 +4,7 @@
from pulp_smash.constants import PULP_FIXTURES_BASE_URL
from pulp_smash.pulp3.constants import (
+ BASE_PUBLICATION_PATH,
BASE_PUBLISHER_PATH,
BASE_REMOTE_PATH,
CONTENT_PATH
@@ -18,6 +19,8 @@
FILE_PUBLISHER_PATH = urljoin(BASE_PUBLISHER_PATH, 'file/file/')
+FILE_PUBLICATION_PATH = urljoin(BASE_PUBLICATION_PATH, 'file/file/')
+
FILE_FIXTURE_URL = urljoin(PULP_FIXTURES_BASE_URL, 'file/')
"""The URL to a file repository."""
diff --git a/pulpcore/tests/functional/api/using_plugin/test_crd_publications.py b/pulpcore/tests/functional/api/using_plugin/test_crd_publications.py
--- a/pulpcore/tests/functional/api/using_plugin/test_crd_publications.py
+++ b/pulpcore/tests/functional/api/using_plugin/test_crd_publications.py
@@ -8,7 +8,6 @@
from pulp_smash import api, config
from pulp_smash.pulp3.constants import (
DISTRIBUTION_PATH,
- PUBLICATIONS_PATH,
REPO_PATH
)
from pulp_smash.pulp3.utils import (
@@ -21,6 +20,7 @@
from pulpcore.tests.functional.api.utils import parse_date_from_string
from pulpcore.tests.functional.api.using_plugin.constants import (
+ FILE_PUBLICATION_PATH,
FILE_PUBLISHER_PATH,
FILE_REMOTE_PATH
)
@@ -105,7 +105,7 @@ def test_02_read_publication_without_specific_fields(self):
@skip_if(bool, 'publication', False)
def test_02_read_publications(self):
"""Read a publication by its repository version."""
- publications = self.client.get(PUBLICATIONS_PATH, params={
+ publications = self.client.get(FILE_PUBLICATION_PATH, params={
'repository_version': self.repo['_href']
})
self.assertEqual(len(publications), 1, publications)
@@ -116,7 +116,7 @@ def test_02_read_publications(self):
@skip_if(bool, 'publication', False)
def test_03_read_publications(self):
"""Read a publication by its publisher."""
- publications = self.client.get(PUBLICATIONS_PATH, params={
+ publications = self.client.get(FILE_PUBLICATION_PATH, params={
'publisher': self.publisher['_href']
})
self.assertEqual(len(publications), 1, publications)
@@ -127,7 +127,7 @@ def test_03_read_publications(self):
@skip_if(bool, 'publication', False)
def test_04_read_publications(self):
"""Read a publication by its created time."""
- publications = self.client.get(PUBLICATIONS_PATH, params={
+ publications = self.client.get(FILE_PUBLICATION_PATH, params={
'_created': self.publication['_created']
})
self.assertEqual(len(publications), 1, publications)
@@ -146,7 +146,7 @@ def test_05_read_publications(self):
self.addCleanup(self.client.delete, distribution['_href'])
self.publication.update(self.client.get(self.publication['_href']))
- publications = self.client.get(PUBLICATIONS_PATH, params={
+ publications = self.client.get(FILE_PUBLICATION_PATH, params={
'distributions': distribution['_href']
})
self.assertEqual(len(publications), 1, publications)
@@ -169,7 +169,7 @@ def test_06_publication_create_order(self):
# Read publications
publications = self.client.get(
- PUBLICATIONS_PATH,
+ FILE_PUBLICATION_PATH,
params={'publisher': self.publisher['_href']}
)
self.assertEqual(len(publications), 3)
| Drop 3.16 from auto-ci-update, add 3.39
[noissue]
| 2019-04-23T18:29:51 |
|
pulp/pulpcore | 97 | pulp__pulpcore-97 | [
"4341"
] | 20a7a54a61523ddfb7a8733c4dc8cc4bd672a50a | diff --git a/pulpcore/app/serializers/repository.py b/pulpcore/app/serializers/repository.py
--- a/pulpcore/app/serializers/repository.py
+++ b/pulpcore/app/serializers/repository.py
@@ -136,7 +136,8 @@ class RepositorySyncURLSerializer(serializers.Serializer):
mirror = fields.BooleanField(
required=False,
default=False,
- help_text=_('The synchronization mode, True for "mirror" and False for "additive" mode.')
+ help_text=_('If ``True``, synchronization will remove all content that is not present in '
+ 'the remote repository. If ``False``, sync will be additive only.')
)
| Burst mode for pulpcore-worker
When upgrading, it is sometimes not safe to bring "old" unfinished tasks over to the new version. Though we have a backwards compatibility claim, some admins may decide they want to drain the task queue anyway.
Providing a "--burst" option for the pulpcore worker that tells it to exit instead of going to sleep will help there.
This may also be useful when a system is stuck on a bunch of sync/import/export tasks: you can add a few extra workers that will disconnect themselves once the congestion is over.
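A minimal sketch of what such a burst option could look like is shown below. This is not the actual pulpcore worker code; the `pop_next_task()` helper and the task objects are assumptions used only for illustration.

```python
# Hypothetical sketch of a worker loop with a --burst flag; pop_next_task() is an
# assumed helper that returns the next queued task, or None when the queue is empty.
import argparse
import time


def run_worker(burst=False, poll_interval=1.0):
    """Process tasks in a loop; with burst=True, exit once the queue is drained."""
    while True:
        task = pop_next_task()
        if task is None:
            if burst:
                break  # queue drained: exit instead of going to sleep
            time.sleep(poll_interval)
            continue
        task.run()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--burst", action="store_true",
                        help="exit when the task queue is empty instead of sleeping")
    run_worker(burst=parser.parse_args().burst)
```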
| 2019-04-24T13:14:46 |
||
pulp/pulpcore | 119 | pulp__pulpcore-119 | [
"4786"
] | e706e7af82d6fb25896996ca9edbcf0689b6288a | diff --git a/pulpcore/app/serializers/content.py b/pulpcore/app/serializers/content.py
--- a/pulpcore/app/serializers/content.py
+++ b/pulpcore/app/serializers/content.py
@@ -99,6 +99,7 @@ class ArtifactSerializer(base.ModelSerializer):
file = serializers.FileField(
help_text=_("The stored file."),
+ allow_empty_file=True,
required=False
)
| diff --git a/pulpcore/tests/functional/api/test_crd_artifacts.py b/pulpcore/tests/functional/api/test_crd_artifacts.py
--- a/pulpcore/tests/functional/api/test_crd_artifacts.py
+++ b/pulpcore/tests/functional/api/test_crd_artifacts.py
@@ -54,11 +54,31 @@ def test_upload_valid_attrs(self):
for keys in itertools.combinations(file_attrs, i):
data = {key: file_attrs[key] for key in keys}
with self.subTest(data=data):
- self._do_upload_valid_attrs(data)
+ self._do_upload_valid_attrs(data, self.file)
- def _do_upload_valid_attrs(self, data):
+ def test_upload_empty_file(self):
+ """Upload an empty file.
+
+ For each possible combination of ``sha256`` and ``size`` (including
+ neither), do the following:
+
+ 1. Upload a file with the chosen combination of attributes.
+ 2. Verify that an artifact has been created, and that it has valid
+ attributes.
+ 3. Delete the artifact, and verify that its attributes are
+ inaccessible.
+ """
+ empty_file = b''
+ file_attrs = {'sha256': hashlib.sha256(empty_file).hexdigest(), 'size': 0}
+ for i in range(len(file_attrs) + 1):
+ for keys in itertools.combinations(file_attrs, i):
+ data = {key: file_attrs[key] for key in keys}
+ with self.subTest(data=data):
+ self._do_upload_valid_attrs(data, files={'file': empty_file})
+
+ def _do_upload_valid_attrs(self, data, files):
"""Upload a file with the given attributes."""
- artifact = self.client.post(ARTIFACTS_PATH, data=data, files=self.file)
+ artifact = self.client.post(ARTIFACTS_PATH, data=data, files=files)
self.addCleanup(self.client.delete, artifact['_href'])
read_artifact = self.client.get(artifact['_href'])
for key, val in artifact.items():
| [noissue]: Update aiohttp requirement from <3.9.1,>=3.8.1 to >=3.8.1,<3.9.2
Updates the requirements on [aiohttp](https://github.com/aio-libs/aiohttp) to permit the latest version.
<details>
<summary>Release notes</summary>
<p><em>Sourced from <a href="https://github.com/aio-libs/aiohttp/releases">aiohttp's releases</a>.</em></p>
<blockquote>
<h2>3.9.1</h2>
<h2>Bugfixes</h2>
<ul>
<li>
<p>Fixed importing aiohttp under PyPy on Windows.</p>
<p>(<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7848">#7848</a>)</p>
</li>
<li>
<p>Fixed async concurrency safety in websocket compressor.</p>
<p>(<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7865">#7865</a>)</p>
</li>
<li>
<p>Fixed <code>ClientResponse.close()</code> releasing the connection instead of closing.</p>
<p>(<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7869">#7869</a>)</p>
</li>
<li>
<p>Fixed a regression where connection may get closed during upgrade. -- by :user:<code>Dreamsorcerer</code></p>
<p>(<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7879">#7879</a>)</p>
</li>
<li>
<p>Fixed messages being reported as upgraded without an Upgrade header in Python parser. -- by :user:<code>Dreamsorcerer</code></p>
<p>(<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7895">#7895</a>)</p>
</li>
</ul>
<hr />
</blockquote>
</details>
<details>
<summary>Changelog</summary>
<p><em>Sourced from <a href="https://github.com/aio-libs/aiohttp/blob/master/CHANGES.rst">aiohttp's changelog</a>.</em></p>
<blockquote>
<h1>3.9.1 (2023-11-26)</h1>
<h2>Bugfixes</h2>
<ul>
<li>
<p>Fixed importing aiohttp under PyPy on Windows.</p>
<p><code>[#7848](https://github.com/aio-libs/aiohttp/issues/7848) <https://github.com/aio-libs/aiohttp/issues/7848></code>_</p>
</li>
<li>
<p>Fixed async concurrency safety in websocket compressor.</p>
<p><code>[#7865](https://github.com/aio-libs/aiohttp/issues/7865) <https://github.com/aio-libs/aiohttp/issues/7865></code>_</p>
</li>
<li>
<p>Fixed <code>ClientResponse.close()</code> releasing the connection instead of closing.</p>
<p><code>[#7869](https://github.com/aio-libs/aiohttp/issues/7869) <https://github.com/aio-libs/aiohttp/issues/7869></code>_</p>
</li>
<li>
<p>Fixed a regression where connection may get closed during upgrade. -- by :user:<code>Dreamsorcerer</code></p>
<p><code>[#7879](https://github.com/aio-libs/aiohttp/issues/7879) <https://github.com/aio-libs/aiohttp/issues/7879></code>_</p>
</li>
<li>
<p>Fixed messages being reported as upgraded without an Upgrade header in Python parser. -- by :user:<code>Dreamsorcerer</code></p>
<p><code>[#7895](https://github.com/aio-libs/aiohttp/issues/7895) <https://github.com/aio-libs/aiohttp/issues/7895></code>_</p>
</li>
</ul>
<hr />
<h1>3.9.0 (2023-11-18)</h1>
<h2>Features</h2>
<ul>
<li>
<p>Introduced <code>AppKey</code> for static typing support of <code>Application</code> storage.
See <a href="https://docs.aiohttp.org/en/stable/web_advanced.html#application-s-config">https://docs.aiohttp.org/en/stable/web_advanced.html#application-s-config</a></p>
<p><code>[#5864](https://github.com/aio-libs/aiohttp/issues/5864) <https://github.com/aio-libs/aiohttp/issues/5864></code>_</p>
</li>
<li>
<p>Added a graceful shutdown period which allows pending tasks to complete before the application's cleanup is called.
The period can be adjusted with the <code>shutdown_timeout</code> parameter. -- by :user:<code>Dreamsorcerer</code>.
See <a href="https://docs.aiohttp.org/en/latest/web_advanced.html#graceful-shutdown">https://docs.aiohttp.org/en/latest/web_advanced.html#graceful-shutdown</a></p>
<p><code>[#7188](https://github.com/aio-libs/aiohttp/issues/7188) <https://github.com/aio-libs/aiohttp/issues/7188></code>_</p>
</li>
<li>
<p>Added <code>handler_cancellation <https://docs.aiohttp.org/en/stable/web_advanced.html#web-handler-cancellation></code>_ parameter to cancel web handler on client disconnection. -- by :user:<code>mosquito</code>
This (optionally) reintroduces a feature removed in a previous release.</p>
</li>
</ul>
<!-- raw HTML omitted -->
</blockquote>
<p>... (truncated)</p>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/aio-libs/aiohttp/commit/6333c026422c6b0fe57ff63cde4104e2d00f47f4"><code>6333c02</code></a> Release v3.9.1 (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7911">#7911</a>)</li>
<li><a href="https://github.com/aio-libs/aiohttp/commit/9dbd273093d6af6f5e1481816b05a7192860b440"><code>9dbd273</code></a> [PR <a href="https://redirect.github.com/aio-libs/aiohttp/issues/7673">#7673</a>/aa7d1a8f backport][3.9] Document release process (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7909">#7909</a>)</li>
<li><a href="https://github.com/aio-libs/aiohttp/commit/dd175b6b89564dc74fba0692a8a5f9a9b38e528a"><code>dd175b6</code></a> Fix regression with connection upgrade (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7879">#7879</a>) (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7908">#7908</a>)</li>
<li><a href="https://github.com/aio-libs/aiohttp/commit/946523d6380bd79e13146557432f46f6f9bbd53f"><code>946523d</code></a> Fix flaky websocket test (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7902">#7902</a>) (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7904">#7904</a>)</li>
<li><a href="https://github.com/aio-libs/aiohttp/commit/ddc2a26c9e0c43fd1229e4424f2a30d1b10ced13"><code>ddc2a26</code></a> [PR <a href="https://redirect.github.com/aio-libs/aiohttp/issues/7896">#7896</a>/9a7cfe77 backport][3.9] Fix some flaky tests (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7900">#7900</a>)</li>
<li><a href="https://github.com/aio-libs/aiohttp/commit/2ae4d6ffdd015f622bfb75dee98ad629240cccc4"><code>2ae4d6f</code></a> Message is not upgraded if Upgrade header is missing (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7895">#7895</a>) (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7898">#7898</a>)</li>
<li><a href="https://github.com/aio-libs/aiohttp/commit/bb111012706d3ef9edc525be3d8d4df410ad847f"><code>bb11101</code></a> Restore async concurrency safety to websocket compressor (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7865">#7865</a>) (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7889">#7889</a>)</li>
<li><a href="https://github.com/aio-libs/aiohttp/commit/6dd0122417f00ed4b2b353226a1b164b6463a245"><code>6dd0122</code></a> Update dependabot.yml (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7888">#7888</a>)</li>
<li><a href="https://github.com/aio-libs/aiohttp/commit/41a9f1f5b9b2630b1f4b971811c7ef8f016262fb"><code>41a9f1f</code></a> Bump mypy from 1.7.0 to 1.7.1 (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7882">#7882</a>)</li>
<li><a href="https://github.com/aio-libs/aiohttp/commit/a04970150c6ce9fda22c9f63d947845f79148b4c"><code>a049701</code></a> Fix usage of proxy.py in test_proxy_functional (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7773">#7773</a>) (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7876">#7876</a>)</li>
<li>Additional commits viewable in <a href="https://github.com/aio-libs/aiohttp/compare/v3.8.1...v3.9.1">compare view</a></li>
</ul>
</details>
<br />
| 2019-05-06T18:25:26 |
|
pulp/pulpcore | 130 | pulp__pulpcore-130 | [
"4719"
] | 641f5e5e1fc9f20128d530a456b3d60d94f2ebe5 | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -38,17 +38,42 @@ class ArtifactNotFound(Exception):
pass
-HOP_BY_HOP_HEADERS = [
- 'connection',
- 'keep-alive',
- 'public',
- 'proxy-authenticate',
- 'transfer-encoding',
- 'upgrade',
-]
+class Handler:
+ """
+ A default Handler for the Content App that also can be subclassed to create custom handlers.
+ This Handler will perform the following:
-class Handler:
+ 1. Match the request against a Distribution
+
+ 2. Call the certguard check if a certguard exists for the matched Distribution.
+
+ 3. If the Distribution has a `publication` serve that Publication's `PublishedArtifacts`,
+ `PublishedMetadata` by the remaining `relative path`. If still unserved and if `pass_through`
+ is set, the associated `repository_version` will have its `ContentArtifacts` served by
+ `relative_path` also. This will serve the associated `Artifact`.
+
+ 4. If still unmatched, and the Distribution has a `repository` attribute set, find it's latest
+ `repository_version`. If the Distribution has a `repository_version` attribute set, use that.
+ For this `repository_version`, find matching `ContentArtifact` objects by `relative_path` and
+ serve them. If there is an associated `Artifact` return it.
+
+ 5. If the Distribution has a `remote`, find an associated `RemoteArtifact` that matches by
+ `relative_path`. Fetch and stream the corresponding `RemoteArtifact` to the client,
+ optionally saving the `Artifact` depending on the `policy` attribute.
+
+ """
+
+ hop_by_hop_headers = [
+ 'connection',
+ 'keep-alive',
+ 'public',
+ 'proxy-authenticate',
+ 'transfer-encoding',
+ 'upgrade',
+ ]
+
+ distribution_model = None
async def stream_content(self, request):
"""
@@ -85,26 +110,31 @@ def _base_paths(path):
path = base
return tree
- @staticmethod
- def _match_distribution(path):
+ @classmethod
+ def _match_distribution(cls, path):
"""
- Match a distribution using a list of base paths.
+ Match a distribution using a list of base paths and return its detail object.
Args:
path (str): The path component of the URL.
Returns:
- BaseDistribution: The matched distribution.
+ detail of BaseDistribution: The matched distribution.
Raises:
PathNotResolved: when not matched.
"""
- base_paths = Handler._base_paths(path)
+ base_paths = cls._base_paths(path)
try:
- return BaseDistribution.objects.get(base_path__in=base_paths)
+ if cls.distribution_model is None:
+ model_class = BaseDistribution
+ return BaseDistribution.objects.get(base_path__in=base_paths).cast()
+ else:
+ model_class = cls.distribution_model
+ return cls.distribution_model.objects.get(base_path__in=base_paths)
except ObjectDoesNotExist:
- log.debug(_('BaseDistribution not matched for {path} using: {base_paths}').format(
- path=path, base_paths=base_paths
+ log.debug(_('{model_name} not matched for {path} using: {base_paths}').format(
+ model_name=model_class.__name__, path=path, base_paths=base_paths
))
raise PathNotResolved(path)
@@ -117,7 +147,7 @@ def _permit(request, distribution):
Args:
request (:class:`aiohttp.web.Request`): A request for a published file.
- distribution (:class:`pulpcore.plugin.models.BaseDistribution`): The matched
+ distribution (detail of :class:`pulpcore.plugin.models.BaseDistribution`): The matched
distribution.
Raises:
@@ -137,10 +167,6 @@ def _permit(request, distribution):
'r': str(pe)
})
raise HTTPForbidden(reason=str(pe))
- except Exception:
- reason = _('Guard "{g}" failed:').format(g=guard.name)
- log.debug(reason, exc_info=True)
- raise HTTPForbidden(reason=reason)
async def _match_and_stream(self, path, request):
"""
@@ -158,7 +184,7 @@ async def _match_and_stream(self, path, request):
:class:`aiohttp.web.StreamResponse` or :class:`aiohttp.web.FileResponse`: The response
streamed back to the client.
"""
- distro = Handler._match_distribution(path).cast()
+ distro = self._match_distribution(path)
self._permit(request, distro)
rel_path = path.lstrip('/')
@@ -396,7 +422,7 @@ async def _stream_remote_artifact(self, request, response, remote_artifact):
async def handle_headers(headers):
for name, value in headers.items():
- if name.lower() in HOP_BY_HOP_HEADERS:
+ if name.lower() in self.hop_by_hop_headers:
continue
response.headers[name] = value
await response.prepare(request)
| Update CI files for branch 3.39
[noissue]
| 2019-05-13T19:08:59 |
||
pulp/pulpcore | 133 | pulp__pulpcore-133 | [
"4817"
] | 15e28bf669e5c5cb318b06fa59a5b6eadccab817 | diff --git a/pulpcore/app/serializers/publication.py b/pulpcore/app/serializers/publication.py
--- a/pulpcore/app/serializers/publication.py
+++ b/pulpcore/app/serializers/publication.py
@@ -92,6 +92,22 @@ class Meta:
class BaseDistributionSerializer(MasterModelSerializer):
+ """
+ The Serializer for the BaseDistribution model.
+
+ The serializer deliberately omits the "remote" field, which is used for
+ pull-through caching only. Plugins implementing pull-through caching will
+ have to add the field in their derived serializer class like this::
+
+ remote = DetailRelatedField(
+ required=False,
+ help_text=_('Remote that can be used to fetch content when using pull-through caching.'),
+ queryset=models.Remote.objects.all(),
+ allow_null=True
+ )
+
+ """
+
_href = DetailIdentityField()
base_path = serializers.CharField(
help_text=_('The base (relative) path component of the published url. Avoid paths that \
@@ -123,12 +139,6 @@ class BaseDistributionSerializer(MasterModelSerializer):
)),
UniqueValidator(queryset=models.BaseDistribution.objects.all())]
)
- remote = DetailRelatedField(
- required=False,
- help_text=_('Remote that can be used to fetch content when using pull-through caching.'),
- queryset=models.Remote.objects.all(),
- allow_null=True
- )
class Meta:
abstract = True
@@ -138,7 +148,6 @@ class Meta:
'base_url',
'content_guard',
'name',
- 'remote',
)
def _validate_path_overlap(self, path):
| Cleanup old publications - general cleanup
**Is your feature request related to a problem? Please describe.**
We create a lot of new repositories and publications - and also delete them after a certain time.
Now I see that the publications persist.
This means that in normal operation we end up with hundreds of publications,
but we don't know what they belong to, because there is no name tag.
**Describe the solution you'd like**
Maybe there is a way to remove old publications that no longer have any link to a repository or distribution (a rough sketch of such a cleanup job is included below).
**Describe alternatives you've considered**
No idea; I am just thinking about a cleanup job.
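One way such a cleanup job could be sketched is shown below. This is only an illustration: the `distribution` reverse relation and the `pulp_created` timestamp field are assumed names and may not match the actual pulpcore models.

```python
# Hypothetical cleanup sketch; the field names used in the filter are assumptions.
from datetime import timedelta

from django.utils import timezone

from pulpcore.app.models import Publication


def delete_orphan_publications(keep_days=30):
    """Delete publications that no distribution points to and that are older than keep_days."""
    cutoff = timezone.now() - timedelta(days=keep_days)
    stale = Publication.objects.filter(distribution__isnull=True, pulp_created__lt=cutoff)
    return stale.delete()
```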
| 2019-05-14T19:36:49 |
||
pulp/pulpcore | 137 | pulp__pulpcore-137 | [
"4719"
] | 2a035d2fa619fe4003c4c9c946f1a263e159ee7d | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -50,8 +50,8 @@ class Handler:
3. If the Distribution has a `publication` serve that Publication's `PublishedArtifacts`,
`PublishedMetadata` by the remaining `relative path`. If still unserved and if `pass_through`
- is set, the associated `repository_version` will have its `ContentArtifacts` served by
- `relative_path` also. This will serve the associated `Artifact`.
+ is set, the associated `repository_version` will have its `ContentArtifacts` served by
+ `relative_path` also. This will serve the associated `Artifact`.
4. If still unmatched, and the Distribution has a `repository` attribute set, find it's latest
`repository_version`. If the Distribution has a `repository_version` attribute set, use that.
| Update CI files for branch 3.39
[noissue]
| 2019-05-17T17:08:53 |
||
pulp/pulpcore | 152 | pulp__pulpcore-152 | [
"4792"
] | 92e1009e277dc1a70b28fd7ba4031c5afd7be0fb | diff --git a/pulpcore/app/serializers/publication.py b/pulpcore/app/serializers/publication.py
--- a/pulpcore/app/serializers/publication.py
+++ b/pulpcore/app/serializers/publication.py
@@ -28,12 +28,13 @@ class PublicationSerializer(MasterModelSerializer):
queryset=models.RepositoryVersion.objects.all(),
required=False,
)
- repository = serializers.HyperlinkedRelatedField(
+ repository = RelatedField(
help_text=_('A URI of the repository to be published.'),
required=False,
label=_('Repository'),
queryset=models.Repository.objects.all(),
view_name='repositories-detail',
+ write_only=True
)
def validate(self, data):
| Fix division-by-zero on import
closes #4777
TODO: still working on the tests, it's a bit difficult to piece together how the fixtures interact...
| 2019-05-24T13:16:54 |
||
pulp/pulpcore | 172 | pulp__pulpcore-172 | [
"4901"
] | f917fd30be347a61e357db3a2249c4301882895d | diff --git a/pulpcore/app/serializers/repository.py b/pulpcore/app/serializers/repository.py
--- a/pulpcore/app/serializers/repository.py
+++ b/pulpcore/app/serializers/repository.py
@@ -246,12 +246,17 @@ class Meta:
class RepositoryVersionCreateSerializer(ModelSerializer, NestedHyperlinkedModelSerializer):
add_content_units = serializers.ListField(
- help_text=_('A list of content units to add to a new repository version'),
- write_only=True
+ help_text=_('A list of content units to add to a new repository version. This content is '
+ 'added after remove_content_units are removed.'),
+ write_only=True,
+ required=False
)
remove_content_units = serializers.ListField(
- help_text=_('A list of content units to remove from the latest repository version'),
- write_only=True
+ help_text=_("A list of content units to remove from the latest repository version. "
+ "You may also specify '*' as an entry to remove all content. This content is "
+ "removed before add_content_units are added."),
+ write_only=True,
+ required=False
)
base_version = NestedRelatedField(
required=False,
@@ -263,6 +268,11 @@ class RepositoryVersionCreateSerializer(ModelSerializer, NestedHyperlinkedModelS
parent_lookup_kwargs={'repository_pk': 'repository__pk'},
)
+ def validate_remove_content_units(self, value):
+ if len(value) > 1 and '*' in value:
+ raise serializers.ValidationError("Cannot supply content units and '*'.")
+ return value
+
class Meta:
model = models.RepositoryVersion
fields = ['add_content_units', 'remove_content_units', 'base_version']
diff --git a/pulpcore/app/tasks/repository.py b/pulpcore/app/tasks/repository.py
--- a/pulpcore/app/tasks/repository.py
+++ b/pulpcore/app/tasks/repository.py
@@ -88,6 +88,10 @@ def add_and_remove(repository_pk, add_content_units, remove_content_units, base_
else:
base_version = None
+ if '*' in remove_content_units:
+ latest = models.RepositoryVersion.latest(repository)
+ remove_content_units = latest.content.values_list('pk', flat=True)
+
with models.RepositoryVersion.create(repository, base_version=base_version) as new_version:
- new_version.add_content(models.Content.objects.filter(pk__in=add_content_units))
new_version.remove_content(models.Content.objects.filter(pk__in=remove_content_units))
+ new_version.add_content(models.Content.objects.filter(pk__in=add_content_units))
diff --git a/pulpcore/app/viewsets/repository.py b/pulpcore/app/viewsets/repository.py
--- a/pulpcore/app/viewsets/repository.py
+++ b/pulpcore/app/viewsets/repository.py
@@ -225,6 +225,8 @@ def create(self, request, repository_pk):
add_content_units = []
remove_content_units = []
repository = self.get_parent_object()
+ serializer = self.get_serializer(data=request.data)
+ serializer.is_valid(raise_exception=True)
if 'base_version' in request.data:
base_version_pk = self.get_resource(request.data['base_version'], RepositoryVersion).pk
@@ -238,8 +240,11 @@ def create(self, request, repository_pk):
if 'remove_content_units' in request.data:
for url in request.data['remove_content_units']:
- content = self.get_resource(url, Content)
- remove_content_units.append(content.pk)
+ if url == '*':
+ remove_content_units.append(url)
+ else:
+ content = self.get_resource(url, Content)
+ remove_content_units.append(content.pk)
result = enqueue_with_reservation(
tasks.repository.add_and_remove, [repository],
| Update CI files for branch 3.43
| 2019-06-11T18:34:19 |
||
pulp/pulpcore | 174 | pulp__pulpcore-174 | [
"3808"
] | 159a6bb88d59ac5db72608260a81cf317be1d521 | diff --git a/pulpcore/app/middleware.py b/pulpcore/app/middleware.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/app/middleware.py
@@ -0,0 +1,7 @@
+from django.contrib.auth.middleware import RemoteUserMiddleware
+
+from pulpcore.app import settings
+
+
+class PulpRemoteUserMiddleware(RemoteUserMiddleware):
+ header = settings.REMOTE_USER_ENVIRON_NAME
diff --git a/pulpcore/app/settings.py b/pulpcore/app/settings.py
--- a/pulpcore/app/settings.py
+++ b/pulpcore/app/settings.py
@@ -84,10 +84,16 @@
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
+ 'pulpcore.app.middleware.PulpRemoteUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
+AUTHENTICATION_BACKENDS = [
+ 'django.contrib.auth.backends.ModelBackend',
+ 'django.contrib.auth.backends.RemoteUserBackend',
+]
+
ROOT_URLCONF = 'pulpcore.app.urls'
TEMPLATES = [
@@ -116,6 +122,7 @@
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAuthenticated',),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
+ 'rest_framework.authentication.RemoteUserAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
'UPLOADED_FILES_USE_URL': False,
@@ -198,6 +205,8 @@
CONTENT_HOST = None
CONTENT_PATH_PREFIX = '/pulp/content/'
+REMOTE_USER_ENVIRON_NAME = "REMOTE_USER"
+
PROFILE_STAGES_API = False
SWAGGER_SETTINGS = {
 | Delete artifact files after successful transaction
| 2019-06-12T19:16:25 |
||
pulp/pulpcore | 185 | pulp__pulpcore-185 | [
"5016"
] | 4c8dc5fa773934bed20b4199345c0a4b1e243cdd | diff --git a/pulpcore/app/viewsets/content.py b/pulpcore/app/viewsets/content.py
--- a/pulpcore/app/viewsets/content.py
+++ b/pulpcore/app/viewsets/content.py
@@ -2,7 +2,6 @@
from django.db import models
from rest_framework import mixins, status
-from rest_framework.parsers import FormParser, MultiPartParser
from rest_framework.response import Response
from pulpcore.app.models import Artifact, Content
@@ -50,7 +49,6 @@ class ArtifactViewSet(NamedModelViewSet,
queryset = Artifact.objects.all()
serializer_class = ArtifactSerializer
filterset_class = ArtifactFilter
- parser_classes = (MultiPartParser, FormParser)
def destroy(self, request, pk):
"""
| [PR #5015/8c3dcd88 backport][3.22] Backport the `--nightly` pytest arg
**This is a backport of PR #5015 as merged into 3.28 (8c3dcd887caa7ef6f940cb49cb627200fbc3bf6b).**
This pytest flag used to be provided by pulp-smash, but we no longer want to depend on that. So we need to provide this functionality in the pulpcore pytest plugin. A safeguard is in place to not totally confuse pytest if pulp-smash happens to be installed anyway.
[noissue]
(cherry picked from commit c6135e81ae8aacd9c6dde092856d1a12bfdac8ab)
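For context, a sketch of how such a flag can be registered by a pytest plugin is shown below. This is not the exact pulpcore implementation; it only illustrates the general pattern, including a guard against the option already having been registered by another plugin such as pulp-smash.

```python
# Illustrative conftest.py / plugin hooks, not the actual pulpcore pytest plugin.
import pytest


def pytest_addoption(parser):
    try:
        parser.addoption(
            "--nightly",
            action="store_true",
            default=False,
            help="also run tests marked as nightly",
        )
    except ValueError:
        # Another plugin (e.g. pulp-smash) already registered --nightly; skip re-adding it.
        pass


def pytest_collection_modifyitems(config, items):
    if config.getoption("--nightly"):
        return
    skip_nightly = pytest.mark.skip(reason="needs --nightly to run")
    for item in items:
        if "nightly" in item.keywords:
            item.add_marker(skip_nightly)
```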
| 2019-06-26T17:17:59 |
||
pulp/pulpcore | 190 | pulp__pulpcore-190 | [
"4990"
] | 47cb9e7a49dde35dfa926c4d8abaf953c082fec3 | diff --git a/pulpcore/app/serializers/repository.py b/pulpcore/app/serializers/repository.py
--- a/pulpcore/app/serializers/repository.py
+++ b/pulpcore/app/serializers/repository.py
@@ -111,9 +111,10 @@ class RemoteSerializer(MasterModelSerializer):
min_value=1
)
policy = serializers.ChoiceField(
- help_text="The policy to use when downloading content. The possible values include: "
- "'immediate', 'on_demand', and 'cache_only'. 'immediate' is the default.",
- choices=models.Remote.POLICY_CHOICES,
+ help_text="The policy to use when downloading content.",
+ choices=(
+ (models.Remote.IMMEDIATE, 'When syncing, download all metadata and content now.')
+ ),
default=models.Remote.IMMEDIATE
)
| Deps 3 22
| 2019-06-27T18:19:38 |
||
pulp/pulpcore | 193 | pulp__pulpcore-193 | [
"4988"
] | 47cb9e7a49dde35dfa926c4d8abaf953c082fec3 | diff --git a/pulpcore/app/viewsets/upload.py b/pulpcore/app/viewsets/upload.py
--- a/pulpcore/app/viewsets/upload.py
+++ b/pulpcore/app/viewsets/upload.py
@@ -10,19 +10,33 @@
from pulpcore.app.models import Upload
from pulpcore.app.serializers import UploadChunkSerializer, UploadCommitSerializer, UploadSerializer
-from pulpcore.app.viewsets.base import NamedModelViewSet
+from pulpcore.app.viewsets import BaseFilterSet
+from pulpcore.app.viewsets.base import DATETIME_FILTER_OPTIONS, NamedModelViewSet
+from pulpcore.app.viewsets.custom_filters import IsoDateTimeFilter
+
+
+class UploadFilter(BaseFilterSet):
+ completed = IsoDateTimeFilter(field_name='completed')
+
+ class Meta:
+ model = Upload
+ fields = {
+ 'completed': DATETIME_FILTER_OPTIONS + ['isnull']
+ }
class UploadViewSet(NamedModelViewSet,
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
+ mixins.DestroyModelMixin,
mixins.ListModelMixin):
"""View for chunked uploads."""
endpoint_name = 'uploads'
queryset = Upload.objects.all()
serializer_class = UploadSerializer
- http_method_names = ['get', 'post', 'head', 'put']
+ filterset_class = UploadFilter
+ http_method_names = ['get', 'post', 'head', 'put', 'delete'] # remove PATCH
content_range_pattern = r'^bytes (\d+)-(\d+)/(\d+|[*])$'
content_range_parameter = \
| Verify if domain name has more than 50 chars
fixes: #4976
| 2019-06-27T19:31:18 |
||
pulp/pulpcore | 194 | pulp__pulpcore-194 | [
"4714"
] | 59f3450286d0a3a88e4b9fd1edb3568ee65bf54f | diff --git a/pulpcore/app/migrations/0003_remove_remote_validate.py b/pulpcore/app/migrations/0003_remove_remote_validate.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/app/migrations/0003_remove_remote_validate.py
@@ -0,0 +1,17 @@
+# Generated by Django 2.2.2 on 2019-06-27 20:20
+
+from django.db import migrations
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('core', '0002_contentappstatus'),
+ ]
+
+ operations = [
+ migrations.RemoveField(
+ model_name='remote',
+ name='validate',
+ ),
+ ]
diff --git a/pulpcore/app/models/repository.py b/pulpcore/app/models/repository.py
--- a/pulpcore/app/models/repository.py
+++ b/pulpcore/app/models/repository.py
@@ -58,7 +58,6 @@ class Remote(MasterModel):
name (models.CharField): The remote name.
url (models.TextField): The URL of an external content source.
- validate (models.BooleanField): If True, the plugin will validate imported files.
ssl_ca_certificate (models.FileField): A PEM encoded CA certificate used to validate the
server certificate presented by the external source.
ssl_client_certificate (models.FileField): A PEM encoded client certificate used
@@ -97,7 +96,6 @@ class Remote(MasterModel):
name = models.CharField(db_index=True, unique=True, max_length=255)
url = models.TextField()
- validate = models.BooleanField(default=True)
ssl_ca_certificate = models.TextField(null=True)
ssl_client_certificate = models.TextField(null=True)
diff --git a/pulpcore/app/serializers/repository.py b/pulpcore/app/serializers/repository.py
--- a/pulpcore/app/serializers/repository.py
+++ b/pulpcore/app/serializers/repository.py
@@ -55,10 +55,6 @@ class RemoteSerializer(MasterModelSerializer):
url = serializers.CharField(
help_text='The URL of an external content source.',
)
- validate = serializers.BooleanField(
- help_text='If True, the plugin will validate imported artifacts.',
- required=False,
- )
ssl_ca_certificate = SecretCharField(
help_text='A string containing the PEM encoded CA certificate used to validate the server '
'certificate presented by the remote server. All new line characters must be '
@@ -122,9 +118,10 @@ class Meta:
abstract = True
model = models.Remote
fields = MasterModelSerializer.Meta.fields + (
- 'name', 'url', 'validate', 'ssl_ca_certificate', 'ssl_client_certificate',
- 'ssl_client_key', 'ssl_validation', 'proxy_url', 'username', 'password',
- '_last_updated', 'download_concurrency', 'policy')
+ 'name', 'url', 'ssl_ca_certificate', 'ssl_client_certificate', 'ssl_client_key',
+ 'ssl_validation', 'proxy_url', 'username', 'password', '_last_updated',
+ 'download_concurrency', 'policy'
+ )
class RepositorySyncURLSerializer(serializers.Serializer):
| Update CI files for branch main
[noissue]
| 2019-06-28T00:43:26 |
||
pulp/pulpcore | 201 | pulp__pulpcore-201 | [
"4998"
] | d89ae8380123a4d39e71e0956d05f576078a43ee | diff --git a/pulpcore/app/migrations/0002_increase_artifact_size_field.py b/pulpcore/app/migrations/0002_increase_artifact_size_field.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/app/migrations/0002_increase_artifact_size_field.py
@@ -0,0 +1,18 @@
+# Generated by Django 2.2.2 on 2019-07-05 17:06
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('core', '0001_initial'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='artifact',
+ name='size',
+ field=models.BigIntegerField(),
+ ),
+ ]
diff --git a/pulpcore/app/models/content.py b/pulpcore/app/models/content.py
--- a/pulpcore/app/models/content.py
+++ b/pulpcore/app/models/content.py
@@ -103,7 +103,7 @@ def storage_path(self, name):
return storage.get_artifact_path(self.sha256)
file = fields.ArtifactFileField(null=False, upload_to=storage_path, max_length=255)
- size = models.IntegerField(null=False)
+ size = models.BigIntegerField(null=False)
md5 = models.CharField(max_length=32, null=False, unique=False, db_index=True)
sha1 = models.CharField(max_length=40, null=False, unique=False, db_index=True)
sha224 = models.CharField(max_length=56, null=False, unique=False, db_index=True)
| [RFE?] Keep worker process running in case of db connection error
**Is your feature request related to a problem? Please describe.**
From time to time my Postgres instance restarts (not related to Pulp), which in turn causes a database connection error, and the pulpcore worker process exits (and the pod restarts).
**Describe the solution you'd like**
Is there a way to handle these connection drops on the pulpcore worker side? For example, the main process in the pulpcore-api pods is a gunicorn app that forks child processes; whenever the connection to the database is lost, the api pods throw exceptions, but the main gunicorn process keeps running (only the forks die), so no pod restart is caused. Is it possible to provide similar behavior in the pulpcore-worker pods so that the main process does not exit in case of a database failure? (A sketch of one possible supervising loop is included after the additional context below.)
**Additional context**
* We understand that the main problem is on the database side and the idea of this issue is just a "workaround".
* If we manage to find a way to keep the process running (and not restart the pod) will it handle/ensure the reconnection to the database?
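A minimal sketch of the kind of supervising loop being asked for is shown below. It is not pulpcore's actual worker entry point; `work_once()` stands in for whatever the worker does per iteration, and the retry delay is arbitrary.

```python
# Illustrative supervision loop that survives dropped database connections.
import logging
import time

from django.db import close_old_connections
from django.db.utils import OperationalError

log = logging.getLogger(__name__)


def run_forever(work_once, retry_delay=5):
    """Call work_once() in a loop instead of exiting when the database goes away."""
    while True:
        try:
            work_once()
        except OperationalError:
            # Postgres restarted or the connection dropped: discard the broken
            # connection and retry later rather than letting the process die.
            log.warning("Lost database connection, retrying in %s seconds", retry_delay)
            close_old_connections()
            time.sleep(retry_delay)
```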
| 2019-07-05T16:52:30 |
||
pulp/pulpcore | 203 | pulp__pulpcore-203 | [
"5060"
] | 3c8471845f0b30e31aaf7bd6059c2f6c22eca090 | diff --git a/containers/images/pulp-api/container-assets/wait_on_postgres.py b/containers/images/pulp/container-assets/wait_on_postgres.py
similarity index 100%
rename from containers/images/pulp-api/container-assets/wait_on_postgres.py
rename to containers/images/pulp/container-assets/wait_on_postgres.py
| diff --git a/.travis/test_bindings.py b/.travis/test_bindings.py
--- a/.travis/test_bindings.py
+++ b/.travis/test_bindings.py
@@ -135,7 +135,8 @@ def upload_file_in_chunks(file_path):
pprint(repository_version_1)
# Create an artifact from a local file
-artifact = artifacts.create(file='test_bindings.py')
+file_path = os.path.join(os.environ['TRAVIS_BUILD_DIR'], '.travis/test_bindings.py')
+artifact = artifacts.create(file=file_path)
pprint(artifact)
# Create a FileContent from the artifact
| [noissue]: Update protobuf requirement from <4.25.3,>=4.21.1 to >=4.21.1,<4.25.4
Updates the requirements on [protobuf](https://github.com/protocolbuffers/protobuf) to permit the latest version.
<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/protocolbuffers/protobuf/commit/4a2aef570deb2bfb8927426558701e8bfc26f2a4"><code>4a2aef5</code></a> Updating version.json and repo version numbers to: 25.3</li>
<li><a href="https://github.com/protocolbuffers/protobuf/commit/7c6ba838806c6e737a01173ab37d129bfc1ccb22"><code>7c6ba83</code></a> Merge pull request <a href="https://redirect.github.com/protocolbuffers/protobuf/issues/15814">#15814</a> from protocolbuffers/cp-ruby-3.3</li>
<li><a href="https://github.com/protocolbuffers/protobuf/commit/25b1e8112079e6d463196fc137cc8d1925205426"><code>25b1e81</code></a> Update Ruby GHA to test against Ruby 3.3.</li>
<li><a href="https://github.com/protocolbuffers/protobuf/commit/70e459f9d43ef00ae1405eeb1d50d4bb61551d58"><code>70e459f</code></a> Merge pull request <a href="https://redirect.github.com/protocolbuffers/protobuf/issues/15802">#15802</a> from protocolbuffers/cp-25.x</li>
<li><a href="https://github.com/protocolbuffers/protobuf/commit/17ec19db930216d59eb8efdb0bbf6d1f5355f147"><code>17ec19d</code></a> Bump python version to 3.9 for gcloud 460.0.0</li>
<li><a href="https://github.com/protocolbuffers/protobuf/commit/9dc736d924f6839073028a4d806a3b85f01000cf"><code>9dc736d</code></a> [ObjC] Use a local to avoid warnings in 32bit builds.</li>
<li><a href="https://github.com/protocolbuffers/protobuf/commit/9d1bc1041be5fefa82e8c9823e79c263f6d8b2ac"><code>9d1bc10</code></a> [CPP] Add the privacy manifest to the C++ CocoaPod.</li>
<li><a href="https://github.com/protocolbuffers/protobuf/commit/cec08dc615df059d1a93fe03c4e617bba679fa69"><code>cec08dc</code></a> [ObjC] Add the privacy manifest to the ObjC CocoaPod.</li>
<li><a href="https://github.com/protocolbuffers/protobuf/commit/cf87faa29b57a304a0cfe5af4e9b7a5c659c62b4"><code>cf87faa</code></a> Add PrivacyInfo.xcprivacy (<a href="https://redirect.github.com/protocolbuffers/protobuf/issues/15557">#15557</a>)</li>
<li><a href="https://github.com/protocolbuffers/protobuf/commit/76d05d4cb9200c371c8894df21f37ba4060bdc8a"><code>76d05d4</code></a> remove use of mach_absolute_time (<a href="https://redirect.github.com/protocolbuffers/protobuf/issues/15554">#15554</a>)</li>
<li>Additional commits viewable in <a href="https://github.com/protocolbuffers/protobuf/compare/v4.22.0-rc1...v4.25.3">compare view</a></li>
</ul>
</details>
<br />
| 2019-07-10T18:45:14 |
|
pulp/pulpcore | 205 | pulp__pulpcore-205 | [
"4662"
] | dc2e85868b82d7217a3cd9c2c856ab3672928eac | diff --git a/pulpcore/app/models/task.py b/pulpcore/app/models/task.py
--- a/pulpcore/app/models/task.py
+++ b/pulpcore/app/models/task.py
@@ -143,7 +143,9 @@ def dirty_workers(self):
def with_reservations(self, resources):
"""
- Returns a worker with ANY of the reservations for resources specified by resource urls. This
+ Returns a worker with the resources reserved.
+
+ This worker may have ANY of the reservations for resources specified by resource urls. This
is useful when looking for a worker to queue work against as we don't care if it doesn't
have all the reservations as we can still try creating reservations for the additional
resources we need.
@@ -160,6 +162,18 @@ def with_reservations(self, resources):
"""
return self.filter(reservations__resource__in=resources).distinct().get()
+ def resource_managers(self):
+ """
+ Returns a queryset of resource managers.
+
+ Resource managers are identified by their name. Note that some of these may be offline.
+
+ Returns:
+ :class:`django.db.models.query.QuerySet`: A query set of the Worker objects which
+ which match the resource manager name.
+ """
+ return self.filter(name__startswith=TASKING_CONSTANTS.RESOURCE_MANAGER_WORKER_NAME)
+
class Worker(Model):
"""
diff --git a/pulpcore/tasking/tasks.py b/pulpcore/tasking/tasks.py
--- a/pulpcore/tasking/tasks.py
+++ b/pulpcore/tasking/tasks.py
@@ -83,13 +83,14 @@ def _queue_reserved_task(func, inner_task_id, resources, inner_args, inner_kwarg
time.sleep(0.25)
continue
else:
- task_status.state = TASK_STATES.RUNNING
- task_status.save()
+ rq_worker = util.get_current_worker()
+ worker = Worker.objects.get(name=rq_worker.name)
+ task_status.worker = worker
+ task_status.set_running()
q = Queue('resource-manager', connection=redis_conn, is_async=False)
q.enqueue(func, args=inner_args, kwargs=inner_kwargs, job_id=inner_task_id,
job_timeout=TASK_TIMEOUT, **options)
- task_status.state = TASK_STATES.COMPLETED
- task_status.save()
+ task_status.set_completed()
return
try:
diff --git a/pulpcore/tasking/util.py b/pulpcore/tasking/util.py
--- a/pulpcore/tasking/util.py
+++ b/pulpcore/tasking/util.py
@@ -4,7 +4,8 @@
from django.db import transaction
from django.urls import reverse
-from rq.job import Job
+from rq.job import Job, get_current_job
+from rq.worker import Worker
from pulpcore.app.models import Task
from pulpcore.app.util import get_view_name_for_model
@@ -92,3 +93,17 @@ def get_url(model):
str: The path component of the resource url
"""
return reverse(get_view_name_for_model(model, 'detail'), args=[model.pk])
+
+
+def get_current_worker():
+ """
+ Get the rq worker assigned to the current job
+
+ Returns:
+ class:`rq.worker.Worker`: The worker assigned to the current job
+ """
+ for worker in Worker.all():
+ if worker.get_current_job() == get_current_job():
+ return worker
+
+ return None
| Update CI files for branch 3.23
[noissue]
| 2019-07-10T21:09:21 |
||
pulp/pulpcore | 208 | pulp__pulpcore-208 | [
"4945"
] | 305038ac91b4d730ca57070abe34785474ecd9fa | diff --git a/pulpcore/app/settings.py b/pulpcore/app/settings.py
--- a/pulpcore/app/settings.py
+++ b/pulpcore/app/settings.py
@@ -201,7 +201,7 @@
}
}
-CONTENT_HOST = None
+CONTENT_HOST = ''
CONTENT_PATH_PREFIX = '/pulp/content/'
CONTENT_APP_TTL = 30
| Update CI files for branch 3.21
| 2019-07-12T19:47:09 |
||
pulp/pulpcore | 210 | pulp__pulpcore-210 | [
"5092"
] | adeb2a7f073b1a6cbb3c72c6b6a23d55e1e7386e | diff --git a/pulpcore/app/models/upload.py b/pulpcore/app/models/upload.py
--- a/pulpcore/app/models/upload.py
+++ b/pulpcore/app/models/upload.py
@@ -49,6 +49,17 @@ def sha256(self, rehash=False):
self._sha256 = sha256.hexdigest()
return self._sha256
+ def delete(self, *args, **kwargs):
+ """
+ Deletes Upload model and the file associated with the model
+
+ Args:
+ args (list): list of positional arguments for Model.delete()
+ kwargs (dict): dictionary of keyword arguments to pass to Model.delete()
+ """
+ super().delete(*args, **kwargs)
+ self.file.delete(save=False)
+
class UploadChunk(Model):
"""
| [noissue]: Update django-lifecycle requirement from <=1.1.2,>=1.0 to >=1.0,<=1.2.0
Updates the requirements on [django-lifecycle](https://github.com/rsinger86/django-lifecycle) to permit the latest version.
<details>
<summary>Release notes</summary>
<p><em>Sourced from <a href="https://github.com/rsinger86/django-lifecycle/releases">django-lifecycle's releases</a>.</em></p>
<blockquote>
<h2>1.2.0</h2>
<h2>What's Changed</h2>
<ul>
<li>Fix <code>has_changed</code> and <code>changed_to</code> when working with mutable data (i.e.: <code>dict</code>s) by <a href="https://github.com/AlaaNour94"><code>@AlaaNour94</code></a> in <a href="https://redirect.github.com/rsinger86/django-lifecycle/pull/151">rsinger86/django-lifecycle#151</a></li>
<li>Hook condition can be now built using some predefined conditions and/or with custom ones by <a href="https://github.com/EnriqueSoria"><code>@EnriqueSoria</code></a> in <a href="https://redirect.github.com/rsinger86/django-lifecycle/pull/150">rsinger86/django-lifecycle#150</a></li>
</ul>
<h2>New Contributors</h2>
<ul>
<li><a href="https://github.com/AlaaNour94"><code>@AlaaNour94</code></a> made their first contribution in <a href="https://redirect.github.com/rsinger86/django-lifecycle/pull/151">rsinger86/django-lifecycle#151</a></li>
</ul>
<p><strong>Full Changelog</strong>: <a href="https://github.com/rsinger86/django-lifecycle/compare/1.1.2...1.2.0">https://github.com/rsinger86/django-lifecycle/compare/1.1.2...1.2.0</a></p>
</blockquote>
</details>
<details>
<summary>Changelog</summary>
<p><em>Sourced from <a href="https://github.com/rsinger86/django-lifecycle/blob/master/CHANGELOG.md">django-lifecycle's changelog</a>.</em></p>
<blockquote>
<h1>1.2.0 (February 2024)</h1>
<ul>
<li>Hook condition can be now built using some predefined conditions and/or with custom ones.</li>
<li>Fix <code>has_changed</code> and <code>changed_to</code> when working with mutable data (i.e.: <code>dict</code>s). Thanks <a href="https://github.com/AlaaNour94"><code>@AlaaNour94</code></a></li>
</ul>
<h1>1.1.2 (November 2023)</h1>
<ul>
<li>Fix: Hooks were failing if some watched field (those in <code>when=""</code> or <code>when_any=[...]</code>) was a <code>GenericForeignKey</code></li>
</ul>
<h2>1.1.1 (November 2023)</h2>
<ul>
<li>Fix: Include missing <code>django_lifecycle_checks</code> into python package</li>
</ul>
<h2>1.1.0 (November 2023)</h2>
<ul>
<li>Drop support for Django < 2.2.</li>
<li>Confirm support for Django 5.0. Thanks <a href="https://github.com/adamchainz"><code>@adamchainz</code></a>!</li>
<li>Remove urlman from required packages. Thanks <a href="https://github.com/DmytroLitvinov"><code>@DmytroLitvinov</code></a>!</li>
<li>Add an optional Django check to avoid errors by not inheriting from <code>LifecycleModelMixin</code> (or <code>LifecycleModel</code>)</li>
</ul>
<h2>1.0.2 (September 2023)</h2>
<ul>
<li>Correct package info to note that Django 4.0, 4.1, and 4.2 are supported.</li>
</ul>
<h2>1.0.1 (August 2023)</h2>
<ul>
<li>Initial state gets reset using <code>transaction.on_commit()</code>, fixing the <code>has_changed()</code> and <code>initial_value()</code> methods for on_commit hooks. Thanks <a href="https://github.com/alb3rto269"><code>@alb3rto269</code></a>!</li>
</ul>
<h2>1.0.0 (May 2022)</h2>
<ul>
<li>Drops Python 3.6 support</li>
<li>Adds <code>priority</code> hook kwarg to control the order in which hooked methods fire. Thanks <a href="https://github.com/EnriqueSoria"><code>@EnriqueSoria</code></a>!</li>
<li>Internal cleanup/refactoring. Thanks <a href="https://github.com/EnriqueSoria"><code>@EnriqueSoria</code></a>!</li>
</ul>
<h2>0.9.6 (February 2022)</h2>
<ul>
<li>Adds missing <code>packaging</code> to <code>install_requires</code>. Thanks <a href="https://github.com/mikedep333"><code>@mikedep333</code></a>!</li>
</ul>
<h2>0.9.5 (February 2022)</h2>
<ul>
<li>Makes the <code>has_changed</code>, <code>changes_to</code> conditions depend on whether the field in question was included in the SQL update/insert statement by checking
the <code>update_fields</code> argument passed to save.</li>
</ul>
<h2>0.9.4 (February 2022)</h2>
<ul>
<li>Adds optional <a href="https://github.com/hook"><code>@hook</code></a> <code>on_commit</code> argument for executing hooks when the database transaction is committed. Thanks <a href="https://github.com/amcclosky"><code>@amcclosky</code></a>!</li>
</ul>
<h2>0.9.3 (October 2021)</h2>
<!-- raw HTML omitted -->
</blockquote>
<p>... (truncated)</p>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/rsinger86/django-lifecycle/commit/4cc4c0b8f5688f87e520a949350519619311815a"><code>4cc4c0b</code></a> chore: Prepare release 1.2.0</li>
<li><a href="https://github.com/rsinger86/django-lifecycle/commit/2ba92cec9b54ec89a29cf8870b81acb7c9c41f05"><code>2ba92ce</code></a> Merge pull request <a href="https://redirect.github.com/rsinger86/django-lifecycle/issues/150">#150</a> from rsinger86/feature/generalize-conditions</li>
<li><a href="https://github.com/rsinger86/django-lifecycle/commit/b8b6336e19de4a6e5eb8ad6e52dda0257e57ce94"><code>b8b6336</code></a> refactor: Rename conditions as suggested by <a href="https://github.com/mateocpdev"><code>@mateocpdev</code></a></li>
<li><a href="https://github.com/rsinger86/django-lifecycle/commit/bca05776ee77cf61f16d4bc8fc145d76bc276b01"><code>bca0577</code></a> docs: Update docs to add new way to add conditions</li>
<li><a href="https://github.com/rsinger86/django-lifecycle/commit/f2c6b803c5d4445d00c07ffeec7e5c73f75d0f6a"><code>f2c6b80</code></a> feat: Allow to specify both condition or legacy params</li>
<li><a href="https://github.com/rsinger86/django-lifecycle/commit/b6581622b49a906b1aab6ae07c80a29d61e77d2d"><code>b658162</code></a> feat: Make conditions chainable through <code>&</code> and <code>|</code></li>
<li><a href="https://github.com/rsinger86/django-lifecycle/commit/6eb62e7ee5dbb8b2bca9c4c3f8eb0086765c4267"><code>6eb62e7</code></a> feat: Replicate conditions tests from LifecycleMixinTests using new conditions</li>
<li><a href="https://github.com/rsinger86/django-lifecycle/commit/1b2ccb86a583f729ff916f473562814c30b446aa"><code>1b2ccb8</code></a> Merge remote-tracking branch 'origin/master' into feature/generalize-conditions</li>
<li><a href="https://github.com/rsinger86/django-lifecycle/commit/2d1b46672b7f660012dbff08a7fe29f6d10094ed"><code>2d1b466</code></a> Merge pull request <a href="https://redirect.github.com/rsinger86/django-lifecycle/issues/151">#151</a> from AlaaNour94/capture_mutable_fields_changes</li>
<li><a href="https://github.com/rsinger86/django-lifecycle/commit/98b69493e3914299d67a7eb8b05146bcfe20ea53"><code>98b6949</code></a> capture mutable fields changes</li>
<li>Additional commits viewable in <a href="https://github.com/rsinger86/django-lifecycle/compare/1.0.0...1.2.0">compare view</a></li>
</ul>
</details>
<br />
| 2019-07-15T18:41:06 |
||
pulp/pulpcore | 213 | pulp__pulpcore-213 | [
"5125"
] | ce2a9647b713a8f910b9324b460efaa8d48e8e25 | diff --git a/pulpcore/app/openapigenerator.py b/pulpcore/app/openapigenerator.py
--- a/pulpcore/app/openapigenerator.py
+++ b/pulpcore/app/openapigenerator.py
@@ -255,10 +255,11 @@ def get_operation(self, operation_keys):
produces = self.get_produces()
multipart = ['multipart/form-data', 'application/x-www-form-urlencoded']
- if 'file' in [param['type'] for param in self.get_request_body_parameters(multipart)]:
- # automatically set the media type to form data if there's a file
- # needed due to https://github.com/axnsan12/drf-yasg/issues/386
- consumes = multipart
+ if self.method != 'GET':
+ if 'file' in [param['type'] for param in self.get_request_body_parameters(multipart)]:
+ # automatically set the media type to form data if there's a file
+ # needed due to https://github.com/axnsan12/drf-yasg/issues/386
+ consumes = multipart
body = self.get_request_body_parameters(consumes)
query = self.get_query_parameters()
diff --git a/pulpcore/app/viewsets/upload.py b/pulpcore/app/viewsets/upload.py
--- a/pulpcore/app/viewsets/upload.py
+++ b/pulpcore/app/viewsets/upload.py
@@ -4,7 +4,7 @@
from drf_yasg.utils import swagger_auto_schema
from drf_yasg.openapi import Parameter
from rest_framework import mixins, serializers
-from rest_framework.decorators import detail_route
+from rest_framework.decorators import action
from rest_framework.response import Response
from pulpcore.app import tasks
@@ -94,7 +94,7 @@ def update(self, request, pk=None):
@swagger_auto_schema(operation_summary="Finish an Upload",
request_body=UploadCommitSerializer,
responses={202: AsyncOperationResponseSerializer})
- @detail_route(methods=('post',))
+ @action(detail=True, methods=['post'])
def commit(self, request, pk):
"""
Generates a Task to commit the upload and create an artifact
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@
'coreapi',
'Django~=2.2', # LTS version, switch only if we have a compelling reason to
'django-filter',
- 'djangorestframework<3.10',
+ 'djangorestframework',
'djangorestframework-queryfields',
'drf-nested-routers',
'drf-yasg',
| Update CI files for branch 3.21
| 2019-07-16T21:50:58 |
||
pulp/pulpcore | 216 | pulp__pulpcore-216 | [
"4982"
] | d73977474166241d7613d1e77117e2df9308d208 | diff --git a/pulpcore/app/models/upload.py b/pulpcore/app/models/upload.py
--- a/pulpcore/app/models/upload.py
+++ b/pulpcore/app/models/upload.py
@@ -3,6 +3,7 @@
from django.core.files.base import ContentFile
from django.db import models
+from rest_framework import serializers
from pulpcore.app.models import Model
@@ -22,7 +23,7 @@ class Upload(Model):
size = models.BigIntegerField()
completed = models.DateTimeField(null=True)
- def append(self, chunk, offset):
+ def append(self, chunk, offset, sha256=None):
"""
Append a chunk to an upload.
@@ -33,9 +34,14 @@ def append(self, chunk, offset):
if not self.file:
self.file.save(os.path.join('upload', str(self.pk)), ContentFile(''))
+ chunk_read = chunk.read()
+ current_sha256 = hashlib.sha256(chunk_read).hexdigest()
+ if sha256 and sha256 != current_sha256:
+ raise serializers.ValidationError("Checksum does not match chunk upload.")
+
with self.file.open(mode='r+b') as file:
file.seek(offset)
- file.write(chunk.read())
+ file.write(chunk_read)
self.chunks.create(offset=offset, size=len(chunk))
diff --git a/pulpcore/app/serializers/upload.py b/pulpcore/app/serializers/upload.py
--- a/pulpcore/app/serializers/upload.py
+++ b/pulpcore/app/serializers/upload.py
@@ -30,6 +30,12 @@ class UploadChunkSerializer(serializers.Serializer):
help_text=_("A chunk of the uploaded file."),
)
+ sha256 = serializers.CharField(
+ help_text=_("The SHA-256 checksum of the chunk if available."),
+ required=False,
+ allow_null=True,
+ )
+
class UploadCommitSerializer(serializers.Serializer):
sha256 = serializers.CharField(
diff --git a/pulpcore/app/viewsets/upload.py b/pulpcore/app/viewsets/upload.py
--- a/pulpcore/app/viewsets/upload.py
+++ b/pulpcore/app/viewsets/upload.py
@@ -76,7 +76,8 @@ def update(self, request, pk=None):
if end > upload.size - 1:
raise serializers.ValidationError(_("End byte is greater than upload size."))
- upload.append(chunk, start)
+ sha256 = request.data.get('sha256')
+ upload.append(chunk, start, sha256)
serializer = UploadSerializer(upload, context={'request': request})
return Response(serializer.data)
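A client-side sketch of how a chunk upload could pass the new optional `sha256` value alongside the `Content-Range` header; the host, credentials, href, and sizes below are assumptions for illustration only:

```python
import hashlib

import requests

upload_href = "http://localhost:24817/pulp/api/v3/uploads/<uuid>/"  # assumed upload href
total_size = 10485760        # assumed total size of the file being uploaded, in bytes
chunk = b"first chunk data"  # bytes of the chunk being sent

requests.put(
    upload_href,
    data={"sha256": hashlib.sha256(chunk).hexdigest()},  # optional per-chunk checksum
    files={"file": chunk},
    headers={"Content-Range": f"bytes 0-{len(chunk) - 1}/{total_size}"},
    auth=("admin", "password"),
)
```

If the supplied checksum does not match the received bytes, the server now rejects the chunk with a validation error, as the model change above shows.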
| diff --git a/pulpcore/tests/functional/api/test_upload.py b/pulpcore/tests/functional/api/test_upload.py
--- a/pulpcore/tests/functional/api/test_upload.py
+++ b/pulpcore/tests/functional/api/test_upload.py
@@ -28,6 +28,7 @@ class ChunkedUploadTestCase(unittest.TestCase):
* `Pulp #4197 <https://pulp.plan.io/issues/4197>`_
* `Pulp #5092 <https://pulp.plan.io/issues/5092>`_
+ * `Pulp #4982 <https://pulp.plan.io/issues/4982>`_
"""
@classmethod
@@ -42,10 +43,27 @@ def setUpClass(cls):
cls.file_sha256 = hashlib.sha256(cls.file).hexdigest()
cls.size_file = len(cls.file)
- cls.first_chunk = http_get(FILE_CHUNKED_PART_1_URL)
- cls.second_chunk = http_get(FILE_CHUNKED_PART_2_URL)
+ first_chunk = http_get(FILE_CHUNKED_PART_1_URL)
+ header_first_chunk = {
+ 'Content-Range': 'bytes 0-{}/{}'.format(
+ len(first_chunk) - 1, cls.size_file
+ )
+ }
- def test_create_artifact(self):
+ second_chunk = http_get(FILE_CHUNKED_PART_2_URL)
+ header_second_chunk = {
+ 'Content-Range': 'bytes {}-{}/{}'.format(
+ len(first_chunk), cls.size_file - 1, cls.size_file
+ )
+ }
+
+ cls.chunked_data = [
+ [first_chunk, header_first_chunk],
+ [second_chunk, header_second_chunk],
+ ]
+ shuffle(cls.chunked_data)
+
+ def test_create_artifact_without_checksum(self):
"""Test creation of artifact using upload of files in chunks."""
upload_request = self.upload_chunks()
@@ -59,6 +77,52 @@ def test_create_artifact(self):
self.assertEqual(artifact['sha256'], self.file_sha256, artifact)
+ def test_create_artifact_passing_checksum(self):
+ """Test creation of artifact using upload of files in chunks passing checksum."""
+ upload_request = self.client.post(
+ UPLOAD_PATH, {'size': self.size_file}
+ )
+
+ for data in self.chunked_data:
+ self.client.put(
+ upload_request['_href'],
+ data={'sha256': hashlib.sha256(data[0]).hexdigest()},
+ files={'file': data[0]},
+ headers=data[1],
+ )
+
+ self.client.put(
+ urljoin(upload_request['_href'], 'commit/'),
+ data={'sha256': self.file_sha256},
+ )
+
+ response = self.client.post(
+ ARTIFACTS_PATH, {'upload': upload_request['_href']}
+ )
+
+ artifact = self.client.get(response['_href'])
+ self.addCleanup(self.client.delete, artifact['_href'])
+
+ self.assertEqual(artifact['sha256'], self.file_sha256, artifact)
+
+ def test_upload_chunk_wrong_checksum(self):
+ """Test creation of artifact using upload of files in chunks passing wrong checksum."""
+ self.client.response_handler = api.echo_handler
+
+ upload_request = self.client.post(
+ UPLOAD_PATH, {'size': self.size_file}
+ )
+
+ for data in self.chunked_data:
+ response = self.client.put(
+ upload_request.json()['_href'],
+ data={'sha256': "WRONG CHECKSUM"},
+ files={'file': data[0]},
+ headers=data[1],
+ )
+
+ assert response.status_code == 400
+
def test_delete_upload(self):
"""Test a deletion of an upload using upload of files in chunks."""
@@ -77,29 +141,11 @@ def test_delete_upload(self):
self.cli_client.run(cmd, sudo=True)
def upload_chunks(self):
- header_first_chunk = {
- 'Content-Range': 'bytes 0-{}/{}'.format(
- len(self.first_chunk) - 1, self.size_file
- )
- }
-
- header_second_chunk = {
- 'Content-Range': 'bytes {}-{}/{}'.format(
- len(self.first_chunk), self.size_file - 1, self.size_file
- )
- }
-
- chunked_data = [
- [self.first_chunk, header_first_chunk],
- [self.second_chunk, header_second_chunk],
- ]
- shuffle(chunked_data)
-
upload_request = self.client.post(
UPLOAD_PATH, {'size': self.size_file}
)
- for data in chunked_data:
+ for data in self.chunked_data:
self.client.put(
upload_request['_href'],
files={'file': data[0]},
| Update CI files for branch 3.18
| 2019-07-17T21:02:26 |
|
pulp/pulpcore | 222 | pulp__pulpcore-222 | [
"5150"
] | 6865f683e5e37fb55fb50e7d2eac1c95cee5ffb5 | diff --git a/pulpcore/app/serializers/__init__.py b/pulpcore/app/serializers/__init__.py
--- a/pulpcore/app/serializers/__init__.py
+++ b/pulpcore/app/serializers/__init__.py
@@ -51,4 +51,9 @@
TaskSerializer,
WorkerSerializer,
)
-from .upload import UploadChunkSerializer, UploadCommitSerializer, UploadSerializer # noqa
+from .upload import ( # noqa
+ UploadChunkSerializer,
+ UploadCommitSerializer,
+ UploadSerializer,
+ UploadDetailSerializer
+)
diff --git a/pulpcore/app/serializers/upload.py b/pulpcore/app/serializers/upload.py
--- a/pulpcore/app/serializers/upload.py
+++ b/pulpcore/app/serializers/upload.py
@@ -5,6 +5,24 @@
from pulpcore.app.serializers import base
+class UploadChunkSerializer(serializers.Serializer):
+ file = serializers.FileField(
+ help_text=_("A chunk of the uploaded file."),
+ )
+
+ sha256 = serializers.CharField(
+ help_text=_("The SHA-256 checksum of the chunk if available."),
+ required=False,
+ allow_null=True,
+ )
+
+
+class UploadChunkDetailSerializer(base.ModelSerializer):
+ class Meta:
+ model = models.UploadChunk
+ fields = ('offset', 'size')
+
+
class UploadSerializer(base.ModelSerializer):
"""Serializer for chunked uploads."""
_href = base.IdentityField(
@@ -25,16 +43,15 @@ class Meta:
fields = base.ModelSerializer.Meta.fields + ('size', 'completed')
-class UploadChunkSerializer(serializers.Serializer):
- file = serializers.FileField(
- help_text=_("A chunk of the uploaded file."),
+class UploadDetailSerializer(UploadSerializer):
+ chunks = UploadChunkDetailSerializer(
+ many=True,
+ read_only=True,
)
- sha256 = serializers.CharField(
- help_text=_("The SHA-256 checksum of the chunk if available."),
- required=False,
- allow_null=True,
- )
+ class Meta:
+ model = models.Upload
+ fields = base.ModelSerializer.Meta.fields + ('size', 'completed', 'chunks')
class UploadCommitSerializer(serializers.Serializer):
diff --git a/pulpcore/app/viewsets/upload.py b/pulpcore/app/viewsets/upload.py
--- a/pulpcore/app/viewsets/upload.py
+++ b/pulpcore/app/viewsets/upload.py
@@ -9,7 +9,12 @@
from rest_framework.response import Response
from pulpcore.app.models import Upload
-from pulpcore.app.serializers import UploadChunkSerializer, UploadCommitSerializer, UploadSerializer
+from pulpcore.app.serializers import (
+ UploadChunkSerializer,
+ UploadCommitSerializer,
+ UploadSerializer,
+ UploadDetailSerializer
+)
from pulpcore.app.viewsets import BaseFilterSet
from pulpcore.app.viewsets.base import DATETIME_FILTER_OPTIONS, NamedModelViewSet
from pulpcore.app.viewsets.custom_filters import IsoDateTimeFilter
@@ -34,7 +39,6 @@ class UploadViewSet(NamedModelViewSet,
"""View for chunked uploads."""
endpoint_name = 'uploads'
queryset = Upload.objects.all()
- serializer_class = UploadSerializer
filterset_class = UploadFilter
http_method_names = ['get', 'post', 'head', 'put', 'delete'] # remove PATCH
@@ -45,6 +49,11 @@ class UploadViewSet(NamedModelViewSet,
description='The Content-Range header specifies the location of the file chunk '
'within the file.')
+ def get_serializer_class(self):
+ if self.action == 'retrieve':
+ return UploadDetailSerializer
+ return UploadSerializer
+
@swagger_auto_schema(operation_summary="Upload a file chunk",
request_body=UploadChunkSerializer,
manual_parameters=[content_range_parameter],
| diff --git a/pulpcore/tests/functional/api/test_upload.py b/pulpcore/tests/functional/api/test_upload.py
--- a/pulpcore/tests/functional/api/test_upload.py
+++ b/pulpcore/tests/functional/api/test_upload.py
@@ -29,6 +29,7 @@ class ChunkedUploadTestCase(unittest.TestCase):
* `Pulp #4197 <https://pulp.plan.io/issues/4197>`_
* `Pulp #5092 <https://pulp.plan.io/issues/5092>`_
* `Pulp #4982 <https://pulp.plan.io/issues/4982>`_
+ * `Pulp #5150 <https://pulp.plan.io/issues/5150>`_
"""
@classmethod
@@ -123,6 +124,41 @@ def test_upload_chunk_wrong_checksum(self):
assert response.status_code == 400
+ def test_upload_response(self):
+ """Test upload responses when creating an upload and uploading chunks."""
+ self.client.response_handler = api.echo_handler
+
+ upload_request = self.client.post(
+ UPLOAD_PATH, {'size': self.size_file}
+ )
+
+ expected_keys = ['_href', '_created', 'size', 'completed']
+
+ self.assertEquals([*upload_request.json()], expected_keys)
+
+ for data in self.chunked_data:
+ response = self.client.put(
+ upload_request.json()['_href'],
+ files={'file': data[0]},
+ headers=data[1],
+ )
+
+ self.assertEquals([*response.json()], expected_keys)
+
+ response = self.client.get(upload_request.json()['_href'])
+
+ expected_keys = ['_href', '_created', 'size', 'completed', 'chunks']
+
+ self.assertEquals([*response.json()], expected_keys)
+
+ expected_chunks = [
+ {'offset': 0, 'size': 6291456},
+ {'offset': 6291456, 'size': 4194304}
+ ]
+
+ sorted_chunks_response = sorted(response.json()['chunks'], key=lambda i: i['offset'])
+ self.assertEquals(sorted_chunks_response, expected_chunks)
+
def test_delete_upload(self):
"""Test a deletion of an upload using upload of files in chunks."""
| [noissue]: Update djangorestframework requirement from <=3.15.0,>=3.14.0 to >=3.14.0,<=3.15.1
Updates the requirements on [djangorestframework](https://github.com/encode/django-rest-framework) to permit the latest version.
<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/encode/django-rest-framework/commit/328591693d7a3e734ca5a740dddf85e11ccd208f"><code>3285916</code></a> Version 3.15.1 (<a href="https://redirect.github.com/encode/django-rest-framework/issues/9339">#9339</a>)</li>
<li><a href="https://github.com/encode/django-rest-framework/commit/eb361d289deb4bc99ad2ebab9c5f50a92de40339"><code>eb361d2</code></a> SearchFilter.get_search_terms returns list. (<a href="https://redirect.github.com/encode/django-rest-framework/issues/9338">#9338</a>)</li>
<li><a href="https://github.com/encode/django-rest-framework/commit/400b4c54419c5c88542ddd0b97219ad4fa8ee29a"><code>400b4c5</code></a> Revert "Fix NamespaceVersioning ignoring DEFAULT_VERSION on non-None namespac...</li>
<li><a href="https://github.com/encode/django-rest-framework/commit/4ef3aaf0ad0e31efe13d3c503a7099915a7c8875"><code>4ef3aaf</code></a> Revert <a href="https://redirect.github.com/encode/django-rest-framework/issues/9030">#9030</a> (<a href="https://redirect.github.com/encode/django-rest-framework/issues/9333">#9333</a>)</li>
<li><a href="https://github.com/encode/django-rest-framework/commit/4f10c4e43ee57f4a2e387e0c8d44d28d21a3621c"><code>4f10c4e</code></a> Revert "Fix Respect <code>can_read_model</code> permission in DjangoModelPermissions (<a href="https://redirect.github.com/encode/django-rest-framework/issues/8">#8</a>...</li>
<li><a href="https://github.com/encode/django-rest-framework/commit/a4d58077a0aca89b82f63ab33ebb36b16bf26d4a"><code>a4d5807</code></a> Revert "feat: Add some changes to ValidationError to support django style vad...</li>
<li><a href="https://github.com/encode/django-rest-framework/commit/da78a147f2f3c5820396f9fcd9372b2f5b9bbd18"><code>da78a14</code></a> Revert "Re-prefetch related objects after updating (<a href="https://redirect.github.com/encode/django-rest-framework/issues/8043">#8043</a>)" (<a href="https://redirect.github.com/encode/django-rest-framework/issues/9327">#9327</a>)</li>
<li><a href="https://github.com/encode/django-rest-framework/commit/0e4ed816279a8b9332544192ad90f7324f49cd62"><code>0e4ed81</code></a> Revert "feat: Add some changes to ValidationError to support django style vad...</li>
<li><a href="https://github.com/encode/django-rest-framework/commit/56a5b354d0f82ea7e0df5cc4aa5187fe8450cbcb"><code>56a5b35</code></a> Add drf-sendables to third-party-packages.md (<a href="https://redirect.github.com/encode/django-rest-framework/issues/9261">#9261</a>)</li>
<li><a href="https://github.com/encode/django-rest-framework/commit/d7c8dcfc7ee33edff0383688f89e7c1f6b474f6d"><code>d7c8dcf</code></a> Revert PR that adds autocomplete_fields to TokenAdmin, as this break some use...</li>
<li>Additional commits viewable in <a href="https://github.com/encode/django-rest-framework/compare/3.14.0...3.15.1">compare view</a></li>
</ul>
</details>

| 2019-07-19T19:28:49 |
|
pulp/pulpcore | 228 | pulp__pulpcore-228 | [
"4722"
] | e0f07972fa67c0ddf7e8edf28ab8ec616d831898 | diff --git a/pulpcore/app/serializers/task.py b/pulpcore/app/serializers/task.py
--- a/pulpcore/app/serializers/task.py
+++ b/pulpcore/app/serializers/task.py
@@ -23,9 +23,11 @@ def to_representation(self, data):
return None
except AttributeError:
pass
- request = self.context['request']
viewset = get_viewset_for_model(data.content_object)
- serializer = viewset.serializer_class(data.content_object, context={'request': request})
+
+ # serializer contains all serialized fields because we are passing
+ # 'None' to the request's context
+ serializer = viewset.serializer_class(data.content_object, context={'request': None})
return serializer.data.get('_href')
class Meta:
| [3.28] Fix files being deleted during file:// syncs
closes #4681
| 2019-07-23T15:35:38 |
||
pulp/pulpcore | 229 | pulp__pulpcore-229 | [
"4931"
] | dd3ba8d269ae8fbaa0a7579e7e15971db7e49a13 | diff --git a/pulpcore/app/viewsets/custom_filters.py b/pulpcore/app/viewsets/custom_filters.py
--- a/pulpcore/app/viewsets/custom_filters.py
+++ b/pulpcore/app/viewsets/custom_filters.py
@@ -41,6 +41,33 @@ def filter(self, qs, value):
return qs.filter(reserved_resources_record__resource=value)
+class CreatedResourcesFilter(Filter):
+ """
+ Filter used to get tasks by created resources.
+
+ Created resources contain a reference to newly created repository
+ versions, distributions, etc.
+ """
+
+ def filter(self, qs, value):
+ """
+ Args:
+ qs (django.db.models.query.QuerySet): The QuerySet to filter
+ value (string): The content href to filter by
+
+ Returns:
+ Queryset of the content contained within the specified created resource
+ """
+
+ if value is None:
+ return qs
+
+ match = resolve(value)
+ resource = NamedModelViewSet.get_resource(value, match.func.cls.queryset.model)
+
+ return qs.filter(created_resources__object_id=resource.pk)
+
+
class HyperlinkRelatedFilter(Filter):
"""
Enables a user to filter by a foreign key using that FK's href
diff --git a/pulpcore/app/viewsets/task.py b/pulpcore/app/viewsets/task.py
--- a/pulpcore/app/viewsets/task.py
+++ b/pulpcore/app/viewsets/task.py
@@ -20,6 +20,7 @@
HyperlinkRelatedFilter,
IsoDateTimeFilter,
ReservedResourcesFilter,
+ CreatedResourcesFilter,
)
from pulpcore.constants import TASK_INCOMPLETE_STATES
from pulpcore.tasking.util import cancel as cancel_task
@@ -33,6 +34,7 @@ class TaskFilter(BaseFilterSet):
finished_at = IsoDateTimeFilter(field_name='finished_at')
parent = HyperlinkRelatedFilter()
reserved_resources_record = ReservedResourcesFilter()
+ created_resources = CreatedResourcesFilter()
class Meta:
model = Task
@@ -43,7 +45,8 @@ class Meta:
'started_at': DATETIME_FILTER_OPTIONS,
'finished_at': DATETIME_FILTER_OPTIONS,
'parent': ['exact'],
- 'reserved_resources_record': ['exact']
+ 'reserved_resources_record': ['exact'],
+ 'created_resources': ['exact']
}
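With the new filter registered, tasks can be looked up by the href of a resource they created. A sketch of such a query (host, credentials, and the href value are assumptions):

```python
import requests

created_href = "/pulp/api/v3/repositories/<uuid>/versions/1/"  # assumed created-resource href

tasks = requests.get(
    "http://localhost:24817/pulp/api/v3/tasks/",
    params={"created_resources": created_href},
    auth=("admin", "password"),
).json()
```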
| diff --git a/pulpcore/tests/functional/api/test_tasks.py b/pulpcore/tests/functional/api/test_tasks.py
--- a/pulpcore/tests/functional/api/test_tasks.py
+++ b/pulpcore/tests/functional/api/test_tasks.py
@@ -148,7 +148,7 @@ def filter_tasks(self, criteria):
class FilterTaskCreatedResourcesTestCase(unittest.TestCase):
- """Perform filtering over task resources.
+ """Perform filtering over the task's field created_resources.
This test targets the following issue:
@@ -220,3 +220,44 @@ def test_02_filter_tasks_by_non_existing_resources(self):
}
with self.assertRaises(HTTPError):
self.client.get(TASKS_PATH, params=filter_params)
+
+
+class FilterTaskCreatedResourcesContentTestCase(unittest.TestCase):
+ """Perform filtering for contents of created resources.
+
+ This test targets the following issue:
+
+ * `Pulp #4931 <https://pulp.plan.io/issues/4931>`_
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """Create class-wide variables."""
+ cls.client = api.Client(config.get_config(), api.page_handler)
+
+ cls.repository = cls.client.post(REPO_PATH, gen_repo())
+ response = cls.client.post(cls.repository['_versions_href'])
+ cls.task = cls.client.get(response['task'])
+
+ @classmethod
+ def tearDownClass(cls):
+ """Clean created resources."""
+ cls.client.delete(cls.repository['_href'])
+ cls.client.delete(cls.task['_href'])
+
+ def test_01_filter_tasks_by_created_resources(self):
+ """Filter all tasks by a particular created resource."""
+ filter_params = {
+ 'created_resources': self.task['created_resources'][0]
+ }
+ results = self.client.get(TASKS_PATH, params=filter_params)
+ self.assertEqual(len(results), 1, results)
+ self.assertEqual(self.task, results[0], results)
+
+ def test_02_filter_tasks_by_non_existing_resources(self):
+ """Filter all tasks by a non-existing reserved resource."""
+ filter_params = {
+ 'created_resources': 'a_resource_should_be_never_named_like_this'
+ }
+ with self.assertRaises(HTTPError):
+ self.client.get(TASKS_PATH, params=filter_params)
| Bump minor version
[noissue]
| 2019-07-23T15:45:06 |
|
pulp/pulpcore | 231 | pulp__pulpcore-231 | [
"5182"
] | cf5badac304e058226c58433058a65c272b53c14 | diff --git a/pulpcore/app/viewsets/custom_filters.py b/pulpcore/app/viewsets/custom_filters.py
--- a/pulpcore/app/viewsets/custom_filters.py
+++ b/pulpcore/app/viewsets/custom_filters.py
@@ -6,7 +6,7 @@
from uuid import UUID
from django.urls import Resolver404, resolve
-from django_filters import DateTimeFilter, Filter
+from django_filters import BaseInFilter, CharFilter, DateTimeFilter, Filter
from django_filters.fields import IsoDateTimeField
from rest_framework import serializers
@@ -190,3 +190,7 @@ def filter(self, qs, value):
repo_version = self.get_repository_version(value)
return qs.filter(pk__in=repo_version.removed())
+
+
+class CharInFilter(BaseInFilter, CharFilter):
+ pass
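`CharInFilter` combines django-filter's `BaseInFilter` and `CharFilter`, giving a comma-separated "in" lookup. A hedged sketch of how a FilterSet might use it (the model-agnostic filterset and field name are made up for illustration):

```python
from django_filters import BaseInFilter, CharFilter, FilterSet


class CharInFilter(BaseInFilter, CharFilter):
    # Same two-line definition as in the patch above.
    pass


class ExampleFilterSet(FilterSet):
    """Hypothetical filterset: allows queries such as ?name__in=foo,bar,baz"""

    name__in = CharInFilter(field_name="name", lookup_expr="in")
```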
| Replace aioredis with redis-py
closes #3574
https://github.com/pulp/pulpcore/issues/3574
(cherry picked from commit 099e2eabc86e4bfe944c069d616cd172e1617b7e)
| 2019-07-24T13:41:27 |
||
pulp/pulpcore | 236 | pulp__pulpcore-236 | [
"4910",
"4910"
] | 121ec18f56e280a37e0557a8f6428a540f35ee1a | diff --git a/pulpcore/app/serializers/publication.py b/pulpcore/app/serializers/publication.py
--- a/pulpcore/app/serializers/publication.py
+++ b/pulpcore/app/serializers/publication.py
@@ -220,9 +220,23 @@ class Meta:
def validate(self, data):
super().validate(data)
- if 'repository' in data and 'repository_version' in data:
+ repository_in_data = 'repository' in data
+ repository_version_in_data = 'repository_version' in data
+ repository_in_instance = self.instance.repository if self.instance else None
+ repository_version_in_instance = self.instance.repository_version if self.instance else None
+
+ if repository_in_data and repository_version_in_data:
+ error = True
+ elif repository_in_data and repository_version_in_instance:
+ error = True
+ elif repository_in_instance and repository_version_in_data:
+ error = True
+ else:
+ error = False
+
+ if error:
msg = _("The attributes 'repository' and 'repository_version' must be used"
- "exclusively.")
+ " exclusively.")
raise serializers.ValidationError(msg)
return data
| Update CI files for branch 3.21
| 2019-07-26T20:03:24 |
||
pulp/pulpcore | 239 | pulp__pulpcore-239 | [
"5196"
] | fcd2de0e64b19fad852e3a1879910612b7cca281 | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,21 +4,21 @@
long_description = f.read()
requirements = [
- 'coreapi',
- 'Django~=2.2', # LTS version, switch only if we have a compelling reason to
- 'django-filter',
- 'djangorestframework',
- 'djangorestframework-queryfields',
- 'drf-nested-routers',
- 'drf-yasg',
- 'gunicorn',
+ 'coreapi~=2.3.3',
+ 'Django~=2.2.3', # LTS version, switch only if we have a compelling reason to
+ 'django-filter~=2.2.0',
+ 'djangorestframework~=3.10.2',
+ 'djangorestframework-queryfields~=1.0.0',
+ 'drf-nested-routers~=0.91.0',
+ 'drf-yasg~=1.16.1',
+ 'gunicorn~=19.9.0',
'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412
- 'PyYAML',
- 'rq~=1.0',
- 'redis<3.2.0',
- 'setuptools',
- 'dynaconf~=2.0',
- 'whitenoise',
+ 'PyYAML~=5.1.1',
+ 'rq~=1.1.0',
+ 'redis~=3.1.0',
+ 'setuptools~=41.0.1',
+ 'dynaconf~=2.0.3',
+ 'whitenoise~=4.1.3',
]
setup(
| Fix bug where Last-Modified header was being updated on duplicate package uploads
Fixes a bug where the Last-Modified header of a package stored in django-storages was being updated on duplicate uploads.
Closes #5149
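The general guard pattern behind a fix like this, sketched with Django's storage API (this is an illustration of the idea only, not the actual pulpcore change):

```python
from django.core.files.storage import default_storage


def save_if_missing(name, content):
    """Save content under name only if storage does not already hold it,
    leaving the existing object's Last-Modified timestamp untouched."""
    if default_storage.exists(name):
        return name
    return default_storage.save(name, content)
```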
| 2019-07-31T12:00:42 |
||
pulp/pulpcore | 259 | pulp__pulpcore-259 | [
"4681"
] | dcaf70f92d3e2f31c65fd9e6c94145861613c2b1 | diff --git a/pulpcore/app/models/base.py b/pulpcore/app/models/base.py
--- a/pulpcore/app/models/base.py
+++ b/pulpcore/app/models/base.py
@@ -1,7 +1,9 @@
+from gettext import gettext as _
import uuid
from django.db import models
from django.db.models import options
+from django.db.models.base import ModelBase
class Model(models.Model):
@@ -35,7 +37,24 @@ def __repr__(self):
return str(self)
-class MasterModel(Model):
+class MasterModelMeta(ModelBase):
+ def __new__(cls, name, bases, attrs, **kwargs):
+ """Override __new__ to set the default_related_name."""
+ if Model not in bases and MasterModel not in bases: # Only affects "Detail" models.
+ meta = attrs.get("Meta")
+ default_related_name = getattr(
+ meta, "default_related_name", None)
+ abstract = getattr(meta, "abstract", None)
+
+ if not default_related_name and not abstract:
+ raise Exception(_("The 'default_related_name' option has not been set for "
+ "{class_name}").format(class_name=name))
+
+ new_class = super().__new__(cls, name, bases, attrs, **kwargs)
+ return new_class
+
+
+class MasterModel(Model, metaclass=MasterModelMeta):
"""Base model for the "Master" model in a "Master-Detail" relationship.
Provides methods for casting down to detail types, back up to the master type,
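For context, a sketch of the kind of "Detail" model the new metaclass check applies to, as it would appear in a plugin's models module; the class, field, and import path are illustrative assumptions, and `Meta.default_related_name` is the part the check enforces:

```python
from django.db import models

from pulpcore.app.models import Content  # Content is a MasterModel subclass


class ExampleContent(Content):
    """Hypothetical detail model; omitting default_related_name would now raise."""

    name = models.TextField()

    class Meta:
        default_related_name = "example_contents"
```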
| file:// sync deletes files from directory
**Version**
Pulpcore 3.39
**Describe the bug**
When syncing file:// repositories, files are disappearing after the sync.
**To Reproduce**
1) Copy these two repositories to the FS:
- https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file1
- https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file2
2) Sync one, then the other
3) See that some files disappeared.
- In my case, file2 lost every file except PULP_MANIFEST
**Expected behavior**
No files disappear.
**Additional context**
This also occurred with RPM content type files.
| 2019-08-09T18:08:25 |
||
pulp/pulpcore | 265 | pulp__pulpcore-265 | [
"5210"
] | 5510ab0b33a7a8631969c0a8d68e549453d5f174 | diff --git a/pulpcore/app/serializers/task.py b/pulpcore/app/serializers/task.py
--- a/pulpcore/app/serializers/task.py
+++ b/pulpcore/app/serializers/task.py
@@ -58,7 +58,8 @@ class TaskSerializer(ModelSerializer):
"task."),
read_only=True
)
- error = serializers.JSONField(
+ error = serializers.DictField(
+ child=serializers.JSONField(),
help_text=_("A JSON Object of a fatal error encountered during the execution of this "
"task."),
read_only=True
| Update CI files for branch 3.39
| 2019-08-13T18:09:48 |
||
pulp/pulpcore | 272 | pulp__pulpcore-272 | [
"5290"
] | aaf382fd5dc180fd9e2d0be2053d9fdaa8195a0b | diff --git a/pulpcore/app/settings.py b/pulpcore/app/settings.py
--- a/pulpcore/app/settings.py
+++ b/pulpcore/app/settings.py
@@ -223,7 +223,10 @@
GLOBAL_ENV_FOR_DYNACONF='PULP',
ENV_SWITCHER_FOR_DYNACONF='PULP_ENV',
SETTINGS_MODULE_FOR_DYNACONF='/etc/pulp/settings.py',
- INCLUDES_FOR_DYNACONF=['/etc/pulp/plugins/*'],
+ INCLUDES_FOR_DYNACONF=[
+ '{}.app.settings'.format(plugin_name)
+ for plugin_name in INSTALLED_PULP_PLUGINS
+ ],
ENVVAR_FOR_DYNACONF='PULP_SETTINGS',
)
# HERE ENDS DYNACONF EXTENSION LOAD (No more code below this line)
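With this change, dynaconf loads each installed plugin's own `<plugin_name>.app.settings` module instead of scanning `/etc/pulp/plugins/*`. A sketch of what such a module might contain (the plugin name and setting are made up):

```python
# pulp_example/app/settings.py -- hypothetical plugin settings module picked up
# via the INCLUDES_FOR_DYNACONF list built above.
EXAMPLE_FEATURE_ENABLED = True
```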
| Update CI files for branch 3.22
| 2019-08-22T21:05:03 |
||
pulp/pulpcore | 275 | pulp__pulpcore-275 | [
"5324"
] | d5b3f70353915c826cacb4bba10344b73cc0796b | diff --git a/pulpcore/app/pagination.py b/pulpcore/app/pagination.py
--- a/pulpcore/app/pagination.py
+++ b/pulpcore/app/pagination.py
@@ -1,22 +1,6 @@
from rest_framework import pagination
-class IDPagination(pagination.PageNumberPagination):
- """
- Paginate an API view naively, based on the ID of objects being iterated over.
-
- This assumes that the objects being iterated over have an 'id' field, that the value of this
- field is a int, and that the field is indexed.
-
- This assumption should be True for all Models inheriting `pulpcore.app.models.Model`, the Pulp
- base Model class.
-
- """
- ordering = 'id'
- page_size_query_param = 'page_size'
- max_page_size = 5000
-
-
class NamePagination(pagination.PageNumberPagination):
"""
Paginate an API view based on the value of the 'name' field of objects being iterated over.
diff --git a/pulpcore/app/settings.py b/pulpcore/app/settings.py
--- a/pulpcore/app/settings.py
+++ b/pulpcore/app/settings.py
@@ -116,7 +116,7 @@
REST_FRAMEWORK = {
'URL_FIELD_NAME': '_href',
'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',),
- 'DEFAULT_PAGINATION_CLASS': 'pulpcore.app.pagination.IDPagination',
+ 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 100,
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAuthenticated',),
'DEFAULT_AUTHENTICATION_CLASSES': (
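Switching the default paginator to DRF's `LimitOffsetPagination` means list endpoints page with `limit` and `offset` query parameters rather than page numbers. An illustrative request (host and credentials are assumptions):

```python
import requests

page = requests.get(
    "http://localhost:24817/pulp/api/v3/artifacts/",
    params={"limit": 20, "offset": 40},  # items 41-60 of the collection
    auth=("admin", "password"),
).json()

# LimitOffsetPagination responses carry "count", "next", "previous", and "results".
print(page["count"], len(page["results"]))
```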
| Make the import/export machinery compatible with `django-import-export>=4.0`
We should review the import/export machinery to understand how to update the `django-import-export` requirement.
#### Additional context
https://github.com/pulp/pulpcore/pull/5317
| 2019-08-23T21:21:03 |
||
pulp/pulpcore | 285 | pulp__pulpcore-285 | [
"4992"
] | fc683b6d0e16be6bfbd354f997f2142de1891514 | diff --git a/pulpcore/app/openapigenerator.py b/pulpcore/app/openapigenerator.py
--- a/pulpcore/app/openapigenerator.py
+++ b/pulpcore/app/openapigenerator.py
@@ -5,6 +5,7 @@
from drf_yasg import openapi
from drf_yasg.generators import OpenAPISchemaGenerator
from drf_yasg.inspectors import SwaggerAutoSchema
+from drf_yasg.openapi import Parameter
from drf_yasg.utils import filter_none, force_real_str
@@ -263,6 +264,15 @@ def get_operation(self, operation_keys):
body = self.get_request_body_parameters(consumes)
query = self.get_query_parameters()
+ if self.method == 'GET':
+ fields_paramenter = Parameter(
+ name="fields",
+ in_="query",
+ description="A list of fields to include in the response.",
+ required=False,
+ type="string",
+ )
+ query.append(fields_paramenter)
parameters = body + query
parameters = filter_none(parameters)
parameters = self.add_manual_parameters(parameters)
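The added `fields` parameter documents the ability to limit which fields a GET returns. A sketch of a request using it (host, credentials, and the chosen endpoint are assumptions):

```python
import requests

tasks = requests.get(
    "http://localhost:24817/pulp/api/v3/tasks/",
    params={"fields": "state"},  # ask the API to include only the "state" field
    auth=("admin", "password"),
).json()
```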
| [noissue]: Bump black from 23.12.1 to 24.1.1
Bumps [black](https://github.com/psf/black) from 23.12.1 to 24.1.1.
<details>
<summary>Release notes</summary>
<p><em>Sourced from <a href="https://github.com/psf/black/releases">black's releases</a>.</em></p>
<blockquote>
<h2>24.1.1</h2>
<p>Bugfix release to fix a bug that made Black unusable on certain file systems
with strict limits on path length.</p>
<h3>Preview style</h3>
<ul>
<li>Consistently add trailing comma on typed parameters (<a href="https://redirect.github.com/psf/black/issues/4164">#4164</a>)</li>
</ul>
<h3>Configuration</h3>
<ul>
<li>Shorten the length of the name of the cache file to fix crashes on file systems that
do not support long paths (<a href="https://redirect.github.com/psf/black/issues/4176">#4176</a>)</li>
</ul>
<h2>24.1.0</h2>
<h3>Highlights</h3>
<p>This release introduces the new 2024 stable style (<a href="https://redirect.github.com/psf/black/issues/4106">#4106</a>), stabilizing the following
changes:</p>
<ul>
<li>Add parentheses around <code>if</code>-<code>else</code> expressions (<a href="https://redirect.github.com/psf/black/issues/2278">#2278</a>)</li>
<li>Dummy class and function implementations consisting only of <code>...</code> are formatted more
compactly (<a href="https://redirect.github.com/psf/black/issues/3796">#3796</a>)</li>
<li>If an assignment statement is too long, we now prefer splitting on the right-hand side
(<a href="https://redirect.github.com/psf/black/issues/3368">#3368</a>)</li>
<li>Hex codes in Unicode escape sequences are now standardized to lowercase (<a href="https://redirect.github.com/psf/black/issues/2916">#2916</a>)</li>
<li>Allow empty first lines at the beginning of most blocks (<a href="https://redirect.github.com/psf/black/issues/3967">#3967</a>, <a href="https://redirect.github.com/psf/black/issues/4061">#4061</a>)</li>
<li>Add parentheses around long type annotations (<a href="https://redirect.github.com/psf/black/issues/3899">#3899</a>)</li>
<li>Enforce newline after module docstrings (<a href="https://redirect.github.com/psf/black/issues/3932">#3932</a>, <a href="https://redirect.github.com/psf/black/issues/4028">#4028</a>)</li>
<li>Fix incorrect magic trailing comma handling in return types (<a href="https://redirect.github.com/psf/black/issues/3916">#3916</a>)</li>
<li>Remove blank lines before class docstrings (<a href="https://redirect.github.com/psf/black/issues/3692">#3692</a>)</li>
<li>Wrap multiple context managers in parentheses if combined in a single <code>with</code> statement
(<a href="https://redirect.github.com/psf/black/issues/3489">#3489</a>)</li>
<li>Fix bug in line length calculations for power operations (<a href="https://redirect.github.com/psf/black/issues/3942">#3942</a>)</li>
<li>Add trailing commas to collection literals even if there's a comment after the last
entry (<a href="https://redirect.github.com/psf/black/issues/3393">#3393</a>)</li>
<li>When using <code>--skip-magic-trailing-comma</code> or <code>-C</code>, trailing commas are stripped from
subscript expressions with more than 1 element (<a href="https://redirect.github.com/psf/black/issues/3209">#3209</a>)</li>
<li>Add extra blank lines in stubs in a few cases (<a href="https://redirect.github.com/psf/black/issues/3564">#3564</a>, <a href="https://redirect.github.com/psf/black/issues/3862">#3862</a>)</li>
<li>Accept raw strings as docstrings (<a href="https://redirect.github.com/psf/black/issues/3947">#3947</a>)</li>
<li>Split long lines in case blocks (<a href="https://redirect.github.com/psf/black/issues/4024">#4024</a>)</li>
<li>Stop removing spaces from walrus operators within subscripts (<a href="https://redirect.github.com/psf/black/issues/3823">#3823</a>)</li>
<li>Fix incorrect formatting of certain async statements (<a href="https://redirect.github.com/psf/black/issues/3609">#3609</a>)</li>
<li>Allow combining <code># fmt: skip</code> with other comments (<a href="https://redirect.github.com/psf/black/issues/3959">#3959</a>)</li>
</ul>
<p>There are already a few improvements in the <code>--preview</code> style, which are slated for the
2025 stable style. Try them out and
<a href="https://github.com/psf/black/issues">share your feedback</a>. In the past, the preview
style has included some features that we were not able to stabilize. This year, we're</p>
<!-- raw HTML omitted -->
</blockquote>
<p>... (truncated)</p>
</details>
<details>
<summary>Changelog</summary>
<p><em>Sourced from <a href="https://github.com/psf/black/blob/main/CHANGES.md">black's changelog</a>.</em></p>
<blockquote>
<h2>24.1.1</h2>
<p>Bugfix release to fix a bug that made Black unusable on certain file systems with strict
limits on path length.</p>
<h3>Preview style</h3>
<ul>
<li>Consistently add trailing comma on typed parameters (<a href="https://redirect.github.com/psf/black/issues/4164">#4164</a>)</li>
</ul>
<h3>Configuration</h3>
<ul>
<li>Shorten the length of the name of the cache file to fix crashes on file systems that
do not support long paths (<a href="https://redirect.github.com/psf/black/issues/4176">#4176</a>)</li>
</ul>
<h2>24.1.0</h2>
<h3>Highlights</h3>
<p>This release introduces the new 2024 stable style (<a href="https://redirect.github.com/psf/black/issues/4106">#4106</a>), stabilizing the following
changes:</p>
<ul>
<li>Add parentheses around <code>if</code>-<code>else</code> expressions (<a href="https://redirect.github.com/psf/black/issues/2278">#2278</a>)</li>
<li>Dummy class and function implementations consisting only of <code>...</code> are formatted more
compactly (<a href="https://redirect.github.com/psf/black/issues/3796">#3796</a>)</li>
<li>If an assignment statement is too long, we now prefer splitting on the right-hand side
(<a href="https://redirect.github.com/psf/black/issues/3368">#3368</a>)</li>
<li>Hex codes in Unicode escape sequences are now standardized to lowercase (<a href="https://redirect.github.com/psf/black/issues/2916">#2916</a>)</li>
<li>Allow empty first lines at the beginning of most blocks (<a href="https://redirect.github.com/psf/black/issues/3967">#3967</a>, <a href="https://redirect.github.com/psf/black/issues/4061">#4061</a>)</li>
<li>Add parentheses around long type annotations (<a href="https://redirect.github.com/psf/black/issues/3899">#3899</a>)</li>
<li>Enforce newline after module docstrings (<a href="https://redirect.github.com/psf/black/issues/3932">#3932</a>, <a href="https://redirect.github.com/psf/black/issues/4028">#4028</a>)</li>
<li>Fix incorrect magic trailing comma handling in return types (<a href="https://redirect.github.com/psf/black/issues/3916">#3916</a>)</li>
<li>Remove blank lines before class docstrings (<a href="https://redirect.github.com/psf/black/issues/3692">#3692</a>)</li>
<li>Wrap multiple context managers in parentheses if combined in a single <code>with</code> statement
(<a href="https://redirect.github.com/psf/black/issues/3489">#3489</a>)</li>
<li>Fix bug in line length calculations for power operations (<a href="https://redirect.github.com/psf/black/issues/3942">#3942</a>)</li>
<li>Add trailing commas to collection literals even if there's a comment after the last
entry (<a href="https://redirect.github.com/psf/black/issues/3393">#3393</a>)</li>
<li>When using <code>--skip-magic-trailing-comma</code> or <code>-C</code>, trailing commas are stripped from
subscript expressions with more than 1 element (<a href="https://redirect.github.com/psf/black/issues/3209">#3209</a>)</li>
<li>Add extra blank lines in stubs in a few cases (<a href="https://redirect.github.com/psf/black/issues/3564">#3564</a>, <a href="https://redirect.github.com/psf/black/issues/3862">#3862</a>)</li>
<li>Accept raw strings as docstrings (<a href="https://redirect.github.com/psf/black/issues/3947">#3947</a>)</li>
<li>Split long lines in case blocks (<a href="https://redirect.github.com/psf/black/issues/4024">#4024</a>)</li>
<li>Stop removing spaces from walrus operators within subscripts (<a href="https://redirect.github.com/psf/black/issues/3823">#3823</a>)</li>
<li>Fix incorrect formatting of certain async statements (<a href="https://redirect.github.com/psf/black/issues/3609">#3609</a>)</li>
<li>Allow combining <code># fmt: skip</code> with other comments (<a href="https://redirect.github.com/psf/black/issues/3959">#3959</a>)</li>
</ul>
<p>There are already a few improvements in the <code>--preview</code> style, which are slated for the
2025 stable style. Try them out and
<a href="https://github.com/psf/black/issues">share your feedback</a>. In the past, the preview
style has included some features that we were not able to stabilize. This year, we're</p>
<!-- raw HTML omitted -->
</blockquote>
<p>... (truncated)</p>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/psf/black/commit/e026c93888f91a47a9c9f4e029f3eb07d96375e6"><code>e026c93</code></a> Prepare release 24.1.1 (<a href="https://redirect.github.com/psf/black/issues/4186">#4186</a>)</li>
<li><a href="https://github.com/psf/black/commit/79fc1158a98281dac798feb14b8fddb4051e4a42"><code>79fc115</code></a> chore: ignore node_modules (produced by a pre-commit check) (<a href="https://redirect.github.com/psf/black/issues/4184">#4184</a>)</li>
<li><a href="https://github.com/psf/black/commit/8bf04549ffd276a1bad6eb110e66e6557ee630d9"><code>8bf0454</code></a> Consistently add trailing comma on typed parameters (<a href="https://redirect.github.com/psf/black/issues/4164">#4164</a>)</li>
<li><a href="https://github.com/psf/black/commit/1607e9ab20ad550cf940482d0d361ca31fc03189"><code>1607e9a</code></a> Fix missing space in option description (<a href="https://redirect.github.com/psf/black/issues/4182">#4182</a>)</li>
<li><a href="https://github.com/psf/black/commit/ed770ba4dd50c419148a0fca2b43937a7447e1f9"><code>ed770ba</code></a> Fix cache file length (<a href="https://redirect.github.com/psf/black/issues/4176">#4176</a>)</li>
<li><a href="https://github.com/psf/black/commit/659c29a41c7c686687aef21f57b95bcfa236b03b"><code>659c29a</code></a> New changelog</li>
<li><a href="https://github.com/psf/black/commit/0e6e46b9eb45f5a22062fe84c2c2ff46bd0d738e"><code>0e6e46b</code></a> Prepare release 24.1.0 (<a href="https://redirect.github.com/psf/black/issues/4170">#4170</a>)</li>
<li><a href="https://github.com/psf/black/commit/4f47cac1925a2232892ceae438e2c62f81517714"><code>4f47cac</code></a> Add --unstable flag (<a href="https://redirect.github.com/psf/black/issues/4096">#4096</a>)</li>
<li><a href="https://github.com/psf/black/commit/bccec8adfbed2bbc24c0859e8758d5e7809d42b7"><code>bccec8a</code></a> Show warning on invalid toml configuration (<a href="https://redirect.github.com/psf/black/issues/4165">#4165</a>)</li>
<li><a href="https://github.com/psf/black/commit/7d789469ed947022f183962b823f5862511272ac"><code>7d78946</code></a> Describe 2024 module docstring more accurately (<a href="https://redirect.github.com/psf/black/issues/4168">#4168</a>)</li>
<li>Additional commits viewable in <a href="https://github.com/psf/black/compare/23.12.1...24.1.1">compare view</a></li>
</ul>
</details>
| 2019-09-06T21:19:11 |
||
pulp/pulpcore | 288 | pulp__pulpcore-288 | [
"5282"
] | 3fce961eb9b5a55d3609b2319b504a6bd1e40d72 | diff --git a/pulpcore/exceptions/base.py b/pulpcore/exceptions/base.py
--- a/pulpcore/exceptions/base.py
+++ b/pulpcore/exceptions/base.py
@@ -40,10 +40,7 @@ def exception_to_dict(exc, traceback=None):
:return: dictionary representing the Exception
:rtype: dict
"""
- result = {'code': None, 'description': str(exc), 'traceback': traceback}
- if isinstance(exc, PulpException):
- result['code'] = exc.error_code
- return result
+ return {'description': str(exc), 'traceback': traceback}
class ResourceImmutableError(PulpException):
| [PR #5281/bd8cdc34 backport][3.51] Added csrf_token passing to django optional_logout
**This is a backport of PR #5281 as merged into main (bd8cdc343995c9b62e1ac32e012f71539beceabc).**
fixes: #5250
| 2019-09-09T19:13:24 |
||
pulp/pulpcore | 291 | pulp__pulpcore-291 | [
"5190"
] | 94a50405d16e73813df4201b6ee0ae30d362b065 | diff --git a/pulpcore/app/viewsets/upload.py b/pulpcore/app/viewsets/upload.py
--- a/pulpcore/app/viewsets/upload.py
+++ b/pulpcore/app/viewsets/upload.py
@@ -97,7 +97,7 @@ def update(self, request, pk=None):
@action(detail=True, methods=['post'])
def commit(self, request, pk):
"""
- Generates a Task to commit the upload and create an artifact
+ Queues a Task that creates an Artifact, and the Upload gets deleted and cannot be re-used.
"""
try:
sha256 = request.data['sha256']
| [Backport-3.49] Fix import in wsgi preventing startup
Fixes #5189
| 2019-09-10T17:57:44 |
||
pulp/pulpcore | 294 | pulp__pulpcore-294 | [
"5428"
] | bdab0353a647f8d10fb2a18232768d65d16c9616 | diff --git a/pulpcore/app/serializers/content.py b/pulpcore/app/serializers/content.py
--- a/pulpcore/app/serializers/content.py
+++ b/pulpcore/app/serializers/content.py
@@ -27,16 +27,27 @@ class Meta:
class SingleArtifactContentSerializer(BaseContentSerializer):
- _artifact = fields.SingleContentArtifactField(
+ artifact = fields.SingleContentArtifactField(
help_text=_("Artifact file representing the physical content"),
)
- _relative_path = serializers.CharField(
+ relative_path = serializers.CharField(
help_text=_("Path where the artifact is located relative to distributions base_path"),
validators=[fields.relative_path_validator],
write_only=True,
)
+ def __init__(self, *args, **kwargs):
+ """
+ Initializer for SingleArtifactContentSerializer
+ """
+ super().__init__(*args, **kwargs)
+
+ # If the content model has its own database field 'relative_path',
+ # we should not mark the field write_only
+ if hasattr(self.Meta.model, 'relative_path'):
+ self.fields["relative_path"].write_only = False
+
@transaction.atomic
def create(self, validated_data):
"""
@@ -45,8 +56,11 @@ def create(self, validated_data):
Args:
validated_data (dict): Data to save to the database
"""
- artifact = validated_data.pop('_artifact')
- relative_path = validated_data.pop('_relative_path')
+ artifact = validated_data.pop('artifact')
+ if self.fields["relative_path"].write_only:
+ relative_path = validated_data.pop('relative_path')
+ else:
+ relative_path = validated_data.get('relative_path')
content = self.Meta.model.objects.create(**validated_data)
models.ContentArtifact.objects.create(
artifact=artifact,
@@ -57,11 +71,11 @@ def create(self, validated_data):
class Meta:
model = models.Content
- fields = BaseContentSerializer.Meta.fields + ('_artifact', '_relative_path')
+ fields = BaseContentSerializer.Meta.fields + ('artifact', 'relative_path')
class MultipleArtifactContentSerializer(BaseContentSerializer):
- _artifacts = fields.ContentArtifactsField(
+ artifacts = fields.ContentArtifactsField(
help_text=_("A dict mapping relative paths inside the Content to the corresponding"
"Artifact URLs. E.g.: {'relative/path': "
"'/artifacts/1/'"),
@@ -75,9 +89,9 @@ def create(self, validated_data):
Args:
validated_data (dict): Data to save to the database
"""
- _artifacts = validated_data.pop('_artifacts')
+ artifacts = validated_data.pop('artifacts')
content = self.Meta.model.objects.create(**validated_data)
- for relative_path, artifact in _artifacts.items():
+ for relative_path, artifact in artifacts.items():
models.ContentArtifact.objects.create(
artifact=artifact,
content=content,
@@ -87,7 +101,7 @@ def create(self, validated_data):
class Meta:
model = models.Content
- fields = BaseContentSerializer.Meta.fields + ('_artifacts',)
+ fields = BaseContentSerializer.Meta.fields + ('artifacts',)
class ContentChecksumSerializer(serializers.Serializer):
diff --git a/pulpcore/app/serializers/fields.py b/pulpcore/app/serializers/fields.py
--- a/pulpcore/app/serializers/fields.py
+++ b/pulpcore/app/serializers/fields.py
@@ -123,7 +123,7 @@ def run_validation(self, data):
"""
ret = {}
if data is empty:
- raise serializers.ValidationError(_('_artifacts field must be specified.'))
+ raise serializers.ValidationError(_('artifacts field must be specified.'))
for relative_path, url in data.items():
relative_path_validator(relative_path)
artifactfield = RelatedField(view_name='artifacts-detail',
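After this rename, clients send `artifact` and `relative_path` (rather than `_artifact` and `_relative_path`) when creating single-artifact content; the Ruby bindings change below does exactly that. A Python sketch of the same call (host, endpoint path, and credentials are assumptions):

```python
import requests

artifact_href = "/pulp/api/v3/artifacts/<uuid>/"  # assumed href of an existing Artifact

requests.post(
    "http://localhost:24817/pulp/api/v3/content/file/files/",
    data={"artifact": artifact_href, "relative_path": "foo.tar.gz"},
    auth=("admin", "password"),
)
```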
| diff --git a/.travis/test_bindings.rb b/.travis/test_bindings.rb
--- a/.travis/test_bindings.rb
+++ b/.travis/test_bindings.rb
@@ -125,7 +125,7 @@ def upload_file_in_chunks(file_path)
artifact = @artifacts_api.create({file: File.new(file_path)})
# Create a FileContent from the artifact
-file_data = PulpFileClient::FileContent.new({relative_path: 'foo.tar.gz', _artifact: artifact._href})
+file_data = PulpFileClient::FileContent.new({relative_path: 'foo.tar.gz', artifact: artifact._href})
filecontent = @filecontent_api.create(file_data)
# Add the new FileContent to a repository version
diff --git a/pulpcore/tests/functional/api/using_plugin/test_orphans.py b/pulpcore/tests/functional/api/using_plugin/test_orphans.py
--- a/pulpcore/tests/functional/api/using_plugin/test_orphans.py
+++ b/pulpcore/tests/functional/api/using_plugin/test_orphans.py
@@ -80,7 +80,7 @@ def test_clean_orphan_content_unit(self):
)
# Verify that the artifact is present on disk.
- artifact_path = os.path.join(MEDIA_PATH, self.api_client.get(content['_artifact'])['file'])
+ artifact_path = os.path.join(MEDIA_PATH, self.api_client.get(content['artifact'])['file'])
cmd = ('ls', artifact_path)
self.cli_client.run(cmd, sudo=True)
diff --git a/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py b/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
--- a/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
+++ b/pulpcore/tests/functional/api/using_plugin/test_repo_versions.py
@@ -330,7 +330,7 @@ def test_delete_first_version(self):
get_content(self.repo, self.repo_version_hrefs[0])
for repo_version_href in self.repo_version_hrefs[1:]:
artifact_paths = get_artifact_paths(self.repo, repo_version_href)
- self.assertIn(self.content[0]['_artifact'], artifact_paths)
+ self.assertIn(self.content[0]['artifact'], artifact_paths)
def test_delete_last_version(self):
"""Delete the last repository version.
@@ -352,8 +352,8 @@ def test_delete_last_version(self):
self.repo = self.client.get(self.repo['_href'])
artifact_paths = get_artifact_paths(self.repo)
- self.assertNotIn(self.content[-2]['_artifact'], artifact_paths)
- self.assertIn(self.content[-1]['_artifact'], artifact_paths)
+ self.assertNotIn(self.content[-2]['artifact'], artifact_paths)
+ self.assertIn(self.content[-1]['artifact'], artifact_paths)
def test_delete_middle_version(self):
"""Delete a middle version."""
@@ -365,7 +365,7 @@ def test_delete_middle_version(self):
for repo_version_href in self.repo_version_hrefs[index + 1:]:
artifact_paths = get_artifact_paths(self.repo, repo_version_href)
- self.assertIn(self.content[index]['_artifact'], artifact_paths)
+ self.assertIn(self.content[index]['artifact'], artifact_paths)
def test_delete_publication(self):
"""Delete a publication.
| [noissue]: Update opentelemetry-exporter-otlp-proto-http requirement from <=1.24.0,>=1.24.0 to >=1.24.0,<=1.25.0
Updates the requirements on [opentelemetry-exporter-otlp-proto-http](https://github.com/open-telemetry/opentelemetry-python) to permit the latest version.
<details>
<summary>Changelog</summary>
<p><em>Sourced from <a href="https://github.com/open-telemetry/opentelemetry-python/blob/main/CHANGELOG.md">opentelemetry-exporter-otlp-proto-http's changelog</a>.</em></p>
<blockquote>
<h2>Version 1.25.0/0.46b0 (2024-05-30)</h2>
<ul>
<li>Fix class BoundedAttributes to have RLock rather than Lock
(<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/pull/3859">#3859</a>)</li>
<li>Remove thread lock by loading RuntimeContext explicitly.
(<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/pull/3763">#3763</a>)</li>
<li>Update proto version to v1.2.0
(<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/pull/3844">#3844</a>)</li>
<li>Add to_json method to ExponentialHistogram
(<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/pull/3780">#3780</a>)</li>
<li>Bump mypy to 1.9.0
(<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/pull/3795">#3795</a>)</li>
<li>Fix exponential histograms
(<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/pull/3798">#3798</a>)</li>
<li>Fix otlp exporter to export log_record.observed_timestamp
(<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/pull/3785">#3785</a>)</li>
<li>Add capture the fully qualified type name for raised exceptions in spans
(<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/pull/3837">#3837</a>)</li>
<li>Prometheus exporter sort label keys to prevent duplicate metrics when user input changes order
(<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/pull/3698">#3698</a>)</li>
<li>Update semantic conventions to version 1.25.0.
Refactor semantic-convention structure:
<ul>
<li><code>SpanAttributes</code>, <code>ResourceAttributes</code>, and <code>MetricInstruments</code> are deprecated.</li>
<li>Attribute and metric definitions are now grouped by the namespace.</li>
<li>Stable attributes and metrics are moved to <code>opentelemetry.semconv.attributes</code>
and <code>opentelemetry.semconv.metrics</code> modules.</li>
<li>Stable and experimental attributes and metrics are defined under
<code>opentelemetry.semconv._incubating</code> import path.
(<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/pull/3586">#3586</a>)</li>
</ul>
</li>
<li>Rename test objects to avoid pytest warnings
(<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/issues/3823">#3823</a> (<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/pull/3823">open-telemetry/opentelemetry-python#3823</a>))</li>
<li>Add span flags to OTLP spans and links
(<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/pull/3881">#3881</a>)</li>
<li>Record links with invalid SpanContext if either attributes or TraceState are not empty
(<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/pull/3917/">#3917</a>)</li>
<li>Add OpenTelemetry trove classifiers to PyPI packages
(<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/issues/3913">#3913</a> (<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/pull/3913">open-telemetry/opentelemetry-python#3913</a>))</li>
<li>Fix prometheus metric name and unit conversion
(<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/pull/3924">#3924</a>)
<ul>
<li>this is a breaking change to prometheus metric names so they comply with the
<a href="https://github.com/open-telemetry/opentelemetry-specification/blob/v1.33.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus">specification</a>.</li>
<li>you can temporarily opt-out of the unit normalization by setting the environment variable
<code>OTEL_PYTHON_EXPERIMENTAL_DISABLE_PROMETHEUS_UNIT_NORMALIZATION=true</code></li>
<li>common unit abbreviations are converted to Prometheus conventions (<code>s</code> -> <code>seconds</code>),
following the <a href="https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/c0b51136575aa7ba89326d18edb4549e7e1bbdb9/pkg/translator/prometheus/normalize_name.go#L108">collector's implementation</a></li>
<li>repeated <code>_</code> are replaced with a single <code>_</code></li>
<li>unit annotations (enclosed in curly braces like <code>{requests}</code>) are stripped away</li>
<li>units with slash are converted e.g. <code>m/s</code> -> <code>meters_per_second</code>.</li>
<li>The exporter's API is not changed</li>
</ul>
</li>
<li>Add parameters for Distros and configurators to configure autoinstrumentation in addition to existing environment variables.</li>
</ul>
<!-- raw HTML omitted -->
</blockquote>
<p>... (truncated)</p>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/open-telemetry/opentelemetry-python/commit/257e6fe3345d42203109128a6ccc9afbb4b777bc"><code>257e6fe</code></a> [release/v1.25.x-0.46bx] Prepare release 1.25.0/0.46b0 (<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/issues/3942">#3942</a>)</li>
<li><a href="https://github.com/open-telemetry/opentelemetry-python/commit/d73593d1137a3854ceff3d7c94180d2bdb8b097f"><code>d73593d</code></a> Fix prometheus metric name and unit conversion (<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/issues/3924">#3924</a>)</li>
<li><a href="https://github.com/open-telemetry/opentelemetry-python/commit/832e85946a2a4ffcc566646b6fa0e830b9b04a66"><code>832e859</code></a> Add OpenTelemetry trove classifiers to PyPI packages (<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/issues/3913">#3913</a>)</li>
<li><a href="https://github.com/open-telemetry/opentelemetry-python/commit/dbf69435269896a344074e7563ede71e5697ff0c"><code>dbf6943</code></a> Fixup pylint broad exceptions warnings (<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/issues/3923">#3923</a>)</li>
<li><a href="https://github.com/open-telemetry/opentelemetry-python/commit/a156bf161d4de96766cac09a5ef4fcddbf367604"><code>a156bf1</code></a> Do not install unnecessary packages (<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/issues/3896">#3896</a>)</li>
<li><a href="https://github.com/open-telemetry/opentelemetry-python/commit/6e84b1f50979c77896c687a40a65f0e79f20a4b2"><code>6e84b1f</code></a> exporter: add is_remote_parent span flags to OTLP exported spans and links (#...</li>
<li><a href="https://github.com/open-telemetry/opentelemetry-python/commit/fbbf5b565032e2d9826dd8e631733c0cd255513f"><code>fbbf5b5</code></a> Revert modifications to Apache license (<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/issues/3870">#3870</a>)</li>
<li><a href="https://github.com/open-telemetry/opentelemetry-python/commit/afc3acef6d32b26511f1ad7080493225015afaac"><code>afc3ace</code></a> Bump black to 24.3.0 (<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/issues/3871">#3871</a>)</li>
<li><a href="https://github.com/open-telemetry/opentelemetry-python/commit/b51a6f8e62a10a4f22455f55e439fe5c5fcac44d"><code>b51a6f8</code></a> Sort by label keys before generating labels key and value lists (<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/issues/3698">#3698</a>)</li>
<li><a href="https://github.com/open-telemetry/opentelemetry-python/commit/9398f26ecad09e02ad044859334cd4c75299c3cd"><code>9398f26</code></a> Use a single install command for lint (<a href="https://redirect.github.com/open-telemetry/opentelemetry-python/issues/3848">#3848</a>)</li>
<li>Additional commits viewable in <a href="https://github.com/open-telemetry/opentelemetry-python/compare/v1.24.0...v1.25.0">compare view</a></li>
</ul>
</details>
<br />
| 2019-09-11T12:42:37 |
|
pulp/pulpcore | 298 | pulp__pulpcore-298 | [
"5445"
] | e0fe8f7a9dbef711e43e27e61a33552b01422cbc | diff --git a/pulpcore/app/serializers/content.py b/pulpcore/app/serializers/content.py
--- a/pulpcore/app/serializers/content.py
+++ b/pulpcore/app/serializers/content.py
@@ -45,7 +45,7 @@ def __init__(self, *args, **kwargs):
# If the content model has its own database field 'relative_path',
# we should not mark the field write_only
- if hasattr(self.Meta.model, 'relative_path'):
+ if hasattr(self.Meta.model, 'relative_path') and "relative_path" in self.fields:
self.fields["relative_path"].write_only = False
@transaction.atomic
@@ -57,7 +57,7 @@ def create(self, validated_data):
validated_data (dict): Data to save to the database
"""
artifact = validated_data.pop('artifact')
- if self.fields["relative_path"].write_only:
+ if "relative_path" in self.fields and self.fields["relative_path"].write_only:
relative_path = validated_data.pop('relative_path')
else:
relative_path = validated_data.get('relative_path')
| [PR #5443/c7079c89 backport][3.54] Pin the pytest-redis requirement for unit tests
**This is a backport of PR #5443 as merged into main (c7079c891b87ecd9073260db5baa35826803f635).**
[noissue]
| 2019-09-12T22:02:36 |
||
pulp/pulpcore | 299 | pulp__pulpcore-299 | [
"5445"
] | 4158042bf39f0b7d9d8e5d0203053fafb4fbf39d | diff --git a/pulpcore/app/serializers/content.py b/pulpcore/app/serializers/content.py
--- a/pulpcore/app/serializers/content.py
+++ b/pulpcore/app/serializers/content.py
@@ -57,7 +57,7 @@ def create(self, validated_data):
validated_data (dict): Data to save to the database
"""
artifact = validated_data.pop('artifact')
- if "relative_path" in self.fields and self.fields["relative_path"].write_only:
+ if "relative_path" not in self.fields or self.fields["relative_path"].write_only:
relative_path = validated_data.pop('relative_path')
else:
relative_path = validated_data.get('relative_path')
| [PR #5443/c7079c89 backport][3.54] Pin the pytest-redis requirement for unit tests
**This is a backport of PR #5443 as merged into main (c7079c891b87ecd9073260db5baa35826803f635).**
[noissue]
| 2019-09-13T15:05:11 |
||
pulp/pulpcore | 302 | pulp__pulpcore-302 | [
"5009"
] | 2d0fcae30205e76cd80614c0b498f5021ccdc755 | diff --git a/pulpcore/app/openapigenerator.py b/pulpcore/app/openapigenerator.py
--- a/pulpcore/app/openapigenerator.py
+++ b/pulpcore/app/openapigenerator.py
@@ -2,6 +2,7 @@
from collections import OrderedDict
import uritemplate
+from django.utils.html import strip_tags
from drf_yasg import openapi
from drf_yasg.generators import OpenAPISchemaGenerator
from drf_yasg.inspectors import SwaggerAutoSchema
@@ -283,6 +284,10 @@ def get_operation(self, operation_keys):
else:
operation_id = self.get_operation_id(operation_keys)
summary, description = self.get_summary_and_description()
+
+ if "include_html" not in self.request.query_params:
+ description = strip_tags(description)
+
security = self.get_security()
assert security is None or isinstance(security, list), "security must be a list of " \
"security requirement objects"
diff --git a/pulpcore/app/settings.py b/pulpcore/app/settings.py
--- a/pulpcore/app/settings.py
+++ b/pulpcore/app/settings.py
@@ -215,6 +215,11 @@
'DEFAULT_INFO': 'pulpcore.app.urls.api_info',
}
+# have the docs url show field descriptions with html
+REDOC_SETTINGS = {
+ 'SPEC_URL': '/pulp/api/v3/docs/?format=openapi&include_html=1',
+}
+
# HERE STARTS DYNACONF EXTENSION LOAD (Keep at the very bottom of settings.py)
# Read more at https://dynaconf.readthedocs.io/en/latest/guides/django.html
import dynaconf # noqa
| Improve staging_docs landing page
What this PR does:
- Improve landing page layout
- Replace placeholder landing page cards with real content
| 2019-09-18T13:19:37 |
||
pulp/pulpcore | 304 | pulp__pulpcore-304 | [
"5478"
] | d615f23a0f59caf6c49e20cfa427d7d5e97ab4c5 | diff --git a/pulpcore/app/tasks/repository.py b/pulpcore/app/tasks/repository.py
--- a/pulpcore/app/tasks/repository.py
+++ b/pulpcore/app/tasks/repository.py
@@ -89,7 +89,10 @@ def add_and_remove(repository_pk, add_content_units, remove_content_units, base_
if '*' in remove_content_units:
latest = models.RepositoryVersion.latest(repository)
- remove_content_units = latest.content.values_list('pk', flat=True)
+ if latest:
+ remove_content_units = latest.content.values_list('pk', flat=True)
+ else:
+ remove_content_units = []
with models.RepositoryVersion.create(repository, base_version=base_version) as new_version:
new_version.remove_content(models.Content.objects.filter(pk__in=remove_content_units))
| docs should not use underscores in http headers for auth
**Version**
any
**Describe the bug**
https://docs.pulpproject.org/pulpcore/authentication/webserver.html#webserver-auth-with-reverse-proxy says:
> With nginx providing authentication, all it can do is pass REMOTE_USER (or similar name) to the application webserver, i.e. gunicorn. You can pass the header as part of the proxy request in nginx with a config line like:
>
> proxy_set_header REMOTE_USER $remote_user;
But as of gunicorn 22.0 (more precisely https://github.com/benoitc/gunicorn/commit/72b8970dbf2bf3444eb2e8b12aeff1a3d5922a9a/ and https://github.com/benoitc/gunicorn/issues/2799), headers with underscores are forbidden by default.
If the docs used a dash instead, e.g. `proxy_set_header REMOTE-USER …`, things would work :)
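For illustration, a minimal WSGI sketch (hypothetical, not Pulp code) of why the dash spelling is enough: WSGI maps both `REMOTE-USER` and `REMOTE_USER` request headers to the same `HTTP_REMOTE_USER` key, so the only difference is gunicorn's default filtering of underscore names.

```python
# Hypothetical sketch, not Pulp code. Per the WSGI/CGI convention a request
# header named "REMOTE-USER" (or "REMOTE_USER") arrives as
# environ["HTTP_REMOTE_USER"]; gunicorn >= 22.0 simply refuses/drops header
# names containing underscores by default, so the dashed spelling is the one
# that actually reaches the app.
def app(environ, start_response):
    user = environ.get("HTTP_REMOTE_USER", "<anonymous>")
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [f"authenticated as {user}\n".encode()]
```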
**Additional context**
Grant made me file this, and I did not want to post a screenshot of our colorful conversation ;)
| 2019-09-18T19:30:17 |
||
pulp/pulpcore | 306 | pulp__pulpcore-306 | [
"5129"
] | d545ecb091c5fb8003fbe2cb87bdeb988bb33bcd | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -13,6 +13,7 @@
'drf-yasg~=1.16.1',
'gunicorn~=19.9.0',
'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412
+ 'psycopg2-binary',
'PyYAML~=5.1.1',
'rq~=1.1.0',
'redis~=3.1.0',
@@ -34,10 +35,6 @@
url='http://www.pulpproject.org',
python_requires='>=3.6',
install_requires=requirements,
- extras_require={
- 'postgres': ['psycopg2-binary'],
- 'mysql': ['mysqlclient']
- },
include_package_data=True,
classifiers=(
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
| Fix content app not showing file size for 0 byte files
fixes: #5100
| 2019-09-20T16:26:23 |
||
pulp/pulpcore | 307 | pulp__pulpcore-307 | [
"4554"
] | 91519235026ce3be192200a0905816175eda4b6f | diff --git a/pulpcore/tasking/services/manage_workers.py b/pulpcore/tasking/services/manage_workers.py
--- a/pulpcore/tasking/services/manage_workers.py
+++ b/pulpcore/tasking/services/manage_workers.py
@@ -10,7 +10,7 @@
_ENVIRONMENT_FILE = os.path.join('/', 'etc', 'default', 'pulp-workers')
_SYSTEMD_UNIT_PATH = os.path.join('/', 'run', 'systemd', 'system')
-_UNIT_FILENAME_TEMPLATE = 'pulp-worker-%s.service'
+_UNIT_FILENAME_TEMPLATE = 'pulpcore-worker-%s.service'
_WORKER_TEMPLATE = """[Unit]
Description=Pulp Worker #%(num)s
After=network.target
diff --git a/pulpcore/tasking/services/worker_watcher.py b/pulpcore/tasking/services/worker_watcher.py
--- a/pulpcore/tasking/services/worker_watcher.py
+++ b/pulpcore/tasking/services/worker_watcher.py
@@ -64,7 +64,7 @@ def check_worker_processes():
present. If there are zero of either, log at the error level that Pulp will not operate
correctly.
"""
- msg = _('Checking if pulp-workers or pulp-resource-manager processes are '
+ msg = _('Checking if pulpcore-workers or pulpcore-resource-manager processes are '
'missing for more than %d seconds') % TASKING_CONSTANTS.WORKER_TTL
_logger.debug(msg)
@@ -81,8 +81,8 @@ def check_worker_processes():
name__startswith=TASKING_CONSTANTS.RESOURCE_MANAGER_WORKER_NAME).count()
if resource_manager_count == 0:
- msg = _("There are 0 pulp-resource-manager processes running. Pulp will not operate "
- "correctly without at least one pulp-resource-mananger process running.")
+ msg = _("There are 0 pulpcore-resource-manager processes running. Pulp will not operate "
+ "correctly without at least one pulpcore-resource-mananger process running.")
_logger.error(msg)
if worker_count == 0:
@@ -91,8 +91,8 @@ def check_worker_processes():
_logger.error(msg)
output_dict = {'workers': worker_count, 'resource-manager': resource_manager_count}
- msg = _("%(workers)d pulp-worker processes and %(resource-manager)d "
- "pulp-resource-manager processes") % output_dict
+ msg = _("%(workers)d pulpcore-worker processes and %(resource-manager)d "
+ "pulpcore-resource-manager processes") % output_dict
_logger.debug(msg)
| diff --git a/pulpcore/tests/functional/api/test_workers.py b/pulpcore/tests/functional/api/test_workers.py
--- a/pulpcore/tests/functional/api/test_workers.py
+++ b/pulpcore/tests/functional/api/test_workers.py
@@ -123,14 +123,14 @@ def setUpClass(cls):
cls.client = api.Client(cls.cfg, api.json_handler)
cls.svc_mgr = cli.ServiceManager(cls.cfg, cls.cfg.get_hosts('api')[0])
cls.worker = {}
- if not cls.svc_mgr.is_active(['pulp-worker@*']):
+ if not cls.svc_mgr.is_active(['pulpcore-worker@*']):
raise unittest.SkipTest(
'These tests require pulp workers running on systemd'
)
def test_01_start_new_worker(self):
"""Start a new worker to be used in next assertions."""
- self.svc_mgr.start(['pulp-worker@99'])
+ self.svc_mgr.start(['pulpcore-worker@99'])
time.sleep(2)
workers = self.client.get(
WORKER_PATH, params={'online': True}
@@ -145,7 +145,7 @@ def test_01_start_new_worker(self):
@skip_if(bool, 'worker', False)
def test_02_stop_worker(self):
"""Stop the worker and assert it is offline."""
- self.svc_mgr.stop(['pulp-worker@99'])
+ self.svc_mgr.stop(['pulpcore-worker@99'])
time.sleep(2)
worker = self.client.get(self.worker['_href'])
self.assertEqual(worker['online'], False)
| [PR #4540/671f2074 backport][3.21] More export optimizations and a bugfix
**This is a backport of PR #4540 as merged into main (671f2074716e509ea83d90afa1fea013d6de0668).**
None
| 2019-09-23T15:26:10 |
|
pulp/pulpcore | 323 | pulp__pulpcore-323 | [
"4554"
] | 4d22cf71d72e087bc43f71178fe57130cacbb244 | diff --git a/pulpcore/tasking/services/manage_workers.py b/pulpcore/tasking/services/manage_workers.py
--- a/pulpcore/tasking/services/manage_workers.py
+++ b/pulpcore/tasking/services/manage_workers.py
@@ -10,7 +10,7 @@
_ENVIRONMENT_FILE = os.path.join('/', 'etc', 'default', 'pulp-workers')
_SYSTEMD_UNIT_PATH = os.path.join('/', 'run', 'systemd', 'system')
-_UNIT_FILENAME_TEMPLATE = 'pulp-worker-%s.service'
+_UNIT_FILENAME_TEMPLATE = 'pulpcore-worker-%s.service'
_WORKER_TEMPLATE = """[Unit]
Description=Pulp Worker #%(num)s
After=network.target
diff --git a/pulpcore/tasking/services/worker_watcher.py b/pulpcore/tasking/services/worker_watcher.py
--- a/pulpcore/tasking/services/worker_watcher.py
+++ b/pulpcore/tasking/services/worker_watcher.py
@@ -64,7 +64,7 @@ def check_worker_processes():
present. If there are zero of either, log at the error level that Pulp will not operate
correctly.
"""
- msg = _('Checking if pulp-workers or pulp-resource-manager processes are '
+ msg = _('Checking if pulpcore-workers or pulpcore-resource-manager processes are '
'missing for more than %d seconds') % TASKING_CONSTANTS.WORKER_TTL
_logger.debug(msg)
@@ -81,8 +81,8 @@ def check_worker_processes():
name__startswith=TASKING_CONSTANTS.RESOURCE_MANAGER_WORKER_NAME).count()
if resource_manager_count == 0:
- msg = _("There are 0 pulp-resource-manager processes running. Pulp will not operate "
- "correctly without at least one pulp-resource-mananger process running.")
+ msg = _("There are 0 pulpcore-resource-manager processes running. Pulp will not operate "
+ "correctly without at least one pulpcore-resource-mananger process running.")
_logger.error(msg)
if worker_count == 0:
@@ -91,8 +91,8 @@ def check_worker_processes():
_logger.error(msg)
output_dict = {'workers': worker_count, 'resource-manager': resource_manager_count}
- msg = _("%(workers)d pulp-worker processes and %(resource-manager)d "
- "pulp-resource-manager processes") % output_dict
+ msg = _("%(workers)d pulpcore-worker processes and %(resource-manager)d "
+ "pulpcore-resource-manager processes") % output_dict
_logger.debug(msg)
| diff --git a/pulpcore/tests/functional/api/test_workers.py b/pulpcore/tests/functional/api/test_workers.py
--- a/pulpcore/tests/functional/api/test_workers.py
+++ b/pulpcore/tests/functional/api/test_workers.py
@@ -123,14 +123,14 @@ def setUpClass(cls):
cls.client = api.Client(cls.cfg, api.json_handler)
cls.svc_mgr = cli.ServiceManager(cls.cfg, cls.cfg.get_hosts('api')[0])
cls.worker = {}
- if not cls.svc_mgr.is_active(['pulp-worker@*']):
+ if not cls.svc_mgr.is_active(['pulpcore-worker@*']):
raise unittest.SkipTest(
'These tests require pulp workers running on systemd'
)
def test_01_start_new_worker(self):
"""Start a new worker to be used in next assertions."""
- self.svc_mgr.start(['pulp-worker@99'])
+ self.svc_mgr.start(['pulpcore-worker@99'])
time.sleep(2)
workers = self.client.get(
WORKER_PATH, params={'online': True}
@@ -145,7 +145,7 @@ def test_01_start_new_worker(self):
@skip_if(bool, 'worker', False)
def test_02_stop_worker(self):
"""Stop the worker and assert it is offline."""
- self.svc_mgr.stop(['pulp-worker@99'])
+ self.svc_mgr.stop(['pulpcore-worker@99'])
time.sleep(2)
worker = self.client.get(self.worker['_href'])
self.assertEqual(worker['online'], False)
| [PR #4540/671f2074 backport][3.21] More export optimizations and a bugfix
**This is a backport of PR #4540 as merged into main (671f2074716e509ea83d90afa1fea013d6de0668).**
None
| 2019-10-03T16:34:20 |
|
pulp/pulpcore | 324 | pulp__pulpcore-324 | [
"5311"
] | e077cc55056e208886585c47ce3de23f901c8432 | diff --git a/pulpcore/app/viewsets/base.py b/pulpcore/app/viewsets/base.py
--- a/pulpcore/app/viewsets/base.py
+++ b/pulpcore/app/viewsets/base.py
@@ -8,7 +8,7 @@
from drf_yasg.utils import swagger_auto_schema
from rest_framework import viewsets
from rest_framework.generics import get_object_or_404
-from rest_framework.schemas import AutoSchema
+from rest_framework.schemas.openapi import AutoSchema
from rest_framework.serializers import ValidationError as DRFValidationError
from pulpcore.app import tasks
diff --git a/pulpcore/app/viewsets/upload.py b/pulpcore/app/viewsets/upload.py
--- a/pulpcore/app/viewsets/upload.py
+++ b/pulpcore/app/viewsets/upload.py
@@ -60,7 +60,7 @@ def get_serializer_class(self):
def get_serializer_context(self):
context = super().get_serializer_context()
- if self.action == 'update':
+ if self.action == 'update' and self.kwargs.get("pk"):
context["upload"] = self.get_object()
return context
| [noissue]: Update django-guid requirement from <=3.4.0,>=3.3 to >=3.3,<=3.5.0
Updates the requirements on [django-guid](https://github.com/snok/django-guid) to permit the latest version.
<details>
<summary>Release notes</summary>
<p><em>Sourced from <a href="https://github.com/snok/django-guid/releases">django-guid's releases</a>.</em></p>
<blockquote>
<h2>3.5.0 - Pptionally override the record field name where the guid is stored</h2>
<h2>What's Changed</h2>
<ul>
<li>Add the ability to customize the log record field name for correlation ids. by <a href="https://github.com/NiklasBeierl"><code>@NiklasBeierl</code></a> in <a href="https://redirect.github.com/snok/django-guid/pull/115">snok/django-guid#115</a></li>
</ul>
<p>Usage:</p>
<pre lang="python"><code> 'filters': {
'correlation_id': {
'()': 'django_guid.log_filters.CorrelationId',
# You can optionally override the record field name where the guid is stored
'correlation_id_field': 'my_custom_correlation_id'
}
}
</code></pre>
<h2>New Contributors</h2>
<ul>
<li><a href="https://github.com/NiklasBeierl"><code>@NiklasBeierl</code></a> made their first contribution in <a href="https://redirect.github.com/snok/django-guid/pull/115">snok/django-guid#115</a></li>
</ul>
<p><strong>Full Changelog</strong>: <a href="https://github.com/snok/django-guid/compare/3.4.1...3.5.0">https://github.com/snok/django-guid/compare/3.4.1...3.5.0</a></p>
</blockquote>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/snok/django-guid/commit/4d80d58a12f9f09c0ed34d40c3418ed1a77938c6"><code>4d80d58</code></a> chore: version bump</li>
<li><a href="https://github.com/snok/django-guid/commit/d039a7e081170804a5fe7c56620df7ff04d61a36"><code>d039a7e</code></a> New: Custom log field for correlation ids</li>
<li><a href="https://github.com/snok/django-guid/commit/da0fadb71e3a4f272887083b8a7056a0629bccb0"><code>da0fadb</code></a> Doc: Complete sentence for VALIDATE</li>
<li><a href="https://github.com/snok/django-guid/commit/9a9b00a37c0cfd8c1cd1ede08d180b3e8b6aa6e6"><code>9a9b00a</code></a> Merge pull request <a href="https://redirect.github.com/snok/django-guid/issues/114">#114</a> from ingvaldlorentzen/django-5-docs</li>
<li><a href="https://github.com/snok/django-guid/commit/b7bbbe59ce085377302413d2a16679075f9e79c0"><code>b7bbbe5</code></a> Update metadata to reflect supported versions</li>
<li><a href="https://github.com/snok/django-guid/commit/9bf8c7f8f818ec973d358dda731d13dfc614e7e0"><code>9bf8c7f</code></a> Merge pull request <a href="https://redirect.github.com/snok/django-guid/issues/113">#113</a> from ingvaldlorentzen/django-5</li>
<li><a href="https://github.com/snok/django-guid/commit/9047e4368cc3ac589e28f60fdd986ae0b5dd5ffb"><code>9047e43</code></a> Fix pre-commit changes</li>
<li><a href="https://github.com/snok/django-guid/commit/f5cda888a0e4760219acd74667e49ddbc9a99485"><code>f5cda88</code></a> Update pre-commit for linting fix</li>
<li><a href="https://github.com/snok/django-guid/commit/80dc41cecaeb5cad5af9774c07f86795b219eee5"><code>80dc41c</code></a> Bump version</li>
<li><a href="https://github.com/snok/django-guid/commit/9ac05f09946522682fd3e2e3ddc22a6fffa9741d"><code>9ac05f0</code></a> Update version matrix to use LTS for v3</li>
<li>Additional commits viewable in <a href="https://github.com/snok/django-guid/compare/3.3.0...3.5.0">compare view</a></li>
</ul>
</details>
<br />
Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
<details>
<summary>Dependabot commands and options</summary>
<br />
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
</details>
| 2019-10-04T18:42:33 |
||
pulp/pulpcore | 331 | pulp__pulpcore-331 | [
"5008"
] | 11d634a1fb0f48ba681b69c8a1c582a7921e3e26 | diff --git a/pulpcore/app/models/content.py b/pulpcore/app/models/content.py
--- a/pulpcore/app/models/content.py
+++ b/pulpcore/app/models/content.py
@@ -244,6 +244,7 @@ class Content(MasterModel, QueryMixin):
_artifacts (models.ManyToManyField): Artifacts related to Content through ContentArtifact
"""
TYPE = 'content'
+ repo_key = ()
_artifacts = models.ManyToManyField(Artifact, through='ContentArtifact')
diff --git a/pulpcore/app/models/repository.py b/pulpcore/app/models/repository.py
--- a/pulpcore/app/models/repository.py
+++ b/pulpcore/app/models/repository.py
@@ -1,10 +1,14 @@
"""
Repository related Django models.
"""
+from collections import defaultdict
from contextlib import suppress
+from gettext import gettext as _
+import logging
import django
from django.db import models, transaction
+from django.db.models import Q
from django.urls import reverse
from pulpcore.app.util import get_view_name_for_model
@@ -15,6 +19,9 @@
from .task import CreatedResource
+_logger = logging.getLogger(__name__)
+
+
class Repository(Model):
"""
Collection of content.
@@ -342,7 +349,12 @@ def next(self):
def add_content(self, content):
"""
- Add a content unit to this version.
+ Add a content unit to this version, and possibly remove duplcates in some situations.
+
+ In some situations plugin writers have configured new content to automatically remove other
+ content in the repository. For example, `pulp_file` content would conflict with a
+ `relative_path`. That is resolved by having the newer associated unit automatically remove
+ the other `pulp_file` content with the same `relative_path`.
Args:
content (django.db.models.QuerySet): Set of Content to add
@@ -351,9 +363,24 @@ def add_content(self, content):
pulpcore.exception.ResourceImmutableError: if add_content is called on a
complete RepositoryVersion
"""
+
if self.complete:
raise ResourceImmutableError(self)
+ query_for_repo_duplicates_by_type = defaultdict(lambda: Q())
+ for item in content.all():
+ if item.repo_key == ():
+ continue
+ unit_q_dict = {
+ field: getattr(item, field) for field in item.repo_key
+ }
+ query_for_repo_duplicates_by_type[item._meta.model] |= Q(**unit_q_dict)
+
+ for model in query_for_repo_duplicates_by_type:
+ _logger.debug(_("Removing duplicates for type: {}".format(model)))
+ qs = model.objects.filter(query_for_repo_duplicates_by_type[model])
+ self.remove_content(qs)
+
repo_content = []
for content_pk in content.exclude(pk__in=self.content).values_list('pk', flat=True):
repo_content.append(
| [PR #5007/075a37fd backport][3.45] Fix crud exporter test
**This is a backport of PR #5007 as merged into main (075a37fd5b68c09bddffb3205e2aa5c82e2bb549).**
Deletion task was not properly awaited.
[noissue]
| 2019-10-07T21:48:19 |
||
pulp/pulpcore | 336 | pulp__pulpcore-336 | [
"4544"
] | 54e9636f7aaf7507bb9f2beb286b12793a61bc11 | diff --git a/pulpcore/app/migrations/0011_relative_path.py b/pulpcore/app/migrations/0011_relative_path.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/app/migrations/0011_relative_path.py
@@ -0,0 +1,28 @@
+# Generated by Django 2.2.6 on 2019-10-11 17:29
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('core', '0010_pulp_fields'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='contentartifact',
+ name='relative_path',
+ field=models.TextField(),
+ ),
+ migrations.AlterField(
+ model_name='publishedartifact',
+ name='relative_path',
+ field=models.TextField(),
+ ),
+ migrations.AlterField(
+ model_name='publishedmetadata',
+ name='relative_path',
+ field=models.TextField(),
+ ),
+ ]
diff --git a/pulpcore/app/models/content.py b/pulpcore/app/models/content.py
--- a/pulpcore/app/models/content.py
+++ b/pulpcore/app/models/content.py
@@ -289,7 +289,7 @@ class ContentArtifact(Model, QueryMixin):
"""
artifact = models.ForeignKey(Artifact, on_delete=models.PROTECT, null=True)
content = models.ForeignKey(Content, on_delete=models.CASCADE)
- relative_path = models.CharField(max_length=255)
+ relative_path = models.TextField()
objects = BulkCreateManager()
diff --git a/pulpcore/app/models/publication.py b/pulpcore/app/models/publication.py
--- a/pulpcore/app/models/publication.py
+++ b/pulpcore/app/models/publication.py
@@ -143,7 +143,7 @@ class PublishedArtifact(Model):
content_artifact (models.ForeignKey): The referenced content artifact.
publication (models.ForeignKey): The publication in which the artifact is included.
"""
- relative_path = models.CharField(max_length=255)
+ relative_path = models.TextField()
content_artifact = models.ForeignKey('ContentArtifact', on_delete=models.CASCADE)
publication = models.ForeignKey(Publication, on_delete=models.CASCADE)
@@ -169,7 +169,7 @@ class PublishedMetadata(Content):
TYPE = 'publishedmetadata'
- relative_path = models.CharField(max_length=255)
+ relative_path = models.TextField()
publication = models.ForeignKey(Publication, on_delete=models.CASCADE)
| [noissue]: Update whitenoise requirement from <6.6.0,>=5.0 to >=5.0,<6.7.0
Updates the requirements on [whitenoise](https://github.com/evansd/whitenoise) to permit the latest version.
<details>
<summary>Changelog</summary>
<p><em>Sourced from <a href="https://github.com/evansd/whitenoise/blob/main/docs/changelog.rst">whitenoise's changelog</a>.</em></p>
<blockquote>
<h2>6.6.0 (2023-10-11)</h2>
<ul>
<li>
<p>Support Django 5.0.</p>
</li>
<li>
<p>Drop Python 3.7 support.</p>
</li>
</ul>
<h2>6.5.0 (2023-06-16)</h2>
<ul>
<li>
<p>Support Python 3.12.</p>
</li>
<li>
<p>Changed documentation site URL from <code>https://whitenoise.evans.io/</code> to <code>https://whitenoise.readthedocs.io/</code>.</p>
</li>
</ul>
<h2>6.4.0 (2023-02-25)</h2>
<ul>
<li>
<p>Support Django 4.2.</p>
</li>
<li>
<p>Remove further support for byte strings from the <code>root</code> and <code>prefix</code> arguments to <code>WhiteNoise</code>, and Django’s <code>STATIC_ROOT</code> setting.
Like in the previous release, this seems to be a remnant of Python 2 support.
Again, this change may be backwards incompatible for a small number of projects, but it’s unlikely.
Django does not support <code>STATIC_ROOT</code> being a byte string.</p>
</li>
</ul>
<h2>6.3.0 (2023-01-03)</h2>
<ul>
<li>
<p>Add some video file extensions to be ignored during compression.
Since such files are already heavily compressed, further compression rarely helps.</p>
<p>Thanks to Jon Ribbens in <code>PR [#431](https://github.com/evansd/whitenoise/issues/431) <https://github.com/evansd/whitenoise/pull/431></code>__.</p>
</li>
<li>
<p>Remove the behaviour of decoding byte strings passed for settings that take strings.
This seemed to be left around from supporting Python 2.
This change may be backwards incompatible for a small number of projects.</p>
</li>
<li>
<p>Document “hidden” feature of setting <code>max_age</code> to <code>None</code> to disable the <code>Cache-Control</code> header.</p>
</li>
<li>
<p>Drop support for working as old-style Django middleware, as support was <code>removed in Django 2.0 <https://docs.djangoproject.com/en/dev/releases/2.0/#features-removed-in-2-0></code>__.</p>
</li>
</ul>
<h2>6.2.0 (2022-06-05)</h2>
<ul>
<li>
<p>Support Python 3.11.</p>
</li>
<li>
<p>Support Django 4.1.</p>
</li>
</ul>
<h2>6.1.0 (2022-05-10)</h2>
<!-- raw HTML omitted -->
</blockquote>
<p>... (truncated)</p>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/evansd/whitenoise/commit/dbfd5f262f3390bd2304113a1b4eefa521056ef4"><code>dbfd5f2</code></a> Version 6.6.0</li>
<li><a href="https://github.com/evansd/whitenoise/commit/7119a17da8983e5f83ed0cc1e022cc673c01f553"><code>7119a17</code></a> Add changelog entry for Django 5.0 support</li>
<li><a href="https://github.com/evansd/whitenoise/commit/bcd0b785429efe87f18b6ce73ef69d3d1b6efd6d"><code>bcd0b78</code></a> [pre-commit.ci] pre-commit autoupdate (<a href="https://redirect.github.com/evansd/whitenoise/issues/534">#534</a>)</li>
<li><a href="https://github.com/evansd/whitenoise/commit/59348bad08fcb184f91552ac688c9e3ac5c7da89"><code>59348ba</code></a> [pre-commit.ci] pre-commit autoupdate (<a href="https://redirect.github.com/evansd/whitenoise/issues/532">#532</a>)</li>
<li><a href="https://github.com/evansd/whitenoise/commit/8b8770db03fe4cd043eaba3cd98c9f8c19831f3a"><code>8b8770d</code></a> [pre-commit.ci] pre-commit autoupdate (<a href="https://redirect.github.com/evansd/whitenoise/issues/530">#530</a>)</li>
<li><a href="https://github.com/evansd/whitenoise/commit/6034700b74c167bfb087ae7151b193e7591a31ca"><code>6034700</code></a> Upgrade requirements (<a href="https://redirect.github.com/evansd/whitenoise/issues/529">#529</a>)</li>
<li><a href="https://github.com/evansd/whitenoise/commit/776fbc8d65d8ad9e40c8343f43ccd4dea652bb18"><code>776fbc8</code></a> Standardize requirements section in docs (<a href="https://redirect.github.com/evansd/whitenoise/issues/528">#528</a>)</li>
<li><a href="https://github.com/evansd/whitenoise/commit/1929d89bb6aa42450b9227e2da26588c35a90adb"><code>1929d89</code></a> Use only absolute imports (<a href="https://redirect.github.com/evansd/whitenoise/issues/527">#527</a>)</li>
<li><a href="https://github.com/evansd/whitenoise/commit/803d3df22cd810f02372a19c5343db3140c34e7e"><code>803d3df</code></a> Support Django 5.0 (<a href="https://redirect.github.com/evansd/whitenoise/issues/526">#526</a>)</li>
<li><a href="https://github.com/evansd/whitenoise/commit/bcbf50f5c9e3b944d81d625155cddca60a8f3f5a"><code>bcbf50f</code></a> [pre-commit.ci] pre-commit autoupdate (<a href="https://redirect.github.com/evansd/whitenoise/issues/525">#525</a>)</li>
<li>Additional commits viewable in <a href="https://github.com/evansd/whitenoise/compare/v5.0...6.6.0">compare view</a></li>
</ul>
</details>
<br />
| 2019-10-11T17:33:56 |
||
pulp/pulpcore | 337 | pulp__pulpcore-337 | [
"4780"
] | c4411d94a96b6bda4c103e8385d94c0b7fb284c5 | diff --git a/pulpcore/app/viewsets/base.py b/pulpcore/app/viewsets/base.py
--- a/pulpcore/app/viewsets/base.py
+++ b/pulpcore/app/viewsets/base.py
@@ -3,6 +3,7 @@
from urllib.parse import urlparse
from django.core.exceptions import FieldError, ValidationError
+from django.forms.utils import ErrorList
from django.urls import Resolver404, resolve
from django_filters.rest_framework import filterset
from drf_yasg.utils import swagger_auto_schema
@@ -458,3 +459,20 @@ def filter_for_field(cls, field, name, lookup_expr):
field=name, expr=cls.LOOKUP_EXPR_TEXT[lookup_expr], value=val_word)
return f
+
+ def is_valid(self, *args, **kwargs):
+ is_valid = super().is_valid(*args, **kwargs)
+ DEFAULT_FILTERS = [
+ "exclude_fields", "fields", "limit", "minimal", "offset", "page_size"
+ ]
+ for field in self.data.keys():
+ if field in DEFAULT_FILTERS:
+ continue
+
+ if field not in self.filters:
+ errors = self.form._errors.get("errors", ErrorList())
+ errors.extend(["Invalid Filter: '{field}'".format(field=field)])
+ self.form._errors["errors"] = errors
+ is_valid = False
+
+ return is_valid
| Update CI files for branch 3.21
[noissue]
| 2019-10-11T22:24:18 |
||
pulp/pulpcore | 344 | pulp__pulpcore-344 | [
"5028"
] | 065364029f21f7aefcda44ce601fcd7b86cf10ed | diff --git a/pulpcore/app/models/repository.py b/pulpcore/app/models/repository.py
--- a/pulpcore/app/models/repository.py
+++ b/pulpcore/app/models/repository.py
@@ -18,7 +18,7 @@
from .base import MasterModel, Model
from .content import Artifact, Content
-from .task import CreatedResource
+from .task import CreatedResource, Task
_logger = logging.getLogger(__name__)
@@ -431,7 +431,7 @@ def create(cls, repository, base_version=None):
# now add any content that's in the base_version but not in version
version.add_content(base_version.content.exclude(pk__in=version.content))
- if not repository.plugin_managed:
+ if Task.current and not repository.plugin_managed:
resource = CreatedResource(content_object=version)
resource.save()
return version
| Update CI files for branch 3.21
| 2019-10-21T09:38:29 |
||
pulp/pulpcore | 364 | pulp__pulpcore-364 | [
"5378"
] | 4e14783c00f37691022ffe2c6b712b1bed142818 | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -1,13 +1,14 @@
import logging
import mimetypes
import os
+import re
from gettext import gettext as _
import django # noqa otherwise E402: module level not at top of file
django.setup() # noqa otherwise E402: module level not at top of file
from aiohttp.client_exceptions import ClientResponseError
-from aiohttp.web import FileResponse, StreamResponse
+from aiohttp.web import FileResponse, StreamResponse, HTTPOk
from aiohttp.web_exceptions import HTTPForbidden, HTTPFound, HTTPNotFound
from django.conf import settings
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
@@ -21,6 +22,7 @@
RepositoryVersion,
)
+from jinja2 import Template
log = logging.getLogger(__name__)
@@ -111,7 +113,7 @@ def _base_paths(path):
"""
tree = []
while True:
- base = os.path.split(path.strip('/'))[0]
+ base = os.path.split(path)[0]
if not base:
break
tree.append(base)
@@ -195,6 +197,69 @@ def response_headers(path):
headers['Content-Encoding'] = encoding
return headers
+ async def list_directory(self, repo_version, publication, path):
+ """
+ Generate HTML with directory listing of the path.
+
+ This method expects either a repo_version or a publication in addition to a path. This
+ method generates HTML directory list of a path inside the repository version or
+ publication.
+
+ Args:
+ repo_version (:class:`~pulpcore.app.models.RepositoryVersion`): The repository version
+ publication (:class:`~pulpcore.app.models.Publication`): Publication
+ path (str): relative path inside the repo version of publication.
+
+ Returns:
+ String representing HTML of the directory listing.
+ """
+ if not publication and not repo_version:
+ raise Exception("Either a repo_version or publication is required.")
+ if publication and repo_version:
+ raise Exception("Either a repo_version or publication can be specified.")
+
+ template = Template("""
+ <html>
+ <body>
+ <ul>
+ {% for name in dir_list %}
+ <li><a href="{{ name }}">{{ name }}</a></li>
+ {% endfor %}
+ </ul>
+ </body>
+ </html>
+ """)
+
+ def file_or_directory_name(directory_path, relative_path):
+ result = re.match(r'({})([^\/]*)(\/*)'.format(directory_path), relative_path)
+ return '{}{}'.format(result.groups()[1], result.groups()[2])
+
+ directory_list = set()
+
+ if publication:
+ pas = publication.published_artifact.filter(relative_path__startswith=path)
+ for pa in pas:
+ directory_list.add(file_or_directory_name(path, pa.relative_path))
+
+ if publication.pass_through:
+ cas = ContentArtifact.objects.filter(
+ content__in=publication.repository_version.content,
+ relative_path__startswith=path)
+ for ca in cas:
+ directory_list.add(file_or_directory_name(path, ca.relative_path))
+
+ if repo_version:
+ cas = ContentArtifact.objects.filter(
+ content__in=repo_version.content,
+ relative_path__startswith=path)
+ for ca in cas:
+ directory_list.add(file_or_directory_name(path, ca.relative_path))
+
+ if directory_list:
+ return template.render(dir_list=sorted(directory_list))
+ else:
+ raise PathNotResolved(path)
+
async def _match_and_stream(self, path, request):
"""
Match the path and stream results either from the filesystem or by downloading new data.
@@ -223,6 +288,15 @@ async def _match_and_stream(self, path, request):
publication = getattr(distro, 'publication', None)
if publication:
+ if rel_path == '' or rel_path[-1] == '/':
+ try:
+ index_path = '{}index.html'.format(rel_path)
+ publication.published_artifact.get(relative_path=index_path)
+ rel_path = index_path
+ except ObjectDoesNotExist:
+ dir_list = await self.list_directory(None, publication, rel_path)
+ return HTTPOk(headers={"Content-Type": "text/html"}, body=dir_list)
+
# published artifact
try:
pa = publication.published_artifact.get(relative_path=rel_path)
@@ -268,6 +342,17 @@ async def _match_and_stream(self, path, request):
if repository:
repo_version = RepositoryVersion.latest(distro.repository)
+ if rel_path == '' or rel_path[-1] == '/':
+ try:
+ index_path = '{}index.html'.format(rel_path)
+ ContentArtifact.objects.get(
+ content__in=repo_version.content,
+ relative_path=index_path)
+ rel_path = index_path
+ except ObjectDoesNotExist:
+ dir_list = await self.list_directory(repo_version, None, rel_path)
+ return HTTPOk(headers={"Content-Type": "text/html"}, body=dir_list)
+
try:
ca = ContentArtifact.objects.get(
content__in=repo_version.content,
| Add a scheduled cleanup task to keep the tasks table reasonable
We have seen substantial performance degradation on task tables that are several million rows long. While this is probably not uncommon in a usual Pulp installation, it is a tedious task for the admin to call purge tasks regularly.
We should just schedule it. To give admins the control they are used to, this should be guarded by a setting (maybe down to different task states) for how long tasks are supposed to be finished before they are vacuumed.
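A rough sketch of the kind of scheduled purge being proposed (the setting name and function are made up for illustration):

```python
# Illustration only; the setting name is hypothetical, not an existing Pulp setting.
from datetime import timedelta

from django.conf import settings
from django.utils import timezone

def purge_old_tasks():
    """Delete tasks in a final state that finished longer ago than the configured age."""
    from pulpcore.app.models import Task  # assumed import path

    protection = timedelta(days=getattr(settings, "TASK_PROTECTION_DAYS", 30))
    cutoff = timezone.now() - protection
    Task.objects.filter(
        state__in=["completed", "failed", "canceled"],
        finished_at__lt=cutoff,
    ).delete()
```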
Question: Are we confident to add a reasonable time as default, or does this feature need to be off by default?
| 2019-11-05T22:45:51 |
||
pulp/pulpcore | 382 | pulp__pulpcore-382 | [
"3541"
] | 93530a997585d74436b91eb336b23eff6e637d41 | diff --git a/pulpcore/plugin/repo_version_utils.py b/pulpcore/plugin/repo_version_utils.py
--- a/pulpcore/plugin/repo_version_utils.py
+++ b/pulpcore/plugin/repo_version_utils.py
@@ -22,10 +22,6 @@ def remove_duplicates(repository_version):
a `repo_key_fields` attribute with the field names to be compared. If all `repo_key_fields`
contain the same value for two content units, they are considered "repository duplicates".
- After instantiating `RemoveDuplicates` call it with the `run()` method and pass in the
- :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked and possibly modified as a
- parameter to `run()`.
-
Args:
repository_version: The :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked
and possibly modified.
| Improve the logic in the ACSHandleStage
**Version**
main
**Describe the bug**
If there are multiple Alternate Content Sources (ACS) that point to the same content, pick the first RemoteArtifact (RA) instead of the last.
**To Reproduce**
Steps to reproduce the behavior:
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
Add any other context about the problem here. Please provide links to any previous discussions via Discourse or Bugzilla.
| 2019-11-13T19:17:45 |
||
pulp/pulpcore | 427 | pulp__pulpcore-427 | [
"3707"
] | 9defd0608e353b863fc9a2b89d316b4a453e10f5 | diff --git a/pulpcore/app/models/task.py b/pulpcore/app/models/task.py
--- a/pulpcore/app/models/task.py
+++ b/pulpcore/app/models/task.py
@@ -121,7 +121,7 @@ def get_unreserved_worker(self):
Worker.DoesNotExist: If all Workers have at least one ReservedResource entry.
"""
workers_qs = self.online_workers().exclude(
- name__startswith=TASKING_CONSTANTS.RESOURCE_MANAGER_WORKER_NAME
+ name=TASKING_CONSTANTS.RESOURCE_MANAGER_WORKER_NAME
)
workers_qs_with_counts = workers_qs.annotate(models.Count('reservations'))
try:
@@ -215,7 +215,7 @@ def resource_managers(self):
:class:`django.db.models.query.QuerySet`: A query set of the Worker objects which
which match the resource manager name.
"""
- return self.filter(name__startswith=TASKING_CONSTANTS.RESOURCE_MANAGER_WORKER_NAME)
+ return self.filter(name=TASKING_CONSTANTS.RESOURCE_MANAGER_WORKER_NAME)
class Worker(Model):
diff --git a/pulpcore/tasking/services/worker_watcher.py b/pulpcore/tasking/services/worker_watcher.py
--- a/pulpcore/tasking/services/worker_watcher.py
+++ b/pulpcore/tasking/services/worker_watcher.py
@@ -75,10 +75,10 @@ def check_worker_processes():
mark_worker_offline(worker.name)
worker_count = Worker.objects.online_workers().exclude(
- name__startswith=TASKING_CONSTANTS.RESOURCE_MANAGER_WORKER_NAME).count()
+ name=TASKING_CONSTANTS.RESOURCE_MANAGER_WORKER_NAME).count()
resource_manager_count = Worker.objects.online_workers().filter(
- name__startswith=TASKING_CONSTANTS.RESOURCE_MANAGER_WORKER_NAME).count()
+ name=TASKING_CONSTANTS.RESOURCE_MANAGER_WORKER_NAME).count()
if resource_manager_count == 0:
msg = _("There are 0 pulpcore-resource-manager processes running. Pulp will not operate "
diff --git a/pulpcore/tasking/worker.py b/pulpcore/tasking/worker.py
--- a/pulpcore/tasking/worker.py
+++ b/pulpcore/tasking/worker.py
@@ -54,7 +54,7 @@ def __init__(self, queues, **kwargs):
else:
kwargs['name'] = "{pid}@{hostname}".format(pid=os.getpid(), hostname=socket.getfqdn())
- if kwargs['name'].startswith(TASKING_CONSTANTS.RESOURCE_MANAGER_WORKER_NAME):
+ if kwargs['name'] == TASKING_CONSTANTS.RESOURCE_MANAGER_WORKER_NAME:
queues = [Queue('resource-manager', connection=kwargs['connection'])]
else:
queues = [Queue(kwargs['name'], connection=kwargs['connection'])]
| [noissue]: Update psycopg2 requirement from <2.9.6,>=2.9.3 to >=2.9.3,<2.9.7
Updates the requirements on [psycopg2](https://github.com/psycopg/psycopg2) to permit the latest version.
<details>
<summary>Changelog</summary>
<p><em>Sourced from <a href="https://github.com/psycopg/psycopg2/blob/master/NEWS">psycopg2's changelog</a>.</em></p>
<blockquote>
<h2>Current release</h2>
<p>What's new in psycopg 2.9.6
^^^^^^^^^^^^^^^^^^^^^^^^^^^</p>
<ul>
<li>Package manylinux 2014 for aarch64 and ppc64le platforms, in order to
include libpq 15 in the binary package (:ticket:<code>[#1396](https://github.com/psycopg/psycopg2/issues/1396)</code>).</li>
<li>Wheel package compiled against OpenSSL 1.1.1t.</li>
</ul>
<p>What's new in psycopg 2.9.5
^^^^^^^^^^^^^^^^^^^^^^^^^^^</p>
<ul>
<li>Add support for Python 3.11.</li>
<li>Add support for rowcount in MERGE statements in binary packages
(:ticket:<code>[#1497](https://github.com/psycopg/psycopg2/issues/1497)</code>).</li>
<li>Wheel package compiled against OpenSSL 1.1.1r and PostgreSQL 15 libpq.</li>
</ul>
<p>What's new in psycopg 2.9.4
^^^^^^^^^^^^^^^^^^^^^^^^^^^</p>
<ul>
<li>Fix <code>~psycopg2.extras.register_composite()</code>,
<code>~psycopg2.extras.register_range()</code> with customized :sql:<code>search_path</code>
(:ticket:<code>[#1487](https://github.com/psycopg/psycopg2/issues/1487)</code>).</li>
<li>Handle correctly composite types with names or in schemas requiring escape.</li>
<li>Find <code>pg_service.conf</code> file in the <code>/etc/postgresql-common</code> directory in
binary packages (:ticket:<code>[#1365](https://github.com/psycopg/psycopg2/issues/1365)</code>).</li>
<li><code>~psycopg2.errorcodes</code> map and <code>~psycopg2.errors</code> classes updated to
PostgreSQL 15.</li>
<li>Wheel package compiled against OpenSSL 1.1.1q and PostgreSQL 14.4 libpq.</li>
</ul>
<p>What's new in psycopg 2.9.3
^^^^^^^^^^^^^^^^^^^^^^^^^^^</p>
<ul>
<li>Alpine (musl) wheels now available (:ticket:<code>[#1392](https://github.com/psycopg/psycopg2/issues/1392)</code>).</li>
<li>macOS arm64 (Apple M1) wheels now available (:ticket:<code>1482</code>).</li>
</ul>
<p>What's new in psycopg 2.9.2
^^^^^^^^^^^^^^^^^^^^^^^^^^^</p>
<ul>
<li>Raise <code>ValueError</code> for dates >= Y10k (:ticket:<code>[#1307](https://github.com/psycopg/psycopg2/issues/1307)</code>).</li>
<li><code>~psycopg2.errorcodes</code> map and <code>~psycopg2.errors</code> classes updated to
PostgreSQL 14.</li>
<li>Add preliminary support for Python 3.11 (:tickets:<code>[#1376](https://github.com/psycopg/psycopg2/issues/1376), [#1386](https://github.com/psycopg/psycopg2/issues/1386)</code>).</li>
<li>Wheel package compiled against OpenSSL 1.1.1l and PostgreSQL 14.1 libpq
(:ticket:<code>[#1388](https://github.com/psycopg/psycopg2/issues/1388)</code>).</li>
</ul>
<!-- raw HTML omitted -->
</blockquote>
<p>... (truncated)</p>
</details>
<details>
<summary>Commits</summary>
<ul>
<li>See full diff in <a href="https://github.com/psycopg/psycopg2/commits/2.9.6">compare view</a></li>
</ul>
</details>
<br />
| 2019-12-02T18:29:33 |
||
pulp/pulpcore | 522 | pulp__pulpcore-522 | [
"4733"
] | 46c19a87e74be0fc5cd178d4bdf6dd8b877a47b0 | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -334,7 +334,7 @@ async def _match_and_stream(self, path, request):
pass
else:
if ca.artifact:
- return self._handle_file_response(ca.artifact.file, headers)
+ return self._serve_content_artifact(ca, headers)
else:
return await self._stream_content_artifact(request,
StreamResponse(headers=headers), ca)
@@ -358,7 +358,7 @@ async def _match_and_stream(self, path, request):
pass
else:
if ca.artifact:
- return self._handle_file_response(ca.artifact.file, headers)
+ return self._serve_content_artifact(ca, headers)
else:
return await self._stream_content_artifact(request,
StreamResponse(headers=headers),
@@ -398,7 +398,7 @@ async def _match_and_stream(self, path, request):
except ObjectDoesNotExist:
pass
else:
- return self._handle_file_response(ca.artifact.file, headers)
+ return self._serve_content_artifact(ca, headers)
if distro.remote:
remote = distro.remote.cast()
@@ -407,7 +407,7 @@ async def _match_and_stream(self, path, request):
ra = RemoteArtifact.objects.get(remote=remote, url=url)
ca = ra.content_artifact
if ca.artifact:
- return self._handle_file_response(ca.artifact.file, headers)
+ return self._serve_content_artifact(ca, headers)
else:
return await self._stream_content_artifact(request,
StreamResponse(headers=headers),
@@ -519,15 +519,16 @@ def _save_artifact(self, download_result, remote_artifact):
content_artifact.save()
return artifact
- def _handle_file_response(self, file, headers):
+ def _serve_content_artifact(self, content_artifact, headers):
"""
- Handle response for file.
+ Handle response for a Content Artifact with the file present.
Depending on where the file storage (e.g. filesystem, S3, etc) this could be responding with
the file (filesystem) or a redirect (S3).
Args:
- file (:class:`django.db.models.fields.files.FieldFile`): File to respond with
+ content_artifact (:class:`pulpcore.app.models.ContentArtifact`): The Content Artifact to
+ respond with.
headers (dict): A dictionary of response headers.
Raises:
@@ -538,10 +539,15 @@ def _handle_file_response(self, file, headers):
The :class:`aiohttp.web.FileResponse` for the file.
"""
if settings.DEFAULT_FILE_STORAGE == 'pulpcore.app.models.storage.FileSystem':
- return FileResponse(os.path.join(settings.MEDIA_ROOT, file.name), headers=headers)
+ filename = content_artifact.artifact.file.name
+ return FileResponse(os.path.join(settings.MEDIA_ROOT, filename), headers=headers)
elif (settings.DEFAULT_FILE_STORAGE == 'storages.backends.s3boto3.S3Boto3Storage' or
settings.DEFAULT_FILE_STORAGE == 'storages.backends.azure_storage.AzureStorage'):
- raise HTTPFound(file.url, headers=headers)
+ filename_portion = '?response-content-disposition=attachment; filename={}'.format(
+ content_artifact.relative_path
+ )
+ url = content_artifact.artifact.file.url + filename_portion
+ raise HTTPFound(url, headers=headers)
else:
raise NotImplementedError()
| [PR #4731/2a3f42ea backport][3.22] Use new fixture signing key
**This is a backport of PR #4731 as merged into main (2a3f42ea8252560565a9fd0191101b73cf095988).**
[noissue]
| 2020-01-28T19:46:51 |
||
pulp/pulpcore | 1,062 | pulp__pulpcore-1062 | [
"3387"
] | b3afb2c193c05c88480ca514b69c9ee2af6304ac | diff --git a/pulpcore/app/serializers/content.py b/pulpcore/app/serializers/content.py
--- a/pulpcore/app/serializers/content.py
+++ b/pulpcore/app/serializers/content.py
@@ -1,4 +1,3 @@
-import hashlib
from gettext import gettext as _
from django.db import transaction
@@ -219,24 +218,24 @@ def validate(self, data):
_(f"Checksum algorithms {bad_algs} forbidden for this Pulp instance.")
)
- for algorithm in hashlib.algorithms_guaranteed:
- if algorithm in models.Artifact.DIGEST_FIELDS:
- digest = data["file"].hashers[algorithm].hexdigest()
-
- if algorithm in data and digest != data[algorithm]:
- raise serializers.ValidationError(
- _("The %s checksum did not match.") % algorithm
- )
- else:
- data[algorithm] = digest
- if algorithm in models.Artifact.RELIABLE_DIGEST_FIELDS:
- validator = UniqueValidator(
- models.Artifact.objects.all(),
- message=_("{0} checksum must be unique.").format(algorithm),
- )
- validator.instance = None
-
- validator(digest, self.fields[algorithm])
+ for algorithm in reversed(models.Artifact.DIGEST_FIELDS):
+ digest = data["file"].hashers[algorithm].hexdigest()
+
+ if algorithm in data and digest != data[algorithm]:
+ raise serializers.ValidationError(_("The %s checksum did not match.") % algorithm)
+ else:
+ data[algorithm] = digest
+
+ if algorithm in models.Artifact.RELIABLE_DIGEST_FIELDS:
+ validator = UniqueValidator(
+ models.Artifact.objects.all(),
+ message=_("Artifact with {0} checksum of '{1}' already exists.").format(
+ algorithm, digest
+ ),
+ )
+ validator.instance = None
+ validator(digest, self.fields[algorithm])
+
return data
class Meta:
| File System Exporter should have an option to generate listing files
**Is your feature request related to a problem? Please describe.**
When Katello uses the File System exporter, it does not generate the listing files in the CDN directory structure. Katello does not have direct access to pulp's content directories. Due to this it is not able to easily generate `listing` files. It relies right now on the hammer export command being run from the same machine as pulp.
**Describe the solution you'd like**
Pulp exporter should ideally have an option or a flag to generate `listing` files when fs exporter is run.
**Describe alternatives you've considered**
Couple of things to consider here
- This is only relevant to YUM/KS repos
- This is probably only useful for certain Yum repos. But a flag in the exporter would take care of that.
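As a rough illustration of what the requested flag could produce (purely hypothetical, not an existing exporter option), a CDN-style `listing` file in each directory just names that directory's immediate child directories:

```python
# Purely illustrative; not the exporter's implementation.
import os

def write_listing_files(export_root):
    """Write a 'listing' file per directory containing its child directory names."""
    for dirpath, dirnames, _filenames in os.walk(export_root):
        if dirnames:
            with open(os.path.join(dirpath, "listing"), "w") as f:
                f.write("\n".join(sorted(dirnames)) + "\n")
```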
| 2020-12-22T14:19:28 |
||
pulp/pulpcore | 1,434 | pulp__pulpcore-1434 | [
"1945"
] | e4c28367be25cd2c255c00223d130e5a52e72ef5 | diff --git a/pulpcore/app/apps.py b/pulpcore/app/apps.py
--- a/pulpcore/app/apps.py
+++ b/pulpcore/app/apps.py
@@ -100,7 +100,7 @@ def ready(self):
post_migrate.connect(
_populate_access_policies,
sender=self,
- dispatch_uid="polulate_access_policies_identifier",
+ dispatch_uid="populate_access_policies_identifier",
)
post_migrate.connect(_populate_roles, sender=self, dispatch_uid="populate_roles_identifier")
diff --git a/pulpcore/app/management/commands/remove-plugin.py b/pulpcore/app/management/commands/remove-plugin.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/app/management/commands/remove-plugin.py
@@ -0,0 +1,201 @@
+import time
+
+from gettext import gettext as _
+
+from django.apps import apps
+from django.db import connection, IntegrityError
+from django.db.migrations.exceptions import IrreversibleError
+from django.db.models.signals import post_migrate
+from django.conf import settings
+from django.contrib.contenttypes.models import ContentType
+from django.core.management import BaseCommand, call_command, CommandError
+
+from pulpcore.app.apps import pulp_plugin_configs
+from pulpcore.app.models import AccessPolicy, ContentAppStatus, Worker
+from pulpcore.app.models.role import Role
+from pulpcore.app.util import get_view_urlpattern
+
+DROP_PLUGIN_TABLES_QUERY = """
+DO $$
+ BEGIN
+ EXECUTE format('DROP TABLE %s',
+ (SELECT STRING_AGG(table_name, ', ')
+ FROM information_schema.tables
+ WHERE table_schema = 'public' AND table_name like '{app_label}_%'
+ )
+ );
+ END
+$$;
+""" # noqa
+
+
+class Command(BaseCommand):
+ """
+ Django management command for removing a plugin.
+
+ This command is in tech-preview.
+ """
+
+ help = _(
+ "[tech-preview] Disable a Pulp plugin and remove all the relevant data from the database. "
+ "Destructive!"
+ )
+
+ def add_arguments(self, parser):
+ parser.add_argument(
+ "plugin_name",
+ help=_("Name of a plugin to remove. E.g. file, container, rpm, pulp_2to3_migration."),
+ )
+
+ def _check_pulp_services(self):
+ """
+ Check if any pulp services are running and error out if they are.
+ """
+ is_pulp_running = True
+ waiting_time = max(settings.CONTENT_APP_TTL, settings.WORKER_TTL)
+ check_started = time.time()
+ self.stdout.write(
+ _("Checking if Pulp services are running, it can take up to {}s...").format(
+ waiting_time
+ )
+ )
+ while is_pulp_running and (time.time() - check_started) < waiting_time:
+ is_pulp_running = (
+ ContentAppStatus.objects.online().exists()
+ or Worker.objects.online_workers().exists()
+ )
+ time.sleep(2)
+
+ if is_pulp_running:
+ raise CommandError(
+ _(
+ "The command can't be used when Pulp services are running. "
+ "Please stop the services: pulpcore-api, pulpcore-content and all "
+ "pulpcore-worker@*."
+ )
+ )
+
+ def _remove_indirect_plugin_data(self, app_label):
+ """
+ Remove plugin data not accessible via plugin models.
+
+ Specifically,
+ - remove django content type by app_label (also auth permissions are removed by cascade)
+ - remove default access policies related to the plugin with provided app_label
+ - remove locked roles related to the plugin, do not touch the user defined ones.
+ """
+ ContentType.objects.filter(app_label=app_label).delete()
+ app_config = apps.get_app_config(app_label)
+ viewset_names = []
+ role_names = []
+ for viewset_batch in app_config.named_viewsets.values():
+ for viewset in viewset_batch:
+ viewset_names.append(get_view_urlpattern(viewset))
+ role_names.extend(getattr(viewset, "LOCKED_ROLES", {}).keys())
+
+ AccessPolicy.objects.filter(viewset_name__in=viewset_names, customized=False).delete()
+ Role.objects.filter(name__in=role_names, locked=True).delete()
+
+ def _remove_plugin_data(self, app_label):
+ """
+ Remove all plugin data.
+
+ Removal happens via ORM to be sure that all relations are cleaned properly as well,
+ e.g. Master-Detail, FKs to various content plugins in pulp-2to3-migration.
+
+ In some cases, the order in which models are removed matters, e.g. FK is a part of
+ uniqueness constraint. Try to remove such problematic models later.
+ """
+
+ models_to_delete = set(apps.all_models[app_label].values())
+ prev_model_count = len(models_to_delete) + 1
+ while models_to_delete and len(models_to_delete) < prev_model_count:
+ # while there is something to delete and something is being deleted on each iteration
+ removed_models = set()
+ for model in models_to_delete:
+ self.stdout.write(_("Removing model: {}").format(model))
+ try:
+ model.objects.filter().delete()
+ except IntegrityError:
+ continue
+ else:
+ removed_models.add(model)
+
+ prev_model_count = len(models_to_delete)
+ models_to_delete = models_to_delete - removed_models
+
+ if models_to_delete:
+ # Never-happen case
+ raise CommandError(
+ _(
+ "Data for the following models can't be removed: {}. Please contact plugin "
+ "maintainers."
+ ).format(list(models_to_delete))
+ )
+
+ self._remove_indirect_plugin_data(app_label)
+
+ def _drop_plugin_tables(self, app_label):
+ """
+ Drop plugin table with raw SQL.
+ """
+ with connection.cursor() as cursor:
+ cursor.execute(DROP_PLUGIN_TABLES_QUERY.format(app_label=app_label))
+
+ def _unapply_migrations(self, app_label):
+ """
+ Unapply migrations so the plugin can be installed/run django migrations again if needed.
+
+ Make sure no post migration signals are connected/run (it's enough to disable only
+ `populate_access_policy` and `populate_roles` for the requested plugin, so after
+ migration is run, policies are not repopulated but there is no need for any of
+ post_migrate operations to happen.)
+
+ Then, try to unmigrate the clean way, and if it fails, fake it until you make it.
+ A potential reason for the failure can be that some migrations are irreversible.
+ """
+ for app_config in pulp_plugin_configs():
+ post_migrate.disconnect(
+ sender=app_config, dispatch_uid="populate_access_policies_identifier"
+ )
+ post_migrate.disconnect(sender=app_config, dispatch_uid="populate_roles_identifier")
+ if app_config.label == "core":
+ post_migrate.disconnect(sender=app_config, dispatch_uid="delete_anon_identifier")
+
+ try:
+ call_command("migrate", app_label=app_label, migration_name="zero")
+ except (IrreversibleError, Exception):
+ # a plugin has irreversible migrations or some other problem, drop the tables and fake
+ # that migrations are unapplied.
+ self._drop_plugin_tables(app_label)
+ call_command("migrate", app_label=app_label, migration_name="zero", fake=True)
+
+ def handle(self, *args, **options):
+ plugin_name = options["plugin_name"]
+ if plugin_name == "core":
+ raise CommandError(_("Please specify a plugin name, core can't be removed."))
+
+ available_plugins = {app.label for app in pulp_plugin_configs()} - {"core"}
+ if plugin_name not in available_plugins:
+ raise CommandError(
+ _(
+ "Plugin name is incorrectly specified or plugin is not installed. Please "
+ "specify one of the following plugin names: {}."
+ ).format(list(available_plugins))
+ )
+
+ self._check_pulp_services()
+
+ self.stdout.write(_("Cleaning up the database for {} plugin...").format(plugin_name))
+ self._remove_plugin_data(app_label=plugin_name)
+
+ self.stdout.write(_("Unapplying {} plugin migrations...").format(plugin_name))
+ self._unapply_migrations(app_label=plugin_name)
+
+ self.stdout.write(
+ _(
+ "Successfully removed the {} plugin data. It is ready to be uninstalled. "
+ "NOTE: Please do uninstall, otherwise `pulp status` might not show you the correct "
+ "list of plugins available."
+ ).format(plugin_name)
+ )
| diff --git a/pulpcore/tests/conftest_pulp_file.py b/pulpcore/tests/conftest_pulp_file.py
--- a/pulpcore/tests/conftest_pulp_file.py
+++ b/pulpcore/tests/conftest_pulp_file.py
@@ -123,3 +123,13 @@ def _file_fixture_gen_remote_client_cert_req(*, fixture_name, policy, **kwargs):
return gen_object_with_cleanup(file_remote_api_client, kwargs)
yield _file_fixture_gen_remote_client_cert_req
+
+
[email protected]
+def file_fixture_gen_file_repo(file_repo_api_client, gen_object_with_cleanup):
+ """A factory to generate a File Repository with auto-deletion after the test run."""
+
+ def _file_fixture_gen_file_repo(**kwargs):
+ return gen_object_with_cleanup(file_repo_api_client, kwargs)
+
+ yield _file_fixture_gen_file_repo
diff --git a/pulpcore/tests/functional/api/using_plugin/test_remove_plugin.py b/pulpcore/tests/functional/api/using_plugin/test_remove_plugin.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/tests/functional/api/using_plugin/test_remove_plugin.py
@@ -0,0 +1,34 @@
+import pytest
+
+from pulp_smash.pulp3.utils import gen_repo
+
+
[email protected]
+def test_remove_plugin(
+ cli_client,
+ delete_orphans_pre,
+ file_fixture_gen_file_repo,
+ file_repo_api_client,
+ start_and_check_services,
+ stop_and_check_services,
+):
+ repo_name = "repo for plugin removal test"
+ file_repo_pre_removal = file_repo_api_client.create(gen_repo(name=repo_name))
+
+ assert stop_and_check_services() is True
+
+ res = cli_client.run(["pulpcore-manager", "remove-plugin", "file"])
+ assert "Successfully removed" in res.stdout
+
+ # Without uninstalling the package just run migrations again to mimic the reinstallation
+ # of a plugin at least from pulp's perspective
+ res = cli_client.run(["pulpcore-manager", "migrate", "file"])
+ assert res.stdout.endswith("updated.\n") is True
+
+ assert start_and_check_services() is True
+
+ # create a repo with the same name as before the removal
+ file_repo_post_reinstall = file_fixture_gen_file_repo(name=repo_name)
+
+ assert file_repo_pre_removal.name == file_repo_post_reinstall.name
+ assert file_repo_pre_removal.pulp_href != file_repo_post_reinstall.pulp_href
| As a user, I can remove a Pulp plugin
Author: @jlsherrill ([email protected])
Redmine Issue: 7822, https://pulp.plan.io/issues/7822
---
### Motivation
There are cases when a user might want to remove a plugin:
- plugin is no longer in use but its data in the db is quite large (pulp-2to3-migration case)
- incompatible plugins
### Deliverables
- all plugin-related data should be removed from the DB, as well as its schema
- artifacts can be left for orphan cleanup; they don't need to be removed immediately
- it should be possible to install the plugin again later; make sure django migrations are applied accordingly in that case
### Original request from Katello
> After a pulp2 to 3 migration, the leftover migration data is quite large and needs to be cleaned up. There needs to be a command or API to clean it up.
>
> In addition we need a way/instructions to remove the plugin altogether and delete its added schema entirely
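For reference, a hedged usage sketch of the tech-preview management command implemented above (the plugin name `file` is just an example, taken from the test):
```
# stop pulpcore-api, pulpcore-content and all pulpcore-worker@* services first, then:
$ pulpcore-manager remove-plugin file
# afterwards, uninstall the plugin's Python package, otherwise `pulp status` may still list it
```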
| 2021-06-22T18:42:48 |
|
pulp/pulpcore | 1,851 | pulp__pulpcore-1851 | [
"2078"
] | b17c8d1dba4add0a3c1572f3fa85af4009798487 | diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py
--- a/pulpcore/download/base.py
+++ b/pulpcore/download/base.py
@@ -160,6 +160,7 @@ async def finalize(self):
self._writer.flush()
os.fsync(self._writer.fileno())
self._writer.close()
+ self._writer = None
self.validate_digests()
self.validate_size()
| Downloader retry logic is broken when retrying a partially written file
Author: @mdellweg (mdellweg)
Redmine Issue: 9673, https://pulp.plan.io/issues/9673
---
None
| 2022-01-17T12:07:02 |
||
pulp/pulpcore | 2,103 | pulp__pulpcore-2103 | [
"2102"
] | da8770892af16992d5d546238bcf5b6e5a466978 | diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -54,14 +54,37 @@ def _destination_repo(importer, source_repo_name):
return Repository.objects.get(name=dest_repo_name)
-def _import_file(fpath, resource_class, do_raise=True):
+def _import_file(fpath, resource_class, retry=False):
try:
log.info(_("Importing file {}.").format(fpath))
with open(fpath, "r") as json_file:
data = Dataset().load(json_file.read(), format="json")
resource = resource_class()
log.info(_("...Importing resource {}.").format(resource.__class__.__name__))
- return resource.import_data(data, raise_errors=do_raise)
+ if retry:
+ # django import-export can have a problem with concurrent-imports that are
+ # importing the same 'thing' (e.g., a Package that exists in two different
+ # repo-versions that are being imported at the same time). If we're asked to
+ # retry, we will try an import that will simply record errors as they happen
+ # (rather than failing with an exception) first. If errors happen, we'll do one
+ # retry before we give up on this repo-version's import.
+ a_result = resource.import_data(data, raise_errors=False)
+ if a_result.has_errors():
+ log.info(
+ _("...{} import-errors encountered importing {}, retrying").format(
+ a_result.totals["error"], fpath
+ )
+ )
+ # Second attempt, we raise an exception on any problem.
+ # This will either succeed, or log a fatal error and fail.
+ try:
+ a_result = resource.import_data(data, raise_errors=True)
+ except Exception as e: # noqa log on ANY exception and then re-raise
+ log.error(_("FATAL import-failure importing {}").format(fpath))
+ raise
+ else:
+ a_result = resource.import_data(data, raise_errors=True)
+ return a_result
except AttributeError:
log.error(_("FAILURE importing file {}!").format(fpath))
raise
@@ -157,36 +180,14 @@ def import_repository_version(importer_pk, destination_repo_pk, source_repo_name
resulting_content_ids = []
for res_class in cfg.exportable_classes:
filename = f"{res_class.__module__}.{res_class.__name__}.json"
- a_result = _import_file(os.path.join(rv_path, filename), res_class, do_raise=False)
- # django import-export can have a problem with concurrent-imports that are
- # importing the same 'thing' (e.g., a Package that exists in two different
- # repo-versions that are being imported at the same time). We will try an import
- # that will simply record errors as they happen (rather than failing with an exception)
- # first. If errors happen, we'll do one retry before we give up on this repo-version's
- # import.
- if a_result.has_errors():
- log.info(
- _("...{} import-errors encountered importing {} from {}, retrying").format(
- a_result.totals["error"], filename, rv_name
- )
- )
- # Second attempt, we allow to raise an exception on any problem.
- # This will either succeed, or log a fatal error and fail.
- try:
- a_result = _import_file(os.path.join(rv_path, filename), res_class)
- except Exception as e: # noqa log on ANY exception and then re-raise
- log.error(
- _("FATAL import-failure importing {} from {}").format(filename, rv_name)
- )
- raise
-
+ a_result = _import_file(os.path.join(rv_path, filename), res_class, retry=True)
resulting_content_ids.extend(
row.object_id for row in a_result.rows if row.import_type in ("new", "update")
)
# Once all content exists, create the ContentArtifact links
ca_path = os.path.join(rv_path, CA_FILE)
- _import_file(ca_path, ContentArtifactResource)
+ _import_file(ca_path, ContentArtifactResource, retry=True)
# see if we have a content mapping
mapping_path = f"{rv_name}/{CONTENT_MAPPING_FILE}"
| Occasional UQ error core_repositoryversion_repository_id_number_3c54ce50_uniq on DistTree PulpImport
Rare failure, in this occasion invoked during a pulp-import testcase:
`pulp_rpm/tests/functional/api/test_pulpimport.py::DistributionTreePulpImportTestCase::test_import FAILED [ 81%]`
```
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.tasking.pulpcore_worker:INFO: Starting task c455492e-bc13-4517-b86a-6069bca53ed7
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.tasking.pulpcore_worker:INFO: Starting task 023517b6-401d-4f7e-be44-1d5814d0fe0c
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.ModulemdResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ModulemdResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.ModulemdDefaultsResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ModulemdDefaultsResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageGroupResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageGroupResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageCategoryResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageCategoryResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageEnvironmentResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageEnvironmentResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.PackageLangpacksResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageLangpacksResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.UpdateRecordResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateRecordResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.RepoMetadataFileResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource RepoMetadataFileResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.DistributionTreeResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource DistributionTreeResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.DistributionTreeRepositoryResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource DistributionTreeRepositoryResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.ChecksumResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ChecksumResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.ImageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ImageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.AddonResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource AddonResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.ModulemdResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ModulemdResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.ModulemdDefaultsResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ModulemdDefaultsResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageGroupResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageGroupResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.VariantResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource VariantResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageCategoryResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageCategoryResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.UpdateReferenceResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateReferenceResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.UpdateCollectionResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateCollectionResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulp_rpm.app.modelresource.UpdateCollectionPackageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateCollectionPackageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpopm4e5hz/repository-7206edf0-b89d-4da5-93d0-dcae838399c5_1/pulpcore.app.modelresource.ContentArtifactResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ContentArtifactResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageEnvironmentResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageEnvironmentResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.PackageLangpacksResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource PackageLangpacksResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.UpdateRecordResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateRecordResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.RepoMetadataFileResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource RepoMetadataFileResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.DistributionTreeResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource DistributionTreeResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.DistributionTreeRepositoryResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource DistributionTreeRepositoryResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.ChecksumResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ChecksumResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.ImageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ImageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.AddonResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource AddonResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.VariantResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource VariantResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.UpdateReferenceResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateReferenceResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.UpdateCollectionResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateCollectionResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulp_rpm.app.modelresource.UpdateCollectionPackageResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource UpdateCollectionPackageResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: Importing file /tmp/tmpg9flo4a_/repository-868191e5-b1bb-49f7-86b8-efe70ecb0329_1/pulpcore.app.modelresource.ContentArtifactResource.json.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.app.tasks.importer:INFO: ...Importing resource ContentArtifactResource.
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.tasking.pulpcore_worker:INFO: Task c455492e-bc13-4517-b86a-6069bca53ed7 failed (duplicate key value violates unique constraint "core_repositoryversion_repository_id_number_3c54ce50_uniq"
DETAIL: Key (repository_id, number)=(e84ff294-5d1b-4bfe-b5fb-e98d0c328537, 2) already exists.
)
pulp [2edbce945b1e462193b75d622f48aa44]: pulpcore.tasking.pulpcore_worker:INFO: File "/usr/local/lib/python3.8/site-packages/pulpcore/tasking/pulpcore_worker.py", line 362, in _perform_task
result = func(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/pulpcore/app/tasks/importer.py", line 206, in import_repository_version
with repo.new_version() as new_version:
File "/usr/local/lib/python3.8/site-packages/pulpcore/app/models/repository.py", line 126, in new_version
version.save()
File "/usr/lib64/python3.8/contextlib.py", line 75, in inner
return func(*args, **kwds)
File "/usr/local/lib/python3.8/site-packages/django_lifecycle/mixins.py", line 134, in save
save(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 739, in save
self.save_base(using=using, force_insert=force_insert,
File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 776, in save_base
updated = self._save_table(
File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 881, in _save_table
results = self._do_insert(cls._base_manager, using, fields, returning_fields, raw)
File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 919, in _do_insert
return manager._insert(
File "/usr/local/lib/python3.8/site-packages/django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/django/db/models/query.py", line 1270, in _insert
return query.get_compiler(using=using).execute_sql(returning_fields)
File "/usr/local/lib/python3.8/site-packages/django/db/models/sql/compiler.py", line 1416, in execute_sql
cursor.execute(sql, params)
File "/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py", line 66, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py", line 75, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
File "/usr/local/lib/python3.8/site-packages/django/db/utils.py", line 90, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/usr/local/lib/python3.8/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
```
| 2022-01-18T19:43:58 |
||
pulp/pulpcore | 2,117 | pulp__pulpcore-2117 | [
"2034"
] | 16b9e2ec954e33fed66ad6dccdd9d3077168907d | diff --git a/pulpcore/app/models/task.py b/pulpcore/app/models/task.py
--- a/pulpcore/app/models/task.py
+++ b/pulpcore/app/models/task.py
@@ -89,6 +89,16 @@ class Worker(BaseModel):
name = models.TextField(db_index=True, unique=True)
last_heartbeat = models.DateTimeField(auto_now=True)
+ @property
+ def current_task(self):
+ """
+ The task this worker is currently executing, if any.
+
+ Returns:
+ Task: The currently executing task
+ """
+ return self.tasks.filter(state="running").first()
+
@property
def online(self):
"""
diff --git a/pulpcore/app/serializers/task.py b/pulpcore/app/serializers/task.py
--- a/pulpcore/app/serializers/task.py
+++ b/pulpcore/app/serializers/task.py
@@ -192,10 +192,15 @@ class WorkerSerializer(ModelSerializer):
last_heartbeat = serializers.DateTimeField(
help_text=_("Timestamp of the last time the worker talked to the service."), read_only=True
)
- # disable "created" because we don't care about it
- created = None
+ current_task = RelatedField(
+ help_text=_(
+ "The task this worker is currently executing, or empty if the worker is not "
+ "currently assigned to a task."
+ ),
+ read_only=True,
+ view_name="tasks-detail",
+ )
class Meta:
model = models.Worker
- _base_fields = tuple(set(ModelSerializer.Meta.fields) - set(["created"]))
- fields = _base_fields + ("name", "last_heartbeat")
+ fields = ModelSerializer.Meta.fields + ("name", "last_heartbeat", "current_task")
| diff --git a/pulpcore/tests/functional/api/test_workers.py b/pulpcore/tests/functional/api/test_workers.py
--- a/pulpcore/tests/functional/api/test_workers.py
+++ b/pulpcore/tests/functional/api/test_workers.py
@@ -10,18 +10,12 @@
from pulpcore.tests.functional.utils import set_up_module as setUpModule # noqa:F401
from pulpcore.tests.functional.utils import skip_if
-_DYNAMIC_WORKER_ATTRS = ("last_heartbeat",)
+_DYNAMIC_WORKER_ATTRS = ("last_heartbeat", "current_task")
"""Worker attributes that are dynamically set by Pulp, not set by a user."""
class WorkersTestCase(unittest.TestCase):
- """Test actions over workers.
-
- This test targets the following issues:
-
- * `Pulp #3143 <https://pulp.plan.io/issues/3143>`_
- * `Pulp Smash #945 <https://github.com/pulp/pulp-smash/issues/945>`_
- """
+ """Test actions over workers."""
@classmethod
def setUpClass(cls):
@@ -38,6 +32,8 @@ def test_01_read_all_workers(self):
workers = self.client.get(WORKER_PATH)["results"]
for worker in workers:
for key, val in worker.items():
+ if key in _DYNAMIC_WORKER_ATTRS:
+ continue
with self.subTest(key=key):
self.assertIsNotNone(val)
self.worker.update(choice(workers))
| As a user, I can see the status (idle, working) of my workers
Author: @dralley (dalley)
Redmine Issue: 9183, https://pulp.plan.io/issues/9183
---
It would be helpful if the worker API (and status API) showed the task currently executing on that worker, and/or state information such as "idle" / "working" for each worker.
It may also be a good idea for the status API to list the number of tasks currently queued up.
This would re-enable some of the functionality provided by the "rq info" and "qpid-stat" tools in the past.
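A minimal sketch of the property added in the patch above (illustrative only); `current_task` is the running task or `None`, and the same value is exposed per worker in the workers API via the serializer change:
```python
from pulpcore.app.models import Worker

worker = Worker.objects.online_workers().first()
if worker and worker.current_task:
    print(f"{worker.name} is working on task {worker.current_task.pk}")
else:
    print("no online worker, or the worker is idle")
```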
| 2022-01-19T20:40:34 |
|
pulp/pulpcore | 2,122 | pulp__pulpcore-2122 | [
"2135"
] | 16b9e2ec954e33fed66ad6dccdd9d3077168907d | diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py
--- a/pulpcore/download/base.py
+++ b/pulpcore/download/base.py
@@ -103,7 +103,7 @@ def __init__(
self.semaphore = semaphore
else:
self.semaphore = asyncio.Semaphore() # This will always be acquired
- self._digests = {n: pulp_hashlib.new(n) for n in Artifact.DIGEST_FIELDS}
+ self._digests = {}
self._size = 0
if self.expected_digests:
if not set(self.expected_digests).intersection(set(Artifact.DIGEST_FIELDS)):
@@ -124,6 +124,8 @@ def _ensure_writer_has_open_file(self):
if not self._writer:
self._writer = tempfile.NamedTemporaryFile(dir=os.getcwd(), delete=False)
self.path = self._writer.name
+ self._digests = {n: pulp_hashlib.new(n) for n in Artifact.DIGEST_FIELDS}
+ self._size = 0
async def handle_data(self, data):
"""
| Retried downloads are not validating properly
When retrying a partial download, the size and digests need to be reset to validate.
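A minimal illustration (hypothetical class, not the real downloader) of the pattern the patch above moves to — hashers and the byte counter are (re)created together with the temporary file, so a retry that opens a fresh file also starts with fresh validation state:
```python
import hashlib
import tempfile

class SketchDownloader:
    def __init__(self):
        self._writer = None
        self._digests = {}
        self._size = 0

    def _ensure_writer_has_open_file(self):
        if not self._writer:
            self._writer = tempfile.NamedTemporaryFile(delete=False)
            # reset validation state whenever a new file is started (e.g. on retry)
            self._digests = {"sha256": hashlib.sha256()}
            self._size = 0

    def handle_data(self, data: bytes):
        self._ensure_writer_has_open_file()
        self._writer.write(data)
        for hasher in self._digests.values():
            hasher.update(data)
        self._size += len(data)
```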
| 2022-01-20T12:23:30 |
||
pulp/pulpcore | 2,124 | pulp__pulpcore-2124 | [
"2123"
] | bbf5a94803c18d6e111c76acdb874d7568d09103 | diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py
--- a/pulpcore/download/base.py
+++ b/pulpcore/download/base.py
@@ -7,6 +7,7 @@
import tempfile
from pulpcore.app import pulp_hashlib
+from pulpcore.app.loggers import deprecation_logger
from pulpcore.app.models import Artifact
from pulpcore.exceptions import (
DigestValidationError,
@@ -94,6 +95,12 @@ def __init__(
semaphore (asyncio.Semaphore): A semaphore the downloader must acquire before running.
Useful for limiting the number of outstanding downloaders in various ways.
"""
+ if custom_file_object:
+ deprecation_logger.warn(
+ "The 'custom_file_object' argument to 'BaseDownloader' is"
+ "deprecated and will be removed in pulpcore==3.20; stop using it."
+ )
+
self.url = url
self._writer = custom_file_object
self.path = None
| Deprecate unused custom_file_object in downloaders
This parameter is not properly handled by current code and must therefore be considered broken. Also it is unused.
| 2022-01-20T12:34:40 |
||
pulp/pulpcore | 2,149 | pulp__pulpcore-2149 | [
"2200"
] | f57bb9c8fc41831b5e5c392334c863035627b8c1 | diff --git a/pulpcore/app/management/commands/analyze-publication.py b/pulpcore/app/management/commands/analyze-publication.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/app/management/commands/analyze-publication.py
@@ -0,0 +1,72 @@
+from gettext import gettext as _
+
+from django.core.management import BaseCommand, CommandError
+from django.urls import reverse
+
+from pulpcore.app.models import Publication, Artifact, Distribution
+from pulpcore.app.util import get_view_name_for_model
+
+
+class Command(BaseCommand):
+ """Django management command for viewing files in a publication and the artifacts on disk."""
+
+ help = _(__doc__)
+
+ def add_arguments(self, parser):
+ """Set up arguments."""
+ parser.add_argument("--publication", required=False, help=_("A publication ID."))
+ parser.add_argument(
+ "--distribution-base-path", required=False, help=_("A base_path of a distribution.")
+ )
+ parser.add_argument("--tabular", action="store_true", help=_("Display as a table"))
+
+ def handle(self, *args, **options):
+ """Implement the command."""
+
+ if options["tabular"]:
+ try:
+ from prettytable import PrettyTable
+ except ImportError:
+ raise CommandError("'prettytable' package must be installed.")
+
+ if not (options["publication"] or options["distribution_base_path"]):
+ raise CommandError("Must provide either --publication or --distribution-base-path")
+ elif options["publication"] and options["distribution_base_path"]:
+ raise CommandError("Cannot provide both --publication and --distribution-base-path")
+ elif options["publication"]:
+ publication = Publication.objects.get(pk=options["publication"])
+ else:
+ distribution = Distribution.objects.get(base_path=options["distribution_base_path"])
+ if distribution.publication:
+ publication = distribution.publication
+ elif distribution.repository:
+ repository = distribution.repository
+ publication = Publication.objects.filter(
+ repository_version__in=repository.versions.all(), complete=True
+ ).latest("repository_version", "pulp_created")
+
+ published_artifacts = publication.published_artifact.select_related(
+ "content_artifact__artifact"
+ ).order_by("relative_path")
+ artifact_href_prefix = reverse(get_view_name_for_model(Artifact, "list"))
+
+ if options["tabular"]:
+ table = PrettyTable()
+ table.field_names = ["Apparent path", "Storage path"]
+ table.align = "l" # left align values
+
+ for pa in published_artifacts.iterator():
+ ca = pa.content_artifact
+ path = ca.artifact.file.path if ca.artifact else None
+ artifact_id = ca.artifact_id
+ artifact_href = (artifact_href_prefix + str(artifact_id)) if artifact_id else None
+ if options["tabular"]:
+ table.add_row([pa.relative_path, path or ""])
+ else:
+ print(pa.relative_path)
+ print("└─ Storage path: {}".format(path))
+ print("└─ Artifact href: {}".format(artifact_href))
+ print()
+
+ if options["tabular"]:
+ print(table)
| A convenient way to inspect which files are served by a publication / distribution
> There are frequent support issues like "I got a 404 error on a package request" or "my repo metadata are corrupted", where a very common divide-and-conquer approach is to examine the given package or metadata on disk. In pulp-2 this was straightforward, as everything for e.g. yum_distributor was in the /var/lib/pulp/published/yum/master/yum_distributor/${repo}/${publication_timestamp}/ directory. But in pulp-3 there is no easy way to get the location information.
It would be a boon for debugging, especially for non-devs, to be able to map from the path served by a distribution to the file on disk and artifact ID information.
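A hedged usage sketch of the command implemented above (identifiers are placeholders; per the code, exactly one of the two selectors must be given, and `--tabular` additionally requires the `prettytable` package):
```
$ pulpcore-manager analyze-publication --publication <publication_uuid>
$ pulpcore-manager analyze-publication --distribution-base-path <base_path> --tabular
```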
| 2022-01-24T22:57:07 |
||
pulp/pulpcore | 2,150 | pulp__pulpcore-2150 | [
"2147"
] | 499643a659bef0a1a537db57f3751dd1606909bc | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -825,16 +825,17 @@ async def handle_response_headers(headers):
continue
if response.status == 206 and lower_name == "content-length":
- range_bytes = int(value)
+ content_length = int(value)
start = 0 if range_start is None else range_start
- stop = range_bytes if range_stop is None else range_stop
+ stop = content_length if range_stop is None else range_stop
- range_bytes = range_bytes - range_start
- range_bytes = range_bytes - (int(value) - stop)
+ range_bytes = stop - start
response.headers[name] = str(range_bytes)
+ # aiohttp adds a 1 to the range.stop compared to http headers (including) to
+ # match python array adressing (exclusive)
response.headers["Content-Range"] = "bytes {0}-{1}/{2}".format(
- start, stop - start + 1, int(value)
+ start, stop - 1, content_length
)
continue
| Streaming range headers are calculated wrongly
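A small worked example of the corrected arithmetic in the patch above (numbers are illustrative): a client asking for `Range: bytes=100-199` of a 1000-byte file should get 100 bytes back.
```python
# aiohttp's http_range.stop is exclusive, so "bytes=100-199" arrives as start=100, stop=200
range_start, range_stop, content_length = 100, 200, 1000

range_bytes = range_stop - range_start                                     # 100
content_range = f"bytes {range_start}-{range_stop - 1}/{content_length}"  # "bytes 100-199/1000"
assert range_bytes == 100 and content_range == "bytes 100-199/1000"
```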
| 2022-01-25T08:13:28 |
||
pulp/pulpcore | 2,154 | pulp__pulpcore-2154 | [
"2147"
] | 82828595c4407735238a43307feaa9b62b540613 | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -837,16 +837,17 @@ async def handle_response_headers(headers):
continue
if response.status == 206 and lower_name == "content-length":
- range_bytes = int(value)
+ content_length = int(value)
start = 0 if range_start is None else range_start
- stop = range_bytes if range_stop is None else range_stop
+ stop = content_length if range_stop is None else range_stop
- range_bytes = range_bytes - range_start
- range_bytes = range_bytes - (int(value) - stop)
+ range_bytes = stop - start
response.headers[name] = str(range_bytes)
+ # aiohttp adds a 1 to the range.stop compared to http headers (including) to
+ # match python array adressing (exclusive)
response.headers["Content-Range"] = "bytes {0}-{1}/{2}".format(
- start, stop - start + 1, int(value)
+ start, stop - 1, content_length
)
continue
| Streaming range headers are calculated wrongly
| 2022-01-25T15:02:06 |
||
pulp/pulpcore | 2,155 | pulp__pulpcore-2155 | [
"2147"
] | 16ff1c5f895d43a2e68807653cfa8eab6ffbfb21 | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -825,16 +825,17 @@ async def handle_response_headers(headers):
continue
if response.status == 206 and lower_name == "content-length":
- range_bytes = int(value)
+ content_length = int(value)
start = 0 if range_start is None else range_start
- stop = range_bytes if range_stop is None else range_stop
+ stop = content_length if range_stop is None else range_stop
- range_bytes = range_bytes - range_start
- range_bytes = range_bytes - (int(value) - stop)
+ range_bytes = stop - start
response.headers[name] = str(range_bytes)
+ # aiohttp adds a 1 to the range.stop compared to http headers (including) to
+ # match python array adressing (exclusive)
response.headers["Content-Range"] = "bytes {0}-{1}/{2}".format(
- start, stop - start + 1, int(value)
+ start, stop - 1, content_length
)
continue
| Streaming range headers are calculated wrongly
| 2022-01-25T15:02:26 |
||
pulp/pulpcore | 2,159 | pulp__pulpcore-2159 | [
"2157"
] | 93328094f9f82e0b6697dfcd692c08e2ac2881f0 | diff --git a/pulpcore/app/models/content.py b/pulpcore/app/models/content.py
--- a/pulpcore/app/models/content.py
+++ b/pulpcore/app/models/content.py
@@ -14,11 +14,12 @@
from functools import lru_cache
from itertools import chain
+from psycopg2 import sql
from django.conf import settings
from django.core import validators
from django.core.files.storage import default_storage
-from django.db import IntegrityError, models, transaction
+from django.db import IntegrityError, connection, models, transaction
from django.forms.models import model_to_dict
from django.utils.timezone import now
from django_lifecycle import BEFORE_UPDATE, BEFORE_SAVE, hook
@@ -104,10 +105,44 @@ def touch(self):
"""
Update the ``timestamp_of_interest`` on all objects of the query.
- We order-by-pk here to avoid deadlocking in high-concurrency
- environments.
- """
- return self.order_by("pk").update(timestamp_of_interest=now())
+ Postgres' UPDATE call doesn't support order-by. This can (and does) result in deadlocks in
+ high-concurrency environments, when using touch() on overlapping data sets. In order to
+ prevent this, we choose to SELECT FOR UPDATE with SKIP LOCKS == True, and only update
+ the rows that we were able to get locks on. Since a previously-locked-row implies
+ that updating that row's timestamp-of-interest is the responsibility of whoever currently
+ owns it, this results in correct data, while closing the window on deadlocks.
+ """
+ # Build the list of ids we need to work on, since we're going to be building a
+ # SQL-query "by hand" in a moment.
+ pulp_ids = [f"'{uuid}'" for uuid in self.values_list("pk", flat=True)]
+ if not pulp_ids:
+ return None
+ ids_str = ",".join(pulp_ids)
+ # timestamp_of_interest exists on core_content and core_artifact, not on the Detail tables
+ # If we are an instance-of Content or its subclasses, we want to update the Content table.
+ # Otherwise, use the table associated w/ the query.
+ db_table = (
+ Content._meta.db_table if issubclass(self.model, Content) else self.model._meta.db_table
+ )
+ cursor = connection.cursor()
+ with transaction.atomic():
+ # SQL-sanitizing the table-name here is certainly overkill - sql-injection here would
+ # require code calling touch() on a Model whose table-name-str was carefully chosen to
+ # be Bad - but, good habits...
+ stmt = sql.SQL(
+ "UPDATE {table_name} "
+ " SET timestamp_of_interest = NOW() "
+ " WHERE pulp_id IN ("
+ " SELECT pulp_id "
+ " FROM {table_name} "
+ " WHERE pulp_id in ({ids}) "
+ " ORDER BY pulp_id "
+ " FOR UPDATE "
+ " SKIP LOCKED)".format(table_name=sql.Identifier(db_table).string, ids=ids_str)
+ )
+ rslt = cursor.execute(stmt)
+ cursor.close()
+ return rslt
class QueryMixin:
| touch() path still has a potential deadlock window.
https://bugzilla.redhat.com/show_bug.cgi?id=2021406
| You can force the deadlock by:
- Starting 10 pulpcore-workers
- Start up 2 instances of "pulpcore-manager shell"
- In each instance, execute the following script, at the same time:
```
import _thread
from pulpcore.plugin.models import Content
def update_timestamp(index):
print(">>>in update_timedstamp index {}".format(index))
Content.objects.touch()
print(">>>done {}".format(index))
for i in range(8):
_thread.start_new_thread(update_timestamp, (i,))
```
You will see several/many of the threads throwing deadlock exceptions. | 2022-01-25T22:12:23 |
|
pulp/pulpcore | 2,160 | pulp__pulpcore-2160 | [
"2119"
] | 187ab3a2baace499904f3bb0f5d36668f01c9e97 | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -95,6 +95,7 @@ class Handler:
hop_by_hop_headers = [
"connection",
"content-encoding",
+ "content-length",
"keep-alive",
"public",
"proxy-authenticate",
@@ -821,10 +822,9 @@ async def _stream_remote_artifact(self, request, response, remote_artifact):
async def handle_response_headers(headers):
for name, value in headers.items():
lower_name = name.lower()
- if lower_name in self.hop_by_hop_headers:
- continue
-
- if response.status == 206 and lower_name == "content-length":
+ if lower_name not in self.hop_by_hop_headers:
+ response.headers[name] = value
+ elif response.status == 206 and lower_name == "content-length":
content_length = int(value)
start = 0 if range_start is None else range_start
stop = content_length if range_stop is None else range_stop
@@ -837,9 +837,6 @@ async def handle_response_headers(headers):
response.headers["Content-Range"] = "bytes {0}-{1}/{2}".format(
start, stop - 1, content_length
)
- continue
-
- response.headers[name] = value
await response.prepare(request)
data_size_handled = 0
| DNF (still) gets wrong RPM download size for on-demand repo
Re-opening of https://pulp.plan.io/issues/9213, running the supposedly fixed pulp-rpm versions, dnf still gets the wrong sizes for RPM files downloaded from an on-demand repository. Fetching with curl or wget _sometimes_ causes dnf to then pull the correct content, but not other times. Pulp is in use here via Foreman/Katello nightly (updated regularly).
Issue reliably reproducible with https://rpm.releases.hashicorp.com/. Every newly released RPM has the same issue.
Installed versions (fix for #9213 supposedly in 3.16.0):
- rubygem-pulp_rpm_client-3.16.1-1.el8.noarch
- python38-pulp-rpm-3.16.1-1.el8.noarch
```
[root@alma-canary ~]# dnf upgrade consul -y
Dependencies resolved.
=================================================================================================================================
Package Architecture Version Repository Size
=================================================================================================================================
Upgrading:
consul x86_64 1.11.2-1 sihnon_hashicorp_stable-el8 37 M
Transaction Summary
=================================================================================================================================
Upgrade 1 Package
Total download size: 37 M
Downloading Packages:
[MIRROR] consul-1.11.2.x86_64.rpm: Interrupted by header callback: Server reports Content-Length: 38148438 but expected size is: 38490036
[MIRROR] consul-1.11.2.x86_64.rpm: Interrupted by header callback: Server reports Content-Length: 38148438 but expected size is: 38490036
[MIRROR] consul-1.11.2.x86_64.rpm: Interrupted by header callback: Server reports Content-Length: 38148438 but expected size is: 38490036
[MIRROR] consul-1.11.2.x86_64.rpm: Interrupted by header callback: Server reports Content-Length: 38148438 but expected size is: 38490036
[FAILED] consul-1.11.2.x86_64.rpm: No more mirrors to try - All mirrors were already tried without success
The downloaded packages were saved in cache until the next successful transaction.
You can remove cached packages by executing 'dnf clean packages'.
Error: Error downloading packages:
Cannot download Packages/c/consul-1.11.2.x86_64.rpm: All mirrors were tried
```
```
[root@alma-canary ~]# wget https://whitefall.jellybean.sihnon.net/pulp/content/sihnon/Library/custom/hashicorp/stable-el8/Packages/c/consul-1.11.2.x86_64.rpm
--2022-01-19 20:41:21-- https://whitefall.jellybean.sihnon.net/pulp/content/sihnon/Library/custom/hashicorp/stable-el8/Packages/c/consul-1.11.2.x86_64.rpm
Connecting to whitefall.jellybean.sihnon.net (whitefall.jellybean.sihnon.net)|81.187.154.141|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 38148438 (36M) [binary/octet-stream]
Saving to: ‘consul-1.11.2.x86_64.rpm.5’
consul-1.11.2.x86_64.rpm.5 100%[=======================================================>] 36.38M 11.5MB/s in 3.2s
2022-01-19 20:41:24 (11.5 MB/s) - ‘consul-1.11.2.x86_64.rpm.5’ saved [38148438/38148438]
[root@alma-canary ~]# rpm -K --nosignature consul-1.11.2.x86_64.rpm.5
consul-1.11.2.x86_64.rpm.5: DIGESTS NOT OK
```
Meanwhile downloading directly from upstream fetches the correct size:
```
[root@alma-canary ~]# dnf install --downloadonly consul-1.11.2 --disablerepo=* --enablerepo=hashicorp
Updating Subscription Management repositories.
Hashicorp Stable - x86_64 1.0 MB/s | 671 kB 00:00
Last metadata expiration check: 0:00:01 ago on Wed 19 Jan 2022 21:31:04 UTC.
Dependencies resolved.
=================================================================================================================================
Package Architecture Version Repository Size
=================================================================================================================================
Upgrading:
consul x86_64 1.11.2-1 hashicorp 37 M
Transaction Summary
=================================================================================================================================
Upgrade 1 Package
Total download size: 37 M
DNF will only download packages for the transaction.
Is this ok [y/N]: y
Downloading Packages:
consul-1.11.2.x86_64.rpm 8.9 MB/s | 37 MB 00:04
---------------------------------------------------------------------------------------------------------------------------------
Total 8.9 MB/s | 37 MB 00:04
Complete!
The downloaded packages were saved in cache until the next successful transaction.
You can remove cached packages by executing 'dnf clean packages'.
[root@alma-canary ~]# ls -l /var/cache/dnf/hashicorp-164999f2fbadbd87/packages/
total 105192
-rw-r--r-- 1 root root 38490036 Jan 19 21:31 consul-1.11.2.x86_64.rpm
```
| ~~Probably the same~~ A similar issue: https://github.com/pulp/pulpcore/issues/2147
@optiz0r Just to be clear on one detail, it's meant to be fixed in pulpcore 3.16, not pulp_rpm 3.16. What's your pulpcore version?
Nonetheless, I've reproduced it. It does still seem to just be the first download though (the streamed one) - further hits work as expected. Which means that `immediate` mode syncing should be OK.


Sorry for not specifying the pulp core versions:
```
pulpcore-selinux-1.2.6-2.el8.x86_64
python38-pulpcore-3.16.1-1.el8.noarch
rubygem-pulpcore_client-3.16.0-1.el8.noarch
```
I'm not currently able to reproduce the "first download fails, subsequent downloads succeed" reliably. In fact I have packages that no matter how many times I download them (via dnf, curl, wget), I consistently get the incorrect size.


I still haven't been able to reproduce that - yet. Maybe there's a bug that was unintentionally fixed on the main branch (which is what I'm using) vs. 3.16.
But we can focus on this first issue and see where we end up after fixing that.
Brain dump so I don't lose anything:
The size of the "incorrectly" downloaded file is identical to the `Content-Length` of the response from original server / repo. The difference is that the original response has `Content-Encoding: gzip` set which tells the client "I have applied this encoding, you should reverse it to get the correct data" and the streamed response from Pulp does not. ~~Thus for the first (streaming) download, depending entirely on the configuration of the remote server / repo, clients of Pulp could end up getting with a gzip-compressed version of the file they were supposed to get because Pulp forgot to tell them to decode the file.~~
[edit] nope, Pulp clients always get the decompressed bytes. But the `Content-Length` header is forwarded from the original request, and it seems that when the payload's actual size differs from `Content-Length`, the header wins, and the payload gets truncated. And that's why the files differ.
```
(pulp) [vagrant@pulp3-source-fedora35 ~]$ cmp -n 31024532 consul-1.11.0-1.x86_64.rpm consul-1.11.0-1.x86_64.rpm.1
(pulp) [vagrant@pulp3-source-fedora35 ~]$ cmp -n 31024533 consul-1.11.0-1.x86_64.rpm consul-1.11.0-1.x86_64.rpm.1
cmp: EOF on consul-1.11.0-1.x86_64.rpm after byte 31024532, in line 121994
```
(confirming that the file streamed from Pulp is a prefix of the file downloaded into Pulp)
The reason it isn't broken most of the time is that most of the time the remote server / repo doesn't try to gzip-compress RPM files. It's kind of pointless since they're already internally compressed.
The artifacts that get saved into Pulp's datastore seem to be *correct*, i.e. they are the decompressed versions - on the latest release. **TODO**: investigate if this was ever not the case in the past and whether that might cause "RPMs always have the wrong sizes"
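Putting this issue's numbers together (illustrative, and assuming the decompression explanation above is right):
```python
upstream_content_length = 38148438  # what the origin reports alongside Content-Encoding: gzip
decompressed_rpm_size = 38490036    # the actual RPM size, i.e. what Pulp streams to its client

# Pulp forwarded the upstream Content-Length verbatim, so clients either abort
# (dnf's "expected size" check) or truncate the decompressed stream at 38148438
# bytes (wget), which is why the saved file fails "rpm -K" digest checks.
assert upstream_content_length < decompressed_rpm_size
```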
If the actual cause is the `Content-Length` header being forwarded when the upstream server uses `Content-Encoding`, then the options are:
* Find a way to use a restrictive `Accept-Encoding` that prevents the server from setting `Content-Encoding` (disadvantage: for *some* files, but probably not RPMs, this might be less efficient)
* Disable `Content-Length` header for all streamed responses (disadvantages: TBD, but `Content-Length` is generally useful)
* Disable `Content-Length` header for all streamed responses IF the remote server used `Content-Encoding` (disadvantages: TBD)
* Some magic not known to me that would allow us to know the size of the decompressed file while still in the process of downloading the compressed bytes?
Having dipped a toe into the downloader and streaming logic, I really think forwarding the headers from the upstream server we are downloading from in that very moment is probably a convenient but bad idea. We should really provide the same headers to the client whether we serve streaming or from saved artifacts. | 2022-01-26T04:37:49 |
|
pulp/pulpcore | 2,162 | pulp__pulpcore-2162 | [
"2119"
] | 6478b4fba85e07ccdc43b01d65cf8cc0cb37cc70 | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -102,6 +102,7 @@ class Handler:
hop_by_hop_headers = [
"connection",
"content-encoding",
+ "content-length",
"keep-alive",
"public",
"proxy-authenticate",
@@ -833,10 +834,9 @@ def cast_remote_blocking():
async def handle_response_headers(headers):
for name, value in headers.items():
lower_name = name.lower()
- if lower_name in self.hop_by_hop_headers:
- continue
-
- if response.status == 206 and lower_name == "content-length":
+ if lower_name not in self.hop_by_hop_headers:
+ response.headers[name] = value
+ elif response.status == 206 and lower_name == "content-length":
content_length = int(value)
start = 0 if range_start is None else range_start
stop = content_length if range_stop is None else range_stop
@@ -849,9 +849,6 @@ async def handle_response_headers(headers):
response.headers["Content-Range"] = "bytes {0}-{1}/{2}".format(
start, stop - 1, content_length
)
- continue
-
- response.headers[name] = value
await response.prepare(request)
data_size_handled = 0
| DNF (still) gets wrong RPM download size for on-demand repo
Re-opening of https://pulp.plan.io/issues/9213, running the supposedly fixed pulp-rpm versions, dnf still gets the wrong sizes for RPM files downloaded from an on-demand repository. Fetching with curl or wget _sometimes_ causes dnf to then pull the correct content, but not other times. Pulp is in use here via Foreman/Katello nightly (updated regularly).
Issue reliably reproducible with https://rpm.releases.hashicorp.com/. Every newly released RPM has the same issue.
Installed versions (fix for #9213 supposedly in 3.16.0):
- rubygem-pulp_rpm_client-3.16.1-1.el8.noarch
- python38-pulp-rpm-3.16.1-1.el8.noarch
```
[root@alma-canary ~]# dnf upgrade consul -y
Dependencies resolved.
=================================================================================================================================
Package Architecture Version Repository Size
=================================================================================================================================
Upgrading:
consul x86_64 1.11.2-1 sihnon_hashicorp_stable-el8 37 M
Transaction Summary
=================================================================================================================================
Upgrade 1 Package
Total download size: 37 M
Downloading Packages:
[MIRROR] consul-1.11.2.x86_64.rpm: Interrupted by header callback: Server reports Content-Length: 38148438 but expected size is: 38490036
[MIRROR] consul-1.11.2.x86_64.rpm: Interrupted by header callback: Server reports Content-Length: 38148438 but expected size is: 38490036
[MIRROR] consul-1.11.2.x86_64.rpm: Interrupted by header callback: Server reports Content-Length: 38148438 but expected size is: 38490036
[MIRROR] consul-1.11.2.x86_64.rpm: Interrupted by header callback: Server reports Content-Length: 38148438 but expected size is: 38490036
[FAILED] consul-1.11.2.x86_64.rpm: No more mirrors to try - All mirrors were already tried without success
The downloaded packages were saved in cache until the next successful transaction.
You can remove cached packages by executing 'dnf clean packages'.
Error: Error downloading packages:
Cannot download Packages/c/consul-1.11.2.x86_64.rpm: All mirrors were tried
```
```
[root@alma-canary ~]# wget https://whitefall.jellybean.sihnon.net/pulp/content/sihnon/Library/custom/hashicorp/stable-el8/Packages/c/consul-1.11.2.x86_64.rpm
--2022-01-19 20:41:21-- https://whitefall.jellybean.sihnon.net/pulp/content/sihnon/Library/custom/hashicorp/stable-el8/Packages/c/consul-1.11.2.x86_64.rpm
Connecting to whitefall.jellybean.sihnon.net (whitefall.jellybean.sihnon.net)|81.187.154.141|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 38148438 (36M) [binary/octet-stream]
Saving to: ‘consul-1.11.2.x86_64.rpm.5’
consul-1.11.2.x86_64.rpm.5 100%[=======================================================>] 36.38M 11.5MB/s in 3.2s
2022-01-19 20:41:24 (11.5 MB/s) - ‘consul-1.11.2.x86_64.rpm.5’ saved [38148438/38148438]
[root@alma-canary ~]# rpm -K --nosignature consul-1.11.2.x86_64.rpm.5
consul-1.11.2.x86_64.rpm.5: DIGESTS NOT OK
```
Meanwhile downloading directly from upstream fetches the correct size:
```
[root@alma-canary ~]# dnf install --downloadonly consul-1.11.2 --disablerepo=* --enablerepo=hashicorp
Updating Subscription Management repositories.
Hashicorp Stable - x86_64 1.0 MB/s | 671 kB 00:00
Last metadata expiration check: 0:00:01 ago on Wed 19 Jan 2022 21:31:04 UTC.
Dependencies resolved.
=================================================================================================================================
Package Architecture Version Repository Size
=================================================================================================================================
Upgrading:
consul x86_64 1.11.2-1 hashicorp 37 M
Transaction Summary
=================================================================================================================================
Upgrade 1 Package
Total download size: 37 M
DNF will only download packages for the transaction.
Is this ok [y/N]: y
Downloading Packages:
consul-1.11.2.x86_64.rpm 8.9 MB/s | 37 MB 00:04
---------------------------------------------------------------------------------------------------------------------------------
Total 8.9 MB/s | 37 MB 00:04
Complete!
The downloaded packages were saved in cache until the next successful transaction.
You can remove cached packages by executing 'dnf clean packages'.
[root@alma-canary ~]# ls -l /var/cache/dnf/hashicorp-164999f2fbadbd87/packages/
total 105192
-rw-r--r-- 1 root root 38490036 Jan 19 21:31 consul-1.11.2.x86_64.rpm
```
| ~~Probably the same~~ A similar issue: https://github.com/pulp/pulpcore/issues/2147
@optiz0r Just to be clear on one detail, it's meant to be fixed in pulpcore 3.16, not pulp_rpm 3.16. What's your pulpcore version?
Nonetheless, I've reproduced it. It does still seem to just be the first download though (the streamed one) - further hits work as expected. Which means that `immediate` mode syncing should be OK.
Sorry for not specifying the pulp core versions:
```
pulpcore-selinux-1.2.6-2.el8.x86_64
python38-pulpcore-3.16.1-1.el8.noarch
rubygem-pulpcore_client-3.16.0-1.el8.noarch
```
I'm not currently able to reproduce the "first download fails, subsequent downloads succeed" reliably. In fact I have packages that no matter how many times I download them (via dnf, curl, wget), I consistently get the incorrect size.
I still haven't been able to reproduce that - yet. Maybe there's a bug that was unintentionally fixed on the main branch (which is what I'm using) vs. 3.16.
But we can focus on this first issue and see where we end up after fixing that.
Brain dump so I don't lose anything:
The size of the "incorrectly" downloaded file is identical to the `Content-Length` of the response from original server / repo. The difference is that the original response has `Content-Encoding: gzip` set which tells the client "I have applied this encoding, you should reverse it to get the correct data" and the streamed response from Pulp does not. ~~Thus for the first (streaming) download, depending entirely on the configuration of the remote server / repo, clients of Pulp could end up getting with a gzip-compressed version of the file they were supposed to get because Pulp forgot to tell them to decode the file.~~
[edit] nope, Pulp clients always get the decompressed bytes. But the `Content-Length` header is forwarded from the original request, and it seems that when the payload's actual size differs from `Content-Length`, the header wins, and the payload gets truncated. And that's why the files differ.
```
(pulp) [vagrant@pulp3-source-fedora35 ~]$ cmp -n 31024532 consul-1.11.0-1.x86_64.rpm consul-1.11.0-1.x86_64.rpm.1
(pulp) [vagrant@pulp3-source-fedora35 ~]$ cmp -n 31024533 consul-1.11.0-1.x86_64.rpm consul-1.11.0-1.x86_64.rpm.1
cmp: EOF on consul-1.11.0-1.x86_64.rpm after byte 31024532, in line 121994
```
(confirming that the file streamed from Pulp is a prefix of the file downloaded into Pulp)
The reason it isn't broken most of the time is that most of the time the remote server / repo doesn't try to gzip-compress RPM files. It's kind of pointless since they're already internally compressed.
The artifacts that get saved into Pulp's datastore seem to be *correct*, i.e. they are the decompressed versions - on the latest release. **TODO**: investigate if this was ever not the case in the past and whether that might cause "RPMs always have the wrong sizes"
If the actual cause is the `Content-Length` header being forwarded when the upstream server uses `Content-Encoding`, then the options are:
* Find a way to use a restrictive `Accept-Encoding` that prevents the server from setting `Content-Encoding` (disadvantage: for *some* files, but probably not RPMs, this might be less efficient)
* Disable `Content-Length` header for all streamed responses (disadvantages: TBD, but `Content-Length` is generally useful)
* Disable `Content-Length` header for all streamed responses IF the remote server used `Content-Encoding` (disadvantages: TBD)
* Some magic not known to me that would allow us to know the size of the decompressed file while still in the process of downloading the compressed bytes?
Having dipped a toe into the downloader and streaming logic, I really think forwarding the headers from the upstream server we are downloading from at that very moment is probably a convenient but bad idea. We should really provide the same headers to the client whether we serve by streaming or from saved artifacts. | 2022-01-26T15:43:35 |
|
pulp/pulpcore | 2,163 | pulp__pulpcore-2163 | [
"2119"
] | 37a2ed73d3ca6866257bb9cdfa18326fcb87677a | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -95,6 +95,7 @@ class Handler:
hop_by_hop_headers = [
"connection",
"content-encoding",
+ "content-length",
"keep-alive",
"public",
"proxy-authenticate",
@@ -821,10 +822,9 @@ async def _stream_remote_artifact(self, request, response, remote_artifact):
async def handle_response_headers(headers):
for name, value in headers.items():
lower_name = name.lower()
- if lower_name in self.hop_by_hop_headers:
- continue
-
- if response.status == 206 and lower_name == "content-length":
+ if lower_name not in self.hop_by_hop_headers:
+ response.headers[name] = value
+ elif response.status == 206 and lower_name == "content-length":
content_length = int(value)
start = 0 if range_start is None else range_start
stop = content_length if range_stop is None else range_stop
@@ -837,9 +837,6 @@ async def handle_response_headers(headers):
response.headers["Content-Range"] = "bytes {0}-{1}/{2}".format(
start, stop - 1, content_length
)
- continue
-
- response.headers[name] = value
await response.prepare(request)
data_size_handled = 0
| DNF (still) gets wrong RPM download size for on-demand repo
Re-opening of https://pulp.plan.io/issues/9213, running the supposedly fixed pulp-rpm versions, dnf still gets the wrong sizes for RPM files downloaded from an on-demand repository. Fetching with curl or wget _sometimes_ causes dnf to then pull the correct content, but not other times. Pulp is in use here via Foreman/Katello nightly (updated regularly).
Issue reliably reproducible with https://rpm.releases.hashicorp.com/. Every newly released RPM has the same issue.
Installed versions (fix for #9213 supposedly in 3.16.0):
- rubygem-pulp_rpm_client-3.16.1-1.el8.noarch
- python38-pulp-rpm-3.16.1-1.el8.noarch
```
[root@alma-canary ~]# dnf upgrade consul -y
Dependencies resolved.
=================================================================================================================================
Package Architecture Version Repository Size
=================================================================================================================================
Upgrading:
consul x86_64 1.11.2-1 sihnon_hashicorp_stable-el8 37 M
Transaction Summary
=================================================================================================================================
Upgrade 1 Package
Total download size: 37 M
Downloading Packages:
[MIRROR] consul-1.11.2.x86_64.rpm: Interrupted by header callback: Server reports Content-Length: 38148438 but expected size is: 38490036
[MIRROR] consul-1.11.2.x86_64.rpm: Interrupted by header callback: Server reports Content-Length: 38148438 but expected size is: 38490036
[MIRROR] consul-1.11.2.x86_64.rpm: Interrupted by header callback: Server reports Content-Length: 38148438 but expected size is: 38490036
[MIRROR] consul-1.11.2.x86_64.rpm: Interrupted by header callback: Server reports Content-Length: 38148438 but expected size is: 38490036
[FAILED] consul-1.11.2.x86_64.rpm: No more mirrors to try - All mirrors were already tried without success
The downloaded packages were saved in cache until the next successful transaction.
You can remove cached packages by executing 'dnf clean packages'.
Error: Error downloading packages:
Cannot download Packages/c/consul-1.11.2.x86_64.rpm: All mirrors were tried
```
```
[root@alma-canary ~]# wget https://whitefall.jellybean.sihnon.net/pulp/content/sihnon/Library/custom/hashicorp/stable-el8/Packages/c/consul-1.11.2.x86_64.rpm
--2022-01-19 20:41:21-- https://whitefall.jellybean.sihnon.net/pulp/content/sihnon/Library/custom/hashicorp/stable-el8/Packages/c/consul-1.11.2.x86_64.rpm
Connecting to whitefall.jellybean.sihnon.net (whitefall.jellybean.sihnon.net)|81.187.154.141|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 38148438 (36M) [binary/octet-stream]
Saving to: ‘consul-1.11.2.x86_64.rpm.5’
consul-1.11.2.x86_64.rpm.5 100%[=======================================================>] 36.38M 11.5MB/s in 3.2s
2022-01-19 20:41:24 (11.5 MB/s) - ‘consul-1.11.2.x86_64.rpm.5’ saved [38148438/38148438]
[root@alma-canary ~]# rpm -K --nosignature consul-1.11.2.x86_64.rpm.5
consul-1.11.2.x86_64.rpm.5: DIGESTS NOT OK
```
Meanwhile downloading directly from upstream fetches the correct size:
```
[root@alma-canary ~]# dnf install --downloadonly consul-1.11.2 --disablerepo=* --enablerepo=hashicorp
Updating Subscription Management repositories.
Hashicorp Stable - x86_64 1.0 MB/s | 671 kB 00:00
Last metadata expiration check: 0:00:01 ago on Wed 19 Jan 2022 21:31:04 UTC.
Dependencies resolved.
=================================================================================================================================
Package Architecture Version Repository Size
=================================================================================================================================
Upgrading:
consul x86_64 1.11.2-1 hashicorp 37 M
Transaction Summary
=================================================================================================================================
Upgrade 1 Package
Total download size: 37 M
DNF will only download packages for the transaction.
Is this ok [y/N]: y
Downloading Packages:
consul-1.11.2.x86_64.rpm 8.9 MB/s | 37 MB 00:04
---------------------------------------------------------------------------------------------------------------------------------
Total 8.9 MB/s | 37 MB 00:04
Complete!
The downloaded packages were saved in cache until the next successful transaction.
You can remove cached packages by executing 'dnf clean packages'.
[root@alma-canary ~]# ls -l /var/cache/dnf/hashicorp-164999f2fbadbd87/packages/
total 105192
-rw-r--r-- 1 root root 38490036 Jan 19 21:31 consul-1.11.2.x86_64.rpm
```
| ~~Probably the same~~ A similar issue: https://github.com/pulp/pulpcore/issues/2147
@optiz0r Just to be clear on one detail, it's meant to be fixed in pulpcore 3.16, not pulp_rpm 3.16. What's your pulpcore version?
Nonetheless, I've reproduced it. It does still seem to just be the first download though (the streamed one) - further hits work as expected. Which means that `immediate` mode syncing should be OK.
Sorry for not specifying the pulp core versions:
```
pulpcore-selinux-1.2.6-2.el8.x86_64
python38-pulpcore-3.16.1-1.el8.noarch
rubygem-pulpcore_client-3.16.0-1.el8.noarch
```
I'm not currently able to reproduce the "first download fails, subsequent downloads succeed" reliably. In fact I have packages that no matter how many times I download them (via dnf, curl, wget), I consistently get the incorrect size.
I still haven't been able to reproduce that - yet. Maybe there's a bug that was unintentionally fixed on the main branch (which is what I'm using) vs. 3.16.
But we can focus on this first issue and see where we end up after fixing that.
Brain dump so I don't lose anything:
The size of the "incorrectly" downloaded file is identical to the `Content-Length` of the response from original server / repo. The difference is that the original response has `Content-Encoding: gzip` set which tells the client "I have applied this encoding, you should reverse it to get the correct data" and the streamed response from Pulp does not. ~~Thus for the first (streaming) download, depending entirely on the configuration of the remote server / repo, clients of Pulp could end up getting with a gzip-compressed version of the file they were supposed to get because Pulp forgot to tell them to decode the file.~~
[edit] nope, Pulp clients always get the decompressed bytes. But the `Content-Length` header is forwarded from the original request, and it seems that when the payload's actual size differs from `Content-Length`, the header wins, and the payload gets truncated. And that's why the files differ.
```
(pulp) [vagrant@pulp3-source-fedora35 ~]$ cmp -n 31024532 consul-1.11.0-1.x86_64.rpm consul-1.11.0-1.x86_64.rpm.1
(pulp) [vagrant@pulp3-source-fedora35 ~]$ cmp -n 31024533 consul-1.11.0-1.x86_64.rpm consul-1.11.0-1.x86_64.rpm.1
cmp: EOF on consul-1.11.0-1.x86_64.rpm after byte 31024532, in line 121994
```
(confirming that the file streamed from Pulp is a prefix of the file downloaded into Pulp)
The reason it isn't broken most of the time is that most of the time the remote server / repo doesn't try to gzip-compress RPM files. It's kind of pointless since they're already internally compressed.
The artifacts that get saved into Pulp's datastore seem to be *correct*, i.e. they are the decompressed versions - on the latest release. **TODO**: investigate if this was ever not the case in the past and whether that might cause "RPMs always have the wrong sizes"
If the actual cause is the `Content-Length` header being forwarded when the upstream server uses `Content-Encoding`, then the options are:
* Find a way to use a restrictive `Accept-Encoding` that prevents the server from setting `Content-Encoding` (disadvantage: for *some* files, but probably not RPMs, this might be less efficient)
* Disable `Content-Length` header for all streamed responses (disadvantages: TBD, but `Content-Length` is generally useful)
* Disable `Content-Length` header for all streamed responses IF the remote server used `Content-Encoding` (disadvantages: TBD)
* Some magic not known to me that would allow us to know the size of the decompressed file while still in the process of downloading the compressed bytes?
Having dipped a toe into the downloader and streaming logic, I really think forwarding the headers from the upstream server we are downloading from at that very moment is probably a convenient but bad idea. We should really provide the same headers to the client whether we serve by streaming or from saved artifacts. | 2022-01-26T15:43:49 |
|
pulp/pulpcore | 2,167 | pulp__pulpcore-2167 | [
"2187"
] | 8a739a76ad3c7694c8460730a3ad27373e8d41d4 | diff --git a/pulpcore/content/handler.py b/pulpcore/content/handler.py
--- a/pulpcore/content/handler.py
+++ b/pulpcore/content/handler.py
@@ -772,7 +772,10 @@ async def _serve_content_artifact(self, content_artifact, headers, request):
artifact_name = artifact_file.name
if settings.DEFAULT_FILE_STORAGE == "pulpcore.app.models.storage.FileSystem":
- return FileResponse(os.path.join(settings.MEDIA_ROOT, artifact_name), headers=headers)
+ path = os.path.join(settings.MEDIA_ROOT, artifact_name)
+ if not os.path.exists(path):
+ raise Exception(_("Expected path '{}' is not found").format(path))
+ return FileResponse(path, headers=headers)
elif settings.DEFAULT_FILE_STORAGE == "storages.backends.s3boto3.S3Boto3Storage":
content_disposition = f"attachment;filename={content_artifact.relative_path}"
parameters = {"ResponseContentDisposition": content_disposition}
| Raise 404 instead of 502 when expected file is missing from the FS.
After content sync/upload remove the artifact from /var/lib/pulp/media/artifact/...
```
$ http https://pulp3-source-fedora34.fluffy.example.com/pulp/content/test_path/1.iso
HTTP/1.1 502 Bad Gateway
Connection: keep-alive
Content-Length: 157
Content-Type: text/html
Date: Tue, 01 Feb 2022 14:17:11 GMT
Server: nginx/1.20.1
<html>
<head><title>502 Bad Gateway</title></head>
<body>
<center><h1>502 Bad Gateway</h1></center>
<hr><center>nginx/1.20.1</center>
</body>
</html>
(pulp) [vagrant@pulp3-source-fedora34 ~]$ sudo journalctl -r
-- Journal begins at Fri 2022-01-07 03:33:20 UTC, ends at Tue 2022-02-01 14:17:30 UTC. --
Feb 01 14:17:11 pulp3-source-fedora34.fluffy.example.com gunicorn[154878]: FileNotFoundError: [Errno 2] No such file or directory: '/var/lib/pulp/media/artif>
Feb 01 14:17:11 pulp3-source-fedora34.fluffy.example.com gunicorn[154878]: return self._accessor.stat(self)
Feb 01 14:17:11 pulp3-source-fedora34.fluffy.example.com gunicorn[154878]: File "/usr/lib64/python3.9/pathlib.py", line 1232, in stat
Feb 01 14:17:11 pulp3-source-fedora34.fluffy.example.com gunicorn[154878]: result = self.fn(*self.args, **self.kwargs)
Feb 01 14:17:11 pulp3-source-fedora34.fluffy.example.com gunicorn[154878]: File "/usr/lib64/python3.9/concurrent/futures/thread.py", line 52, in run
Feb 01 14:17:11 pulp3-source-fedora34.fluffy.example.com gunicorn[154878]: st: os.stat_result = await loop.run_in_executor(None, filepath.stat)
Feb 01 14:17:11 pulp3-source-fedora34.fluffy.example.com gunicorn[154878]: File "/usr/local/lib/pulp/lib64/python3.9/site-packages/aiohttp/web_fileresponse>
Feb 01 14:17:11 pulp3-source-fedora34.fluffy.example.com gunicorn[154878]: await prepare_meth(request)
Feb 01 14:17:11 pulp3-source-fedora34.fluffy.example.com gunicorn[154878]: File "/usr/local/lib/pulp/lib64/python3.9/site-packages/aiohttp/web_protocol.py">
Feb 01 14:17:11 pulp3-source-fedora34.fluffy.example.com gunicorn[154878]: reset = await self.finish_response(request, resp, start_time)
Feb 01 14:17:11 pulp3-source-fedora34.fluffy.example.com gunicorn[154878]: File "/usr/local/lib/pulp/lib64/python3.9/site-packages/aiohttp/web_protocol.py">
Feb 01 14:17:11 pulp3-source-fedora34.fluffy.example.com gunicorn[154878]: resp, reset = await task
Feb 01 14:17:11 pulp3-source-fedora34.fluffy.example.com gunicorn[154878]: File "/usr/local/lib/pulp/lib64/python3.9/site-packages/aiohttp/web_protocol.py">
Feb 01 14:17:11 pulp3-source-fedora34.fluffy.example.com gunicorn[154878]: Traceback (most recent call last):
Feb 01 14:17:11 pulp3-source-fedora34.fluffy.example.com gunicorn[154878]: [2022-02-01 14:17:11 +0000] [154878] [ERROR] Unhandled exception
```
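One way to turn the missing-file case into a clean 404 at the aiohttp layer, instead of letting `FileResponse` fail with `FileNotFoundError` mid-response (which nginx then surfaces as the 502 above). This is only a sketch; the route and path resolution are assumptions, and the actual patch above raises an explicit exception rather than an HTTP 404:

```python
import os
from aiohttp import web

MEDIA_ROOT = "/var/lib/pulp/media"  # assumed location, matching the traceback above

async def serve_artifact(request):
    # "artifact_path" is a stand-in for however the real handler resolves the file.
    path = os.path.join(MEDIA_ROOT, request.match_info["artifact_path"])
    if not os.path.exists(path):
        # Missing on disk: answer 404 instead of crashing while streaming.
        raise web.HTTPNotFound(reason="Expected path '{}' is not found".format(path))
    return web.FileResponse(path)

app = web.Application()
app.add_routes([web.get("/pulp/content/{artifact_path:.*}", serve_artifact)])
```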
| 2022-01-27T17:39:58 |
||
pulp/pulpcore | 2,184 | pulp__pulpcore-2184 | [
"2183"
] | 8ed01b53d3ce97926922c3a1dc455a210052184a | diff --git a/pulpcore/tasking/pulpcore_worker.py b/pulpcore/tasking/pulpcore_worker.py
--- a/pulpcore/tasking/pulpcore_worker.py
+++ b/pulpcore/tasking/pulpcore_worker.py
@@ -194,6 +194,13 @@ def iter_tasks(self):
# The lock will be automatically be released at the end of the block
# Check if someone else changed the task before we got the lock
task.refresh_from_db()
+ if task.state == TASK_STATES.CANCELING and task.worker is None:
+ # No worker picked this task up before being canceled
+ if self.cancel_abandoned_task(task, TASK_STATES.CANCELED):
+ # Continue looking for the next task
+ # without considering this tasks resources
+ # as we just released them
+ continue
if task.state in [TASK_STATES.RUNNING, TASK_STATES.CANCELING]:
# A running task without a lock must be abandoned
if self.cancel_abandoned_task(
| Canceling tasks never picked up by a worker marks them failed
Code investigation shows we are doing the right thing for the right reason, but reporting the wrong thing with the wrong reason.
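In other words, per the patch above: a task can reach the canceling state before any worker ever claims it, and in that case the honest final state is canceled, not failed. A condensed, illustrative restatement (plain Python, not the worker loop):

```python
from collections import namedtuple

Task = namedtuple("Task", "state worker")

def state_for_unlocked_canceling_task(task):
    """Sketch of the special case the patch adds when iterating unlocked tasks."""
    if task.state == "canceling" and task.worker is None:
        # Cancellation arrived before any worker picked the task up, so nothing
        # actually ran; release its resources and report it as canceled.
        return "canceled"
    # Everything else falls through to the pre-existing abandoned-task handling.
    return None

assert state_for_unlocked_canceling_task(Task("canceling", None)) == "canceled"
```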
| 2022-02-01T11:11:59 |
||
pulp/pulpcore | 2,186 | pulp__pulpcore-2186 | [
"2185"
] | 81185d2c54f8ae879cadcf554de5efce31a7e958 | diff --git a/pulpcore/app/models/__init__.py b/pulpcore/app/models/__init__.py
--- a/pulpcore/app/models/__init__.py
+++ b/pulpcore/app/models/__init__.py
@@ -59,6 +59,7 @@
)
# This can lead to circular imports with a custom user model depending on this very module
+# Moved to plugin/models/role.py to avoid the circular import.
# from .role import ( # noqa
# GroupRole,
# Role,
diff --git a/pulpcore/plugin/models/role.py b/pulpcore/plugin/models/role.py
new file mode 100644
--- /dev/null
+++ b/pulpcore/plugin/models/role.py
@@ -0,0 +1,8 @@
+# Models are exposed selectively in the versioned plugin API.
+# Any models defined in the pulpcore.plugin namespace should probably be proxy models.
+
+from pulpcore.app.models.role import ( # noqa
+ GroupRole,
+ Role,
+ UserRole,
+)
| Circular import exists when using custom user model
This currently occurs when a plugin using a custom user model depends on the Role model [here](https://github.com/pulp/pulpcore/blob/main/pulpcore/app/models/__init__.py#L61).
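With the re-exports moved into the dedicated `pulpcore/plugin/models/role.py` module added above, a plugin that ships a custom user model can import the role models without importing `pulpcore.app.models` as a whole. A rough usage sketch (assumes a pulpcore release containing this change; the helper is purely illustrative):

```python
# Import the proxy-exposed role models from the plugin namespace instead of
# pulpcore.app.models, sidestepping the import cycle described above.
from pulpcore.plugin.models.role import Role, UserRole, GroupRole  # noqa: F401

def role_names():
    """Illustrative helper: list the names of the roles defined in the system."""
    return list(Role.objects.values_list("name", flat=True))
```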
| 2022-02-01T13:56:18 |
||
pulp/pulpcore | 2,193 | pulp__pulpcore-2193 | [
"2192"
] | 53fa7503a542772bde995d2ccddbadb7fcf72816 | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -144,10 +144,6 @@ def _combine_content_mappings(map1, map2):
)
)
- # Export the connection between content and artifacts
- resource = ContentArtifactResource(repository_version)
- _write_export(export.tarfile, resource, dest_dir)
-
# content mapping is used by repo versions with subrepos (eg distribution tree repos)
content_mapping = {}
@@ -164,6 +160,10 @@ def _combine_content_mappings(map1, map2):
content_mapping, resource.content_mapping
)
+ # Export the connection between content and artifacts
+ resource = ContentArtifactResource(repository_version, content_mapping)
+ _write_export(export.tarfile, resource, dest_dir)
+
msg = (
f"Exporting content for {plugin_name} "
f"repository-version {repository_version.repository.name}/{repository_version.number}"
diff --git a/pulpcore/app/modelresource.py b/pulpcore/app/modelresource.py
--- a/pulpcore/app/modelresource.py
+++ b/pulpcore/app/modelresource.py
@@ -66,12 +66,19 @@ class ContentArtifactResource(QueryModelResource):
ContentArtifact is different from other import-export entities because it has no 'natural key'
other than a pulp_id, which aren't shared across instances. We do some magic to link up
ContentArtifacts to their matching (already-imported) Content.
+
+ Some plugin-models have sub-repositories. We take advantage of the content-mapping
+ machinery to account for those contentartifacts as well.
"""
artifact = fields.Field(
column_name="artifact", attribute="artifact", widget=ForeignKeyWidget(Artifact, "sha256")
)
+ def __init__(self, repo_version=None, content_mapping=None):
+ self.content_mapping = content_mapping
+ super().__init__(repo_version)
+
def before_import_row(self, row, **kwargs):
"""
Fixes the content-ptr of an incoming content-artifact row at import time.
@@ -92,9 +99,15 @@ def before_import_row(self, row, **kwargs):
row["content"] = str(linked_content.pulp_id)
def set_up_queryset(self):
- return ContentArtifact.objects.filter(content__in=self.repo_version.content).order_by(
- "content", "relative_path"
- )
+ vers_content = ContentArtifact.objects.filter(content__in=self.repo_version.content)
+ if self.content_mapping:
+ all_content = []
+ for content_ids in self.content_mapping.values():
+ all_content.extend(content_ids)
+ vers_content = vers_content.union(
+ ContentArtifact.objects.filter(content__in=all_content)
+ )
+ return vers_content.order_by("content", "relative_path")
class Meta:
model = ContentArtifact
| PulpImport/Export of kickstart repos with subrepos broken
See https://bugzilla.redhat.com/show_bug.cgi?id=2040870 for details.
| The import/export engine in pulpcore needs to expose a hook to plugins to let them describe contained/dependent/sub repos. Content, Artifact, and ContentArtifact processing currently make assumptions that are incomplete in the presence of subrepos, and our testing was insufficient to uncover the problem. | 2022-02-02T13:35:09 |
|
pulp/pulpcore | 2,198 | pulp__pulpcore-2198 | [
"2116"
] | ee0517b857ffd260a198d719f28648a6a0c8ab89 | diff --git a/pulpcore/app/access_policy.py b/pulpcore/app/access_policy.py
--- a/pulpcore/app/access_policy.py
+++ b/pulpcore/app/access_policy.py
@@ -1,8 +1,8 @@
from rest_access_policy import AccessPolicy
-from django.db.utils import ProgrammingError
+from pulpcore.app.loggers import deprecation_logger
from pulpcore.app.models import AccessPolicy as AccessPolicyModel
-from pulpcore.app.util import get_view_urlpattern
+from pulpcore.app.util import get_view_urlpattern, get_viewset_for_model
class AccessPolicyFromDB(AccessPolicy):
@@ -10,6 +10,57 @@ class AccessPolicyFromDB(AccessPolicy):
An AccessPolicy that loads statements from an `AccessPolicy` model instance.
"""
+ @staticmethod
+ def get_access_policy(view):
+ """
+ Retrieves the AccessPolicy from the DB or None if it doesn't exist.
+
+ Args:
+ view (subclass of rest_framework.view.APIView): The view or viewset to receive the
+ AccessPolicy model for.
+
+ Returns:
+ Either a `pulpcore.app.models.AccessPolicy` or None.
+ """
+ try:
+ urlpattern = get_view_urlpattern(view)
+ except AttributeError:
+ # The view does not define a `urlpattern()` method, e.g. it's not a NamedModelViewset
+ return None
+
+ try:
+ return AccessPolicyModel.objects.get(viewset_name=urlpattern)
+ except AccessPolicyModel.DoesNotExist:
+ return None
+
+ @classmethod
+ def handle_creation_hooks(cls, obj):
+ """
+ Handle the creation hooks defined in this policy for the passed in `obj`.
+
+ Args:
+ cls: The class this method belongs to.
+ obj: The model instance to have its creation hooks handled for.
+
+ """
+ viewset = get_viewset_for_model(obj)
+ access_policy = cls.get_access_policy(viewset)
+ if access_policy and access_policy.creation_hooks is not None:
+ for creation_hook in access_policy.creation_hooks:
+ function = obj.REGISTERED_CREATION_HOOKS.get(creation_hook["function"])
+ if function is not None:
+ kwargs = creation_hook.get("parameters") or {}
+ function(**kwargs)
+ else:
+ # Old interface deprecated for removal in 3.20
+ function = getattr(obj, creation_hook["function"])
+ deprecation_logger.warn(
+ "Calling unregistered creation hooks from the access policy is deprecated"
+ " and may be removed with pulpcore 3.20."
+ f"[hook={creation_hook}, viewset={access_policy.viewset_name}]."
+ )
+ function(creation_hook.get("permissions"), creation_hook.get("parameters"))
+
def get_policy_statements(self, request, view):
"""
Return the policy statements from an AccessPolicy instance matching the viewset name.
@@ -35,12 +86,9 @@ def get_policy_statements(self, request, view):
Returns:
The access policy statements in drf-access-policy policy structure.
"""
- try:
- viewset_name = get_view_urlpattern(view)
- access_policy_obj = AccessPolicyModel.objects.get(viewset_name=viewset_name)
- except (AccessPolicyModel.DoesNotExist, AttributeError, ProgrammingError):
+ if access_policy_obj := self.get_access_policy(view):
+ return access_policy_obj.statements
+ else:
default_statement = [{"action": "*", "principal": "admin", "effect": "allow"}]
policy = getattr(view, "DEFAULT_ACCESS_POLICY", {"statements": default_statement})
return policy["statements"]
- else:
- return access_policy_obj.statements
diff --git a/pulpcore/app/models/access_policy.py b/pulpcore/app/models/access_policy.py
--- a/pulpcore/app/models/access_policy.py
+++ b/pulpcore/app/models/access_policy.py
@@ -1,6 +1,3 @@
-from gettext import gettext as _
-
-from django.core.exceptions import ImproperlyConfigured
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group as BaseGroup
from django.db import models
@@ -10,7 +7,7 @@
from guardian.shortcuts import assign_perm, remove_perm
from pulpcore.app.models import BaseModel
-from pulpcore.app.loggers import deprecation_logger
+from pulpcore.app.util import get_viewset_for_model
def _ensure_iterable(obj):
@@ -76,33 +73,10 @@ def __init__(self, *args, **kwargs):
@hook("after_create")
def add_perms(self):
- try:
- access_policy = AccessPolicy.objects.get(viewset_name=self.ACCESS_POLICY_VIEWSET_NAME)
- except AttributeError:
- raise ImproperlyConfigured(
- _(
- "When using the `AutoAddObjPermsMixin`, plugin writers must declare an"
- "`ACCESS_POLICY_VIEWSET_NAME` class attribute."
- )
- )
- self._handle_creation_hooks(access_policy)
-
- def _handle_creation_hooks(self, access_policy):
- if access_policy.creation_hooks is not None:
- for creation_hook in access_policy.creation_hooks:
- function = self.REGISTERED_CREATION_HOOKS.get(creation_hook["function"])
- if function is not None:
- kwargs = creation_hook.get("parameters") or {}
- function(**kwargs)
- else:
- # Old interface deprecated for removal in 3.20
- function = getattr(self, creation_hook["function"])
- deprecation_logger.warn(
- "Calling unregistered creation hooks from the access policy is deprecated"
- " and may be removed with pulpcore 3.20 "
- f"[hook={creation_hook}, viewset={access_policy.viewset_name}]."
- )
- function(creation_hook.get("permissions"), creation_hook.get("parameters"))
+ viewset = get_viewset_for_model(self)
+ for permission_class in viewset.get_permissions(viewset):
+ if hasattr(permission_class, "handle_creation_hooks"):
+ permission_class.handle_creation_hooks(self)
def add_for_users(self, permissions, users):
"""
| There is no way for users to prevent creation_hooks from running without editing every AccessPolicy to remove them
## Problem
Users want to fully disable the default authorization of Pulp by modifying the DEFAULT_PERMISSION_CLASSES, but even if they do, the creation hooks will still be running, likely assigning unwanted permissions to various users and groups.
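For reference, "modifying the DEFAULT_PERMISSION_CLASSES" means overriding the DRF default in the settings, roughly like the following (the exact setting shape here is an assumption for illustration):

```python
# Settings sketch: swap the DB-backed access policy out globally. Before this
# change, creation_hooks would still run even with this override in place,
# because they were triggered from the model's after_create hook rather than
# from the configured permission class.
REST_FRAMEWORK = {
    "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAdminUser",),
}
```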
## Solution
Have the calling of creation_hooks happen inside an interface on `AccessPolicyFromDB`, e.g. named `handle_creation_hooks` or some other reasonable name. By delegating the running of creation hooks to that object, they will no longer run once the user replaces it.
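Once the hook-running is delegated through that interface, silencing it no longer requires editing every AccessPolicy row; a deployment can point the permission class at a subclass that overrides it. A sketch (the class and method names follow the patch above; the no-op subclass itself is illustrative):

```python
from pulpcore.app.access_policy import AccessPolicyFromDB

class AccessPolicyWithoutCreationHooks(AccessPolicyFromDB):
    """Illustrative: disable permission assignment on object creation."""

    @classmethod
    def handle_creation_hooks(cls, obj):
        # Deliberately a no-op: no creation hooks run, so no default
        # permissions or roles get assigned for newly created objects.
        return None
```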
| 2022-02-02T20:39:12 |
||
pulp/pulpcore | 2,205 | pulp__pulpcore-2205 | [
"2072"
] | db7ba7c92ca9c04cefa19ad6d6260d517ff4297c | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -3,8 +3,10 @@
import json
import tarfile
import tempfile
+import logging
from django.conf import settings
+from django.db.models.query import QuerySet
from pulpcore.app.apps import get_plugin_config
from pulpcore.app.models.progress import ProgressReport
@@ -14,7 +16,9 @@
ContentArtifactResource,
RepositoryResource,
)
-from pulpcore.constants import TASK_STATES
+from pulpcore.constants import TASK_STATES, EXPORT_BATCH_SIZE
+
+log = logging.getLogger(__name__)
def _write_export(the_tarfile, resource, dest_dir=None):
@@ -32,16 +36,37 @@ def _write_export(the_tarfile, resource, dest_dir=None):
dest_dir str(directory-path): directory 'inside' the tarfile to write to
"""
filename = "{}.{}.json".format(resource.__module__, type(resource).__name__)
- dataset = resource.export(resource.queryset)
if dest_dir:
dest_filename = os.path.join(dest_dir, filename)
else:
dest_filename = filename
- data = dataset.json.encode("utf8")
- info = tarfile.TarInfo(name=dest_filename)
- info.size = len(data)
- the_tarfile.addfile(info, io.BytesIO(data))
+ # If the resource is the type of QuerySet, then export the data in batch to save memory.
+ # Otherwise, export all data in oneshot. This is because the underlying libraries
+ # (json; django-import-export) do not support to stream the output to file, we export
+ # the data in batches to memory and concatenate the json lists via string manipulation.
+ with tempfile.NamedTemporaryFile(dir=os.getcwd(), mode="w", encoding="utf8") as temp_file:
+ if isinstance(resource.queryset, QuerySet):
+ temp_file.write("[")
+ total = resource.queryset.count()
+ for i in range(0, total, EXPORT_BATCH_SIZE):
+ current_batch = i + EXPORT_BATCH_SIZE
+ dataset = resource.export(resource.queryset[i:current_batch])
+ # Strip "[" and "]" as we are writing the dataset in batch
+ temp_file.write(dataset.json.lstrip("[").rstrip("]"))
+ if current_batch < total:
+ # Write "," if not last loop
+ temp_file.write(", ")
+ temp_file.write("]")
+ else:
+ dataset = resource.export(resource.queryset)
+ temp_file.write(dataset.json)
+
+ temp_file.flush()
+ info = tarfile.TarInfo(name=dest_filename)
+ info.size = os.path.getsize(temp_file.name)
+ with open(temp_file.name, "rb") as fd:
+ the_tarfile.addfile(info, fd)
def export_versions(export, version_info):
diff --git a/pulpcore/app/tasks/importer.py b/pulpcore/app/tasks/importer.py
--- a/pulpcore/app/tasks/importer.py
+++ b/pulpcore/app/tasks/importer.py
@@ -58,7 +58,7 @@ def _import_file(fpath, resource_class, do_raise=True):
try:
log.info(_("Importing file {}.").format(fpath))
with open(fpath, "r") as json_file:
- data = Dataset().load(json_file.read(), format="json")
+ data = Dataset().load(json_file, format="json")
resource = resource_class()
log.info(_("...Importing resource {}.").format(resource.__class__.__name__))
return resource.import_data(data, raise_errors=do_raise)
diff --git a/pulpcore/constants.py b/pulpcore/constants.py
--- a/pulpcore/constants.py
+++ b/pulpcore/constants.py
@@ -63,3 +63,5 @@
(FS_EXPORT_METHODS.HARDLINK, "Export by hardlinking"),
(FS_EXPORT_METHODS.SYMLINK, "Export by symlinking"),
)
+
+EXPORT_BATCH_SIZE = 2000
| System runs out of memory when exporting repository with many packages, such as rhel 7 server rpms repo
Author: @hao-yu (hyu)
Redmine Issue: 9645, https://pulp.plan.io/issues/9645
---
Clone from https://bugzilla.redhat.com/show_bug.cgi?id=2033847
Description of problem:
Pulp raises memory error when exporting repositories with large contents.
hammer content-export incremental version --content-view=rhel --version=2.0 --organization="Default Organization" --lifecycle-environment Library --chunk-size-gb 2
[....................................................................................................................................................................] [100%]
Error: Pulp task error
undefined method `first' for nil:NilClas
~~~ python
1. Traceback in /var/log/messages
pulpcore-worker-7: pulp [029c96b2-78a8-48ca-8605-9d59872f1be0]: pulpcore.tasking.pulpcore_worker:INFO: Task 508915bc-d4f5-49e3-aedb-d838bb0b2d14 failed () <========= Raised error without message which is normally MemoryError
...
pulpcore-worker-7: pulp [029c96b2-78a8-48ca-8605-9d59872f1be0]: pulpcore.tasking.pulpcore_worker:INFO: File "/usr/lib/python3.6/site-packages/pulpcore/tasking/pulpcore_worker.py", line 317, in _perform_task
pulpcore-worker-7: result = func(*args, **kwargs)
pulpcore-worker-7: File "/usr/lib/python3.6/site-packages/pulpcore/app/tasks/export.py", line 204, in pulp_export
pulpcore-worker-7: _do_export(pulp_exporter, tar, the_export)
pulpcore-worker-7: File "/usr/lib/python3.6/site-packages/pulpcore/app/tasks/export.py", line 313, in _do_export
pulpcore-worker-7: export_content(the_export, version)
pulpcore-worker-7: File "/usr/lib/python3.6/site-packages/pulpcore/app/importexport.py", line 134, in export_content
pulpcore-worker-7: _write_export(export.tarfile, resource, dest_dir)
pulpcore-worker-7: File "/usr/lib/python3.6/site-packages/pulpcore/app/importexport.py", line 41, in _write_export
pulpcore-worker-7: data = dataset.json.encode("utf8")
pulpcore-worker-7: File "/usr/lib/python3.6/site-packages/tablib/formats/__init__.py", line 62, in __get__
pulpcore-worker-7: return self._format.export_set(obj, **kwargs)
pulpcore-worker-7: File "/usr/lib/python3.6/site-packages/tablib/formats/_json.py", line 27, in export_set
pulpcore-worker-7: dataset.dict, default=serialize_objects_handler, ensure_ascii=False
pulpcore-worker-7: File "/usr/lib64/python3.6/json/__init__.py", line 238, in dumps
pulpcore-worker-7: **kw).encode(obj)
pulpcore-worker-7: File "/usr/lib64/python3.6/json/encoder.py", line 202, in encode
pulpcore-worker-7: return ''.join(chunks)
~~~
~~~ python
1. Add a logging to Pulp so that it will print the Exception type;
pulpcore-worker-6: pulp [da8e4925-df12-4fae-851e-f6352b5ae425]: pulpcore.tasking.pulpcore_worker:INFO: Task b246ce3c-a805-4a0f-8393-2fbf7cdd73a7 failed ()
pulpcore-worker-6: pulp [da8e4925-df12-4fae-851e-f6352b5ae425]: pulpcore.tasking.pulpcore_worker:INFO: File "/usr/lib/python3.6/site-packages/pulpcore/tasking/pulpcore_worker.py", line 317, in _perform_task
pulpcore-worker-6: result = func(*args, **kwargs)
pulpcore-worker-6: File "/usr/lib/python3.6/site-packages/pulpcore/app/tasks/export.py", line 204, in pulp_export
pulpcore-worker-6: _do_export(pulp_exporter, tar, the_export)
pulpcore-worker-6: File "/usr/lib/python3.6/site-packages/pulpcore/app/tasks/export.py", line 313, in _do_export
pulpcore-worker-6: export_content(the_export, version)
pulpcore-worker-6: File "/usr/lib/python3.6/site-packages/pulpcore/app/importexport.py", line 134, in export_content
pulpcore-worker-6: _write_export(export.tarfile, resource, dest_dir)
pulpcore-worker-6: File "/usr/lib/python3.6/site-packages/pulpcore/app/importexport.py", line 41, in _write_export
pulpcore-worker-6: data = dataset.json.encode("utf8")
pulpcore-worker-6: pulp [da8e4925-df12-4fae-851e-f6352b5ae425]: pulpcore.tasking.pulpcore_worker:INFO: <class 'MemoryError'> <=======
~~~
~~~ text
1. While the task is running, we can see that the memory consumption is very high
total used free shared buff/cache available
Mem: 33010716 19796992 221744 579536 12991980 12230452
Swap: 12058620 3917188 8141432
total used free shared buff/cache available
Mem: 33010716 20831688 331752 579536 11847276 11196116
Swap: 12058620 3916932 8141688
total used free shared buff/cache available
Mem: 33010716 21926716 224460 579540 10859540 10100732
Swap: 12058620 3916932 8141688
total used free shared buff/cache available
Mem: 33010716 23173152 219596 579540 9617968 8854304
Swap: 12058620 3916932 8141688
total used free shared buff/cache available
Mem: 33010716 24317448 238176 579660 8455092 7709900
Swap: 12058620 3916932 8141688
total used free shared buff/cache available
Mem: 33010716 25737804 230592 579612 7042320 6289292
Swap: 12058620 3916932 8141688
total used free shared buff/cache available
Mem: 33010716 27344320 226584 579612 5439812 4683004
Swap: 12058620 3916932 8141688
total used free shared buff/cache available
Mem: 33010716 30193200 234504 579612 2583012 1834164 <=============== Observed free memory dropped to 1.8GB from 20GB
Swap: 12058620 3916932 8141688
total used free shared buff/cache available
Mem: 33010716 24422544 7876732 334776 711440 7886420
Swap: 12058620 8729032 3329588 <===================== Free swap dropped 3G
total used free shared buff/cache available
Mem: 33010716 23213200 9030436 336976 767080 9067412
Swap: 12058620 8674504 3384116
total used free shared buff/cache available
Mem: 33010716 22871236 9357400 339484 782080 9400532
Swap: 12058620 8637896 3420724
total used free shared buff/cache available
Mem: 33010716 7216620 25034040 339872 760056 25053004 <======== Memory freed
Swap: 12058620 8431560 3627060
total used free shared buff/cache available
Mem: 33010716 7234448 25014092 339928 762176 25033868
Swap: 12058620 8414408 3644212
~~~
~~~ text
1. As we can see that Pulp worker consumed 22.3G of RAM when writing the resource file
top - 00:31:24 up 1 day, 12:52, 6 users, load average: 60.50, 26.92, 13.02
Tasks: 373 total, 1 running, 372 sleeping, 0 stopped, 0 zombie
KiB Mem : 99.8/33010716 [||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||]
KiB Swap: 52.1/12058620 [|||||||||||||||||||||||||||||||||||||||||||||||||||| ]
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
8442 pulp 20 0 23.9g 22.3g 636 D 17.2 70.7 13:38.63 pulpcore-worker <=============
And it consumes 10G of RAM when writing a smaller file (I think)
KiB Mem : 56.3/33010716 [|||||||||||||||||||||||||||||||||||||||||||||||||||||||| ]
KiB Swap: 34.2/12058620 [|||||||||||||||||||||||||||||||||| ]
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
8442 pulp 20 0 10.4g 10.0g 856 R 99.7 31.7 17:25.61 pulpcore-worker <=============
~~~
~~~ text
Steps to Reproduce:
1. Enable and sync the following 2 repos
Red_Hat_Enterprise_Linux_6_Server_-_Extended_Life_Cycle_Support_RPMs_x86_64
Red_Hat_Enterprise_Linux_7_Server_RPMs_x86_64_7Server
2. Create a content view and add the above repos
3. Publish the content view
4. Export the content view
hammer content-export incremental version --content-view=rhel --version=1.0 --organization="Default Organization" --lifecycle-environment Library --chunk-size-gb 2
Actual results:
Error: Pulp task error
undefined method `first' for nil:NilClass
Expected results:
Success
~~~
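The fix above avoids building the whole JSON dump in memory by exporting the queryset in fixed-size batches and stitching the per-batch JSON lists together in a temporary file. A stripped-down, self-contained sketch of that stitching, with a plain list standing in for the queryset/resource and an arbitrary batch size:

```python
import json
import os
import tempfile

BATCH_SIZE = 2000  # the patch introduces EXPORT_BATCH_SIZE = 2000

def write_batched_json(records, out_path, batch_size=BATCH_SIZE):
    """Write `records` as a single JSON list without holding the full dump in RAM."""
    total = len(records)
    with open(out_path, "w", encoding="utf8") as out:
        out.write("[")
        for start in range(0, total, batch_size):
            chunk = json.dumps(records[start:start + batch_size])
            # Strip the brackets of the per-batch list and join batches by hand,
            # mirroring what the patch does with the django-import-export dataset.
            out.write(chunk.lstrip("[").rstrip("]"))
            if start + batch_size < total:
                out.write(", ")
        out.write("]")

demo_path = os.path.join(tempfile.gettempdir(), "batched_export_demo.json")
write_batched_json([{"n": i} for i in range(5)], demo_path, batch_size=2)
with open(demo_path) as fd:
    assert json.load(fd) == [{"n": i} for i in range(5)]
```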
| From: pulpbot (pulpbot)
Date: 2021-12-19T12:07:07Z
---
PR: https://github.com/pulp/pulpcore/pull/1782
From: Johndavidson9 (Johndavidson9)
Date: 2022-01-10T12:30:27Z
---
I have the same issue. Any tips for solution?
https://get-9apps.com
https://cartoonhd.onl/index/ | 2022-02-08T16:27:48 |
|
pulp/pulpcore | 2,218 | pulp__pulpcore-2218 | [
"2157"
] | 6a2756e157f638b4de2da09b303e27eeb0dc5db2 | diff --git a/pulpcore/app/models/content.py b/pulpcore/app/models/content.py
--- a/pulpcore/app/models/content.py
+++ b/pulpcore/app/models/content.py
@@ -11,13 +11,13 @@
import gnupg
from itertools import chain
+from psycopg2 import sql
from django.conf import settings
from django.core import validators
from django.core.files.storage import default_storage
-from django.db import IntegrityError, models, transaction
+from django.db import IntegrityError, connection, models, transaction
from django.forms.models import model_to_dict
-from django.utils.timezone import now
from django_lifecycle import BEFORE_UPDATE, BEFORE_SAVE, hook
from pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS
@@ -101,10 +101,44 @@ def touch(self):
"""
Update the ``timestamp_of_interest`` on all objects of the query.
- We order-by-pk here to avoid deadlocking in high-concurrency
- environments.
- """
- return self.order_by("pk").update(timestamp_of_interest=now())
+ Postgres' UPDATE call doesn't support order-by. This can (and does) result in deadlocks in
+ high-concurrency environments, when using touch() on overlapping data sets. In order to
+ prevent this, we choose to SELECT FOR UPDATE with SKIP LOCKS == True, and only update
+ the rows that we were able to get locks on. Since a previously-locked-row implies
+ that updating that row's timestamp-of-interest is the responsibility of whoever currently
+ owns it, this results in correct data, while closing the window on deadlocks.
+ """
+ # Build the list of ids we need to work on, since we're going to be building a
+ # SQL-query "by hand" in a moment.
+ pulp_ids = [f"'{uuid}'" for uuid in self.values_list("pk", flat=True)]
+ if not pulp_ids:
+ return None
+ ids_str = ",".join(pulp_ids)
+ # timestamp_of_interest exists on core_content and core_artifact, not on the Detail tables
+ # If we are an instance-of Content or its subclasses, we want to update the Content table.
+ # Otherwise, use the table associated w/ the query.
+ db_table = (
+ Content._meta.db_table if issubclass(self.model, Content) else self.model._meta.db_table
+ )
+ cursor = connection.cursor()
+ with transaction.atomic():
+ # SQL-sanitizing the table-name here is certainly overkill - sql-injection here would
+ # require code calling touch() on a Model whose table-name-str was carefully chosen to
+ # be Bad - but, good habits...
+ stmt = sql.SQL(
+ "UPDATE {table_name} "
+ " SET timestamp_of_interest = NOW() "
+ " WHERE pulp_id IN ("
+ " SELECT pulp_id "
+ " FROM {table_name} "
+ " WHERE pulp_id in ({ids}) "
+ " ORDER BY pulp_id "
+ " FOR UPDATE "
+ " SKIP LOCKED)".format(table_name=sql.Identifier(db_table).string, ids=ids_str)
+ )
+ rslt = cursor.execute(stmt)
+ cursor.close()
+ return rslt
class QueryMixin:
| touch() path still has a potential deadlock window.
https://bugzilla.redhat.com/show_bug.cgi?id=2021406
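The raw SQL in the patch above boils down to: lock whatever rows you can, skip the ones someone else holds, and only update the rows you actually locked. For reference, a rough ORM-level sketch of the same idea; this is illustrative (the patch builds the statement by hand to control the subquery) and assumes a Django model with a `timestamp_of_interest` field:

```python
from django.db import transaction
from django.utils.timezone import now

def touch_skip_locked(queryset):
    """Update timestamp_of_interest only on rows we can lock right now."""
    with transaction.atomic():
        # skip_locked: rows currently held by another transaction are that
        # transaction's responsibility to touch, so we simply leave them alone.
        locked_pks = list(
            queryset.select_for_update(skip_locked=True)
            .order_by("pk")
            .values_list("pk", flat=True)
        )
        return queryset.model.objects.filter(pk__in=locked_pks).update(
            timestamp_of_interest=now()
        )
```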
| You can force the deadlock by:
- Starting 10 pulpcore-workers
- Start up 2 instances of "pulpcore-manager shell"
- In each instance, execute the following script, at the same time:
```
import _thread
from pulpcore.plugin.models import Content
def update_timestamp(index):
    print(">>>in update_timedstamp index {}".format(index))
    Content.objects.touch()
    print(">>>done {}".format(index))

for i in range(8):
    _thread.start_new_thread(update_timestamp, (i,))
```
You will see several/many of the threads throwing deadlock exceptions. | 2022-02-11T17:18:48 |
|
pulp/pulpcore | 2,219 | pulp__pulpcore-2219 | [
"2157"
] | be1b61a61b3fb25e5017396827f2646239d573b6 | diff --git a/pulpcore/app/models/content.py b/pulpcore/app/models/content.py
--- a/pulpcore/app/models/content.py
+++ b/pulpcore/app/models/content.py
@@ -13,11 +13,12 @@
from functools import lru_cache
from itertools import chain
+from psycopg2 import sql
from django.conf import settings
from django.core import validators
from django.core.files.storage import default_storage
-from django.db import IntegrityError, models, transaction
+from django.db import IntegrityError, connection, models, transaction
from django.forms.models import model_to_dict
from django.utils.timezone import now
from django_lifecycle import BEFORE_UPDATE, BEFORE_SAVE, hook
@@ -103,10 +104,44 @@ def touch(self):
"""
Update the ``timestamp_of_interest`` on all objects of the query.
- We order-by-pk here to avoid deadlocking in high-concurrency
- environments.
- """
- return self.order_by("pk").update(timestamp_of_interest=now())
+ Postgres' UPDATE call doesn't support order-by. This can (and does) result in deadlocks in
+ high-concurrency environments, when using touch() on overlapping data sets. In order to
+ prevent this, we choose to SELECT FOR UPDATE with SKIP LOCKS == True, and only update
+ the rows that we were able to get locks on. Since a previously-locked-row implies
+ that updating that row's timestamp-of-interest is the responsibility of whoever currently
+ owns it, this results in correct data, while closing the window on deadlocks.
+ """
+ # Build the list of ids we need to work on, since we're going to be building a
+ # SQL-query "by hand" in a moment.
+ pulp_ids = [f"'{uuid}'" for uuid in self.values_list("pk", flat=True)]
+ if not pulp_ids:
+ return None
+ ids_str = ",".join(pulp_ids)
+ # timestamp_of_interest exists on core_content and core_artifact, not on the Detail tables
+ # If we are an instance-of Content or its subclasses, we want to update the Content table.
+ # Otherwise, use the table associated w/ the query.
+ db_table = (
+ Content._meta.db_table if issubclass(self.model, Content) else self.model._meta.db_table
+ )
+ cursor = connection.cursor()
+ with transaction.atomic():
+ # SQL-sanitizing the table-name here is certainly overkill - sql-injection here would
+ # require code calling touch() on a Model whose table-name-str was carefully chosen to
+ # be Bad - but, good habits...
+ stmt = sql.SQL(
+ "UPDATE {table_name} "
+ " SET timestamp_of_interest = NOW() "
+ " WHERE pulp_id IN ("
+ " SELECT pulp_id "
+ " FROM {table_name} "
+ " WHERE pulp_id in ({ids}) "
+ " ORDER BY pulp_id "
+ " FOR UPDATE "
+ " SKIP LOCKED)".format(table_name=sql.Identifier(db_table).string, ids=ids_str)
+ )
+ rslt = cursor.execute(stmt)
+ cursor.close()
+ return rslt
class QueryMixin:
| touch() path still has a potential deadlock window.
https://bugzilla.redhat.com/show_bug.cgi?id=2021406
| You can force the deadlock by:
- Starting 10 pulpcore-workers
- Start up 2 instances of "pulpcore-manager shell"
- In each instance, execute the following script, at the same time:
```
import _thread
from pulpcore.plugin.models import Content
def update_timestamp(index):
    print(">>>in update_timedstamp index {}".format(index))
    Content.objects.touch()
    print(">>>done {}".format(index))

for i in range(8):
    _thread.start_new_thread(update_timestamp, (i,))
```
You will see several/many of the threads throwing deadlock exceptions. | 2022-02-14T12:18:28 |
|
pulp/pulpcore | 2,220 | pulp__pulpcore-2220 | [
"2157"
] | a6b8aee052fced9e380cc7423b9deb4d516c16f8 | diff --git a/pulpcore/app/models/content.py b/pulpcore/app/models/content.py
--- a/pulpcore/app/models/content.py
+++ b/pulpcore/app/models/content.py
@@ -14,11 +14,12 @@
from functools import lru_cache
from itertools import chain
+from psycopg2 import sql
from django.conf import settings
from django.core import validators
from django.core.files.storage import default_storage
-from django.db import IntegrityError, models, transaction
+from django.db import IntegrityError, connection, models, transaction
from django.forms.models import model_to_dict
from django.utils.timezone import now
from django_lifecycle import BEFORE_UPDATE, BEFORE_SAVE, hook
@@ -104,10 +105,44 @@ def touch(self):
"""
Update the ``timestamp_of_interest`` on all objects of the query.
- We order-by-pk here to avoid deadlocking in high-concurrency
- environments.
- """
- return self.order_by("pk").update(timestamp_of_interest=now())
+ Postgres' UPDATE call doesn't support order-by. This can (and does) result in deadlocks in
+ high-concurrency environments, when using touch() on overlapping data sets. In order to
+ prevent this, we choose to SELECT FOR UPDATE with SKIP LOCKS == True, and only update
+ the rows that we were able to get locks on. Since a previously-locked-row implies
+ that updating that row's timestamp-of-interest is the responsibility of whoever currently
+ owns it, this results in correct data, while closing the window on deadlocks.
+ """
+ # Build the list of ids we need to work on, since we're going to be building a
+ # SQL-query "by hand" in a moment.
+ pulp_ids = [f"'{uuid}'" for uuid in self.values_list("pk", flat=True)]
+ if not pulp_ids:
+ return None
+ ids_str = ",".join(pulp_ids)
+ # timestamp_of_interest exists on core_content and core_artifact, not on the Detail tables
+ # If we are an instance-of Content or its subclasses, we want to update the Content table.
+ # Otherwise, use the table associated w/ the query.
+ db_table = (
+ Content._meta.db_table if issubclass(self.model, Content) else self.model._meta.db_table
+ )
+ cursor = connection.cursor()
+ with transaction.atomic():
+ # SQL-sanitizing the table-name here is certainly overkill - sql-injection here would
+ # require code calling touch() on a Model whose table-name-str was carefully chosen to
+ # be Bad - but, good habits...
+ stmt = sql.SQL(
+ "UPDATE {table_name} "
+ " SET timestamp_of_interest = NOW() "
+ " WHERE pulp_id IN ("
+ " SELECT pulp_id "
+ " FROM {table_name} "
+ " WHERE pulp_id in ({ids}) "
+ " ORDER BY pulp_id "
+ " FOR UPDATE "
+ " SKIP LOCKED)".format(table_name=sql.Identifier(db_table).string, ids=ids_str)
+ )
+ rslt = cursor.execute(stmt)
+ cursor.close()
+ return rslt
class QueryMixin:
| touch() path still has a potential deadlock window.
https://bugzilla.redhat.com/show_bug.cgi?id=2021406
| You can force the deadlock by:
- Starting 10 pulpcore-workers
- Start up 2 instances of "pulpcore-manager shell"
- In each instance, execute the following script, at the same time:
```
import _thread
from pulpcore.plugin.models import Content
def update_timestamp(index):
    print(">>>in update_timedstamp index {}".format(index))
    Content.objects.touch()
    print(">>>done {}".format(index))

for i in range(8):
    _thread.start_new_thread(update_timestamp, (i,))
```
You will see several/many of the threads throwing deadlock exceptions. | 2022-02-14T12:18:29 |
|
pulp/pulpcore | 2,221 | pulp__pulpcore-2221 | [
"2157"
] | f09b4bd46f05208de261b917118354bc01543dec | diff --git a/pulpcore/app/models/content.py b/pulpcore/app/models/content.py
--- a/pulpcore/app/models/content.py
+++ b/pulpcore/app/models/content.py
@@ -12,11 +12,12 @@
import gnupg
from itertools import chain
+from psycopg2 import sql
from django.conf import settings
from django.core import validators
from django.core.files.storage import default_storage
-from django.db import IntegrityError, models, transaction
+from django.db import IntegrityError, connection, models, transaction
from django.forms.models import model_to_dict
from django.utils.timezone import now
from django_lifecycle import BEFORE_UPDATE, BEFORE_SAVE, hook
@@ -100,8 +101,45 @@ class BulkTouchQuerySet(models.QuerySet):
def touch(self):
"""
Update the ``timestamp_of_interest`` on all objects of the query.
- """
- return self.update(timestamp_of_interest=now())
+
+ Postgres' UPDATE call doesn't support order-by. This can (and does) result in deadlocks in
+ high-concurrency environments, when using touch() on overlapping data sets. In order to
+ prevent this, we choose to SELECT FOR UPDATE with SKIP LOCKS == True, and only update
+ the rows that we were able to get locks on. Since a previously-locked-row implies
+ that updating that row's timestamp-of-interest is the responsibility of whoever currently
+ owns it, this results in correct data, while closing the window on deadlocks.
+ """
+ # Build the list of ids we need to work on, since we're going to be building a
+ # SQL-query "by hand" in a moment.
+ pulp_ids = [f"'{uuid}'" for uuid in self.values_list("pk", flat=True)]
+ if not pulp_ids:
+ return None
+ ids_str = ",".join(pulp_ids)
+ # timestamp_of_interest exists on core_content and core_artifact, not on the Detail tables
+ # If we are an instance-of Content or its subclasses, we want to update the Content table.
+ # Otherwise, use the table associated w/ the query.
+ db_table = (
+ Content._meta.db_table if issubclass(self.model, Content) else self.model._meta.db_table
+ )
+ cursor = connection.cursor()
+ with transaction.atomic():
+ # SQL-sanitizing the table-name here is certainly overkill - sql-injection here would
+ # require code calling touch() on a Model whose table-name-str was carefully chosen to
+ # be Bad - but, good habits...
+ stmt = sql.SQL(
+ "UPDATE {table_name} "
+ " SET timestamp_of_interest = NOW() "
+ " WHERE pulp_id IN ("
+ " SELECT pulp_id "
+ " FROM {table_name} "
+ " WHERE pulp_id in ({ids}) "
+ " ORDER BY pulp_id "
+ " FOR UPDATE "
+ " SKIP LOCKED)".format(table_name=sql.Identifier(db_table).string, ids=ids_str)
+ )
+ rslt = cursor.execute(stmt)
+ cursor.close()
+ return rslt
class QueryMixin:
| touch() path still has a potential deadlock window.
https://bugzilla.redhat.com/show_bug.cgi?id=2021406
| You can force the deadlock by:
- Start 10 pulpcore-workers
- Start 2 instances of "pulpcore-manager shell"
- In each instance, execute the following script at the same time:
```
import _thread
from pulpcore.plugin.models import Content


def update_timestamp(index):
    print(">>>in update_timestamp index {}".format(index))
    Content.objects.touch()
    print(">>>done {}".format(index))


# Fire off 8 concurrent touch() calls over the full Content table.
for i in range(8):
    _thread.start_new_thread(update_timestamp, (i,))
```
You will see several/many of the threads throwing deadlock exceptions. | 2022-02-14T12:39:07 |
|
pulp/pulpcore | 2,230 | pulp__pulpcore-2230 | [
"2229"
] | 9a8f66194dbac1fc3561bd3ee01c35290fb19c2e | diff --git a/pulpcore/app/models/content.py b/pulpcore/app/models/content.py
--- a/pulpcore/app/models/content.py
+++ b/pulpcore/app/models/content.py
@@ -14,12 +14,11 @@
from functools import lru_cache
from itertools import chain
-from psycopg2 import sql
from django.conf import settings
from django.core import validators
from django.core.files.storage import default_storage
-from django.db import IntegrityError, connection, models, transaction
+from django.db import IntegrityError, models, transaction
from django.forms.models import model_to_dict
from django.utils.timezone import now
from django_lifecycle import BEFORE_UPDATE, BEFORE_SAVE, hook
@@ -112,37 +111,9 @@ def touch(self):
that updating that row's timestamp-of-interest is the responsibility of whoever currently
owns it, this results in correct data, while closing the window on deadlocks.
"""
- # Build the list of ids we need to work on, since we're going to be building a
- # SQL-query "by hand" in a moment.
- pulp_ids = [f"'{uuid}'" for uuid in self.values_list("pk", flat=True)]
- if not pulp_ids:
- return None
- ids_str = ",".join(pulp_ids)
- # timestamp_of_interest exists on core_content and core_artifact, not on the Detail tables
- # If we are an instance-of Content or its subclasses, we want to update the Content table.
- # Otherwise, use the table associated w/ the query.
- db_table = (
- Content._meta.db_table if issubclass(self.model, Content) else self.model._meta.db_table
- )
- cursor = connection.cursor()
with transaction.atomic():
- # SQL-sanitizing the table-name here is certainly overkill - sql-injection here would
- # require code calling touch() on a Model whose table-name-str was carefully chosen to
- # be Bad - but, good habits...
- stmt = sql.SQL(
- "UPDATE {table_name} "
- " SET timestamp_of_interest = NOW() "
- " WHERE pulp_id IN ("
- " SELECT pulp_id "
- " FROM {table_name} "
- " WHERE pulp_id in ({ids}) "
- " ORDER BY pulp_id "
- " FOR UPDATE "
- " SKIP LOCKED)".format(table_name=sql.Identifier(db_table).string, ids=ids_str)
- )
- rslt = cursor.execute(stmt)
- cursor.close()
- return rslt
+ sub_q = self.order_by("pk").select_for_update(skip_locked=True)
+ return self.filter(pk__in=sub_q).update(timestamp_of_interest=now())
class QueryMixin:
| Update touch() to not have to use raw sql (!!)
**Version**
main
**Describe the bug**
In response to "why doesn't select_for_update() do what we expect", this response from Django team:
https://code.djangoproject.com/ticket/33516#comment:1
showed The Way to invoke it successfully.
Let's get rid of raw-sql-execution in the touch() method.
**Additional context**
https://bugzilla.redhat.com/show_bug.cgi?id=2021406
https://github.com/pulp/pulpcore/issues/2157
| 2022-02-16T15:27:51 |
||
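With the ORM-based touch() in the patch above, the reproduction script from the earlier rows should run to completion without deadlock errors. A tidied, joinable variant of that same script (an illustrative sketch; like the original it assumes it is pasted into a `pulpcore-manager shell`):
```python
import threading

from pulpcore.plugin.models import Content


def update_timestamp(index):
    print(">>> in update_timestamp index {}".format(index))
    Content.objects.touch()  # concurrent calls skip each other's locked rows
    print(">>> done {}".format(index))


# threading.Thread instead of _thread so the calls can be joined and observed.
threads = [threading.Thread(target=update_timestamp, args=(i,)) for i in range(8)]
for t in threads:
    t.start()
for t in threads:
    t.join()
```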
pulp/pulpcore | 2,231 | pulp__pulpcore-2231 | [
"2192"
] | 04099f19f304185ca8d60c8b71befe01d8904cfe | diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -144,10 +144,6 @@ def _combine_content_mappings(map1, map2):
)
)
- # Export the connection between content and artifacts
- resource = ContentArtifactResource(repository_version)
- _write_export(export.tarfile, resource, dest_dir)
-
# content mapping is used by repo versions with subrepos (eg distribution tree repos)
content_mapping = {}
@@ -164,6 +160,10 @@ def _combine_content_mappings(map1, map2):
content_mapping, resource.content_mapping
)
+ # Export the connection between content and artifacts
+ resource = ContentArtifactResource(repository_version, content_mapping)
+ _write_export(export.tarfile, resource, dest_dir)
+
msg = (
f"Exporting content for {plugin_name} "
f"repository-version {repository_version.repository.name}/{repository_version.number}"
diff --git a/pulpcore/app/modelresource.py b/pulpcore/app/modelresource.py
--- a/pulpcore/app/modelresource.py
+++ b/pulpcore/app/modelresource.py
@@ -66,12 +66,19 @@ class ContentArtifactResource(QueryModelResource):
ContentArtifact is different from other import-export entities because it has no 'natural key'
other than a pulp_id, which aren't shared across instances. We do some magic to link up
ContentArtifacts to their matching (already-imported) Content.
+
+ Some plugin-models have sub-repositories. We take advantage of the content-mapping
+ machinery to account for those contentartifacts as well.
"""
artifact = fields.Field(
column_name="artifact", attribute="artifact", widget=ForeignKeyWidget(Artifact, "sha256")
)
+ def __init__(self, repo_version=None, content_mapping=None):
+ self.content_mapping = content_mapping
+ super().__init__(repo_version)
+
def before_import_row(self, row, **kwargs):
"""
Fixes the content-ptr of an incoming content-artifact row at import time.
@@ -92,9 +99,15 @@ def before_import_row(self, row, **kwargs):
row["content"] = str(linked_content.pulp_id)
def set_up_queryset(self):
- return ContentArtifact.objects.filter(content__in=self.repo_version.content).order_by(
- "content", "relative_path"
- )
+ vers_content = ContentArtifact.objects.filter(content__in=self.repo_version.content)
+ if self.content_mapping:
+ all_content = []
+ for content_ids in self.content_mapping.values():
+ all_content.extend(content_ids)
+ vers_content = vers_content.union(
+ ContentArtifact.objects.filter(content__in=all_content)
+ )
+ return vers_content.order_by("content", "relative_path")
class Meta:
model = ContentArtifact
| PulpImport/Export of kickstart repos with subrepos broken
See https://bugzilla.redhat.com/show_bug.cgi?id=2040870 for details.
| The import/export engine in pulpcore needs to expose a hook to plugins to let them describe contained/dependent/sub repos. Content, Artifact, and ContentArtifact processing currently make assumptions that are incomplete in the presence of subrepos, and our testing was insufficient to uncover the problem. | 2022-02-16T16:37:04 |
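The fix above leans on the `content_mapping` gathered from each plugin's exported resources; its exact contents are plugin-defined, but the exporter only relies on `.values()` yielding iterables of content pks, which it flattens before building the ContentArtifact queryset. A purely illustrative example of that shape (keys and ids are made up, not real pulpcore data):
```python
# Hypothetical mapping from sub-repository identifiers to the pks of content
# living in those subrepos; real values come from each resource.content_mapping.
content_mapping = {
    "addon-subrepo": ["11111111-1111-1111-1111-111111111111"],
    "variant-subrepo": [
        "22222222-2222-2222-2222-222222222222",
        "33333333-3333-3333-3333-333333333333",
    ],
}

# Mirrors ContentArtifactResource.set_up_queryset() in the patch above:
all_content = []
for content_ids in content_mapping.values():
    all_content.extend(content_ids)
```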